repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
zoebchhatriwala/MachineLearningBasics | MachineLearning#5/main.py | 1 | 1321 | from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
from scipy.spatial import distance
def euc(a, b):
    """Return the straight-line (Euclidean) distance between points a and b."""
    separation = distance.euclidean(a, b)
    return separation
# New Classifier
class NewClassifier:
    """A minimal 1-nearest-neighbour classifier.

    ``fit`` memorises the training set; ``predict`` labels each test row
    with the label of its closest (Euclidean distance) training row.
    """

    def __init__(self):
        # Per-instance storage. The original stored these as class-level
        # lists (``x_train = []``), which are shared between all instances
        # -- the classic mutable-class-attribute bug.
        self.x_train = []
        self.y_train = []

    def fit(self, x_train, y_train):
        """Memorise the training features and labels."""
        self.x_train = x_train
        self.y_train = y_train

    def predict(self, x_test):
        """Return a list with the predicted label for every row of x_test."""
        return [self.closest(row) for row in x_test]

    def closest(self, row):
        """Return the label of the training row nearest to ``row``."""
        best_dist = distance.euclidean(row, self.x_train[0])
        best_index = 0
        for i in range(1, len(self.x_train)):
            dist = distance.euclidean(row, self.x_train[i])
            if dist < best_dist:
                best_dist = dist
                best_index = i
        return self.y_train[best_index]
# Main Method
def main():
    """Train the nearest-neighbour classifier on iris and report accuracy."""
    iris = datasets.load_iris()
    # Hold out half of the samples for testing.
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=.5)
    classifier = NewClassifier()
    classifier.fit(x_train, y_train)
    predicted = classifier.predict(x_test)
    # Prediction accuracy
    accuracy = accuracy_score(y_test, predicted) * 100
    print("Accuracy: " + str(accuracy) + "%")
# Run main
main() | gpl-3.0 |
sniemi/SamPy | sandbox/src1/examples/major_minor_demo1.py | 4 | 1660 | #!/usr/bin/env python
"""
Demonstrate how to use major and minor tickers.
The two relevant userland classes are Locators and Formatters.
Locators determine where the ticks are and formatters control the
formatting of ticks.
Minor ticks are off by default (NullLocator and NullFormatter). You
can turn minor ticks on w/o labels by setting the minor locator. You
can also turn labeling on for the minor ticker by setting the minor
formatter
Make a plot with major ticks that are multiples of 20 and minor ticks
that are multiples of 5. Label major ticks with %d formatting but
don't label minor ticks
The MultipleLocator ticker class is used to place ticks on multiples of
some base. The FormatStrFormatter uses a string format string (eg
'%d' or '%1.2f' or '%1.1f cm' ) to format the tick
The pylab interface grid command changes the grid settings of the
major ticks of the x and y axis together. If you want to control the
grid of the minor ticks for a given axis, use for example
ax.xaxis.grid(True, which='minor')
Note, you should not use the same locator between different Axis
because the locator stores references to the Axis data and view limits
"""
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# Major ticks: every multiple of 20, labelled with integer ('%d') formatting.
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d')
# Minor ticks: every multiple of 5 (left unlabelled below).
minorLocator = MultipleLocator(5)

# Demo data: a slowly damped sine wave.
t = arange(0.0, 100.0, 0.1)
s = sin(0.1*pi*t)*exp(-t*0.01)

ax = subplot(111)
plot(t,s)

ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_formatter(majorFormatter)

#for the minor ticks, use no labels; default NullFormatter
ax.xaxis.set_minor_locator(minorLocator)

show()
| bsd-2-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/io/parser/test_parsers.py | 7 | 3122 | # -*- coding: utf-8 -*-
import os
import pandas.util.testing as tm
from pandas import read_csv, read_table
from pandas.core.common import AbstractMethodError
from .common import ParserTests
from .header import HeaderTests
from .comment import CommentTests
from .dialect import DialectTests
from .quoting import QuotingTests
from .usecols import UsecolsTests
from .skiprows import SkipRowsTests
from .index_col import IndexColTests
from .na_values import NAvaluesTests
from .converters import ConverterTests
from .c_parser_only import CParserTests
from .parse_dates import ParseDatesTests
from .compression import CompressionTests
from .multithread import MultithreadTests
from .python_parser_only import PythonParserTests
from .dtypes import DtypeTests
class BaseParser(CommentTests, CompressionTests,
                 ConverterTests, DialectTests,
                 HeaderTests, IndexColTests,
                 MultithreadTests, NAvaluesTests,
                 ParseDatesTests, ParserTests,
                 SkipRowsTests, UsecolsTests,
                 QuotingTests, DtypeTests):
    """Shared base for the engine-specific parser test classes.

    Aggregates all of the feature test mixins; concrete subclasses bind
    ``read_csv``/``read_table`` to a specific parsing engine.
    """

    def read_csv(self, *args, **kwargs):
        # Must be provided by the engine-specific subclass.
        raise NotImplementedError

    def read_table(self, *args, **kwargs):
        # Must be provided by the engine-specific subclass.
        raise NotImplementedError

    def float_precision_choices(self):
        # Subclasses shadow this with a list of the float_precision
        # options supported by their engine.
        raise AbstractMethodError(self)

    def setup_method(self, method):
        # Paths to the shared fixture files used by the mixin tests.
        self.dirpath = tm.get_data_path()
        self.csv1 = os.path.join(self.dirpath, 'test1.csv')
        self.csv2 = os.path.join(self.dirpath, 'test2.csv')
        self.xls1 = os.path.join(self.dirpath, 'test.xls')
        self.csv_shiftjs = os.path.join(self.dirpath, 'sauron.SHIFT_JIS.csv')
class TestCParserHighMemory(BaseParser, CParserTests):
    """Run the parser test suite against the C engine with low_memory=False."""

    engine = 'c'
    low_memory = False
    float_precision_choices = [None, 'high', 'round_trip']

    def read_csv(self, *args, **kwds):
        # Merge in the engine options without mutating the caller's dict.
        options = dict(kwds, engine=self.engine, low_memory=self.low_memory)
        return read_csv(*args, **options)

    def read_table(self, *args, **kwds):
        options = dict(kwds, engine=self.engine, low_memory=self.low_memory)
        return read_table(*args, **options)
class TestCParserLowMemory(BaseParser, CParserTests):
    """Run the parser test suite against the C engine with low_memory=True."""

    engine = 'c'
    low_memory = True
    float_precision_choices = [None, 'high', 'round_trip']

    def read_csv(self, *args, **kwds):
        """read_csv bound to the C engine in low-memory (chunked) mode."""
        kwds = kwds.copy()
        kwds['engine'] = self.engine
        kwds['low_memory'] = self.low_memory
        return read_csv(*args, **kwds)

    def read_table(self, *args, **kwds):
        """read_table bound to the C engine in low-memory (chunked) mode."""
        kwds = kwds.copy()
        kwds['engine'] = self.engine
        # Fix: use the class attribute rather than a hard-coded True so
        # this stays in sync with read_csv and with any subclass that
        # overrides low_memory.
        kwds['low_memory'] = self.low_memory
        return read_table(*args, **kwds)
class TestPythonParser(BaseParser, PythonParserTests):
    """Run the parser test suite against the pure-Python engine."""

    engine = 'python'
    float_precision_choices = [None]

    def read_csv(self, *args, **kwds):
        # Merge in the engine option without mutating the caller's dict.
        options = dict(kwds, engine=self.engine)
        return read_csv(*args, **options)

    def read_table(self, *args, **kwds):
        options = dict(kwds, engine=self.engine)
        return read_table(*args, **options)
| mit |
Solthis/Fugen-2.0 | template_processor/base_template_processor.py | 1 | 8312 | # coding: utf-8
# Copyright 2017 Solthis.
#
# This file is part of Fugen 2.0.
#
# Fugen 2.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fugen 2.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fugen 2.0. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import json
import traceback
from collections import OrderedDict
import numpy as np
import pandas as pd
from PySide.QtCore import QObject, Signal
from data.indicators import INDICATORS_REGISTRY, ArvStartedPatients,\
PatientIndicator
import utils
class BaseTemplateProcessor(QObject):
    """
    Abstract base class for report template processing.

    Subclasses describe a report template (cell contents, dimensions,
    styles); this class evaluates every cell's indicator against the
    Fuchia database between two dates, and emits Qt signals for progress
    and errors so it can be driven from a worker thread.
    """
    # Emitted with the percentage of cells processed so far.
    update_progress = Signal(int)
    # Emitted with a formatted traceback string when run() fails.
    error = Signal(str)

    def __init__(self, fuchia_database):
        QObject.__init__(self)
        self._fuchia_database = fuchia_database
        # Cached "patients under ARV" indicator, shared by all cells.
        self._arv_started = ArvStartedPatients(self._fuchia_database)
        self._report_widget = None
        self._start_date = None
        self._end_date = None
        # Patient codes of the last run, keyed by indicator then category.
        self.last_values = OrderedDict()
        # Cell values of the last run, keyed by (row, column).
        self.last_template_values = {}

    @property
    def fuchia_database(self):
        return self._fuchia_database

    @fuchia_database.setter
    def fuchia_database(self, value):
        self._fuchia_database = value
        # The cached ARV indicator is bound to the database, so rebuild it.
        self._arv_started = ArvStartedPatients(self._fuchia_database)

    def get_cell_content(self, i, j):
        """Return the raw template content of cell (i, j). Abstract."""
        raise NotImplementedError()

    def get_cell_members(self, i, j):
        """Parse cell (i, j) into a dict, or None for non-indicator cells.

        A cell holding an indicator looks like a Python dict literal
        ("{key: ..., gender: ...}"); the re.sub chain below coerces that
        loose syntax (unquoted/single-quoted keys and values) into strict
        JSON before json.loads.
        """
        regex = "^\{(.+?)\}$"
        content = self.get_cell_content(i, j)
        if pd.isnull(content):
            return None
        res = re.match(regex, content)
        if not res:
            # Not a "{...}" cell -> not an indicator.
            return None
        content = re.sub(r"{\s*'?(\w)", r'{"\1', content)
        content = re.sub(r",\s*'?(\w)", r',"\1', content)
        content = re.sub(r"(\w)'?\s*:", r'\1":', content)
        content = re.sub(r":\s*'(\w+)'\s*([,}])", r':"\1"\2', content)
        return json.loads(content)

    def get_cell_indicator(self, i, j):
        """Instantiate the indicator named by cell (i, j), or None."""
        cell_members = self.get_cell_members(i, j)
        if not cell_members:
            return None
        key = cell_members['key']
        indicator = INDICATORS_REGISTRY[key]['class'](self.fuchia_database)
        return indicator

    def get_cell_parameters(self, i, j):
        """Return the indicator parameters of cell (i, j) (all members
        except 'key'), or None for non-indicator cells."""
        cell_members = self.get_cell_members(i, j)
        if not cell_members:
            return None
        parameters = cell_members
        parameters.pop('key')
        return parameters

    def get_cell_parameters_key(self, i, j):
        """Build a human-readable category label (gender/age range) for
        cell (i, j); used as a sub-key in ``last_values``."""
        params = self.get_cell_parameters(i, j)
        if params is None:
            return None
        gender = params.get('gender', None)
        age_min = params.get('age_min', None)
        age_max = params.get('age_max', None)
        age_ns = params.get('age_is_null', None)
        a = utils.get_gender_str(gender)
        b = utils.get_age_range_str(age_min, age_max, age_ns)
        if a is None and b is None:
            # French for "all patients" -- user-facing label.
            return "Tous les patients"
        return ' '.join([i for i in [a, b] if i is not None])

    def get_cell_value(self, start_date, end_date, i, j):
        """Compute the indicator value of cell (i, j) over the period."""
        indicator = self.get_cell_indicator(i, j)
        kwargs = self.get_cell_parameters(i, j)
        if not indicator:
            return None
        kwargs['start_date'] = start_date
        # Patient indicators flagged under_arv are restricted to the
        # patients who started ARV treatment by end_date.
        if isinstance(indicator, PatientIndicator) and indicator.under_arv():
            arv = self._arv_started.get_filtered_by_category(
                end_date,
                **kwargs
            )
            kwargs['post_filter_index'] = arv.index
        value = indicator.get_value(end_date, **kwargs)
        return value

    def get_cell_patient_codes(self, start_date, end_date, i, j):
        """Return the patient codes contributing to cell (i, j), or None
        for non-indicator cells."""
        indicator = self.get_cell_indicator(i, j)
        kwargs = self.get_cell_parameters(i, j)
        if not indicator:
            return None
        kwargs['start_date'] = start_date
        # Same ARV restriction as in get_cell_value.
        if isinstance(indicator, PatientIndicator) and indicator.under_arv():
            arv = self._arv_started.get_filtered_by_category(
                end_date,
                **kwargs
            )
            kwargs['post_filter_index'] = arv.index
        patients = indicator.get_filtered_by_category(end_date, **kwargs)
        return patients['patient_code']

    def get_cell_values(self, start_date, end_date):
        """Evaluate every cell of the template over the period.

        Returns an object ndarray of shape (rows, columns); NaN marks
        cells without an indicator. Also fills ``last_values`` /
        ``last_template_values`` and emits coarse progress updates.
        """
        self.last_values = OrderedDict()
        self.last_template_values = {}
        matrix = np.empty(
            (self.get_row_number(), self.get_column_number()),
            dtype=object
        )
        matrix[:] = np.NAN
        # Per-indicator timing accumulator (profiling leftover, see the
        # commented-out pprint block at the end).
        profile = {}
        total = 0
        progress = 0
        step = 10
        curr = 0
        import time
        for i in range(matrix.shape[0]):
            for j in range(matrix.shape[1]):
                t = time.time()
                indicator = self.get_cell_indicator(i, j)
                matrix[i, j] = self.get_cell_value(start_date, end_date, i, j)
                if indicator is not None:
                    self.last_template_values[(i, j)] = matrix[i, j]
                    if isinstance(indicator, PatientIndicator):
                        # Remember which patients contributed, grouped by
                        # indicator key then category label.
                        params_key = self.get_cell_parameters_key(i, j)
                        patient_codes = self.get_cell_patient_codes(
                            start_date, end_date,
                            i, j
                        )
                        i_k = indicator.get_key()
                        if i_k not in self.last_values:
                            self.last_values[i_k] = OrderedDict()
                        self.last_values[i_k][params_key] = patient_codes
                tt = time.time() - t
                if indicator not in profile:
                    profile[indicator] = 0
                profile[indicator] += tt
                total += tt
                curr += 1
                if curr == step:
                    progress += step
                    # NOTE(review): resetting to 1 (not 0) makes every
                    # progress window after the first one cell shorter --
                    # confirm whether this was intended.
                    curr = 1
                    self.update_progress.emit(progress)
        # import pprint
        # pp = pprint.PrettyPrinter(indent=4)
        # pp.pprint(profile)
        # print("Total : {:2f}".format(total))
        return matrix

    def set_run_params(self, report_widget, start_date, end_date):
        """Store the target widget and period before calling run()."""
        self._report_widget = report_widget
        self._start_date = start_date
        self._end_date = end_date

    def run(self):
        """Compute all cell values and push them to the report widget.

        Designed as a Qt worker entry point: any exception is caught and
        re-emitted as a formatted traceback through the ``error`` signal.
        Silently does nothing if set_run_params() was not called first.
        """
        try:
            b1 = self._report_widget is not None
            b2 = self._start_date is not None
            b3 = self._end_date is not None
            b = b1 and b2 and b3
            if not b:
                return
            values = self.get_cell_values(
                self._start_date,
                self._end_date
            )
            self._report_widget.set_values(values)
        except:
            excType, excValue, tracebackobj = sys.exc_info()
            tb_list = traceback.format_exception(excType,
                                                 excValue,
                                                 tracebackobj)
            tb_str = ''.join(tb_list)
            self.error.emit(tb_str)

    def get_column_number(self):
        """Number of template columns. Abstract."""
        raise NotImplementedError()

    def get_row_number(self):
        """Number of template rows. Abstract."""
        raise NotImplementedError()

    def get_merged_cell_ranges(self):
        """
        :return: A list containing the ranges of merged cells. A range is
        composed with two couples, describing the up left cell and the down
        right cell. e.g. ((1, 1), (1, 2)).
        """
        return []

    def get_cell_style(self, i, j):
        """
        :return: If a style is available for a given cell, return a dict,
        with style information about font, fill and stroke.
        """
        return None

    def get_column_width(self, j):
        """Optional column width override; None means default width."""
        return None

    def get_row_height(self, i):
        """Optional row height override; None means default height."""
        return None

    def export_to_excel(self, destination_path):
        """Write the report to an Excel file. Abstract."""
        raise NotImplementedError()
| gpl-3.0 |
LamaHamadeh/Microsoft-DAT210x | Module 5/assignment1.py | 1 | 3075 | #author Lama Hamadeh
# TOOD: Import whatever needs to be imported to make this work
#
# .. your code here ..
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
matplotlib.style.use('ggplot') # Look Pretty
#
# TODO: To procure the dataset, follow these steps:
# 1. Navigate to: https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2
# 2. In the 'Primary Type' column, click on the 'Menu' button next to the info button,
# and select 'Filter This Column'. It might take a second for the filter option to
# show up, since it has to load the entire list first.
# 3. Scroll down to 'GAMBLING'
# 4. Click the light blue 'Export' button next to the 'Filter' button, and select 'Download As CSV'
#
# TODO: Load your dataset after importing Pandas
#
# .. your code here ..
# Load the exported "GAMBLING" crime incidents; the first CSV column is
# the record ID and becomes the index.
df1=pd.read_csv('/Users/lamahamadeh/Downloads/Modules/DAT210x-master/Module5/Datasets/Crimes_-_2001_to_present.csv', index_col=0)

#
# TODO: Drop any ROWs with nans in them
#
# .. your code here ..
# Drop every incident record missing any field (e.g. unknown location).
df1.dropna(axis = 0, how = 'any', inplace = True)

#
# TODO: Print out the dtypes of your dset
#
# .. your code here ..
print(df1.dtypes)

#
# Coerce the 'Date' feature (which is currently a string object) into real date,
# and confirm by re-printing the dtypes. NOTE: This is a slow process...
#
# .. your code here ..
df1.Date = pd.to_datetime(df1.Date) # Converts the entries in the 'Date' column to datetime64[ns]
print (df1.dtypes)
def doKMeans(df):
    """Scatter-plot the crime incidents and overlay 7 K-Means centroids.

    Only the Longitude/Latitude columns of ``df`` are used; the remaining
    columns are not applicable for a geographic clustering.
    """
    # Keep just the two spatial features. Longitude = x, Latitude = y.
    df = df[['Longitude', 'Latitude']]

    # Plot the raw incident locations.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x=df.Longitude, y=df.Latitude, marker='.', alpha=0.5, s=30)

    # Cluster the incidents into seven groups.
    kmeans_model = KMeans(n_clusters=7, init='random', n_init=60,
                          max_iter=360, random_state=43)
    kmeans_model.fit(df)
    # Fix: the original also called kmeans_model.predict(df) here, but the
    # resulting labels were never used -- that redundant pass is removed.

    # Print and plot the centroids on top of the scatter.
    centroids = kmeans_model.cluster_centers_
    ax.scatter(centroids[:, 0], centroids[:, 1], marker='x', c='red',
               alpha=0.9, linewidths=3, s=250)
    print(centroids)
# INFO: Print & Plot your data
# First run: cluster every incident in the cleaned dataset.
doKMeans(df1)
plt.title("For all dataframe dates")
plt.show()

#
# TODO: Filter out the data so that it only contains samples that have
# a Date > '2011-01-01', using indexing. Then, in a new figure, plot the
# crime incidents, as well as a new K-Means run's centroids.
#
# .. your code here ..
# Pandas compares datetime64 values against an ISO date string directly.
df2 = df1[df1.Date > '2011-01-01']

# INFO: Print & Plot your data
doKMeans(df2)
plt.title("Dates limited to 2011-01-01 and later")
plt.show()
| mit |
mljar/mljar-examples | compare_Tensorflow_Sckit_Learn/benchmark_classification.py | 1 | 2149 | import os
import time
import json
import numpy as np
import pandas as pd
from supervised.automl import AutoML
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from pmlb import fetch_data, classification_dataset_names
# Load previously saved results so an interrupted benchmark can resume.
# (A context manager avoids leaking the file handle, which the original
# bare json.load(open(...)) did.)
with open("results.json") as results_file:
    results = json.load(results_file)

# Datasets that already have a result entry are skipped below.
finished = {r["dataset"] for r in results}

for classification_dataset in classification_dataset_names:
    X, y = fetch_data(classification_dataset, return_X_y=True)
    # Benchmark only reasonably sized datasets.
    if X.shape[0] < 1000:
        continue
    if classification_dataset in finished:
        continue
    # Compute the label set once (the original called np.unique(y) three
    # times per dataset).
    classes = np.unique(y)
    print(classification_dataset, X.shape, y[:5], classes)
    train_X, test_X, train_y, test_y = train_test_split(
        X, y, test_size=0.25, stratify=y, random_state=12
    )
    # The task type follows the number of distinct labels.
    ml_task = "binary_classification"
    if len(classes) > 2:
        ml_task = "multiclass_classification"
    # Scikit-learn style MLP vs the Tensorflow-backed Neural Network,
    # configured identically.
    mlp = AutoML(
        algorithms=["MLP"],
        mode="Perform",
        explain_level=0,
        train_ensemble=False,
        golden_features=False,
        features_selection=False,
        ml_task=ml_task,
    )
    nn = AutoML(
        algorithms=["Neural Network"],
        mode="Perform",
        explain_level=0,
        train_ensemble=False,
        golden_features=False,
        features_selection=False,
        ml_task=ml_task,
    )
    mlp.fit(train_X, train_y)
    mlp_time = np.round(time.time() - mlp._start_time, 2)
    nn.fit(train_X, train_y)
    nn_time = np.round(time.time() - nn._start_time, 2)
    # Evaluate both models on the held-out split.
    mlp_ll = log_loss(test_y, mlp.predict_proba(test_X))
    nn_ll = log_loss(test_y, nn.predict_proba(test_X))
    print(classification_dataset, X.shape, classes, mlp_ll, nn_ll)
    results += [
        {
            "dataset": classification_dataset,
            "nrows": X.shape[0],
            "ncols": X.shape[1],
            "n_classes": len(classes),
            "mlp_logloss": mlp_ll,
            "nn_logloss": nn_ll,
            "mlp_time": mlp_time,
            "nn_time": nn_time,
        }
    ]
    finished.add(classification_dataset)
    # Checkpoint after every dataset so progress survives crashes.
    with open("results.json", "w") as fout:
        fout.write(json.dumps(results, indent=4))
| apache-2.0 |
xiaocanli/stochastic-parker | python/local_dist.py | 1 | 21364 | #!/usr/bin/env python3
"""
Analysis procedures for local distribution
"""
from __future__ import print_function
import argparse
import itertools
import json
import multiprocessing
import sys
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from joblib import Parallel, delayed
from matplotlib.colors import LogNorm
import particle_trajectory as traj
from sde_util import load_mhd_config, mkdir_p
sys.path.insert(0, '/users/xiaocanli/Git/mhd_analysis_sli')
import mhd_data
# Use a serif (Computer Modern) font and real LaTeX rendering for all
# figure text; amsmath is needed for \text{} in the axis labels below.
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
mpl.rc('text', usetex=True)
mpl.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
# Default font properties for axis labels (passed as fontdict).
FONT = {'family' : 'serif',
        'color' : 'black',
        'weight' : 'normal',
        'size' : 24}
def read_az(mhd_run_dir, nx, ny, tframe):
    """Read the out-of-plane vector potential Ay for one time frame.

    Args:
        mhd_run_dir: MHD run directory (with a trailing slash)
        nx, ny: data dimensions
        tframe: time frame index

    Returns:
        A read-only (nx, ny) float32 memmap into data/Ay.gda.
    """
    fname = mhd_run_dir + 'data/Ay.gda'
    # Each frame stores nx*ny float32 values (4 bytes each); map straight
    # to the requested frame instead of reading the whole file.
    # (The original pre-allocated an nx*ny array of zeros that was
    # immediately overwritten by the memmap -- dead work, removed.)
    az = np.memmap(fname, dtype='float32', mode='r',
                   offset=nx*ny*tframe*4, shape=(nx, ny))
    return az
def find_nearest(array, value):
    """Locate the entry of ``array`` closest to ``value``.

    Returns a ``(index, element)`` tuple.
    """
    deviations = np.abs(array - value)
    nearest_idx = np.argmin(deviations)
    return (nearest_idx, array[nearest_idx])
def reduce_local_dist(plot_config, mhd_config, show_plot=True):
    """Reduce local particle spatial distribution

    Loads the local momentum distributions for one frame, reduces them in
    space (nrx x nry patches) and momentum (nrp bins), optionally plots
    the spatial distribution of high-energy particles, and writes the
    reduced, physically normalized energy spectra to
    ../data/<run_name>/reduced/.

    Args:
        plot_config: plot configuration in dictionary
        mhd_config: MHD simulation configuration
        show_plot: whether to show the figure interactively
    """
    run_name = plot_config["run_name"]
    run_type = plot_config["run_type"]
    tframe = plot_config["tframe"]
    # Normalization parameters depend on runs:
    # rho0 = reference density, L0 = box size, nr* = reduce factors.
    if run_type == "Fan-Early":
        rho0 = 1.2E10  # cm^-3
        L0 = 5.0E9  # in cm
        nrx, nry, nrp = 4, 2, 4
    elif run_type == "Bin-Fan":
        rho0 = 1.0E9  # cm^-3
        L0 = 6.2E9  # in cm
        nrx, nry, nrp = 1, 1, 4
    elif run_type == "Harris_UMN":
        rho0 = 1.0E9  # cm^-3
        L0 = 2.5E9  # in cm
        nrx, nry, nrp = 4, 4, 1
    # Dimensions for local spectra (the config stores 1-element sequences,
    # hence the tuple unpacking).
    nx, = plot_config["nx"]
    ny, = plot_config["ny"]
    nxr = nx // nrx
    nyr = ny // nry
    # Only the right half of the domain is plotted/saved below.
    xstart = nxr//2
    # Read and reduce the local energy spectra
    tframe_str = str(tframe).zfill(4)
    fname = '../data/' + run_name + '/fp_local_' + tframe_str + '_sum.dat'
    fdata = np.fromfile(fname)
    # Number of momentum bins is implied by the file size.
    npp = fdata.shape[0] // (nx * ny)
    fdata = fdata.reshape([ny, nx, npp])
    print("Total number of particles: %d" % np.sum(fdata))
    print("data size: %d %d %d (C-order)" % fdata.shape)
    # Sum over nry*nrx spatial patches and nrp momentum bins.
    dists = fdata.reshape(ny//nry, nry, nx//nrx, nrx, npp//nrp, nrp)
    dists_r = np.sum(np.sum(np.sum(dists, axis=5), axis=3), axis=1)
    print("reduced data size: %d %d %d (C-order)" % dists_r.shape)
    # Momentum bins (simulation unit); p0 is the injection momentum.
    pmin = 1E-2
    pmax = 1E1
    p0 = 1E-1
    pmom = np.logspace(math.log10(pmin), math.log10(pmax), npp + 1)
    pmom_mid = (pmom[:-1] + pmom[1:]) * 0.5
    dpmom = np.diff(pmom)
    pmom_r = pmom[::nrp]
    pmom_mid_r = (pmom_r[:-1] + pmom_r[1:]) * 0.5
    dpmom_r = np.diff(pmom_r)
    fmom_r = dists_r * pmom_mid_r / dpmom_r # f(p)p^3
    # NOTE(review): the trailing comment says f(p)p, which would require
    # dividing by pmom_mid_r**2; as written this evaluates as
    # (fmom_r / pmom_mid_r) * 2. Confirm whether '*2' was meant as '**2'.
    fene_r = fmom_r / pmom_mid_r*2 # f(p)p or f(e)
    # Normalized energy bins
    ene0 = 1.0 # 1keV
    ene_shift = 1.0 # parameter to adjust the energy of the injected particles
    elog_r = ene0 * pmom_r**2 / p0**2 # in keV
    elog_r *= ene_shift # shift the injection energy
    elog_mid_r = (elog_r[:-1] + elog_r[1:]) * 0.5
    delog_r = np.diff(elog_r)
    # Normalize the spectrum
    # We assume the initially inject particles are about 5% of all particles
    nptl_tot_real = L0**2 * rho0
    ene_cut = 10 * ene0 # about 1% of the simulated particles
    # Index of the momentum bin corresponding to the energy cutoff.
    cutoff, _ = find_nearest(pmom, math.sqrt(ene_cut) * p0)
    cutoff_r = cutoff // nrp
    nptl_tot = np.sum(fdata)
    nptl_above_cutoff = np.sum(fdata[:, :, cutoff:])
    fnorm = nptl_tot_real * 0.05 / nptl_tot
    fnorm *= nxr * nyr / L0**2
    print("Assuming nonthermal start from %f keV" %
          ((pmom[cutoff]**2 * ene_shift) / p0**2))
    print("Total Number of particles: %d" % nptl_tot)
    print("Number of particles above break: %d" % nptl_above_cutoff)
    print("Non-thermal fraction: %f" % (nptl_above_cutoff/nptl_tot))
    if plot_config["check_dist"]:
        # Plot the spatial distribution of the high-energy electrons
        L0_Mm = L0 / 1E8 # to Mm
        dists_r *= fnorm
        dist_2d = np.sum(dists_r[:, :, cutoff_r+1:], axis=2)
        # Figure layout, plot extent and color scale are run-dependent.
        if run_type == "Fan-Early":
            rect = [0.12, 0.10, 0.7, 0.85]
            fig = plt.figure(figsize=[7, 10])
            ax1 = fig.add_axes(rect)
            extent_box = [L0_Mm*0.5, L0_Mm, 0, L0_Mm]
            vmin, vmax = 0, 1E9
        elif run_type == "Bin-Fan":
            rect = [0.12, 0.12, 0.7, 0.85]
            fig = plt.figure(figsize=[8, 7])
            ax1 = fig.add_axes(rect)
            extent_box = [0, L0_Mm, 0, L0_Mm]
            vmin, vmax = 0, 1E9
        if run_type == "Harris_UMN":
            rect = [0.12, 0.10, 0.68, 0.85]
            fig = plt.figure(figsize=[7, 10])
            ax1 = fig.add_axes(rect)
            extent_box = [L0_Mm*0.5, L0_Mm, 0, L0_Mm]
            vmin, vmax = 0, 2E7
        img = ax1.imshow(dist_2d[:, xstart:], cmap=plt.cm.inferno,
                         vmin=vmin, vmax=vmax,
                         extent=extent_box,
                         aspect='auto', origin='lower',
                         interpolation='bicubic')
        # Annotate with the energy threshold of the plotted particles.
        ecut = elog_r[cutoff_r+1]
        ecut_s = "{%0.1f}" % ecut
        label1 = r'$\varepsilon > ' + ecut_s + '$ keV'
        ax1.text(0.05, 0.95, label1, color='white', fontsize=24,
                 horizontalalignment='left', verticalalignment='center',
                 transform=ax1.transAxes)
        ax1.set_xlabel(r'$x$/Mm', fontdict=FONT, fontsize=24)
        ax1.set_ylabel(r'$y$/Mm', fontdict=FONT, fontsize=24)
        ax1.tick_params(labelsize=20)
        rect[0] += rect[2] + 0.02
        rect[2] = 0.04
        cbar_ax = fig.add_axes(rect)
        cbar = fig.colorbar(img, cax=cbar_ax)
        cbar.ax.tick_params(labelsize=16)
        cbar.ax.set_ylabel(r'$\rho$ (cm$^{-3}$)', fontsize=24)
        fdir = '../img/fp_local/' + run_name + '/'
        mkdir_p(fdir)
        fname = fdir + "fp_local_reduced_" + str(tframe) + ".jpg"
        fig.savefig(fname, dpi=200)
    # Save the reduced spectrum
    fdir = '../data/' + run_name + '/reduced/'
    mkdir_p(fdir)
    fname = fdir + "fe_local_reduced_" + str(tframe) + ".dat"
    fene_r *= fnorm # normalize to real number density
    fene_r *= 0.5 * p0**2 / (ene0 * ene_shift) # normalize to keV^-1
    fene_r[:, xstart:, :].tofile(fname)
    if plot_config["check_dist"]:
        # Check the saved local spectrum by re-reading it and re-plotting.
        # (vmin/vmax/extent_box come from the plotting branch above; both
        # branches are guarded by the same check_dist flag.)
        fdata = np.fromfile(fname)
        fdata = fdata.reshape([nyr, nxr-xstart, -1])
        fene_r = fdata * delog_r[np.newaxis, np.newaxis, :]
        dist_2d = np.sum(fene_r[:, :, cutoff_r+1:], axis=2)
        rect = [0.12, 0.10, 0.68, 0.85]
        fig = plt.figure(figsize=[7, 10])
        ax1 = fig.add_axes(rect)
        img = ax1.imshow(dist_2d, cmap=plt.cm.inferno,
                         vmin=vmin, vmax=vmax,
                         extent=extent_box,
                         aspect='auto', origin='lower',
                         interpolation='bicubic')
    if show_plot:
        plt.show()
    else:
        plt.close('all')
def reduce_mhd_data(plot_config, mhd_config, mhd_run_info):
    """Reduce MHD data size

    Block-averages the MHD fields of one frame over nrx-by-nry patches,
    converts them to physical units, and saves the right half of the
    reduced domain to <mhd_run_dir>/data_reduced/.

    Args:
        plot_config: plot configuration in dictionary
        mhd_config: MHD simulation configuration
        mhd_run_info: information of the MHD run
    """
    run_type = plot_config["run_type"]
    # Run-dependent physical normalizations and reduce factors.
    if run_type == "Fan-Early":
        rho0 = 1.2E10  # cm^-3
        b0 = 50  # Gauss
        T0 = 6.0E6  # K
        beta0 = 0.1
        nrx, nry = 64, 32
    elif run_type == "Harris_UMN":
        rho0 = 1.0E9  # cm^-3
        b0 = 50  # Gauss
        T0 = 1.0E6  # K
        beta0 = 0.1
        nrx, nry = 64, 64
    else:
        # Previously an unknown run type fell through to a NameError on
        # nrx; fail early with a clear message instead.
        raise ValueError("Unknown run type: %s" % run_type)
    tframe = plot_config["tframe"]
    xmesh, ymesh, data = mhd_data.read_fields_data(mhd_run_info, tframe)
    ny, nx = xmesh.shape
    # Field slices (transposed to x-major order).
    rho = data[:, :, 0].T
    pre = data[:, :, 1].T
    bx = data[:, :, 5].T
    by = data[:, :, 6].T
    bz = data[:, :, 7].T
    # Block-average each field over nrx-by-nry patches.
    rho = rho.reshape(ny//nry, nry, nx//nrx, nrx)
    pre = pre.reshape(ny//nry, nry, nx//nrx, nrx)
    bx = bx.reshape(ny//nry, nry, nx//nrx, nrx)
    by = by.reshape(ny//nry, nry, nx//nrx, nrx)
    bz = bz.reshape(ny//nry, nry, nx//nrx, nrx)
    rho_r = np.mean(np.mean(rho, axis=3), axis=1)
    pre_r = np.mean(np.mean(pre, axis=3), axis=1)
    bx_r = np.mean(np.mean(bx, axis=3), axis=1)
    by_r = np.mean(np.mean(by, axis=3), axis=1)
    bz_r = np.mean(np.mean(bz, axis=3), axis=1)
    absB_r = np.sqrt(bx_r**2 + by_r**2 + bz_r**2)
    T_r = pre_r / rho_r / (beta0 * 0.5)
    # Convert to physical units.
    # BUG FIX: the magnetic-field scaling was previously applied to the
    # discarded full-resolution arrays (bx, by, bz) instead of the reduced
    # arrays written to disk, so the saved component files stayed in code
    # units while absB was in Gauss.
    rho_r *= rho0
    bx_r *= b0
    by_r *= b0
    bz_r *= b0
    absB_r *= b0
    T_r *= T0
    nxr = nx // nrx
    fdir = plot_config["mhd_run_dir"] + "data_reduced/"
    mkdir_p(fdir)
    # Save only the right half of the reduced domain.
    fname = fdir + 'rho_' + str(tframe) + '.dat'
    rho_r[:, nxr//2:].tofile(fname)
    fname = fdir + 'T_' + str(tframe) + '.dat'
    T_r[:, nxr//2:].tofile(fname)
    fname = fdir + 'bx_' + str(tframe) + '.dat'
    bx_r[:, nxr//2:].tofile(fname)
    fname = fdir + 'by_' + str(tframe) + '.dat'
    by_r[:, nxr//2:].tofile(fname)
    fname = fdir + 'bz_' + str(tframe) + '.dat'
    bz_r[:, nxr//2:].tofile(fname)
    fname = fdir + 'absB_' + str(tframe) + '.dat'
    absB_r[:, nxr//2:].tofile(fname)
def calc_va(b0, n0, verbose=False):
    """Calculate the Alfven speed in m/s

    Args:
        b0: magnetic field strength in Gauss
        n0: particle number density in cm^-3
        verbose: whether to print the computed speed

    Returns:
        The Alfven speed in m/s.
    """
    pmass = 1.6726219E-27  # proton mass in kilogram
    mu0 = 4 * math.pi * 1E-7  # vacuum permeability (SI)
    # Convert Gauss -> Tesla (1E-4) and cm^-3 -> m^-3 (1E6).
    va = b0 * 1E-4 / math.sqrt(mu0 * n0 * 1E6 * pmass)
    # Fix: the verbose flag was previously ignored and the function
    # always printed.
    if verbose:
        print("The Alfven speed is %f km/s" % (va/1E3))
    return va
def plot_reduced_mhd(plot_config, mhd_config, mhd_run_info, show_plot=True):
    """Plot reduced MHD data

    Reads the block-averaged density, temperature and |B| files written by
    reduce_mhd_data for one frame and plots them as three side-by-side
    panels in physical units, saving the figure as a JPEG.
    """
    run_type = plot_config["run_type"]
    # Run-dependent physical normalizations and reduce factors (must match
    # the values used in reduce_mhd_data).
    if run_type == "Fan-Early":
        rho0 = 1.2E10  # cm^-3
        b0 = 50  # Gauss
        T0 = 6.0E6  # K
        L0 = 5.0E9  # in cm
        nrx, nry = 64, 32
    elif run_type == "Harris_UMN":
        rho0 = 1.0E9  # cm^-3
        b0 = 50  # Gauss
        T0 = 1.0E6  # K
        L0 = 2.5E9  # in cm
        nrx, nry = 64, 64
    va = calc_va(b0, rho0)  # in m/s
    # Alfven crossing time in seconds (va converted to cm/s).
    time_norm = L0 / (va * 1E2)
    # Display normalizations for the color bars.
    rho_norm = 1.0E9  # cm^-3
    bnorm = 50  # Gauss
    Tnorm = 1.0E6  # K
    nx, = mhd_config["nx"]
    ny, = mhd_config["ny"]
    nxr = nx // nrx
    nyr = ny // nry
    fdir = plot_config["mhd_run_dir"] + "data_reduced/"
    tframe = plot_config["tframe"]
    L0_Mm = L0 / 1E8  # cm -> Mm
    # Only the right half of the domain was saved by reduce_mhd_data.
    extent_box = [0, L0_Mm*0.5, 0, L0_Mm]
    fig = plt.figure(figsize=[7, 5])
    rect = [0.09, 0.13, 0.28, 0.8]
    # Horizontal/vertical gaps between panels (vgap is currently unused).
    hgap, vgap = 0.02, 0.02
    # --- Panel 1: number density ---
    fname = fdir + 'rho_' + str(tframe) + '.dat'
    fdata = np.fromfile(fname, dtype=np.float32)
    fdata = fdata.reshape((nyr, nxr//2))
    ax = fig.add_axes(rect)
    img = ax.imshow(fdata/rho_norm, extent=extent_box, cmap=plt.cm.plasma,
                    aspect='auto', origin='lower',
                    interpolation='bicubic',
                    vmin=1, vmax=10)
    ax.tick_params(labelsize=12)
    ax.set_xlabel(r'$x$/Mm', fontsize=16)
    ax.set_ylabel(r'$y$/Mm', fontsize=16)
    ax.tick_params(bottom=True, top=False, left=True, right=True)
    ax.tick_params(axis='x', which='minor', direction='in', top=True)
    ax.tick_params(axis='x', which='major', direction='in', top=True)
    ax.tick_params(axis='y', which='minor', direction='in')
    ax.tick_params(axis='y', which='major', direction='in')
    # Inset colorbar drawn in white so it reads on the dark colormap.
    rect_cbar = np.copy(rect)
    rect_cbar[0] += 0.02
    rect_cbar[2] = 0.015
    rect_cbar[1] = 0.4
    rect_cbar[3] = 0.3
    cbar_ax = fig.add_axes(rect_cbar)
    cbar = fig.colorbar(img, cax=cbar_ax)
    cbar.set_label(r'$\rho (10^9\text{cm}^{-3})$', color='w', fontsize=12)
    cbar.ax.yaxis.set_tick_params(color='w')
    cbar.outline.set_edgecolor('w')
    plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
    cbar.ax.tick_params(labelsize=12, color='w')
    # --- Panel 2: temperature (carries the time stamp in its title) ---
    rect[0] += rect[2] + hgap
    fname = fdir + 'T_' + str(tframe) + '.dat'
    fdata = np.fromfile(fname, dtype=np.float32)
    fdata = fdata.reshape((nyr, nxr//2))
    ax = fig.add_axes(rect)
    img = ax.imshow(fdata/Tnorm, extent=extent_box, cmap=plt.cm.viridis,
                    aspect='auto', origin='lower',
                    interpolation='bicubic',
                    vmin=0.1, vmax=10)
    ax.tick_params(labelsize=12)
    ax.tick_params(axis='y', labelleft=False)
    ax.set_xlabel(r'$x$/Mm', fontsize=16)
    time = mhd_config["dt_out"][0] * time_norm * tframe
    tname = "Time: %0.1f s" % time
    plt.title(tname, fontsize=16)
    ax.tick_params(bottom=True, top=False, left=True, right=True)
    ax.tick_params(axis='x', which='minor', direction='in', top=True)
    ax.tick_params(axis='x', which='major', direction='in', top=True)
    ax.tick_params(axis='y', which='minor', direction='in')
    ax.tick_params(axis='y', which='major', direction='in')
    rect_cbar = np.copy(rect)
    rect_cbar[0] += 0.02
    rect_cbar[2] = 0.015
    rect_cbar[1] = 0.4
    rect_cbar[3] = 0.3
    cbar_ax = fig.add_axes(rect_cbar)
    cbar = fig.colorbar(img, cax=cbar_ax)
    cbar.set_label(r'$T (10^6\text{K})$', color='w', fontsize=12)
    cbar.ax.yaxis.set_tick_params(color='w')
    cbar.outline.set_edgecolor('w')
    plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
    cbar.ax.tick_params(labelsize=12, color='w')
    # --- Panel 3: magnetic field magnitude ---
    rect[0] += rect[2] + hgap
    fname = fdir + 'absB_' + str(tframe) + '.dat'
    fdata = np.fromfile(fname, dtype=np.float32)
    fdata = fdata.reshape((nyr, nxr//2))
    ax = fig.add_axes(rect)
    img = ax.imshow(fdata/bnorm, extent=extent_box, cmap=plt.cm.plasma,
                    aspect='auto', origin='lower',
                    interpolation='bicubic',
                    vmin=0.0, vmax=2.0)
    ax.tick_params(labelsize=12)
    ax.tick_params(axis='y', labelleft=False)
    ax.set_xlabel(r'$x$/Mm', fontsize=16)
    ax.tick_params(bottom=True, top=False, left=True, right=True)
    ax.tick_params(axis='x', which='minor', direction='in', top=True)
    ax.tick_params(axis='x', which='major', direction='in', top=True)
    ax.tick_params(axis='y', which='minor', direction='in')
    ax.tick_params(axis='y', which='major', direction='in')
    rect_cbar = np.copy(rect)
    rect_cbar[0] += 0.02
    rect_cbar[2] = 0.015
    rect_cbar[1] = 0.4
    rect_cbar[3] = 0.3
    cbar_ax = fig.add_axes(rect_cbar)
    cbar = fig.colorbar(img, cax=cbar_ax)
    cbar.set_label(r'$B (\text{50 Gauss})$', color='w', fontsize=12)
    cbar.ax.yaxis.set_tick_params(color='w')
    cbar.outline.set_edgecolor('w')
    plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
    cbar.ax.tick_params(labelsize=12, color='w')
    # Save the composed figure, then show or dispose of it.
    img_dir = '../img/' + mhd_run_info["run_name"] + '/mhd_fields_reduced/'
    mkdir_p(img_dir)
    fname = img_dir + 'mhd_fields_' + str(tframe) + '.jpg'
    fig.savefig(fname, dpi=400)
    if not show_plot:
        plt.close()
    if show_plot:
        plt.show()
def get_mhd_info(args):
    """Collect the MHD run information from parsed command-line arguments.

    Returns a dict with the run name, directory, type, code, and the full
    path to the MHD configuration file.
    """
    return {
        "run_name": args.mhd_run,
        "run_dir": args.mhd_run_dir,
        "run_type": args.mhd_run_type,
        "mhd_code": args.mhd_code,
        # The configuration file lives inside the run directory.
        "config_name": args.mhd_run_dir + args.config_name,
    }
def get_cmd_args():
    """Get command line arguments

    Defines the MHD-run and SDE-run selectors, the time-frame range, and
    the mutually independent action flags (--local_dist, --reduce_mhd,
    --plot_rmhd). Returns the parsed argparse namespace.
    """
    default_mhd_run = 'S1E5_beta01_bg00'
    default_mhd_run_dir = ('/net/scratch3/xiaocanli/mhd/guide_field_scaling/' +
                           default_mhd_run + '/')
    default_sde_run = 'p000_b000_00047_100_l'
    parser = argparse.ArgumentParser(description='Spatial distribution')
    parser.add_argument('--mhd_run', action="store",
                        default=default_mhd_run, help='MHD run name')
    parser.add_argument('--mhd_run_dir', action="store",
                        default=default_mhd_run_dir, help='MHD run directory')
    parser.add_argument('--mhd_run_type', action="store", default="reconnection",
                        help='MHD run type')
    parser.add_argument('--mhd_code', action="store", default="Athena",
                        help='MHD code')
    parser.add_argument('--config_name', action="store",
                        default="athinput.reconnection",
                        help='MHD configuration filename')
    parser.add_argument('--sde_run', action="store",
                        default=default_sde_run, help='SDE run name')
    parser.add_argument('--tframe', action="store", default='200', type=int,
                        help='Time frame')
    parser.add_argument('--multi_frames', action="store_true", default=False,
                        help='whether to analyze multiple frames')
    parser.add_argument('--time_loop', action="store_true", default=False,
                        help='whether to loop over time instead of using joblib')
    parser.add_argument('--tstart', action="store", default='0', type=int,
                        help='starting time frame')
    parser.add_argument('--tend', action="store", default='10', type=int,
                        help='ending time frame')
    parser.add_argument('--local_dist', action="store_true", default=False,
                        help='whether to plot spatial distribution')
    parser.add_argument('--check_dist', action="store_true", default=False,
                        help='whether to check local spatial distribution')
    parser.add_argument('--reduce_mhd', action="store_true", default=False,
                        help='whether to reduce MHD data size')
    parser.add_argument('--run_type', action="store", default="Fan-Early",
                        help='What kind of run')
    parser.add_argument('--plot_rmhd', action="store_true", default=False,
                        help='whether to plot reduced MHD data')
    return parser.parse_args()
def analysis_single_frame(plot_config, mhd_config, args):
    """Run the selected analysis for a single time frame.

    Exactly one of the mutually exclusive command-line switches
    (``--local_dist``, ``--reduce_mhd``, ``--plot_rmhd``) selects the
    action; if none is set, this is a no-op.
    """
    mhd_run_info = get_mhd_info(args)
    if args.local_dist:
        # Interactive single-frame run: pop up the plot window.
        reduce_local_dist(plot_config, mhd_config, show_plot=True)
        return
    if args.reduce_mhd:
        reduce_mhd_data(plot_config, mhd_config, mhd_run_info)
        return
    if args.plot_rmhd:
        plot_reduced_mhd(plot_config, mhd_config, mhd_run_info)
def process_input(plot_config, mhd_config, args, tframe):
    """Process one time frame (worker entry point for joblib).

    Mutates ``plot_config["tframe"]`` before dispatching to the action
    selected by the command-line switches; plots are never shown here.
    """
    plot_config["tframe"] = tframe
    mhd_run_info = get_mhd_info(args)
    if args.local_dist:
        reduce_local_dist(plot_config, mhd_config, show_plot=False)
        return
    if args.reduce_mhd:
        reduce_mhd_data(plot_config, mhd_config, mhd_run_info)
        return
    if args.plot_rmhd:
        plot_reduced_mhd(plot_config, mhd_config, mhd_run_info, show_plot=False)
def analysis_multi_frames(plot_config, mhd_config, args):
    """Run the selected analysis over time frames tmin..tmax.

    With ``--time_loop`` the frames are processed sequentially in this
    process; otherwise they are fanned out to worker processes with
    joblib, delegating each frame to ``process_input``.
    """
    tframes = range(plot_config["tmin"], plot_config["tmax"] + 1)
    mhd_run_info = get_mhd_info(args)
    if args.time_loop:
        for tframe in tframes:
            print("Time frame: %d" % tframe)
            plot_config["tframe"] = tframe
            if args.local_dist:
                reduce_local_dist(plot_config, mhd_config, show_plot=False)
            elif args.reduce_mhd:
                # Previously the sequential path silently ignored
                # --reduce_mhd; handle it as process_input() does.
                reduce_mhd_data(plot_config, mhd_config, mhd_run_info)
            elif args.plot_rmhd:
                plot_reduced_mhd(plot_config, mhd_config,
                                 mhd_run_info, show_plot=False)
    else:
        # Cap the worker pool at 8 (the original hard-coded value) but
        # never exceed the cores actually available; the old code
        # computed cpu_count() and then dead-stored 8 over it.
        ncores = min(8, multiprocessing.cpu_count())
        Parallel(n_jobs=ncores)(delayed(process_input)(plot_config, mhd_config,
                                                       args, tframe)
                                for tframe in tframes)
def main():
    """Entry point: parse arguments, build the plotting config, dispatch.

    Reads the SDE run parameters for ``mhd_run/sde_run`` from the JSON
    config file, derives the reduced grid size, and then runs either a
    multi-frame or a single-frame analysis.
    """
    args = get_cmd_args()
    # Per-run configuration keyed by "<mhd_run>/<sde_run>".
    with open('config/spectrum_config_bg.json', 'r') as file_handler:
        config = json.load(file_handler)
    mhd_config = load_mhd_config(args.mhd_run_dir)
    plot_config = {}
    run_name = args.mhd_run + "/" + args.sde_run
    sde_run_config = config[run_name]
    # Reduced grid: MHD resolution divided by the reduction factor.
    nreduce = sde_run_config["nreduce"]
    plot_config["nx"] = mhd_config["nx"] // nreduce
    plot_config["ny"] = mhd_config["ny"] // nreduce
    plot_config["tmax"] = sde_run_config["tmax"]
    plot_config["tmin"] = sde_run_config["tmin"]
    plot_config["run_name"] = run_name
    plot_config["tframe"] = args.tframe
    plot_config["mhd_run_dir"] = args.mhd_run_dir
    plot_config["run_type"] = args.run_type
    plot_config["check_dist"] = args.check_dist
    # NOTE(review): hard-coded frame list overrides nothing directly but
    # is presumably consumed by multi-panel plots — confirm and consider
    # exposing it as a command-line option.
    tframes = [150, 175, 200]
    plot_config["tframes"] = tframes
    if args.multi_frames:
        analysis_multi_frames(plot_config, mhd_config, args)
    else:
        analysis_single_frame(plot_config, mhd_config, args)
if __name__ == "__main__":
main()
| gpl-3.0 |
victorbergelin/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Dummy classifier to test the cross-validation machinery.

    Its "score" depends only on ``foo_param`` (1.0 iff foo_param > 1),
    so grid searches over ``foo_param`` have a deterministic winner.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        """Pretend to fit; only checks X and Y have matching lengths."""
        assert_true(len(X) == len(Y))
        return self

    def predict(self, T):
        """Return the number of samples instead of real predictions."""
        return T.shape[0]

    # All prediction-like methods share the same dummy implementation.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        """Score 1.0 when foo_param exceeds 1, else 0.0 (data ignored)."""
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        """Expose foo_param so clone()/set_params round-trips work."""
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC variant whose ``score`` attribute is inaccessible.

    Used to test that grid search falls back to the ``scoring``
    parameter when the estimator provides no score method.
    """
    @property
    def score(self):
        # Raise on attribute access itself (not on call), mimicking an
        # estimator that simply has no ``score``.
        raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Check that iterating ``grid`` yields the same sequence as
    indexing it with 0..len(grid)-1."""
    indexed = [grid[idx] for idx in range(len(grid))]
    assert_equal(list(grid), indexed)
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice."""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # A second fit() on the same instance trips this assertion, so
        # tests can detect a missing clone before refitting.
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True

    def predict(self, X):
        # Constant dummy prediction, one value per sample.
        return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit()."""

    # Sentinel: fit() raises only when ``parameter`` equals this value,
    # so a grid search can be made to fail on one grid point only.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")

    def predict(self, X):
        # Constant dummy prediction, one value per sample.
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
kose-y/pylearn2 | pylearn2/cross_validation/tests/test_subset_iterators.py | 49 | 2411 | """
Test subset iterators.
"""
import numpy as np
from pylearn2.testing.skip import skip_if_no_sklearn
def test_validation_k_fold():
    """Test ValidationKFold."""
    skip_if_no_sklearn()
    from pylearn2.cross_validation.subset_iterators import ValidationKFold
    n = 30
    # test with indices
    cv = ValidationKFold(n)
    for train, valid, test in cv:
        # The three subsets must partition all n indices, no duplicates.
        assert np.unique(np.concatenate((train, valid, test))).size == n
        # Validation and test each receive one fold's worth of samples.
        assert valid.size == n / cv.n_folds
        assert test.size == n / cv.n_folds
def test_stratified_validation_k_fold():
    """Test StratifiedValidationKFold."""
    skip_if_no_sklearn()
    from pylearn2.cross_validation.subset_iterators import (
        StratifiedValidationKFold)
    n = 30
    # Use floor division for the array sizes: under Python 3, n / 2 is a
    # float and np.zeros/np.ones reject non-integer shapes.
    y = np.concatenate((np.zeros(n // 2, dtype=int),
                        np.ones(n // 2, dtype=int)))
    # test with indices
    cv = StratifiedValidationKFold(y)
    for train, valid, test in cv:
        # The three subsets must partition all n indices, no duplicates.
        assert np.unique(np.concatenate((train, valid, test))).size == n
        assert valid.size == n / cv.n_folds
        assert test.size == n / cv.n_folds
        # Stratification: each subset keeps the 50/50 class balance.
        assert np.count_nonzero(y[valid]) == (n / 2) * (1. / cv.n_folds)
        assert np.count_nonzero(y[test]) == (n / 2) * (1. / cv.n_folds)
def test_validation_shuffle_split():
    """Test ValidationShuffleSplit."""
    skip_if_no_sklearn()
    from pylearn2.cross_validation.subset_iterators import (
        ValidationShuffleSplit)
    n = 30
    # test with indices
    cv = ValidationShuffleSplit(n)
    for train, valid, test in cv:
        # The three subsets must partition all n indices, no duplicates.
        assert np.unique(np.concatenate((train, valid, test))).size == n
        # Validation and test sets each hold the configured fraction.
        assert valid.size == n * cv.test_size
        assert test.size == n * cv.test_size
def test_stratified_validation_shuffle_split():
    """Test StratifiedValidationShuffleSplit."""
    skip_if_no_sklearn()
    from pylearn2.cross_validation.subset_iterators import (
        StratifiedValidationShuffleSplit)
    n = 60
    # Use floor division for the array sizes: under Python 3, n / 2 is a
    # float and np.zeros/np.ones reject non-integer shapes.
    y = np.concatenate((np.zeros(n // 2, dtype=int),
                        np.ones(n // 2, dtype=int)))
    # test with indices
    cv = StratifiedValidationShuffleSplit(y)
    for train, valid, test in cv:
        # The three subsets must partition all n indices, no duplicates.
        assert np.unique(np.concatenate((train, valid, test))).size == n
        assert valid.size == n * cv.test_size
        assert test.size == n * cv.test_size
        # Stratification: each subset keeps the 50/50 class balance.
        assert np.count_nonzero(y[valid]) == (n / 2) * cv.test_size
        assert np.count_nonzero(y[test]) == (n / 2) * cv.test_size
| bsd-3-clause |
elkingtonmcb/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
know as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
#          Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
# Build a synthetic l x l image containing four circles.
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
# Boolean disk masks: True for pixels inside each circle.
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
# Union of the disks; `mask` restricts the graph to the objects so the
# clustering separates objects from each other, not from the background.
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
# Unseeded noise: output varies run to run by design of the example.
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent from the gradient the segmentation is close to a voronoi
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
# Paint cluster labels back into image coordinates; -1 marks background.
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
# Same pipeline restricted to two circles of similar size.
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
MatthieuBizien/scikit-learn | sklearn/utils/validation.py | 15 | 25983 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as _DataConversionWarning
from ..exceptions import NonBLASDotWarning as _NonBLASDotWarning
from ..exceptions import NotFittedError as _NotFittedError
# Backward-compatibility shims: these exception/warning classes moved to
# sklearn.exceptions and are re-exported here, wrapped in a deprecation
# notice, until their removal in 0.19.
@deprecated("DataConversionWarning has been moved into the sklearn.exceptions"
            " module. It will not be available here from version 0.19")
class DataConversionWarning(_DataConversionWarning):
    pass
@deprecated("NonBLASDotWarning has been moved into the sklearn.exceptions"
            " module. It will not be available here from version 0.19")
class NonBLASDotWarning(_NonBLASDotWarning):
    pass
@deprecated("NotFittedError has been moved into the sklearn.exceptions module."
            " It will not be available here from version 0.19")
class NotFittedError(_NotFittedError):
    pass
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', _NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.
    Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
    if sp.issparse(X):
        # For sparse input only the stored values need checking.
        _assert_all_finite(X.data)
    else:
        _assert_all_finite(X)
def as_float_array(X, copy=True, force_all_finite=True):
    """Convert an array-like to an array of floats.

    The resulting dtype is np.float32 or np.float64, depending on the
    original type.  Depending on ``copy`` the input may be returned as-is.

    Parameters
    ----------
    X : {array-like, sparse matrix}
    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    plain_ndarray = isinstance(X, np.ndarray) and not isinstance(X, np.matrix)
    if not plain_ndarray and not sp.issparse(X):
        # Lists, matrices and other array-likes go through the full
        # validation / conversion machinery.
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)
    if X.dtype in [np.float32, np.float64]:
        # Already floating point: honour `copy`, preserving memory layout
        # for dense arrays.
        if not copy:
            return X
        if sp.issparse(X):
            return X.copy()
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C')
    # Non-float dtype: upcast.  int32 maps to float32, everything else to
    # float64 (same rule for dense and sparse inputs).
    return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length.
    ``None`` entries are ignored.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    lengths = [_num_samples(X) for X in arrays if X is not None]
    uniques = np.unique(lengths)
    if uniques.size > 1:
        raise ValueError("Found arrays with inconsistent numbers of samples: "
                         "%s" % str(uniques))
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that
    everything can be indexed by converting sparse matrices to csr and
    converting non-iterable objects to arrays.

    Parameters
    ----------
    *iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    def _as_indexable(X):
        if sp.issparse(X):
            return X.tocsr()
        if X is None or hasattr(X, "__getitem__") or hasattr(X, "iloc"):
            # Already sliceable (or explicitly absent): pass through.
            return X
        return np.array(X)

    result = [_as_indexable(X) for X in iterables]
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.
    Checks the sparse format of spmatrix and converts if necessary.
    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.
    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
        matrix input will raise an error. If the input is sparse but not in
        the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=none)
        Data type of result. If None, the dtype of the input is preserved.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.
    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if accept_sparse in [None, False]:
        # Caller does not accept sparse input at all.
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    if dtype is None:
        dtype = spmatrix.dtype
    changed_format = False
    if (isinstance(accept_sparse, (list, tuple))
            and spmatrix.format not in accept_sparse):
        # Convert to the first accepted sparse format; this already
        # produces a new matrix object.
        spmatrix = spmatrix.asformat(accept_sparse[0])
        changed_format = True
    if dtype != spmatrix.dtype:
        # astype also yields a new object, so no extra copy is needed.
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # A copy was requested but neither the format conversion nor the
        # dtype cast produced one; force it here.
        spmatrix = spmatrix.copy()
    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # Formats such as 'dok' expose no flat `data` array to scan.
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.
    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.
    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.
    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    if isinstance(accept_sparse, str):
        # Normalize a single format string to the list form used below.
        accept_sparse = [accept_sparse]
    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"
    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None
    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            # "numeric" with a non-object input: keep the input dtype.
            dtype = None
    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]
    # Resolve the estimator name used to prefix error/warning messages.
    if estimator is not None:
        if isinstance(estimator, six.string_types):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""
    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        if ensure_2d:
            if array.ndim == 1:
                if ensure_min_samples >= 2:
                    raise ValueError("%s expects at least 2 samples provided "
                                     "in a 2 dimensional array-like input"
                                     % estimator_name)
                warnings.warn(
                    "Passing 1d arrays as data is deprecated in 0.17 and will "
                    "raise ValueError in 0.19. Reshape your data either using "
                    "X.reshape(-1, 1) if your data has a single feature or "
                    "X.reshape(1, -1) if it contains a single sample.",
                    DeprecationWarning)
            array = np.atleast_2d(array)
            # To ensure that array flags are maintained
            array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. %s expected <= 2."
                             % (array.ndim, estimator_name))
        if force_all_finite:
            _assert_all_finite(array)
    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required%s."
                             % (n_samples, shape_repr, ensure_min_samples,
                                context))
    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required%s."
                             % (n_features, shape_repr, ensure_min_features,
                                context))
    # Warn (not raise) when the conversion silently changed the dtype.
    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s%s."
               % (dtype_orig, array.dtype, context))
        warnings.warn(msg, _DataConversionWarning)
    return array
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None,
              copy=False, force_all_finite=True, ensure_2d=True,
              allow_nd=False, multi_output=False, ensure_min_samples=1,
              ensure_min_features=1, y_numeric=False,
              warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.
    Checks X and y for consistent length, enforces X 2d and y 1d.
    Standard input checks are only applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2d and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.
    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.
    y : nd-array, list or sparse matrix
        Labels.
    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X. This parameter
        does not influence whether y can have np.inf or np.nan values.
    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    multi_output : boolean (default=False)
        Whether to allow 2-d y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.
    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.
    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.
    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.
    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    y_converted : object
        The converted and validated y.
    """
    # X gets the full validation treatment with all caller options.
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
                    ensure_2d, allow_nd, ensure_min_samples,
                    ensure_min_features, warn_on_dtype, estimator)
    if multi_output:
        # 2-d / sparse targets allowed; keep the original dtype of y.
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
                        dtype=None)
    else:
        # Vector targets: ravel column vectors (with a warning) and reject
        # non-finite values.
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    if y_numeric and y.dtype.kind == 'O':
        y = y.astype(np.float64)
    check_consistent_length(X, y)
    return X, y
def column_or_1d(y, warn=False):
    """Ravel a 1d array or a single-column 2d array, else raise an error.

    Parameters
    ----------
    y : array-like
    warn : boolean, default False
        To control display of warnings.

    Returns
    -------
    y : array

    Raises
    ------
    ValueError
        If ``y`` is neither 1d nor a 2d column vector.
    """
    shape = np.shape(y)
    ndim = len(shape)
    if ndim == 1:
        return np.ravel(y)
    if ndim == 2 and shape[1] == 1:
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          _DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.

    - ``None`` or the ``np.random`` module: numpy's global RandomState.
    - int: a new RandomState seeded with that value.
    - an existing RandomState: returned unchanged.
    Anything else raises ValueError.
    """
    if isinstance(seed, np.random.RandomState):
        return seed
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Check whether the estimator's ``fit`` method accepts ``parameter``.

    Returns True when the named parameter appears in the signature of
    ``estimator.fit`` (e.g. ``"sample_weight"`` for estimators supporting
    per-sample weights).
    """
    fit_parameters = signature(estimator.fit).parameters
    return parameter in fit_parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    If the array is not symmetric, a symmetrized version (the average of
    the array and its transpose) is returned.  Optionally a warning or an
    exception is raised when symmetrization is needed.

    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.
    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.
    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.
    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))
    is_sparse = sp.issparse(array)
    if is_sparse:
        diff = array - array.T
        if diff.format not in ('csr', 'csc', 'coo'):
            # Only these formats expose a flat `data` attribute to inspect.
            diff = diff.tocsr()
        symmetric = np.all(abs(diff.data) < tol)
    else:
        symmetric = np.allclose(array, array.T, atol=tol)
    if symmetric:
        return array
    if raise_exception:
        raise ValueError("Array must be symmetric")
    if raise_warning:
        warnings.warn("Array is not symmetric, and will be converted "
                      "to symmetric by average with its transpose.")
    averaged = 0.5 * (array + array.T)
    if is_sparse:
        # Convert back to the input's original sparse format.
        return getattr(averaged, 'to' + array.format)()
    return averaged
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks if the estimator is fitted by verifying the presence of
    "all_or_any" of the passed attributes and raises a NotFittedError with
    the given message.

    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.
    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg. : ["coef_", "estimator_", ...], "coef_"
    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."
        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.
        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")
    if not hasattr(estimator, 'fit'):
        # Not even an estimator-like object.
        raise TypeError("%s is not an estimator instance." % (estimator))
    attrs = attributes if isinstance(attributes, (list, tuple)) else [attributes]
    present = [hasattr(estimator, attr) for attr in attrs]
    if not all_or_any(present):
        # FIXME NotFittedError_ --> NotFittedError in 0.19
        raise _NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
    """
    Check if there is any negative value in an array.

    Parameters
    ----------
    X : array-like or sparse matrix
        Input data.
    whom : string
        Who passed X to this function.
    """
    # For sparse input only the explicitly stored values need checking.
    values = X.data if sp.issparse(X) else X
    if np.any(values < 0):
        raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
nonhermitian/scipy | scipy/signal/_max_len_seq.py | 41 | 4942 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
# Keys are register lengths (nbits); values are the feedback tap positions
# used when max_len_seq() is called with taps=None.
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
             9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
             14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
             18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
             23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
             27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
             31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
    """
    Maximum length sequence (MLS) generator.
    Parameters
    ----------
    nbits : int
        Number of bits to use. Length of the resulting sequence will
        be ``(2**nbits) - 1``. Note that generating long sequences
        (e.g., greater than ``nbits == 16``) can take a long time.
    state : array_like, optional
        If array, must be of length ``nbits``, and will be cast to binary
        (bool) representation. If None, a seed of ones will be used,
        producing a repeatable representation. If ``state`` is all
        zeros, an error is raised as this is invalid. Default: None.
    length : int, optional
        Number of samples to compute. If None, the entire length
        ``(2**nbits) - 1`` is computed.
    taps : array_like, optional
        Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
        If None, taps will be automatically selected (for up to
        ``nbits == 32``).
    Returns
    -------
    seq : array
        Resulting MLS sequence of 0's and 1's.
    state : array
        The final state of the shift register.
    Notes
    -----
    The algorithm for MLS generation is generically described in:
    https://en.wikipedia.org/wiki/Maximum_length_sequence
    The default values for taps are specifically taken from the first
    option listed for each value of ``nbits`` in:
    http://www.newwaveinstruments.com/resources/articles/
        m_sequence_linear_feedback_shift_register_lfsr.htm
    .. versionadded:: 0.15.0
    Examples
    --------
    MLS uses binary convention:
    >>> from scipy.signal import max_len_seq
    >>> max_len_seq(4)[0]
    array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
    MLS has a white spectrum (except for DC):
    >>> import matplotlib.pyplot as plt
    >>> from numpy.fft import fft, ifft, fftshift, fftfreq
    >>> seq = max_len_seq(6)[0]*2-1  # +1 and -1
    >>> spec = fft(seq)
    >>> N = len(seq)
    >>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
    >>> plt.margins(0.1, 0.1)
    >>> plt.grid(True)
    >>> plt.show()
    Circular autocorrelation of MLS is an impulse:
    >>> acorrcirc = ifft(spec * np.conj(spec)).real
    >>> plt.figure()
    >>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
    >>> plt.margins(0.1, 0.1)
    >>> plt.grid(True)
    >>> plt.show()
    Linear autocorrelation of MLS is approximately an impulse:
    >>> acorr = np.correlate(seq, seq, 'full')
    >>> plt.figure()
    >>> plt.plot(np.arange(-N+1, N), acorr, '.-')
    >>> plt.margins(0.1, 0.1)
    >>> plt.grid(True)
    >>> plt.show()
    """
    if taps is None:
        if nbits not in _mls_taps:
            # Automatic tap selection only covers the table above.
            known_taps = np.array(list(_mls_taps.keys()))
            raise ValueError('nbits must be between %s and %s if taps is None'
                             % (known_taps.min(), known_taps.max()))
        taps = np.array(_mls_taps[nbits], np.intp)
    else:
        # De-duplicate and sort descending, then validate the tap range.
        taps = np.unique(np.array(taps, np.intp))[::-1]
        if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
            raise ValueError('taps must be non-empty with values between '
                             'zero and nbits (inclusive)')
    taps = np.ascontiguousarray(taps)  # needed for Cython
    n_max = (2**nbits) - 1
    if length is None:
        length = n_max
    else:
        length = int(length)
        if length < 0:
            raise ValueError('length must be greater than or equal to 0')
    # We use int8 instead of bool here because numpy arrays of bools
    # don't seem to work nicely with Cython
    if state is None:
        state = np.ones(nbits, dtype=np.int8, order='c')
    else:
        # makes a copy if need be, ensuring it's 0's and 1's
        state = np.array(state, dtype=bool, order='c').astype(np.int8)
    if state.ndim != 1 or state.size != nbits:
        raise ValueError('state must be a 1-dimensional array of size nbits')
    if np.all(state == 0):
        # The all-zero register is a fixed point of the LFSR recurrence and
        # would generate a constant zero sequence.
        raise ValueError('state must not be all zeros')
    seq = np.empty(length, dtype=np.int8, order='c')
    # The shift-register loop itself lives in the Cython extension.
    state = _max_len_seq_inner(taps, state, nbits, length, seq)
    return seq, state
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 22 | 20592 | from scipy import sparse
import numpy as np
from scipy import sparse
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
# Fixed seed keeps the corrupted samples deterministic across test runs.
rng = np.random.RandomState(1000)
outliers = np.unique(rng.randint(len(X), size=200))
# Shift the selected rows well off the line so they are clear outliers.
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    """RANSAC must mark exactly the injected outliers as non-inliers."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, random_state=0)
    # Estimate parameters of corrupted data
    model.fit(X, y)
    # Reference mask: everything is an inlier except the corrupted rows.
    expected_mask = np.ones_like(model.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(model.inlier_mask_, expected_mask)
def test_ransac_is_data_valid():
    """fit() must raise when is_data_valid rejects every subsample."""
    def is_data_valid(X, y):
        # Each candidate subsample has exactly min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False
    rng = np.random.RandomState(0)
    X_local = rng.rand(10, 2)
    y_local = rng.rand(10, 1)
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5,
                            is_data_valid=is_data_valid,
                            random_state=0)
    assert_raises(ValueError, model.fit, X_local, y_local)
def test_ransac_is_model_valid():
    """fit() must raise when is_model_valid rejects every candidate model."""
    def is_model_valid(estimator, X, y):
        # Each candidate model is fit on exactly min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5,
                            is_model_valid=is_model_valid,
                            random_state=0)
    assert_raises(ValueError, model.fit, X, y)
def test_ransac_max_trials():
    """max_trials=0 is rejected; n_trials_ respects the dynamic bound."""
    invalid = RANSACRegressor(LinearRegression(), min_samples=2,
                              residual_threshold=5, max_trials=0,
                              random_state=0)
    assert_raises(ValueError, invalid.fit, X, y)
    # there is a 1e-9 chance it will take these many trials. No good reason
    # 1e-2 isn't enough, can still happen
    # 2 is the what ransac defines as min_samples = X.shape[1] + 1
    max_trials = _dynamic_max_trials(
        len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
    model = RANSACRegressor(LinearRegression(), min_samples=2)
    for seed in range(50):
        model.set_params(min_samples=2, random_state=seed)
        model.fit(X, y)
        assert_less(model.n_trials_, max_trials + 1)
def test_ransac_stop_n_inliers():
    """The search stops after one trial once stop_n_inliers is reached."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, stop_n_inliers=2,
                            random_state=0)
    model.fit(X, y)
    assert_equal(model.n_trials_, 1)
def test_ransac_stop_score():
    """The search stops after one trial once stop_score is exceeded."""
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=5, stop_score=0,
                            random_state=0)
    model.fit(X, y)
    assert_equal(model.n_trials_, 1)
def test_ransac_score():
    """Score is perfect on the inliers and degraded on the two outliers."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    # Two corrupted targets at the start; the rest lie on y == 0.
    y_local[0] = 1
    y_local[1] = 100
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=0.5, random_state=0)
    model.fit(X_local, y_local)
    assert_equal(model.score(X_local[2:], y_local[2:]), 1)
    assert_less(model.score(X_local[:2], y_local[:2]), 1)
def test_ransac_predict():
    """Predictions follow the inlier model (zero line), not the outliers."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    # Two corrupted targets at the start; the rest lie on y == 0.
    y_local[0] = 1
    y_local[1] = 100
    model = RANSACRegressor(LinearRegression(), min_samples=2,
                            residual_threshold=0.5, random_state=0)
    model.fit(X_local, y_local)
    assert_equal(model.predict(X_local), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    # When residual_threshold=0.0 there are no inliers and a
    # ValueError with a message should be raised
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.0, random_state=0,
                                       max_trials=5)
    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
    # All 5 trials were skipped for lack of inliers; the other skip
    # counters stay at zero.
    assert_equal(ransac_estimator.n_skips_no_inliers_, 5)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_no_valid_data():
    # Rejecting every subsample via is_data_valid must make fit() fail and
    # attribute all skipped trials to the data-validation step.
    def is_data_valid(X, y):
        return False
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_trials=5)
    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 5)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_no_valid_model():
    # Reject every candidate model: fit must fail and all five skipped
    # iterations must be attributed to the model-validation step.
    def always_invalid(estimator, X, y):
        return False
    ransac = RANSACRegressor(LinearRegression(),
                             is_model_valid=always_invalid,
                             max_trials=5)
    expected_msg = "RANSAC could not find a valid consensus set"
    assert_raises_regexp(ValueError, expected_msg, ransac.fit, X, y)
    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 0)
    assert_equal(ransac.n_skips_invalid_model_, 5)
def test_ransac_exceed_max_skips():
    # Rejecting every subsample makes fit() abort once max_skips (3) is
    # exceeded, before max_trials (5) is reached.
    def is_data_valid(X, y):
        return False
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_trials=5,
                                       max_skips=3)
    msg = ("RANSAC skipped more iterations than `max_skips`")
    assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    # max_skips + 1 skips are recorded before the abort triggers.
    assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_warn_exceed_max_skips():
    # Accept the first subsample, then reject every subsequent one so that
    # `max_skips` is exceeded after a consensus set already exists: RANSAC
    # must emit a UserWarning rather than raise.
    # Fix: the original used a module-level `global cause_skip`, leaking
    # mutable state between test runs; a closure cell keeps the flag local.
    cause_skip = [False]

    def is_data_valid(X, y):
        if not cause_skip[0]:
            cause_skip[0] = True
            return True
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_skips=3,
                                       max_trials=5)
    assert_warns(UserWarning, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    # max_skips + 1 rejected subsamples are counted before the warning.
    assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_sparse_coo():
    # COO sparse input must yield the same inlier mask as dense input.
    X_sparse = sparse.coo_matrix(X)
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)
    # Reference mask: everything is an inlier except the known outliers.
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
    # CSR sparse input must yield the same inlier mask as dense input.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csr_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csc():
    # CSC sparse input must yield the same inlier mask as dense input.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csc_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_none_estimator():
    # Passing None as the base estimator must fall back to a default that
    # predicts the same as an explicit LinearRegression.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
    ransac_estimator.fit(X, y)
    ransac_none_estimator.fit(X, y)
    assert_array_almost_equal(ransac_estimator.predict(X),
                              ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
    # min_samples given as an absolute count (2), as the equivalent
    # fraction (2/n), as a float count (2.0), or left at the default must
    # all produce identical predictions; negative, non-integral > 1, and
    # larger-than-n values (estimators 3, 4, 7) must raise at fit time.
    base_estimator = LinearRegression()
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator2 = RANSACRegressor(base_estimator,
                                        min_samples=2. / X.shape[0],
                                        residual_threshold=5, random_state=0)
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
                                        residual_threshold=5, random_state=0)
    ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
                                        residual_threshold=5, random_state=0)
    ransac_estimator6 = RANSACRegressor(base_estimator,
                                        residual_threshold=5, random_state=0)
    ransac_estimator7 = RANSACRegressor(base_estimator,
                                        min_samples=X.shape[0] + 1,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1.fit(X, y)
    ransac_estimator2.fit(X, y)
    ransac_estimator5.fit(X, y)
    ransac_estimator6.fit(X, y)
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator2.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator5.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator6.predict(X))
    assert_raises(ValueError, ransac_estimator3.fit, X, y)
    assert_raises(ValueError, ransac_estimator4.fit, X, y)
    assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    # 3-D target values
    # Stacking y three times turns this into a multi-output regression
    # problem with identical columns.
    yyy = np.column_stack([y, y, y])
    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, yyy)
    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
    # The deprecated residual_metric parameter must raise a
    # DeprecationWarning but still yield the same predictions as the
    # default residual computation.
    residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
    residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
    yyy = np.column_stack([y, y, y])
    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric2)
    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
    assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    # one-dimensional
    ransac_estimator0.fit(X, y)
    assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
def test_ransac_residual_loss():
    # Custom `loss` callables (per-sample multi-output and mono-output) and
    # the "squared_loss" string alias must all reproduce the default fit.
    loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
    loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
    loss_mono = lambda y_true, y_pred: np.abs(y_true - y_pred)
    yyy = np.column_stack([y, y, y])
    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi2)
    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    # one-dimensional
    ransac_estimator0.fit(X, y)
    ransac_estimator2.loss = loss_mono
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss="squared_loss")
    ransac_estimator3.fit(X, y)
    # Bug fix: the original re-asserted estimator2 here, so the
    # "squared_loss" string alias fitted above was never actually checked.
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
    # With residual_threshold omitted, the estimator's internal default
    # must still separate the known outliers from the consensus set.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             random_state=0)
    ransac.fit(X, y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_dynamic_max_trials():
    # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    #   Hartley, R.~I. and Zisserman, A., 2004,
    #   Multiple View Geometry in Computer Vision, Second Edition,
    #   Cambridge University Press, ISBN: 0521540518
    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    # e = 0%, min_samples = 10
    # Degenerate probabilities: 0 requires no trials, 1 requires infinitely
    # many.
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
    # stop_probability outside [0, 1] must be rejected at fit time.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=-0.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=1.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
    # Uniform sample weights must behave like no weights at all, and
    # integer weights must be equivalent to repeating each sample that
    # many times.
    ransac_estimator = RANSACRegressor(random_state=0)
    n_samples = y.shape[0]
    weights = np.ones(n_samples)
    ransac_estimator.fit(X, y, weights)
    # sanity check
    assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    # check that mask is correct
    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    X_ = random_state.randint(0, 200, [10, 1])
    y_ = np.ndarray.flatten(0.2 * X_ + 2)
    sample_weight = random_state.randint(0, 10, 10)
    outlier_X = random_state.randint(0, 1000, [1, 1])
    outlier_weight = random_state.randint(0, 10, 1)
    outlier_y = random_state.randint(-1000, 0, 1)
    # Materialize the weighted dataset by literal repetition.
    X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
                       np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
    y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
                                np.repeat(outlier_y, outlier_weight, axis=0),
                                axis=0))
    ransac_estimator.fit(X_flat, y_flat)
    ref_coef_ = ransac_estimator.estimator_.coef_
    sample_weight = np.append(sample_weight, outlier_weight)
    X_ = np.append(X_, outlier_X, axis=0)
    y_ = np.append(y_, outlier_y)
    ransac_estimator.fit(X_, y_, sample_weight)
    assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
    # check that if base_estimator.fit doesn't support
    # sample_weight, raises error
    base_estimator = Lasso()
    ransac_estimator = RANSACRegressor(base_estimator)
    assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| bsd-3-clause |
andaag/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
    # Test partial dependence for classifier
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)
    pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
    # only 4 grid points instead of 5 because only 4 unique X[:,0] vals
    assert pdp.shape == (1, 4)
    assert axes[0].shape[0] == 4
    # now with our own grid
    X_ = np.asarray(X)
    grid = np.unique(X_[:, 0])
    pdp_2, axes = partial_dependence(clf, [0], grid=grid)
    # With an explicit grid no axes are generated, but the values must
    # match the automatically computed ones.
    assert axes is None
    assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
    # Multi-class classifier: one partial-dependence row per class.
    resolution = 25
    model = GradientBoostingClassifier(n_estimators=10, random_state=1)
    model.fit(iris.data, iris.target)
    pdp, axes = partial_dependence(model, [0], X=iris.data,
                                   grid_resolution=resolution)
    assert pdp.shape == (model.n_classes_, resolution)
    assert len(axes) == 1
    assert axes[0].shape[0] == resolution
def test_partial_dependence_regressor():
    # Regressor output: a single partial-dependence row.
    resolution = 25
    model = GradientBoostingRegressor(n_estimators=10, random_state=1)
    model.fit(boston.data, boston.target)
    pdp, axes = partial_dependence(model, [0], X=boston.data,
                                   grid_resolution=resolution)
    assert pdp.shape == (1, resolution)
    assert axes[0].shape[0] == resolution
def test_partial_dependecy_input():
    # Test input validation of partial dependence.
    # NOTE(review): function name is missing a letter ("dependecy") -- left
    # unchanged since test discovery only relies on the test_ prefix.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)
    # grid and X may not both be omitted
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=None, X=None)
    # nor both provided
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=[0, 1], X=X)
    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, partial_dependence,
                  {}, [0], X=X)
    # Gradient boosting estimator must be fit
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)
    # Feature indices must lie within [0, n_features).
    assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
    assert_raises(ValueError, partial_dependence, clf, [100], X=X)
    # wrong ndim for grid
    grid = np.random.rand(10, 2, 1)
    assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
    # Test partial dependence plot function.
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)
    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)
    assert len(axs) == 3
    # Bug fix: Axes.has_data is a method; without the call the generator
    # yielded bound methods, which are always truthy, so the assertion
    # could never fail.
    assert all(ax.has_data() for ax in axs)
    # check with str features and array feature names
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)
    assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
    # check with list feature_names
    feature_names = boston.feature_names.tolist()
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=feature_names)
    assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
    # Test partial dependence plot function input checks.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    # not fitted yet
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [0])
    clf.fit(X, y)
    # feature matrix with zero columns is invalid
    assert_raises(ValueError, plot_partial_dependence,
                  clf, np.array(X)[:, :0], [0])
    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, plot_partial_dependence,
                  {}, X, [0])
    # must be larger than -1
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [-1])
    # too large feature value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [100])
    # str feature but no feature_names
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, ['foobar'])
    # not valid features value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
    # Test partial dependence plot function on multi-class input.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)
    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label=0,
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    # Bug fix: Axes.has_data is a method; the original omitted the call so
    # the all(...) check was vacuously true.
    assert all(ax.has_data() for ax in axs)
    # now with symbol labels
    target = iris.target_names[iris.target]
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, target)
    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label='setosa',
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
    # label not in gbrt.classes_
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1], label='foobar',
                  grid_resolution=grid_resolution)
    # label not provided
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1],
                  grid_resolution=grid_resolution)
| bsd-3-clause |
liangz0707/scikit-learn | examples/ensemble/plot_feature_transformation.py | 67 | 4285 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
                                                            y_train,
                                                            test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator)
rt_lm = LogisticRegression()
rt.fit(X_train, y_train)
rt_lm.fit(rt.transform(X_train_lr), y_train_lr)
y_pred_rt = rt_lm.predict_proba(rt.transform(X_test))[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
# rf.apply gives per-tree leaf indices; one-hot encode them as features.
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
# Supervised transformation based on gradient boosted trees
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
# grd.apply returns an extra trailing axis (sliced with [:, :, 0] below --
# presumably one entry per class/stage; confirm against the sklearn docs).
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
    grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
# Figure 1: full ROC curves for every pipeline.
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
# Figure 2: same curves zoomed into the low-FPR / high-TPR corner.
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
erikgrinaker/BOUT-dev | tools/pylib/post_bout/pb_present.py | 7 | 6244 | from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from .pb_draw import LinResDraw,subset
from .pb_corral import LinRes
from .pb_nonlinear import NLinResDraw
from pb_transport import Transport
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.artist as artist
import matplotlib.ticker as ticker
#from matplotlib.ticker import FuncFormatter
#from matplotlib.ticker import ScalarFormatter
from reportlab.platypus import *
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch
from reportlab.graphics.charts.linecharts import HorizontalLineChart
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.lineplots import LinePlot
from reportlab.graphics.widgets.markers import makeMarker
from reportlab.lib import colors
from replab_x_vs_y import RL_Plot
#for movie making
from multiprocessing import Queue,Pool
import multiprocessing
import subprocess
#uses LinResDraw to make a pdf
class LinResPresent(LinResDraw,NLinResDraw,Transport):
    """Aggregate presenter combining the linear, nonlinear and transport
    drawing mix-ins; renders a multi-page PDF summary of a BOUT run."""
    def __init__(self,alldb):
        # Each base class keeps its own reference to the shared database.
        LinResDraw.__init__(self,alldb)
        NLinResDraw.__init__(self,alldb)
        Transport.__init__(self,alldb)
    def show(self,filter =True,quick=False,pdfname='output2.pdf',debug=False,spectrum_movie=False):
        """Render the summary PDF (written to 'output.pdf').

        `debug` writes a short debug.pdf and returns early; `quick` stops
        after the per-mode plots; `spectrum_movie` additionally renders a
        movie of the spectrum.  Returns 0 on every exit path.
        NOTE(review): the `filter` argument (which shadows the builtin) is
        never used in this body -- presumably a leftover; confirm before
        removing.
        """
        colors = ['b','g','r','c','m','y','k','b','g','r','c','m','y','k']
        pp = PdfPages('output.pdf')
        #start by removing modes above the maxN threshold
        modelist =[]
        [modelist.append(list(self.modeid[p])) for p in range(self.nmodes) if self.mn[p][1] <= self.maxN[p] ]
        s = subset(self.db,'modeid',modelist)
        try:
            #fig = Figure(figsize=(6,6))
            # fig = plt.figure()
            dz0 = list(set(s.dz).union())[0]
            ss = subset(s.db,'dz',[dz0])
            # show initial condition and the first step after
            s.plotvsK(pp,yscale='log',xscale='log',t=[0,1,-1],overplot=False,comp='amp',trans=True)
            if spectrum_movie:
                ss.savemovie()
        except:
            print('no scatter')
        #2D true NM spectrum with color code and boxes around spectral res regions log scale
        plt.figure()
        i = 0
        for j in list(set(s.dz).union()): #looping over runs, over unique 'dz' key values
            ss = subset(s.db,'dz',[j]) #subset where dz = j
            plt.scatter(ss.MN[:,1],ss.MN[:,0],c=colors[i])
            plt.annotate(str(j),(ss.MN[0,1],ss.MN[0,0]))
            i+=1
        plt.title(' Ni spectrum at t=0, all x')
        plt.ylabel('M -parallel')
        plt.xlabel('N - axisymmteric')
        plt.xscale('log')
        plt.grid(True,linestyle='-',color='.75')
        try:
            plt.savefig(pp, format='pdf')
        except:
            print('FAILED TO save 1st part')
        plt.close()
        # for elem in self.meta['evolved']['v']:
        # s.plotnl(pp
        if self.meta['nonlinear']['v'] == 'true':
            self.plotnlrhs(pp)
        if self.meta['transport'] == 'true':
            self.plotnlrms(pp)
        # Per-field amplitude, phase and growth-rate plots.
        for elem in self.meta['evolved']:
            s.plotmodes(pp,yscale='symlog',comp='phase',linestyle='.',field=elem,summary=False)
            s.plotmodes(pp,yscale='symlog',field=elem,summary=False)
            print(elem)
            try:
                s.plotmodes(pp,yscale='symlog',field=elem,comp='gamma_i',summary=False)
            except:
                print('gamma_i plot for '+elem+' failed')
        #s.plotmodes(pp,yscale='symlog',summary=False)
        modelist = []
        # maxZ =
        #[modelist.append([1,p+1]) for p in range(maxZ-1)]
        [modelist.append(list(self.modeid[p])) for p in range(self.nmodes) if self.mn[p][1] <= self.maxN[p] ]
        ss = subset(s.db,'mn',modelist)
        if debug: #just a few problematic slides
            fig1 = plt.figure()
            pp_bug = PdfPages('debug.pdf')
            #ss.plotmodes(pp_bug,yscale='symlog',comp='phase',summary=False)
            s.plotfreq2(pp_bug,xscale='log',yscale='symlog',overplot=True)
            ss.plotgamma(pp_bug,xscale='log',yscale='symlog',overplot=True)
            ss.plottheory(pp_bug)
            ss.plottheory(pp_bug,comp='freq')
            fig1.savefig(pp_bug, format='pdf')
            pp_bug.close()
            pp.close()
            return 0
        dir(ss)
        ss.plotmodes(pp,yscale='log',debug=True,summary=False)
        ss.plotmodes(pp,yscale='symlog',comp='phase',summary=False)
        ss.plotmodes(pp,yscale='symlog',comp='phase',field='rho',summary=False)
        print(dir(ss))
        #ss.plotmodes(pp,yscale='log',comp='phase',clip=True)
        #ss.plotfreq2(pp,xscale='log',yscale='linear',overplot=False)
        for elem in self.meta['evolved']:
            ss.plotfreq2(pp,xscale='log',yscale='symlog',field=elem,
                         overplot=True,trans=True)
            #ss.plotfreq2(pp,xscale='log',yscale='symlog',field='rho',overplot=True)
        if quick==True:
            pp.close()
            s.printmeta(pp)
            #plt.savefig(pp, format='pdf')
            return 0
        all_fields = list(set(s.field).union())
        s.plotgamma(pp,xscale='log',yscale='linear',overplot=True,trans=True)
        s.plotgamma(pp,yscale='symlog',xscale='log',overplot=True)
        s.plotgamma(pp,yscale='symlog',xscale='log',field='rho',overplot=True)
        try:
            s.plotfreq2(pp,xscale='log',yscale='linear',overplot=True)
            #s.plotfreq2(pp,xscale='log',yscale='symlog',overplot=False)
            s.plotfreq2(pp,xscale='log',yscale='symlog',field='rho',overplot=True)
            #s.plotfreq2(pp,xscale='log',yscale='linear')
        except:
            print('something terrible')
        s.plotradeigen(pp,yscale='linear')
        #s.plotradeigen(pp,field ='Vi',yscale='linear')
        s.plotradeigen(pp,field='rho',yscale='log')
        pp.close()
        s.printmeta(pp,filename=pdfname) #append a metadata header
| gpl-3.0 |
sdsc/xsede_stats | tacc_stats/analysis/plot/plots.py | 1 | 6375 | # Plot generation tools for job analysis
from __future__ import print_function
import os
import abc
import numpy,traceback
import multiprocessing
from scipy.stats import scoreatpercentile as score
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_pdf import FigureCanvasPdf
from tacc_stats.analysis.gen import tspl,tspl_utils,my_utils
## Multiprocessing Unwrapper
#
# Multiprocessor module cannot work with class objects.
# This unwrapper accepts a Plot class and extracts the
# class method plot.
def unwrap(arg):
    """Call ``plot`` on a ``(Plot instance, jobid, kwargs)`` triple.

    ``multiprocessing.Pool.map`` cannot dispatch to bound methods directly,
    so workers receive this module-level shim instead.  Returns whatever
    ``plot`` returns, or None if it raised.
    """
    try:
        plotter, jobid, kwargs = arg
        return plotter.plot(jobid, **kwargs)
    except Exception:
        # Fix: the previous bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt inside pool workers.  Log the traceback so the
        # failure is visible; the pool receives None for this job.
        print(traceback.format_exc())
        return None
## Plot Class
#
# This is an abstract base class for plotting.
class Plot(object):
__metaclass__ = abc.ABCMeta
fig = Figure()
ts=None
## Default constructor
def __init__(self,processes=1,**kwargs):
self.processes=processes
self.mode=kwargs.get('mode','lines')
self.threshold=kwargs.get('threshold',None)
self.outdir=kwargs.get('outdir','.')
self.prefix=kwargs.get('prefix','')
self.header=kwargs.get('header',None)
self.wide=kwargs.get('wide',False)
self.save=kwargs.get('save',False)
self.aggregate=kwargs.get('aggregate',True)
def setup(self,jobid,job_data=None):
try:
if self.aggregate:
self.ts=tspl.TSPLSum(jobid,self.k1,self.k2,job_data=job_data)
else:
self.ts=tspl.TSPLBase(jobid,self.k1,self.k2,job_data=job_data)
except tspl.TSPLException as e:
return False
except EOFError as e:
print('End of file found reading: ' + jobid)
return False
return True
## Plot the list of files using multiprocessing
def run(self,filelist,**kwargs):
if not filelist: return
# Cache the Lariat Data Dict
self.setup(filelist[0])
self.setup(filelist[-1])
pool=multiprocessing.Pool(processes=self.processes)
pool.map(unwrap,zip([self]*len(filelist),filelist,[kwargs]*len(filelist)))
## Set the x and y axis labels for a plot
def setlabels(self,ax,index,xlabel,ylabel,yscale):
if xlabel != '':
ax.set_xlabel(xlabel)
if ylabel != '':
ax.set_ylabel(ylabel)
else:
ax.set_ylabel('Total '+self.ts.label(self.ts.k1[index[0]],
self.ts.k2[index[0]],yscale)+'/s' )
# Plots lines for each host
def plot_lines(self,ax,index,xscale=1.0,yscale=1.0,xlabel='',ylabel='',
do_rate=True):
ax.hold=True
for k in self.ts.j.hosts.keys():
v=self.ts.assemble(index,k,0)
if do_rate:
val=numpy.divide(numpy.diff(v),numpy.diff(self.ts.t))
else:
val=(v[:-1]+v[1:])/(2.0)
ax.step(self.ts.t/xscale,numpy.append(val,[val[-1]])/yscale,where="post")
tspl_utils.adjust_yaxis_range(ax,0.1)
ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(nbins=6))
self.setlabels(ax,index,xlabel,ylabel,yscale)
ax.set_xlim([0,self.ts.t[-1]/3600.])
# Plots "time histograms" for every host
# This code is likely inefficient
def plot_thist(self,ax,index,xscale=1.0,yscale=1.0,xlabel='',ylabel='',
do_rate=False):
d=[]
for k in self.ts.j.hosts.keys():
v=self.ts.assemble(index,k,0)
if do_rate:
d.append(numpy.divide(numpy.diff(v),numpy.diff(self.ts.t)))
else:
d.append((v[:-1]+v[1:])/2.0)
a=numpy.array(d)
h=[]
mn=numpy.min(a)
mn=min(0.,mn)
mx=numpy.max(a)
n=float(len(self.ts.j.hosts.keys()))
for i in range(len(self.ts.t)-1):
hist=numpy.histogram(a[:,i],30,(mn,mx))
h.append(hist[0])
h2=numpy.transpose(numpy.array(h))
ax.pcolor(self.ts.t/xscale,hist[1]/yscale,h2,
edgecolors='none',rasterized=True,cmap='spectral')
self.setlabels(ax,self.ts,index,xlabel,ylabel,yscale)
ax.autoscale(tight=True)
def plot_mmm(self,ax,index,xscale=1.0,yscale=1.0,xlabel='',ylabel='',
do_rate=False):
tmid=(self.ts.t[:-1]+self.ts.t[1:])/2.0
d=[]
for k in self.ts.j.hosts.keys():
v=self.ts.assemble(index,k,0)
if do_rate:
d.append(numpy.divide(numpy.diff(v),numpy.diff(self.ts.t)))
else:
d.append((v[:-1]+v[1:])/2.0)
a=numpy.array(d)
mn=[]
p25=[]
p50=[]
p75=[]
mx=[]
for i in range(len(self.ts.t)-1):
mn.append(min(a[:,i]))
p25.append(score(a[:,i],25))
p50.append(score(a[:,i],50))
p75.append(score(a[:,i],75))
mx.append(max(a[:,i]))
mn=numpy.array(mn)
p25=numpy.array(p25)
p50=numpy.array(p50)
p75=numpy.array(p75)
mx=numpy.array(mx)
ax.hold=True
ax.plot(tmid/xscale,mn/yscale,'--')
ax.plot(tmid/xscale,p25/yscale)
ax.plot(tmid/xscale,p50/yscale)
ax.plot(tmid/xscale,p75/yscale)
ax.plot(tmid/xscale,mx/yscale,'--')
self.setlabels(ax,index,xlabel,ylabel,yscale)
ax.yaxis.set_major_locator( matplotlib.ticker.MaxNLocator(nbins=4))
tspl_utils.adjust_yaxis_range(ax,0.1)
def output(self, file_suffix):
    """Finalize titles/annotations and save the figure to self.outdir.

    The output filename is assembled from the plot prefix, job id and
    owner; `file_suffix` distinguishes the metric being plotted, and the
    display mode ('hist'/'percentile') appends a further tag.  Uses the
    Agg canvas for raster output and the Pdf canvas when self.save is set.
    """
    if self.wide:
        # Wide layout: header and title are drawn as free text on the left
        # of the canvas instead of a suptitle.
        left_text = self.header + '\n' + self.ts.title
        text_len = len(left_text.split('\n'))
        fontsize = self.ax.yaxis.label.get_size()
        linespacing = 1.2
        fontrate = float(fontsize * linespacing) / 72. / 15.5
        # this doesn't quite work. fontrate is too small by a small amount
        yloc = .8 - fontrate * (text_len - 1)
        self.fig.text(.05, yloc, left_text, linespacing=linespacing)
        self.fname = '_'.join([self.prefix, self.ts.j.id, self.ts.owner,
                               'wide_' + file_suffix])
    elif self.header is not None:  # FIX: identity comparison to None (was != None)
        title = self.header + '\n' + self.ts.title
        if self.threshold:
            title += ', V: %(v)-6.1f' % {'v': self.threshold}
        self.fig.suptitle(title)
        self.fname = '_'.join([self.prefix, self.ts.j.id, self.ts.owner,
                               file_suffix])
    else:
        self.fname = '_'.join([self.prefix, self.ts.j.id, self.ts.owner,
                               file_suffix])
    if self.mode == 'hist':
        self.fname += '_hist'
    elif self.mode == 'percentile':
        self.fname += '_perc'
    # Pick the canvas backend according to the requested output format.
    if not self.save:
        self.canvas = FigureCanvasAgg(self.fig)
    else:
        self.canvas = FigureCanvasPdf(self.fig)
    self.fig.savefig(os.path.join(self.outdir, self.fname))
@abc.abstractmethod
def plot(self, jobid, job_data=None):
    """Run the test for a single job.

    Subclasses must implement this to generate the plot for `jobid`;
    `job_data` optionally supplies pre-loaded job data.
    """
    return
| lgpl-2.1 |
Ttl/scikit-rf | skrf/__init__.py | 2 | 1988 | '''
skrf is an object-oriented approach to microwave engineering,
implemented in Python.
'''
# Python 3 compatibility
from __future__ import absolute_import, print_function, division
from six.moves import xrange
__version__ = '0.14.5'
## Import all module names for coherent reference of name-space
#import io
from . import frequency
from . import network
from . import networkSet
from . import media
from . import calibration
# from . import plotting
from . import mathFunctions
from . import tlineFunctions
from . import taper
from . import constants
from . import util
from . import io
from . import instances
# Import contents into current namespace for ease of calling
from .frequency import *
from .network import *
from .networkSet import *
from .calibration import *
from .util import *
# from .plotting import *
from .mathFunctions import *
from .tlineFunctions import *
from .io import *
from .constants import *
from .taper import *
from .instances import *
# Optional instrument-control support: pyvisa may not be installed, in
# which case the vi module is simply unavailable.
try:
    import vi
    from vi import *
except(ImportError):
    pass

# Optional bundled example data: loading can fail when some pickles do not
# unpickle under the running Python; that is harmless, so ignore it.
try:
    from . import data
except:
    pass

## built-in imports
from copy import deepcopy as copy

## Shorthand Names (convenience aliases for the most-used classes)
F = Frequency
N = Network
NS = NetworkSet
lat = load_all_touchstones
# saf = save_all_figs
# saf/stylely are placeholders here; setup_pylab() rebinds them to the
# real plotting helpers when matplotlib is available.
saf = None
stylely = None
def setup_pylab():
    # Bind the matplotlib-backed plotting helpers into the package
    # namespace.  Imported lazily so that skrf remains importable without
    # a working matplotlib installation.
    from . import plotting
    plotting.setup_matplotlib_plotting()
    global saf, stylely
    saf = plotting.save_all_figs
    stylely = plotting.stylely
def setup_plotting():
    """Initialise the plotting layer according to SKRF_PLOT_ENV.

    Recognised values (case-insensitive):
      * "pylab"            -- plain matplotlib setup (the default)
      * "pylab-skrf-style" -- matplotlib setup plus the skrf style
    Any other value leaves plotting uninitialised.
    """
    # BUG FIX: `os` was referenced here but never imported in this module,
    # so this function raised NameError on every call.
    import os
    plotting_environment = os.environ.get('SKRF_PLOT_ENV', "pylab").lower()
    if plotting_environment == "pylab":
        setup_pylab()
    elif plotting_environment == "pylab-skrf-style":
        setup_pylab()
        stylely()
    # elif some different plotting environment
    # set that up


setup_plotting()
| bsd-3-clause |
JoonasMelin/WeatherStation | station.py | 1 | 14760 | #!/usr/bin/python
# include RPi libraries in to Python code
import plotly.plotly as py
import plotly.graph_objs as go
import json
import datetime
import RPi.GPIO as GPIO
import time
import smbus
import time
from ctypes import c_short
import sys
import Adafruit_DHT
import cPickle as pickle
from collections import deque
import time
import threading
from functools import wraps
import numpy as np
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import scipy
from scipy import signal
DEVICE = 0x77  # Default I2C address of the pressure sensor (used by readBmp180)
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1)  # Rev 2 Pi uses 1
# Use physical board pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BOARD)
# Input pins (physical numbering): switch input with pull-up, humidity
# sensor data line, light sensor input.
pull_up_pin = 7
humidity_pin = 15
light_pin = 16
class RingBuffer(object):
    """Fixed-capacity numeric buffer; index 0 always holds the newest sample.

    Storage is a pre-allocated numpy array that is rotated on every append,
    so ``_data[0]`` is the most recent value and older values follow in
    order.  Once the buffer fills up, the instance morphs into
    :class:`RingBufferFull`, whose ``append`` skips the fill bookkeeping.
    """

    def __init__(self, size_max, default_value=0.0, dtype=np.float16):
        """Allocate ``size_max`` slots pre-filled with ``default_value``."""
        self.size_max = size_max
        self._data = np.full(size_max, default_value, dtype=dtype)
        self.size = 0

    def append(self, value):
        """Insert ``value`` as the newest element."""
        self._data = np.roll(self._data, 1)
        self._data[0] = value
        self.size += 1
        if self.size == self.size_max:
            # Buffer is now full: switch to the variant whose append no
            # longer tracks the fill level.
            self.__class__ = RingBufferFull

    def get_all(self):
        """Return the raw backing array (newest element first)."""
        return self._data

    def get_partial(self):
        """Return only the slots written so far (newest first)."""
        return self.get_all()[:self.size]

    def get_up_to(self, max_elems):
        """Return at most ``max_elems`` of the written slots (newest first)."""
        count = min(max_elems, self.size)
        return self.get_all()[:count]

    def __getitem__(self, key):
        return self._data[key]

    def __repr__(self):
        parts = [
            self._data.__repr__(),
            str(self.size),
            self.get_all()[::-1].__repr__(),
            self.get_partial()[::-1].__repr__(),
        ]
        return '\t'.join(parts)
class RingBufferFull(RingBuffer):
    """A RingBuffer that has reached capacity: appends simply rotate."""

    def append(self, value):
        """Insert ``value`` as the newest element, dropping the oldest."""
        self._data = np.roll(self._data, 1)
        self._data[0] = value
# Configure the GPIO inputs used by the station and give the attached
# sensors a moment to settle before the first reading.
def setup():
    GPIO.setup(pull_up_pin, GPIO.IN, pull_up_down = GPIO.PUD_UP)  # input with internal pull-up
    GPIO.setup(humidity_pin, GPIO.IN)  # humidity sensor data line
    GPIO.setup(light_pin, GPIO.IN)     # light sensor input
    time.sleep(2)  # settle time before the first sample
def read_temperature_from(sensor_id):
    """Read one 1-wire temperature sensor exposed by the kernel w1 driver.

    Parameters
    ----------
    sensor_id : str
        Device directory name under /sys/bus/w1/devices (e.g. "28-...").

    Returns
    -------
    float
        Temperature in degrees Celsius, or 0 if the sensor could not be
        read (unchanged fallback so existing callers keep working).
    """
    temperature = 0
    try:
        # The driver exposes the reading as text; the second line ends
        # with "t=<millidegrees>".  `with` guarantees the file handle is
        # closed even if parsing below raises (the original leaked it).
        with open("/sys/bus/w1/devices/%s/w1_slave" % sensor_id) as tfile:
            text = tfile.read()
        secondline = text.split("\n")[1]
        temperaturedata = secondline.split(" ")[9]
        # Strip the leading "t=" and convert millidegrees to degrees.
        temperature = float(temperaturedata[2:]) / 1000
    except Exception:
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors.
        print("Could not read sensor:", sys.exc_info()[0])
    return temperature
def convertToString(data):
    """Combine a two-byte reading (high byte first), scale it by 1/1.2
    and return the result as a string."""
    raw = data[1] + 256 * data[0]
    return str(raw / 1.2)
def getShort(data, index):
    """Return data[index] (high byte) and data[index+1] (low byte)
    combined into a signed 16-bit value."""
    raw = (data[index] << 8) + data[index + 1]
    return c_short(raw).value
def getUshort(data, index):
    """Return data[index] (high byte) and data[index+1] (low byte)
    combined into an unsigned 16-bit value."""
    return data[index] * 256 + data[index + 1]
def readBmp180Id(addr=DEVICE):
    # The chip id and version live in two consecutive bytes starting at
    # register 0xD0; returned as a (chip_id, chip_version) tuple.
    # Register Address
    REG_ID = 0xD0
    (chip_id, chip_version) = bus.read_i2c_block_data(addr, REG_ID, 2)
    return (chip_id, chip_version)
def readBmp180(addr=DEVICE):
    """Read one temperature/pressure sample from the sensor at `addr`.

    Returns a tuple (temperature in degrees C, pressure in hPa/mbar).
    The integer compensation arithmetic below follows the vendor
    datasheet; the statement order and bit shifts are significant and
    must not be reordered.
    """
    # Register Addresses
    REG_CALIB = 0xAA
    REG_MEAS = 0xF4
    REG_MSB = 0xF6
    REG_LSB = 0xF7
    # Control Register Address
    CRV_TEMP = 0x2E
    CRV_PRES = 0x34
    # Oversample setting
    OVERSAMPLE = 3 # 0 - 3
    # Read calibration data
    # Read calibration data from EEPROM
    cal = bus.read_i2c_block_data(addr, REG_CALIB, 22)
    # Convert byte data to word values
    AC1 = getShort(cal, 0)
    AC2 = getShort(cal, 2)
    AC3 = getShort(cal, 4)
    AC4 = getUshort(cal, 6)
    AC5 = getUshort(cal, 8)
    AC6 = getUshort(cal, 10)
    B1 = getShort(cal, 12)
    B2 = getShort(cal, 14)
    MB = getShort(cal, 16)
    MC = getShort(cal, 18)
    MD = getShort(cal, 20)
    # Read temperature: trigger a conversion, wait for it, read raw value
    bus.write_byte_data(addr, REG_MEAS, CRV_TEMP)
    time.sleep(0.005)
    (msb, lsb) = bus.read_i2c_block_data(addr, REG_MSB, 2)
    UT = (msb << 8) + lsb
    # Read pressure: oversampled conversion takes longer, hence the wait
    bus.write_byte_data(addr, REG_MEAS, CRV_PRES + (OVERSAMPLE << 6))
    time.sleep(0.04)
    (msb, lsb, xsb) = bus.read_i2c_block_data(addr, REG_MSB, 3)
    UP = ((msb << 16) + (lsb << 8) + xsb) >> (8 - OVERSAMPLE)
    # Refine temperature
    X1 = ((UT - AC6) * AC5) >> 15
    # NOTE(review): '/' is integer division on Python 2 (which the
    # datasheet algorithm assumes) but float division on Python 3 —
    # verify the target interpreter before porting.
    X2 = (MC << 11) / (X1 + MD)
    B5 = X1 + X2
    temperature = (B5 + 8) >> 4
    # Refine pressure
    B6 = B5 - 4000
    B62 = B6 * B6 >> 12
    X1 = (B2 * B62) >> 11
    X2 = AC2 * B6 >> 11
    X3 = X1 + X2
    B3 = (((AC1 * 4 + X3) << OVERSAMPLE) + 2) >> 2
    X1 = AC3 * B6 >> 13
    X2 = (B1 * B62) >> 16
    X3 = ((X1 + X2) + 2) >> 2
    B4 = (AC4 * (X3 + 32768)) >> 15
    B7 = (UP - B3) * (50000 >> OVERSAMPLE)
    # NOTE(review): on Python 3 this division makes P a float and the
    # subsequent `P >> 8` would raise TypeError — further evidence this
    # file targets Python 2.
    P = (B7 * 2) / B4
    X1 = (P >> 8) * (P >> 8)
    X1 = (X1 * 3038) >> 16
    X2 = (-7357 * P) >> 16
    pressure = P + ((X1 + X2 + 3791) >> 4)
    # Raw units are 0.1 degC and Pa; scale to degC and hPa.
    return (temperature/10.0,pressure/ 100.0)
def make_stream(stamps, y_data, name, token, max_data_points):
    """Create (or update) a plotly scatter plot bound to a streaming token.

    The existing history in `stamps`/`y_data` is plotted immediately so the
    stream starts with context rather than an empty graph.  Returns the URL
    of the hosted plot.
    """
    print(token)
    # Plot all the history data as well
    trace = {
        'x': stamps,
        'y': y_data,
        'type': 'scatter',
        'stream': {
            'token': token,
            'maxpoints': max_data_points,
        },
    }
    url = py.plot([trace], filename=name, auto_open=False)
    print("View your streaming graph here: %s "% url)
    print("\n\n")
    return url
def open_streams(plotly_user_config, names, data, max_data_points):
    """Sign in to plotly, publish each metric's history and return the
    opened live-stream handles (one per configured token, in order)."""
    print("Attempting to open the streams to plotly")
    sys.stdout.flush()
    py.sign_in(plotly_user_config["plotly_username"], plotly_user_config["plotly_api_key"])
    stamps = list(data['stamps'])
    # Truncate so the timestamp list and the value buffers agree in length.
    data_len = data['temp1'].get_up_to(max_data_points).size
    if len(stamps) < data_len:
        data_len = len(stamps)
    stamps = stamps[0:data_len]
    tokens = plotly_user_config['plotly_streaming_tokens']
    #print(list(data['temp1'].get_partial()))
    #sys.stdout.flush()
    # One hosted plot per metric, each bound to its own streaming token.
    url_temp1 = make_stream(stamps, list(data['temp1'].get_up_to(data_len)), names[0], tokens[0], max_data_points)
    url_temp2 = make_stream(stamps, list(data['temp2'].get_up_to(data_len)), names[1], tokens[1], max_data_points)
    url_temp3 = make_stream(stamps, list(data['temp3'].get_up_to(data_len)), names[2], tokens[2], max_data_points)
    url_humidity = make_stream(stamps, list(data['humidity'].get_up_to(data_len)), names[3], tokens[3], max_data_points)
    url_pressure = make_stream(stamps, list(data['pressure'].get_up_to(data_len)), names[4], tokens[4], max_data_points)
    # Open a live stream for every token; the caller writes samples to
    # these handles in the same order as the tokens.
    stream_list = []
    for token in plotly_user_config['plotly_streaming_tokens']:
        cur_stream = py.Stream(token)
        cur_stream.open()
        stream_list.append(cur_stream)
    return stream_list
def print_data_to_html(data):
    """Render the collected sensor history to /var/www/html/data.png.

    Draws five panels: the two probe temperatures, the case temperature,
    humidity, pressure, and a humidity-vs-pressure scatter.  All time
    series are median-filtered to suppress single-sample glitches.
    """
    stamps = list(data['stamps'])
    time_ax = dates.date2num(stamps)
    hfmt = dates.DateFormatter('%H:%M - %m/%d')
    filt_l = 9  # median filter window (samples); must be odd

    def _time_panel(position, title, series, styles):
        # One time-series subplot: hourly ticks, rotated date labels,
        # median-filtered traces.  Factored out of four near-identical
        # copies of the same plotting code.
        ax = plt.subplot(position)
        ax.set_title(title)
        ax.xaxis.set_major_locator(dates.HourLocator())
        ax.xaxis.set_major_formatter(hfmt)
        plt.xticks(rotation='vertical')
        args = []
        for values, style in zip(series, styles):
            args.extend([time_ax, scipy.signal.medfilt(values, filt_l), style])
        plt.plot(*args)

    plt.figure(figsize=(15, 15), dpi=100)
    _time_panel(511,
                "Temperature 1 and 2, updated at %s" % datetime.datetime.now(),
                [data['temp1'].get_partial(), data['temp2'].get_partial()],
                ['b-', 'r-'])
    _time_panel(512, "Case temperature", [data['temp3'].get_partial()], ['b-'])
    _time_panel(513, "Humidity", [data['humidity'].get_partial()], ['b-'])
    _time_panel(514, "Pressure", [data['pressure'].get_partial()], ['b-'])
    # Last panel is a scatter, not a time series.
    ax5 = plt.subplot(515)
    ax5.set_title("Humidity vs pressure")
    plt.xticks(rotation='vertical')
    plt.plot(data['humidity'].get_partial(), data['pressure'].get_partial(), 'rx')
    plt.savefig("/var/www/html/data.png")
    # Release the figure so the long-running process does not leak memory.
    plt.clf()
    plt.close()
def main():
    """Entry point: sample all sensors in an endless loop, stream the
    readings to plotly, render a local PNG dashboard and periodically
    checkpoint the history to disk so restarts do not lose data."""
    print("Setting up the sensors")
    setup()
    print("Starting the weatherstation")
    data_dump_file = "/home/pi/data.dump"
    #max_data_points = 15000000 # Roughly 4 samples/min to keep a years worth of data
    #max_data_points = 40000
    max_data_points = 100000
    max_data_points_plot = 10000
    resolution_secs = 5
    with open('/home/pi/station/.config.json') as config_file:
        plotly_user_config = json.load(config_file)
    # Loading the data from the disk if it exists
    initialise_data = True
    if os.path.isfile(data_dump_file):
        print("Loading the data from the disk dump")
        try:
            with open(data_dump_file, 'rb') as input:
                data = pickle.load(input)
            # Only reuse the dump if it was written with the same capacity.
            if data['maxlen'] == max_data_points:
                print("Data has the same amount of data points, not initializing")
                initialise_data=False
        # NOTE(review): bare except silently discards the history on any
        # error, including KeyboardInterrupt.
        except:
            print("Loading data failed, re-initialising")
            initialise_data=True
    if initialise_data:
        print("Re-initializing the data")
        data = {}
        data['stamps'] = deque([], maxlen=max_data_points)
        data['temp1'] = RingBuffer(size_max=max_data_points)
        data['temp2'] = RingBuffer(size_max=max_data_points)
        data['temp3'] = RingBuffer(size_max=max_data_points)
        data['humidity'] = RingBuffer(size_max=max_data_points)
        data['pressure'] = RingBuffer(size_max=max_data_points)
        data['maxlen'] = max_data_points
    # Plot names, in the same order as the configured streaming tokens.
    names = ['Temperature probe 1(F)', 'Temperature probe 2(F)', 'Temperature case(F)', 'Humidity(%)', 'Pressure(mbar)', 'Pressure vs humidity']
    streams = []
    successfully_opened = False
    last_call = datetime.datetime.fromtimestamp(0)
    last_save_call = datetime.datetime.now()
    last_read_call = datetime.datetime.fromtimestamp(0)
    while True:
        # Checking if we should dump the data to disk
        duration_since_last_read = datetime.datetime.now() - last_read_call
        # Throttling the data read rate
        while duration_since_last_read.total_seconds() < resolution_secs:
            duration_since_last_read = datetime.datetime.now() - last_read_call
            time.sleep(0.5)
        last_read_call = datetime.datetime.now()
        # Sample all sensors: two 1-wire probes, the I2C pressure sensor
        # (which also reports its own temperature) and the DHT sensor.
        temp_1 = read_temperature_from("28-041663688cff")
        temp_2 = read_temperature_from("28-0316643ddcff")
        #(chip_id, chip_version) = readBmp180Id()
        (temperature_pres,pressure)=readBmp180()
        humidity, temperature = Adafruit_DHT.read_retry(11, 22)
        print("Temp1: %s, Temp2: %s, Temp pressure sens: %s, Humidity: %s, Pressure: %s"% (temp_1, temp_2, temperature_pres, humidity, pressure))
        # Saving all the data to the queue
        data['stamps'].append(datetime.datetime.now())
        data['temp1'].append(temp_1)
        data['temp2'].append(temp_2)
        data['temp3'].append(temperature)
        data['humidity'].append(humidity)
        data['pressure'].append(pressure)
        if not successfully_opened:
            # Ratelimiting the calls to the service
            duration_since_last = datetime.datetime.now() - last_call
            if duration_since_last.total_seconds() < 60*60:
                print("Not requesting the streams yet")
            else:
                print("%s seconds has elapsed, trying the streams again"%duration_since_last.total_seconds())
                last_call = datetime.datetime.now()
                try:
                    streams = open_streams(plotly_user_config, names, data, max_data_points_plot)
                    successfully_opened = True
                except:
                    print("Could not open the streams:", sys.exc_info()[0])
        else:
            try:
                print("Writing the info to the streams")
                streams[0].write({'x': datetime.datetime.now(), 'y': temp_1})
                streams[1].write({'x': datetime.datetime.now(), 'y': temp_2})
                streams[2].write({'x': datetime.datetime.now(), 'y': temperature})
                streams[3].write({'x': datetime.datetime.now(), 'y': humidity})
                streams[4].write({'x': datetime.datetime.now(), 'y': pressure})
                streams[5].write({'x': temp_1, 'y': humidity})
            except:
                # Any write failure triggers a (rate-limited) reconnect on
                # the next iteration.
                print("Could not print to streams:", sys.exc_info()[0])
                successfully_opened = False
        # Checking if we should dump the data to disk
        duration_since_last_save = datetime.datetime.now() - last_save_call
        if duration_since_last_save.total_seconds() < 60:
            print("Not saving the data yet")
        else:
            print("Saving the data to disk")
            print_data_to_html(data)
            last_save_call = datetime.datetime.now()
            # NOTE(review): text mode ('w+') for a binary pickle only works
            # on Python 2; Python 3 would need 'wb'.
            with open(data_dump_file, 'w+') as output:
                pickle.dump(data, output, pickle.HIGHEST_PROTOCOL)
        time.sleep(0.5)
if __name__=="__main__":
main()
| gpl-3.0 |
NeuralEnsemble/elephant | elephant/sta.py | 2 | 13537 | # -*- coding: utf-8 -*-
"""
Functions to calculate spike-triggered average and spike-field coherence of
analog signals.
.. autosummary::
:toctree: _toctree/sta
spike_triggered_average
spike_field_coherence
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import warnings
import numpy as np
import quantities as pq
import scipy.signal
from neo.core import AnalogSignal, SpikeTrain
from .conversion import BinnedSpikeTrain
__all__ = [
"spike_triggered_average",
"spike_field_coherence"
]
def spike_triggered_average(signal, spiketrains, window):
    """
    Calculates the spike-triggered averages of analog signals in a time window
    relative to the spike times of a corresponding spiketrain for multiple
    signals each. The function receives n analog signals and either one or
    n spiketrains. In case it is one spiketrain this one is muliplied n-fold
    and used for each of the n analog signals.

    Parameters
    ----------
    signal : neo AnalogSignal object
        'signal' contains n analog signals.
    spiketrains : one SpikeTrain or one numpy ndarray or a list of n of either of these.
        'spiketrains' contains the times of the spikes in the spiketrains.
    window : tuple of 2 Quantity objects with dimensions of time.
        'window' is the start time and the stop time, relative to a spike, of
        the time interval for signal averaging.
        If the window size is not a multiple of the sampling interval of the
        signal the window will be extended to the next multiple.

    Returns
    -------
    result_sta : neo AnalogSignal object
        'result_sta' contains the spike-triggered averages of each of the
        analog signals with respect to the spikes in the corresponding
        spiketrains. The length of 'result_sta' is calculated as the number
        of bins from the given start and stop time of the averaging interval
        and the sampling rate of the analog signal. If for an analog signal
        no spike was either given or all given spikes had to be ignored
        because of a too large averaging interval, the corresponding returned
        analog signal has all entries as nan. The number of used spikes and
        unused spikes for each analog signal are returned as annotations to
        the returned AnalogSignal object.

    Examples
    --------
    >>> signal = neo.AnalogSignal(np.array([signal1, signal2]).T, units='mV',
    ...                           sampling_rate=10/ms)
    >>> stavg = spike_triggered_average(signal, [spiketrain1, spiketrain2],
    ...                                 (-5 * ms, 10 * ms))
    """
    # checking compatibility of data and data types
    # window_starttime: time to specify the start time of the averaging
    # interval relative to a spike
    # window_stoptime: time to specify the stop time of the averaging
    # interval relative to a spike
    window_starttime, window_stoptime = window
    if not (isinstance(window_starttime, pq.quantity.Quantity) and
            window_starttime.dimensionality.simplified ==
            pq.Quantity(1, "s").dimensionality):
        raise TypeError("The start time of the window (window[0]) "
                        "must be a time quantity.")
    if not (isinstance(window_stoptime, pq.quantity.Quantity) and
            window_stoptime.dimensionality.simplified ==
            pq.Quantity(1, "s").dimensionality):
        raise TypeError("The stop time of the window (window[1]) "
                        "must be a time quantity.")
    if window_stoptime <= window_starttime:
        raise ValueError("The start time of the window (window[0]) must be "
                         "earlier than the stop time of the window (window[1]).")

    # checks on signal
    if not isinstance(signal, AnalogSignal):
        raise TypeError(
            "Signal must be an AnalogSignal, not %s." % type(signal))
    if len(signal.shape) > 1:
        # num_signals: number of analog signals
        num_signals = signal.shape[1]
    else:
        raise ValueError("Empty analog signal, hence no averaging possible.")
    if window_stoptime - window_starttime > signal.t_stop - signal.t_start:
        raise ValueError("The chosen time window is larger than the "
                         "time duration of the signal.")

    # spiketrains type check
    if isinstance(spiketrains, (np.ndarray, SpikeTrain)):
        spiketrains = [spiketrains]
    elif isinstance(spiketrains, list):
        for st in spiketrains:
            if not isinstance(st, (np.ndarray, SpikeTrain)):
                raise TypeError(
                    "spiketrains must be a SpikeTrain, a numpy ndarray, or a "
                    "list of one of those, not %s." % type(spiketrains))
    else:
        raise TypeError(
            "spiketrains must be a SpikeTrain, a numpy ndarray, or a list of "
            "one of those, not %s." % type(spiketrains))

    # multiplying spiketrain in case only a single spiketrain is given
    if len(spiketrains) == 1 and num_signals != 1:
        template = spiketrains[0]
        spiketrains = []
        for i in range(num_signals):
            spiketrains.append(template)

    # checking for matching numbers of signals and spiketrains
    if num_signals != len(spiketrains):
        raise ValueError(
            "The number of signals and spiketrains has to be the same.")

    # checking the times of signal and spiketrains
    for i in range(num_signals):
        if spiketrains[i].t_start < signal.t_start:
            raise ValueError(
                "The spiketrain indexed by %i starts earlier than "
                "the analog signal." % i)
        if spiketrains[i].t_stop > signal.t_stop:
            raise ValueError(
                "The spiketrain indexed by %i stops later than "
                "the analog signal." % i)

    # *** Main algorithm: ***
    # window_bins: number of bins of the chosen averaging interval
    # (ceil extends the window to the next multiple of the sampling period)
    window_bins = int(np.ceil(((window_stoptime - window_starttime) *
                               signal.sampling_rate).simplified))
    # result_sta: array containing finally the spike-triggered averaged signal
    result_sta = AnalogSignal(np.zeros((window_bins, num_signals)),
                              sampling_rate=signal.sampling_rate, units=signal.units)
    # setting of correct times of the spike-triggered average
    # relative to the spike
    result_sta.t_start = window_starttime
    used_spikes = np.zeros(num_signals, dtype=int)
    unused_spikes = np.zeros(num_signals, dtype=int)
    total_used_spikes = 0

    for i in range(num_signals):
        # summing over all respective signal intervals around spiketimes
        for spiketime in spiketrains[i]:
            # checks for sufficient signal data around spiketime
            if (spiketime + window_starttime >= signal.t_start and
                    spiketime + window_stoptime <= signal.t_stop):
                # calculating the startbin in the analog signal of the
                # averaging window for spike
                startbin = int(np.floor(((spiketime + window_starttime -
                                          signal.t_start) * signal.sampling_rate).simplified))
                # adds the signal in selected interval relative to the spike
                result_sta[:, i] += signal[
                    startbin: startbin + window_bins, i]
                # counting of the used spikes
                used_spikes[i] += 1
            else:
                # counting of the unused spikes
                unused_spikes[i] += 1

        # normalization (a column with used_spikes[i] == 0 becomes
        # all-nan via 0/0, as documented in the docstring above)
        result_sta[:, i] = result_sta[:, i] / used_spikes[i]

        total_used_spikes += used_spikes[i]

    if total_used_spikes == 0:
        warnings.warn(
            "No spike at all was either found or used for averaging")
    result_sta.annotate(used_spikes=used_spikes, unused_spikes=unused_spikes)

    return result_sta
def spike_field_coherence(signal, spiketrain, **kwargs):
    """
    Calculates the spike-field coherence between a analog signal(s) and a
    (binned) spike train.

    The current implementation makes use of scipy.signal.coherence(). Additional
    kwargs will will be directly forwarded to scipy.signal.coherence(),
    except for the axis parameter and the sampling frequency, which will be
    extracted from the input signals.

    The spike_field_coherence function receives an analog signal array and
    either a binned spike train or a spike train containing the original spike
    times. In case of original spike times the spike train is binned according
    to the sampling rate of the analog signal array.

    The AnalogSignal object can contain one or multiple signal traces. In case
    of multiple signal traces, the spike field coherence is calculated
    individually for each signal trace and the spike train.

    Parameters
    ----------
    signal : neo AnalogSignal object
        'signal' contains n analog signals.
    spiketrain : SpikeTrain or BinnedSpikeTrain
        Single spike train to perform the analysis on. The bin_size of the
        binned spike train must match the sampling_rate of signal.
    **kwargs:
        All kwargs are passed to `scipy.signal.coherence()`.

    Returns
    -------
    coherence : complex Quantity array
        contains the coherence values calculated for each analog signal trace
        in combination with the spike train. The first dimension corresponds to
        the frequency, the second to the number of the signal trace.
    frequencies : Quantity array
        contains the frequency values corresponding to the first dimension of
        the 'coherence' array

    Examples
    --------
    Plot the SFC between a regular spike train at 20 Hz, and two sinusoidal
    time series at 20 Hz and 23 Hz, respectively.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from quantities import s, ms, mV, Hz, kHz
    >>> import neo, elephant

    >>> t = pq.Quantity(range(10000),units='ms')
    >>> f1, f2 = 20. * Hz, 23. * Hz
    >>> signal = neo.AnalogSignal(np.array([
            np.sin(f1 * 2. * np.pi * t.rescale(s)),
            np.sin(f2 * 2. * np.pi * t.rescale(s))]).T,
            units=pq.mV, sampling_rate=1. * kHz)
    >>> spiketrain = neo.SpikeTrain(
        range(t[0], t[-1], 50), units='ms',
        t_start=t[0], t_stop=t[-1])

    >>> sfc, freqs = elephant.sta.spike_field_coherence(
        signal, spiketrain, window='boxcar')

    >>> plt.plot(freqs, sfc[:,0])
    >>> plt.plot(freqs, sfc[:,1])
    >>> plt.xlabel('Frequency [Hz]')
    >>> plt.ylabel('SFC')
    >>> plt.xlim((0, 60))
    >>> plt.show()
    """
    if not hasattr(scipy.signal, 'coherence'):
        raise AttributeError('scipy.signal.coherence is not available. The sfc '
                             'function uses scipy.signal.coherence for '
                             'the coherence calculation. This function is '
                             'available for scipy version 0.16 or newer. '
                             'Please update you scipy version.')

    # spiketrains type check
    if not isinstance(spiketrain, (SpikeTrain, BinnedSpikeTrain)):
        raise TypeError(
            "spiketrain must be of type SpikeTrain or BinnedSpikeTrain, "
            "not %s." % type(spiketrain))

    # checks on analogsignal
    if not isinstance(signal, AnalogSignal):
        raise TypeError(
            "Signal must be an AnalogSignal, not %s." % type(signal))
    if len(signal.shape) > 1:
        # num_signals: number of individual traces in the analog signal
        num_signals = signal.shape[1]
    elif len(signal.shape) == 1:
        num_signals = 1
    else:
        raise ValueError("Empty analog signal.")
    len_signals = signal.shape[0]

    # bin spiketrain if necessary (bin width = one signal sample)
    if isinstance(spiketrain, SpikeTrain):
        spiketrain = BinnedSpikeTrain(
            spiketrain, bin_size=signal.sampling_period)

    # check the start and stop times of signal and spike trains
    if spiketrain.t_start < signal.t_start:
        raise ValueError(
            "The spiketrain starts earlier than the analog signal.")
    if spiketrain.t_stop > signal.t_stop:
        raise ValueError(
            "The spiketrain stops later than the analog signal.")

    # check equal time resolution for both signals
    if spiketrain.bin_size != signal.sampling_period:
        raise ValueError(
            "The spiketrain and signal must have a "
            "common sampling frequency / bin_size")

    # calculate how many bins to add on the left of the binned spike train
    # so that it lines up sample-for-sample with the signal
    delta_t = spiketrain.t_start - signal.t_start
    if delta_t % spiketrain.bin_size == 0:
        left_edge = int((delta_t / spiketrain.bin_size).magnitude)
    else:
        raise ValueError("Incompatible binning of spike train and LFP")
    right_edge = int(left_edge + spiketrain.n_bins)

    # duplicate spike trains (one aligned copy per signal trace, so
    # scipy.signal.coherence can operate column-wise)
    spiketrain_array = np.zeros((1, len_signals))
    spiketrain_array[0, left_edge:right_edge] = spiketrain.to_array()
    spiketrains_array = np.repeat(spiketrain_array, repeats=num_signals, axis=0).transpose()

    # calculate coherence
    frequencies, sfc = scipy.signal.coherence(
        spiketrains_array, signal.magnitude,
        fs=signal.sampling_rate.rescale('Hz').magnitude,
        axis=0, **kwargs)

    return (pq.Quantity(sfc, units=pq.dimensionless),
            pq.Quantity(frequencies, units=pq.Hz))
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/tools/datetimes.py | 1 | 35837 | from collections import abc
from datetime import datetime, time
from functools import partial
from typing import Optional, TypeVar, Union
import numpy as np
from pandas._libs import tslib, tslibs
from pandas._libs.tslibs import Timestamp, conversion, parsing
from pandas._libs.tslibs.parsing import ( # noqa
DateParseError,
_format_is_iso,
_guess_datetime_format,
parse_time_string,
)
from pandas._libs.tslibs.strptime import array_strptime
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndex,
ABCIndexClass,
ABCSeries,
)
from pandas.core.dtypes.missing import notna
from pandas._typing import ArrayLike
from pandas.core import algorithms
from pandas.core.algorithms import unique
# ---------------------------------------------------------------------
# types used in annotations
ArrayConvertible = Union[list, tuple, ArrayLike, ABCSeries]
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# types used in annotations
Scalar = Union[int, float, str]
DatetimeScalar = TypeVar("DatetimeScalar", Scalar, datetime)
DatetimeScalarOrArrayConvertible = Union[
DatetimeScalar, list, tuple, ArrayLike, ABCSeries
]
# ---------------------------------------------------------------------
def _guess_datetime_format_for_array(arr, **kwargs):
    # Guess a strftime format from the first non-NaN entry; returns None
    # (implicitly in the original, explicitly here) when the array holds
    # no usable element.
    candidates = notna(arr).nonzero()[0]
    if not len(candidates):
        return None
    return _guess_datetime_format(arr[candidates[0]], **kwargs)
def should_cache(
    arg: ArrayConvertible, unique_share: float = 0.7, check_count: Optional[int] = None
) -> bool:
    """
    Decide whether conversion caching is worthwhile.

    Caching pays off when `arg` contains many repeated values: the first
    `check_count` elements are sampled, and caching is enabled only when
    the share of unique values among them is at most `unique_share`.

    Parameters
    ----------
    arg : listlike, tuple, 1-d array, Series
    unique_share : float, default 0.7
        Must satisfy 0 < unique_share < 1.
    check_count : int, optional
        Must satisfy 0 <= check_count <= len(arg); derived from len(arg)
        when omitted.

    Returns
    -------
    do_caching : bool

    Notes
    -----
    The default thresholds (50 / 5000 / 500 and the ten percent sample)
    were chosen empirically.
    """
    if check_count is None:
        # Tiny inputs: the cache bookkeeping would cost more than it saves.
        if len(arg) <= 50:
            return False
        check_count = int(len(arg) * 0.1) if len(arg) <= 5000 else 500
    else:
        assert (
            0 <= check_count <= len(arg)
        ), "check_count must be in next bounds: [0; len(arg)]"
        if check_count == 0:
            return False

    assert 0 < unique_share < 1, "unique_share must be in next bounds: (0; 1)"

    sample_unique = unique(arg[:check_count])
    return len(sample_unique) <= check_count * unique_share
def _maybe_cache(arg, format, cache, convert_listlike):
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : listlike, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series()
if cache:
# Perform a quicker unique check
if not should_cache(arg):
return cache_array
unique_dates = unique(arg)
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates, True, format)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array
def _box_as_indexlike(
dt_array: ArrayLike, utc: Optional[bool] = None, name: Optional[str] = None
) -> Union[ABCIndex, ABCDatetimeIndex]:
"""
Properly boxes the ndarray of datetimes to DatetimeIndex
if it is possible or to generic Index instead
Parameters
----------
dt_array: 1-d array
array of datetimes to be boxed
tz : object
None or 'utc'
name : string, default None
Name for a resulting index
Returns
-------
result : datetime of converted dates
- DatetimeIndex if convertible to sole datetime64 type
- general Index otherwise
"""
from pandas import DatetimeIndex, Index
if is_datetime64_dtype(dt_array):
tz = "utc" if utc else None
return DatetimeIndex(dt_array, tz=tz, name=name)
return Index(dt_array, name=name)
def _convert_and_box_cache(
    arg: DatetimeScalarOrArrayConvertible,
    cache_array: ABCSeries,
    box: bool,
    name: Optional[str] = None,
) -> Union[ABCIndex, np.ndarray]:
    """
    Convert an array of dates by looking each one up in a cache, then box
    the result.

    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series
    cache_array : Series
        Cache of converted, unique dates
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : string, default None
        Name for a DatetimeIndex

    Returns
    -------
    result : datetime of converted dates
        - Index-like if box=True
        - ndarray if box=False
    """
    from pandas import Series
    # Map each element onto its pre-converted value from the cache.
    mapped = Series(arg).map(cache_array)
    if not box:
        return mapped.values
    return _box_as_indexlike(mapped, utc=None, name=name)
def _return_parsed_timezone_results(result, timezones, box, tz, name):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
box : boolean
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : ndarray of parsed dates with timezone
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
if tz is not None:
raise ValueError(
"Cannot pass a tz argument when "
"parsing strings with timezone "
"information."
)
tz_results = np.array(
[Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]
)
if box:
from pandas import Index
return Index(tz_results, name=name)
return tz_results
def _convert_listlike_datetimes(
    arg,
    box,
    format,
    name=None,
    tz=None,
    unit=None,
    errors=None,
    infer_datetime_format=None,
    dayfirst=None,
    yearfirst=None,
    exact=None,
):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates.
    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        dates to be parsed
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime
    Returns
    -------
    ndarray of parsed dates
        Returns:
        - Index-like if box=True
        - ndarray of Timestamps if box=False
    """
    from pandas import DatetimeIndex
    from pandas.core.arrays import DatetimeArray
    from pandas.core.arrays.datetimes import (
        maybe_convert_dtype,
        objects_to_datetime64ns,
    )
    if isinstance(arg, (list, tuple)):
        arg = np.array(arg, dtype="O")
    # these are shortcutable: already-datetime inputs skip string parsing
    if is_datetime64tz_dtype(arg):
        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == "utc":
            arg = arg.tz_convert(None).tz_localize(tz)
        return arg
    elif is_datetime64_ns_dtype(arg):
        if box and not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        elif tz:
            # DatetimeArray, DatetimeIndex
            return arg.tz_localize(tz)
        return arg
    elif unit is not None:
        # numeric epoch values interpreted in the given unit
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, "values", arg)
        result, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
        if box:
            if errors == "ignore":
                from pandas import Index
                result = Index(result, name=name)
            else:
                result = DatetimeIndex(result, name=name)
            # GH 23758: We may still need to localize the result with tz
            # GH 25546: Apply tz_parsed first (from arg), then tz (from caller)
            # result will be naive but in UTC
            try:
                result = result.tz_localize("UTC").tz_convert(tz_parsed)
            except AttributeError:
                # Regular Index from 'ignore' path
                return result
            if tz is not None:
                if result.tz is None:
                    result = result.tz_localize(tz)
                else:
                    result = result.tz_convert(tz)
        return result
    elif getattr(arg, "ndim", 1) > 1:
        raise TypeError(
            "arg must be a string, datetime, list, tuple, " "1-d array, or Series"
        )
    # warn if passing timedelta64, raise for PeriodDtype
    # NB: this must come after unit transformation
    orig_arg = arg
    arg, _ = maybe_convert_dtype(arg, copy=False)
    arg = ensure_object(arg)
    require_iso8601 = False
    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None
    tz_parsed = None
    result = None
    if format is not None:
        try:
            # shortcut formatting here
            if format == "%Y%m%d":
                try:
                    # pass orig_arg as float-dtype may have been converted to
                    # datetime64[ns]
                    orig_arg = ensure_object(orig_arg)
                    result = _attempt_YYYYMMDD(orig_arg, errors=errors)
                except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
                    raise ValueError(
                        "cannot convert the input to " "'%Y%m%d' date format"
                    )
            # fallback: generic strptime-based parsing for the given format
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors
                    )
                    if "%Z" in format or "%z" in format:
                        return _return_parsed_timezone_results(
                            result, timezones, box, tz, name
                        )
                except tslibs.OutOfBoundsDatetime:
                    if errors == "raise":
                        raise
                    elif errors == "coerce":
                        # fill everything with NaT on out-of-bounds
                        result = np.empty(arg.shape, dtype="M8[ns]")
                        iresult = result.view("i8")
                        iresult.fill(tslibs.iNaT)
                    else:
                        result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == "raise":
                            raise
                        elif errors == "coerce":
                            result = np.empty(arg.shape, dtype="M8[ns]")
                            iresult = result.view("i8")
                            iresult.fill(tslibs.iNaT)
                        else:
                            result = arg
        except ValueError as e:
            # Fallback to try to convert datetime objects if timezone-aware
            # datetime objects are found without passing `utc=True`
            try:
                values, tz = conversion.datetime_to_datetime64(arg)
                return DatetimeIndex._simple_new(values, name=name, tz=tz)
            except (ValueError, TypeError):
                raise e
    if result is None:
        # either no format was given, or the format path deferred to the
        # general-purpose object parser
        assert format is None or infer_datetime_format
        utc = tz == "utc"
        result, tz_parsed = objects_to_datetime64ns(
            arg,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            utc=utc,
            errors=errors,
            require_iso8601=require_iso8601,
            allow_object=True,
        )
    if tz_parsed is not None:
        if box:
            # We can take a shortcut since the datetime64 numpy array
            # is in UTC
            return DatetimeIndex._simple_new(result, name=name, tz=tz_parsed)
        else:
            # Convert the datetime64 numpy array to an numpy array
            # of datetime objects
            result = [Timestamp(ts, tz=tz_parsed).to_pydatetime() for ts in result]
            return np.array(result, dtype=object)
    if box:
        utc = tz == "utc"
        return _box_as_indexlike(result, utc=utc, name=name)
    return result
def _adjust_to_origin(arg, origin, unit):
    """
    Helper function for to_datetime.
    Adjust the numeric input argument so that it is expressed relative to
    the unix epoch instead of the specified origin.
    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be adjusted
    origin : 'julian' or Timestamp
        origin offset for the arg
    unit : string
        passed unit from to_datetime, must be 'D' when origin='julian'
    Returns
    -------
    ndarray or scalar of adjusted date(s)
    Raises
    ------
    ValueError
        if unit/arg/origin are incompatible
    tslibs.OutOfBoundsDatetime
        if the shifted values fall outside the Timestamp-representable range
    """
    if origin == "julian":
        original = arg
        # j0 is the julian day number of the unix epoch (1970-01-01)
        j0 = Timestamp(0).to_julian_date()
        if unit != "D":
            raise ValueError("unit must be 'D' for origin='julian'")
        try:
            arg = arg - j0
        except TypeError:
            raise ValueError("incompatible 'arg' type for given " "'origin'='julian'")
        # preemptively check this for a nice range
        j_max = Timestamp.max.to_julian_date() - j0
        j_min = Timestamp.min.to_julian_date() - j0
        if np.any(arg > j_max) or np.any(arg < j_min):
            raise tslibs.OutOfBoundsDatetime(
                "{original} is Out of Bounds for "
                "origin='julian'".format(original=original)
            )
    else:
        # arg must be numeric
        if not (
            (is_scalar(arg) and (is_integer(arg) or is_float(arg)))
            or is_numeric_dtype(np.asarray(arg))
        ):
            raise ValueError(
                "'{arg}' is not compatible with origin='{origin}'; "
                "it must be numeric with a unit specified ".format(
                    arg=arg, origin=origin
                )
            )
        # we are going to offset back to unix / epoch time
        try:
            offset = Timestamp(origin)
        except tslibs.OutOfBoundsDatetime:
            raise tslibs.OutOfBoundsDatetime(
                "origin {origin} is Out of Bounds".format(origin=origin)
            )
        except ValueError:
            raise ValueError(
                "origin {origin} cannot be converted "
                "to a Timestamp".format(origin=origin)
            )
        if offset.tz is not None:
            raise ValueError("origin offset {} must be tz-naive".format(offset))
        offset -= Timestamp(0)
        # convert the offset to the unit of the arg
        # this should be lossless in terms of precision
        offset = offset // tslibs.Timedelta(1, unit=unit)
        # scalars & ndarray-like can handle the addition directly; other
        # list-likes (plain lists, tuples) are converted to ndarray first
        if is_list_like(arg) and not isinstance(
            arg, (ABCSeries, ABCIndexClass, np.ndarray)
        ):
            arg = np.asarray(arg)
        arg = arg + offset
    return arg
@deprecate_kwarg(old_arg_name="box", new_arg_name=None)
def to_datetime(
    arg,
    errors="raise",
    dayfirst=False,
    yearfirst=False,
    utc=None,
    box=True,
    format=None,
    exact=True,
    unit=None,
    infer_datetime_format=False,
    origin="unix",
    cache=True,
):
    """
    Convert argument to datetime.
    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series
        .. versionadded:: 0.18.1
           or DataFrame/dict-like
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    dayfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        If True, parses dates with the day first, eg 10/11/12 is parsed as
        2012-11-10.
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug, based on dateutil behavior).
    yearfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        - If True parses dates with the year first, eg 10/11/12 is parsed as
          2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst is preceded (same
          as dateutil).
        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).
        .. versionadded:: 0.16.1
    utc : boolean, default None
        Return UTC DatetimeIndex if True (converting any tz-aware
        datetime.datetime objects as well).
    box : boolean, default True
        - If True returns a DatetimeIndex or Index-like object
        - If False returns ndarray of values.
        .. deprecated:: 0.25.0
            Use :meth:`Series.to_numpy` or :meth:`Timestamp.to_datetime64`
            instead to get an ndarray of values or numpy.datetime64,
            respectively.
    format : string, default None
        strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
        all the way up to nanoseconds.
        See strftime documentation for more information on choices:
        https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
    exact : boolean, True by default
        - If True, require an exact format match.
        - If False, allow the format to match anywhere in the target string.
    unit : string, default 'ns'
        unit of the arg (D,s,ms,us,ns) denote the unit, which is an
        integer or float number. This will be based off the origin.
        Example, with unit='ms' and origin='unix' (the default), this
        would calculate the number of milliseconds to the unix epoch start.
    infer_datetime_format : boolean, default False
        If True and no `format` is given, attempt to infer the format of the
        datetime strings, and if it can be inferred, switch to a faster
        method of parsing them. In some cases this can increase the parsing
        speed by ~5-10x.
    origin : scalar, default is 'unix'
        Define the reference date. The numeric values would be parsed as number
        of units (defined by `unit`) since this reference date.
        - If 'unix' (or POSIX) time; origin is set to 1970-01-01.
        - If 'julian', unit must be 'D', and origin is set to beginning of
          Julian Calendar. Julian day number 0 is assigned to the day starting
          at noon on January 1, 4713 BC.
        - If Timestamp convertible, origin is set to Timestamp identified by
          origin.
        .. versionadded:: 0.20.0
    cache : boolean, default True
        If True, use a cache of unique, converted dates to apply the datetime
        conversion. May produce significant speed-up when parsing duplicate
        date strings, especially ones with timezone offsets.
        .. versionadded:: 0.23.0
        .. versionchanged:: 0.25.0
            - changed default value from False to True
    Returns
    -------
    ret : datetime if parsing succeeded.
        Return type depends on input:
        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp
        In case when it is not possible to return designated types (e.g. when
        any element of input is before Timestamp.min or after Timestamp.max)
        return will have datetime.datetime type (or corresponding
        array/Series).
    See Also
    --------
    DataFrame.astype : Cast argument to a specified dtype.
    to_timedelta : Convert argument to timedelta.
    Examples
    --------
    Assembling a datetime from multiple columns of a DataFrame. The keys can be
    common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns']) or plurals of the same
    >>> df = pd.DataFrame({'year': [2015, 2016],
    ...                    'month': [2, 3],
    ...                    'day': [4, 5]})
    >>> pd.to_datetime(df)
    0   2015-02-04
    1   2016-03-05
    dtype: datetime64[ns]
    If a date does not meet the `timestamp limitations
    <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
    #timeseries-timestamp-limits>`_, passing errors='ignore'
    will return the original input instead of raising any exception.
    Passing errors='coerce' will force an out-of-bounds date to NaT,
    in addition to forcing non-dates (or non-parseable dates) to NaT.
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
    datetime.datetime(1300, 1, 1, 0, 0)
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
    NaT
    Passing infer_datetime_format=True can often-times speedup a parsing
    if its not an ISO8601 format exactly, but in a regular format.
    >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
    >>> s.head()
    0    3/11/2000
    1    3/12/2000
    2    3/13/2000
    3    3/11/2000
    4    3/12/2000
    dtype: object
    >>> %timeit pd.to_datetime(s,infer_datetime_format=True)  # doctest: +SKIP
    100 loops, best of 3: 10.4 ms per loop
    >>> %timeit pd.to_datetime(s,infer_datetime_format=False)  # doctest: +SKIP
    1 loop, best of 3: 471 ms per loop
    Using a unix epoch time
    >>> pd.to_datetime(1490195805, unit='s')
    Timestamp('2017-03-22 15:16:45')
    >>> pd.to_datetime(1490195805433502912, unit='ns')
    Timestamp('2017-03-22 15:16:45.433502912')
    .. warning:: For float arg, precision rounding might happen. To prevent
        unexpected behavior use a fixed-width exact type.
    Using a non-unix epoch origin
    >>> pd.to_datetime([1, 2, 3], unit='D',
    ...                origin=pd.Timestamp('1960-01-01'))
    DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], \
dtype='datetime64[ns]', freq=None)
    """
    if arg is None:
        return None
    # shift numeric inputs so they are relative to the unix epoch
    if origin != "unix":
        arg = _adjust_to_origin(arg, origin, unit)
    tz = "utc" if utc else None
    # bind all the shared parsing options; per-branch calls below only
    # supply (arg, box, format)
    convert_listlike = partial(
        _convert_listlike_datetimes,
        tz=tz,
        unit=unit,
        dayfirst=dayfirst,
        yearfirst=yearfirst,
        errors=errors,
        exact=exact,
        infer_datetime_format=infer_datetime_format,
    )
    # dispatch on the input type: scalar Timestamp, Series, DataFrame/dict,
    # Index, generic list-like, or a single scalar (wrapped in an array)
    if isinstance(arg, Timestamp):
        result = arg
        if tz is not None:
            if arg.tz is not None:
                result = result.tz_convert(tz)
            else:
                result = result.tz_localize(tz)
    elif isinstance(arg, ABCSeries):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = arg.map(cache_array)
        else:
            values = convert_listlike(arg._values, True, format)
            result = arg._constructor(values, index=arg.index, name=arg.name)
    elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
        result = _assemble_from_unit_mappings(arg, errors, box, tz)
    elif isinstance(arg, ABCIndexClass):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box, name=arg.name)
        else:
            convert_listlike = partial(convert_listlike, name=arg.name)
            result = convert_listlike(arg, box, format)
    elif is_list_like(arg):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box)
        else:
            result = convert_listlike(arg, box, format)
    else:
        # scalar: convert via a length-1 array and unwrap the result
        result = convert_listlike(np.array([arg]), box, format)[0]
    return result
# mappings for assembling units
# Maps the column/key aliases accepted by to_datetime(DataFrame/dict) onto
# the canonical unit names used by _assemble_from_unit_mappings.
_unit_map = {
    "year": "year",
    "years": "year",
    "month": "month",
    "months": "month",
    "day": "day",
    "days": "day",
    "hour": "h",
    "hours": "h",
    "minute": "m",
    "minutes": "m",
    "second": "s",
    "seconds": "s",
    "ms": "ms",
    "millisecond": "ms",
    "milliseconds": "ms",
    "us": "us",
    "microsecond": "us",
    "microseconds": "us",
    "ns": "ns",
    "nanosecond": "ns",
    "nanoseconds": "ns",
}
def _assemble_from_unit_mappings(arg, errors, box, tz):
    """
    Assemble the unit-specified fields from the arg (DataFrame).
    Return a Series for actual parsing.
    Parameters
    ----------
    arg : DataFrame
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    box : boolean
        - If True, return a DatetimeIndex
        - If False, return an array
    tz : None or 'utc'
    Returns
    -------
    Series
    Raises
    ------
    ValueError
        on duplicate keys, missing year/month/day keys, unrecognized keys,
        or values that cannot be assembled into datetimes
    """
    from pandas import to_timedelta, to_numeric, DataFrame
    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")
    # replace passed unit with _unit_map
    def f(value):
        if value in _unit_map:
            return _unit_map[value]
        # m is case significant
        if value.lower() in _unit_map:
            return _unit_map[value.lower()]
        return value
    unit = {k: f(k) for k in arg.keys()}
    unit_rev = {v: k for k, v in unit.items()}
    # we require at least Ymd
    required = ["year", "month", "day"]
    req = sorted(list(set(required) - set(unit_rev.keys())))
    if len(req):
        raise ValueError(
            "to assemble mappings requires at least that "
            "[year, month, day] be specified: [{required}] "
            "is missing".format(required=",".join(req))
        )
    # keys we don't recognize
    excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
    if len(excess):
        raise ValueError(
            "extra keys have been passed "
            "to the datetime assemblage: "
            "[{excess}]".format(excess=",".join(excess))
        )
    def coerce(values):
        # we allow coercion to if errors allows
        values = to_numeric(values, errors=errors)
        # prevent overflow in case of int8 or int16
        if is_integer_dtype(values):
            values = values.astype("int64", copy=False)
        return values
    # encode year/month/day as a single YYYYMMDD integer and parse in one shot
    values = (
        coerce(arg[unit_rev["year"]]) * 10000
        + coerce(arg[unit_rev["month"]]) * 100
        + coerce(arg[unit_rev["day"]])
    )
    try:
        values = to_datetime(values, format="%Y%m%d", errors=errors, utc=tz)
    except (TypeError, ValueError) as e:
        raise ValueError("cannot assemble the " "datetimes: {error}".format(error=e))
    # add any sub-day components as timedeltas on top of the date
    for u in ["h", "m", "s", "ms", "us", "ns"]:
        value = unit_rev.get(u)
        if value is not None and value in arg:
            try:
                values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
            except (TypeError, ValueError) as e:
                raise ValueError(
                    "cannot assemble the datetimes [{value}]: "
                    "{error}".format(value=value, error=e)
                )
    if not box:
        return values.values
    return values
def _attempt_YYYYMMDD(arg, errors):
    """
    Try to parse the YYYYMMDD/%Y%m%d format, trying to deal with NaT-like.
    arg is passed in as an object dtype, but could really be ints/strings
    with nan-like/or floats (e.g. with nan).
    Parameters
    ----------
    arg : passed value
    errors : 'raise','ignore','coerce'
    Returns
    -------
    ndarray of datetime64[ns], or None if no parsing strategy succeeded
    """
    def calc(carg):
        # calculate the actual result
        # split the YYYYMMDD integer into year / month / day components
        carg = carg.astype(object)
        parsed = parsing.try_parse_year_month_day(
            carg / 10000, carg / 100 % 100, carg % 100
        )
        return tslib.array_to_datetime(parsed, errors=errors)[0]
    def calc_with_mask(carg, mask):
        # parse only the masked (valid) entries; the rest become NaT
        result = np.empty(carg.shape, dtype="M8[ns]")
        iresult = result.view("i8")
        iresult[~mask] = tslibs.iNaT
        masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
        result[mask] = masked_result.astype("M8[ns]")
        return result
    # try intlike / strings that are ints
    try:
        return calc(arg.astype(np.int64))
    except (ValueError, OverflowError):
        pass
    # a float with actual np.nan
    try:
        carg = arg.astype(np.float64)
        return calc_with_mask(carg, notna(carg))
    except (ValueError, OverflowError):
        pass
    # string with NaN-like
    try:
        mask = ~algorithms.isin(arg, list(tslib.nat_strings))
        return calc_with_mask(arg, mask)
    except (ValueError, OverflowError):
        pass
    # no strategy worked; caller falls back to generic parsing
    return None
# Fixed time formats for time parsing
# Tried in order by to_time / _guess_time_format_for_array when no explicit
# format is supplied.
_time_formats = [
    "%H:%M",
    "%H%M",
    "%I:%M%p",
    "%I%M%p",
    "%H:%M:%S",
    "%H%M%S",
    "%I:%M:%S%p",
    "%I%M%S%p",
]
def _guess_time_format_for_array(arr):
    """Guess a time format from the first non-NaN element of ``arr``.

    Returns the first entry of ``_time_formats`` that parses the sample,
    or None when the array is all-NaN or no fixed format matches.
    """
    valid_positions = notna(arr).nonzero()[0]
    if not len(valid_positions):
        return None
    sample = arr[valid_positions[0]]
    for candidate in _time_formats:
        try:
            datetime.strptime(sample, candidate)
        except ValueError:
            continue
        return candidate
    return None
def to_time(arg, format=None, infer_time_format=False, errors="raise"):
    """
    Parse time strings to time objects using fixed strptime formats ("%H:%M",
    "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
    "%I%M%S%p")
    Use infer_time_format if all the strings are in the same format to speed
    up conversion.
    Parameters
    ----------
    arg : string in time format, datetime.time, list, tuple, 1-d array, Series
    format : str, default None
        Format used to convert arg into a time object. If None, fixed formats
        are used.
    infer_time_format: bool, default False
        Infer the time format based on the first non-NaN element. If all
        strings are in the same format, this will speed up conversion.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as None
        - If 'ignore', then invalid parsing will return the input
    Returns
    -------
    datetime.time
    """
    def _convert_listlike(arg, format):
        # core conversion routine; the public entry dispatches on arg type
        if isinstance(arg, (list, tuple)):
            arg = np.array(arg, dtype="O")
        elif getattr(arg, "ndim", 1) > 1:
            raise TypeError(
                "arg must be a string, datetime, list, tuple, " "1-d array, or Series"
            )
        arg = ensure_object(arg)
        if infer_time_format and format is None:
            format = _guess_time_format_for_array(arg)
        times = []
        if format is not None:
            # single known format: parse each element against it
            for element in arg:
                try:
                    times.append(datetime.strptime(element, format).time())
                except (ValueError, TypeError):
                    if errors == "raise":
                        msg = (
                            "Cannot convert {element} to a time with given "
                            "format {format}"
                        ).format(element=element, format=format)
                        raise ValueError(msg)
                    elif errors == "ignore":
                        return arg
                    else:
                        # errors == 'coerce': unparseable entries become None
                        times.append(None)
        else:
            # unknown format: try every fixed format per element
            formats = _time_formats[:]
            format_found = False
            for element in arg:
                time_object = None
                for time_format in formats:
                    try:
                        time_object = datetime.strptime(element, time_format).time()
                        if not format_found:
                            # Put the found format in front
                            fmt = formats.pop(formats.index(time_format))
                            formats.insert(0, fmt)
                            format_found = True
                        break
                    except (ValueError, TypeError):
                        continue
                if time_object is not None:
                    times.append(time_object)
                elif errors == "raise":
                    raise ValueError(
                        "Cannot convert arg {arg} to " "a time".format(arg=arg)
                    )
                elif errors == "ignore":
                    return arg
                else:
                    times.append(None)
        return times
    # dispatch on input type: None, scalar time, Series, Index, list-like,
    # or a bare scalar (wrapped in a length-1 array and unwrapped)
    if arg is None:
        return arg
    elif isinstance(arg, time):
        return arg
    elif isinstance(arg, ABCSeries):
        values = _convert_listlike(arg._values, format)
        return arg._constructor(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, format)
    elif is_list_like(arg):
        return _convert_listlike(arg, format)
    return _convert_listlike(np.array([arg]), format)[0]
| apache-2.0 |
ChristophKirst/ClearMapUnstable | docs/_build/html/imageanalysis-11.py | 4 | 1303 | import os
import ClearMap.Settings as settings
# Example c-fos substack shipped with the ClearMap test data.
filename = os.path.join(settings.ClearMapPath, 'Test/Data/ImageAnalysis/cfos-substack.tif');
import ClearMap.Visualization.Plot as plt
import ClearMap.IO as io
# Load the first 26 z-planes of the stack.
data = io.readData(filename, z = (0,26));
import ClearMap.ImageProcessing.BackgroundRemoval as bgr
# Remove background with a morphological opening of the given size.
dataBGR = bgr.removeBackground(data.astype('float'), size=(3,3), verbose = True);
from ClearMap.ImageProcessing.Filter.DoGFilter import filterDoG
# Difference-of-Gaussians filter to enhance blob-like cells.
dataDoG = filterDoG(dataBGR, size=(8,8,4), verbose = True);
from ClearMap.ImageProcessing.MaximaDetection import findExtendedMaxima
# Detect extended maxima above the intensity threshold.
dataMax = findExtendedMaxima(dataDoG, hMax = None, verbose = True, threshold = 10);
from ClearMap.ImageProcessing.MaximaDetection import findCenterOfMaxima
# Reduce each maximum region to a single cell-center coordinate.
cells = findCenterOfMaxima(data, dataMax);
from ClearMap.ImageProcessing.CellSizeDetection import detectCellShape
# Watershed-style cell shape detection seeded at the detected centers.
dataShape = detectCellShape(dataDoG, cells, threshold = 15);
from ClearMap.ImageProcessing.CellSizeDetection import findCellSize, findCellIntensity
# Per-cell size (voxel count) and intensity statistics.
cellSizes = findCellSize(dataShape, maxLabel = cells.shape[0]);
cellIntensities = findCellIntensity(dataBGR, dataShape, maxLabel = cells.shape[0]);
import matplotlib.pyplot as mpl
# Scatter plot of cell size vs. intensity.
mpl.figure()
mpl.plot(cellSizes, cellIntensities, '.')
mpl.xlabel('cell size [voxel]')
mpl.ylabel('cell intensity [au]') | gpl-3.0 |
B3AU/waveTree | examples/cluster/plot_dbscan.py | 6 | 2522 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples = db.core_sample_indices_
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import pylab as pl
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = pl.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1:
markersize = 14
else:
markersize = 6
pl.plot(x[0], x[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=markersize)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| bsd-3-clause |
JohanComparat/pyEmerge | bin/lc_wedge_plot.py | 1 | 5144 | """
+ + + + + + + HEADER + + + + + + + + +
file_name lc_remaped_position_L3_.hdf5
HDF5_Version 1.8.18
h5py_version 2.7.1
+ + + + + + + DATA + + + + + + + + + +
========================================
agn_properties <HDF5 group "/agn_properties" (2 members)>
- - - - - - - - - - - - - - - - - - - -
agn_activity <HDF5 dataset "agn_activity": shape (23191107,), type "<f8">
log_lambda_sar <HDF5 dataset "log_lambda_sar": shape (23191107,), type "<f8">
========================================
emerge_data <HDF5 group "/emerge_data" (3 members)>
- - - - - - - - - - - - - - - - - - - -
dMdt <HDF5 dataset "dMdt": shape (23191107,), type "<f8">
mvir_dot <HDF5 dataset "mvir_dot": shape (23191107,), type "<f8">
rvir_dot <HDF5 dataset "rvir_dot": shape (23191107,), type "<f8">
========================================
halo_position <HDF5 group "/halo_position" (7 members)>
- - - - - - - - - - - - - - - - - - - -
vx <HDF5 dataset "vx": shape (23191107,), type "<f8">
vy <HDF5 dataset "vy": shape (23191107,), type "<f8">
vz <HDF5 dataset "vz": shape (23191107,), type "<f8">
x <HDF5 dataset "x": shape (23191107,), type "<f8">
y <HDF5 dataset "y": shape (23191107,), type "<f8">
z <HDF5 dataset "z": shape (23191107,), type "<f8">
z_snap <HDF5 dataset "z_snap": shape (23191107,), type "<f8">
========================================
halo_properties <HDF5 group "/halo_properties" (7 members)>
- - - - - - - - - - - - - - - - - - - -
Mpeak <HDF5 dataset "Mpeak": shape (23191107,), type "<f8">
Vmax <HDF5 dataset "Vmax": shape (23191107,), type "<f8">
id <HDF5 dataset "id": shape (23191107,), type "<f8">
mvir <HDF5 dataset "mvir": shape (23191107,), type "<f8">
pid <HDF5 dataset "pid": shape (23191107,), type "<f8">
rs <HDF5 dataset "rs": shape (23191107,), type "<f8">
rvir <HDF5 dataset "rvir": shape (23191107,), type "<f8">
========================================
moster_2013_data <HDF5 group "/moster_2013_data" (1 members)>
- - - - - - - - - - - - - - - - - - - -
stellar_mass <HDF5 dataset "stellar_mass": shape (23191107,), type "<f8">
========================================
sky_position <HDF5 group "/sky_position" (5 members)>
- - - - - - - - - - - - - - - - - - - -
DEC <HDF5 dataset "DEC": shape (23191107,), type "<f8">
RA <HDF5 dataset "RA": shape (23191107,), type "<f8">
redshift_R <HDF5 dataset "redshift_R": shape (23191107,), type "<f8">
redshift_S <HDF5 dataset "redshift_S": shape (23191107,), type "<f8">
selection <HDF5 dataset "selection": shape (23191107,), type "|b1">
"""
"""
Convert to observed fluxes
intrinsic extinction. Thin / thick obscuration
Follows Buchner et al. 2016
"""
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
# Output directory for the wedge plots (served from the user's public wwwDir).
plotDir = os.path.join(os.environ['HOME'], 'wwwDir', "eRoMok", "h5")
def get_positions(path_to_lc, area, z_max=3., ra_max=1.):
    """Return sky positions of active, X-ray-detected AGN from a light-cone file.

    Parameters
    ----------
    path_to_lc : str
        Path to the light-cone HDF5 file (layout documented in the module
        docstring above).
    area : float
        Survey area of the light cone; currently unused but kept for
        interface compatibility with existing callers.
    z_max : float
        Maximum real-space redshift of selected objects.
    ra_max : float
        Maximum |RA| (degrees) of selected objects.

    Returns
    -------
    zR_a, zS_a, dec_a : ndarray
        Real-space redshift, redshift-space redshift, and DEC in radians
        of the selected AGN.
    """
    # Use a context manager so the file is closed even if a read fails
    # (the previous implementation leaked the handle on exceptions).
    with h5py.File(path_to_lc, 'r') as f:
        # Galaxy selection: inside the RA window, flagged by the light-cone
        # selection, and below the redshift cut.
        is_gal = (abs(f['/sky_position/RA'].value) < ra_max) & (f['/sky_position/selection'].value) & (f['/sky_position/redshift_R'].value < z_max)
        # AGN selection: active galaxies with a positive X-ray flux.
        # NOTE(review): 'rxay_flux_05_20' looks like a typo for 'xray', but it
        # must match the dataset name actually stored in the HDF5 files.
        is_agn = (is_gal) & (f['/agn_properties/agn_activity'].value == 1) & (f['/agn_properties/rxay_flux_05_20'].value > 0)
        zR_a = f['/sky_position/redshift_R'].value[is_agn]
        zS_a = f['/sky_position/redshift_S'].value[is_agn]
        # DEC converted from degrees to radians for the wedge projection.
        dec_a = f['/sky_position/DEC'].value[is_agn] * n.pi / 180.
    return zR_a, zS_a, dec_a
# Wedge plot: x = redshift, y = z*tan(DEC), one color per light-cone shell.
p.figure(2, (10,4))
p.axes([0,0,1,1])
# Outer shell: L15 remap (largest area, lowest redshift reach).
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_remaped_position_L15.hdf5'
area = 14.323944878104827*2. * 2*20.257311381848154
#zR_g, zS_g, dec_g, zR_a, zS_a, dec_a = get_positions(path_to_lc, area, z_max=3.)
zR_a, zS_a, dec_a = get_positions(path_to_lc, area, z_max=3.)
p.plot(zR_a, zR_a*n.tan(dec_a), 'r,', alpha=0.2, rasterized = True, label = 'L15 z<0.54 1160deg2' )
# Middle shell: L3 remap.
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_remaped_position_L3.hdf5'
area = 6.7529257176359*2. * 2* 8.269819492449505
#zR_g, zS_g, dec_g, zR_a, zS_a, dec_a = get_positions(path_to_lc, area, z_max=1.1)
zR_a, zS_a, dec_a = get_positions(path_to_lc, area, z_max=1.1)
p.plot(zR_a, zR_a*n.tan(dec_a), 'b,', alpha=0.2, rasterized=True, label = 'L3 z<1.08, 223deg2')
# Deepest shell: L6 remap (smallest area, highest redshift reach).
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_remaped_position_L6.hdf5'
area = 1.9766516114702513*2. * 2*2.0047373031569915
#zR_g, zS_g, dec_g, zR_a, zS_a, dec_a = get_positions(path_to_lc, area, z_max=3.)
zR_a, zS_a, dec_a = get_positions(path_to_lc, area, z_max=3.)
p.plot(zR_a, zR_a*n.tan(dec_a), 'k,', alpha=0.2, rasterized=True, label = 'L6 z<3, 15deg2')
p.xlabel('redshift')
p.ylabel('DEC')
p.legend(frameon=False, loc=0)
p.xscale('log')
p.xlim((0,3))
p.ylim((-0.2, 0.2))
#p.title('Mocks')
#p.grid()
p.axis('off')
p.savefig(os.path.join(plotDir, "wedges_AGN.jpg"))
p.clf()
| unlicense |
anomam/pvlib-python | setup.py | 1 | 3709 | #!/usr/bin/env python
import os

try:
    from setuptools import setup
    from setuptools.extension import Extension
except ImportError:
    raise RuntimeError('setuptools is required')

# versioneer derives the package version from VCS tags.
import versioneer

# Short and long descriptions shown on PyPI.
DESCRIPTION = ('A set of functions and classes for simulating the ' +
               'performance of photovoltaic energy systems.')
LONG_DESCRIPTION = """
PVLIB Python is a community supported tool that provides a set of
functions and classes for simulating the performance of photovoltaic
energy systems. PVLIB Python was originally ported from the PVLIB MATLAB
toolbox developed at Sandia National Laboratories and it implements many
of the models and methods developed at the Labs. More information on
Sandia Labs PV performance modeling programs can be found at
https://pvpmc.sandia.gov/. We collaborate with the PVLIB MATLAB project,
but operate independently of it.
We need your help to make pvlib-python a great tool!
Documentation: http://pvlib-python.readthedocs.io
Source code: https://github.com/pvlib/pvlib-python
"""

# Package metadata.
DISTNAME = 'pvlib'
LICENSE = 'BSD 3-Clause'
AUTHOR = 'pvlib python Developers'
MAINTAINER_EMAIL = 'holmgren@email.arizona.edu'
URL = 'https://github.com/pvlib/pvlib-python'

# Minimal runtime dependencies; optional features live in EXTRAS_REQUIRE.
INSTALL_REQUIRES = ['numpy >= 1.12.0',
                    'pandas >= 0.18.1',
                    'pytz',
                    'requests']
TESTS_REQUIRE = ['nose', 'pytest', 'pytest-cov', 'pytest-mock',
                 'pytest-timeout', 'pytest-rerunfailures', 'pytest-remotedata']
EXTRAS_REQUIRE = {
    'optional': ['ephem', 'cython', 'netcdf4', 'nrel-pysam', 'numba',
                 'pvfactors', 'scipy', 'siphon', 'tables'],
    'doc': ['ipython', 'matplotlib', 'sphinx == 1.8.5', 'sphinx_rtd_theme',
            'sphinx-gallery', 'docutils == 0.15.2'],
    'test': TESTS_REQUIRE
}
# 'all' aggregates every extra into one sorted, de-duplicated list.
EXTRAS_REQUIRE['all'] = sorted(set(sum(EXTRAS_REQUIRE.values(), [])))

CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Intended Audience :: Science/Research',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: Scientific/Engineering',
]

setuptools_kwargs = {
    'zip_safe': False,
    'scripts': [],
    'include_package_data': True,
    'python_requires': '~=3.5'
}

# set up pvlib packages to be installed and extensions to be compiled
PACKAGES = ['pvlib']
extensions = []

# Optional C extension (NREL SPA solar position algorithm). The C sources
# are not redistributed with the repo, so only build when they are present.
spa_sources = ['pvlib/spa_c_files/spa.c', 'pvlib/spa_c_files/spa_py.c']
spa_depends = ['pvlib/spa_c_files/spa.h']
spa_all_file_paths = map(lambda x: os.path.join(os.path.dirname(__file__), x),
                         spa_sources + spa_depends)
if all(map(os.path.exists, spa_all_file_paths)):
    print('all spa_c files found')
    PACKAGES.append('pvlib.spa_c_files')
    spa_ext = Extension('pvlib.spa_c_files.spa_py',
                        sources=spa_sources, depends=spa_depends)
    extensions.append(spa_ext)
else:
    print('WARNING: spa_c files not detected. ' +
          'See installation instructions for more information.')

setup(name=DISTNAME,
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      packages=PACKAGES,
      install_requires=INSTALL_REQUIRES,
      extras_require=EXTRAS_REQUIRE,
      tests_require=TESTS_REQUIRE,
      ext_modules=extensions,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      maintainer_email=MAINTAINER_EMAIL,
      license=LICENSE,
      url=URL,
      classifiers=CLASSIFIERS,
      **setuptools_kwargs)
| bsd-3-clause |
kambysese/mne-python | mne/time_frequency/tfr.py | 1 | 103477 | """A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Clement Moutard <clement.moutard@polytechnique.org>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License : BSD (3-clause)
from copy import deepcopy
from functools import partial
import numpy as np
from .multitaper import dpss_windows
from ..baseline import rescale
from ..fixes import _import_fft
from ..filter import next_fast_len
from ..parallel import parallel_func
from ..utils import (logger, verbose, _time_mask, _freq_mask, check_fname,
sizeof_fmt, GetEpochsMixin, _prepare_read_metadata,
fill_doc, _prepare_write_metadata, _check_event_id,
_gen_events, SizeMixin, _is_numeric, _check_option,
_validate_type, _check_combine, _check_pandas_installed,
_check_pandas_index_arguments, _check_time_format,
_convert_times, _build_data_frame)
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..channels.layout import _merge_ch_data, _pair_grad_sensors
from ..io.pick import (pick_info, _picks_to_idx, channel_type, _pick_inst,
_get_channel_types)
from ..io.meas_info import Info
from ..viz.utils import (figure_nobar, plt_show, _setup_cmap, warn,
_connection_line, _prepare_joint_axes,
_setup_vmin_vmax, _set_title_multiple_electrodes)
from ..externals.h5io import write_hdf5, read_hdf5
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
    """Compute Morlet wavelets for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling Frequency.
    freqs : array
        Frequency range of interest (1 x Frequencies).
    n_cycles : float | array of float, default 7.0
        Number of cycles. Fixed number or one per frequency.
    sigma : float, default None
        It controls the width of the wavelet ie its temporal
        resolution. If sigma is None the temporal resolution
        is adapted with the frequency like for all wavelet transform.
        The higher the frequency the shorter is the wavelet.
        If sigma is fixed the temporal resolution is fixed
        like for the short time Fourier transform and the number
        of oscillations increases with the frequency.
    zero_mean : bool, default False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of array
        The wavelets time series.
    """
    n_cycles = np.atleast_1d(n_cycles)
    freqs = np.array(freqs)
    if np.any(freqs <= 0):
        raise ValueError("all frequencies in 'freqs' must be "
                         "greater than 0.")
    if n_cycles.size not in (1, len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    wavelets = list()
    for idx, freq in enumerate(freqs):
        cycles = n_cycles[idx] if len(n_cycles) != 1 else n_cycles[0]
        # Temporal standard deviation: frequency-adaptive when sigma is
        # None, otherwise fixed (STFT-like behavior).
        if sigma is None:
            sigma_t = cycles / (2.0 * np.pi * freq)
        else:
            sigma_t = cycles / (2.0 * np.pi * sigma)
        # Symmetric time axis covering +/- 5 standard deviations.
        half_t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
        t = np.r_[-half_t[::-1], half_t[1:]]
        carrier = np.exp(2.0 * 1j * np.pi * freq * t)
        envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
        if zero_mean:
            # Subtract the analytic offset so the wavelet integrates to ~0.
            carrier -= np.exp(- 2 * (np.pi * freq * sigma_t) ** 2)
        wavelet = carrier * envelope
        # Scaling proportional to (sigma_t * sqrt(pi))^(-1/2)
        # (Tallon-Baudry 98).
        wavelet /= np.sqrt(0.5) * np.linalg.norm(wavelet.ravel())
        wavelets.append(wavelet)
    return wavelets
def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False):
    """Compute DPSS tapers for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling frequency.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,), default 7.
        The number of cycles globally or for each frequency.
    time_bandwidth : float, default 4.0
        Time x Bandwidth product. The number of good tapers (low-bias) is
        chosen automatically based on this to equal
        floor(time_bandwidth - 1). Default is 4.0, giving 3 good tapers.
    zero_mean : bool, default False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of list of array
        One list of tapered wavelets per taper; inner lists are indexed
        by frequency.
    """
    freqs = np.array(freqs)
    if np.any(freqs <= 0):
        raise ValueError("all frequencies in 'freqs' must be "
                         "greater than 0.")
    if time_bandwidth < 2.0:
        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
    n_taps = int(np.floor(time_bandwidth - 1))
    n_cycles = np.atleast_1d(n_cycles)
    if n_cycles.size not in (1, len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    Ws = list()
    for taper_idx in range(n_taps):
        per_freq = list()
        for freq_idx, freq in enumerate(freqs):
            cycles = (n_cycles[freq_idx] if len(n_cycles) != 1
                      else n_cycles[0])
            t_win = cycles / float(freq)
            t = np.arange(0., t_win, 1.0 / sfreq)
            # Center the oscillation within the window before tapering.
            oscillation = np.exp(2.0 * 1j * np.pi * freq * (t - t_win / 2.))
            tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
                                        n_taps)
            Wk = oscillation * tapers[taper_idx]
            if zero_mean:  # remove the (real) DC offset
                Wk -= Wk.mean()
            Wk /= np.sqrt(0.5) * np.linalg.norm(Wk.ravel())
            per_freq.append(Wk)
        Ws.append(per_freq)
    return Ws
# Low level convolution
def _get_nfft(wavelets, X, use_fft=True, check=True):
    """Return a fast FFT length for convolving ``X`` with ``wavelets``.

    When ``check`` is True and a wavelet is longer than the signal, warn
    (FFT mode, where the result is still computable) or raise (direct
    convolution mode).
    """
    n_times = X.shape[-1]
    longest = max(w.size for w in wavelets)
    if check and longest > n_times:
        msg = (f'At least one of the wavelets ({longest}) is longer than the '
               f'signal ({n_times}). Consider using a longer signal or '
               'shorter wavelets.')
        if use_fft:
            warn(msg, UserWarning)
        else:
            raise ValueError(msg)
    # Full linear-convolution length, rounded up to an FFT-friendly size.
    return next_fast_len(n_times + longest - 1)
def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True):
    """Compute cwt with fft based convolutions or temporal convolutions.

    Parameters
    ----------
    X : array of shape (n_signals, n_times)
        The data.
    Ws : list of array
        Wavelets time series.
    fsize : int
        FFT length.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.

    use_fft : bool, default True
        Use the FFT for convolutions or not.

    Yields
    ------
    tfr : array, shape (n_freqs, n_time_decim)
        The time-frequency transform of one signal at a time (this is a
        generator). Note that the same buffer is reused across signals, so
        consumers must copy the yielded array if they need to keep it.
    """
    fft, ifft = _import_fft(('fft', 'ifft'))
    _check_option('mode', mode, ['same', 'valid', 'full'])
    decim = _check_decim(decim)
    X = np.asarray(X)

    # Precompute wavelets for given frequency range to save time
    _, n_times = X.shape
    n_times_out = X[:, decim].shape[1]
    n_freqs = len(Ws)

    # precompute FFTs of Ws
    if use_fft:
        fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
        for i, W in enumerate(Ws):
            fft_Ws[i] = fft(W, fsize)

    # Make generator looping across signals
    # NOTE: single buffer reused for every signal (see Yields above).
    tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
    for x in X:
        if use_fft:
            fft_x = fft(x, fsize)

        # Loop across wavelets
        for ii, W in enumerate(Ws):
            if use_fft:
                # Frequency-domain product, then trim to the full
                # linear-convolution length.
                ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
            else:
                ret = np.convolve(x, W, mode=mode)

            # Center and decimate decomposition
            if mode == 'valid':
                # Keep only the fully-overlapping part, centered in the
                # output buffer; the rest of `tfr` stays zero.
                sz = int(abs(W.size - n_times)) + 1
                offset = (n_times - sz) // 2
                this_slice = slice(offset // decim.step,
                                   (offset + sz) // decim.step)
                if use_fft:
                    ret = _centered(ret, sz)
                tfr[ii, this_slice] = ret[decim]
            elif mode == 'full' and not use_fft:
                # Trim the direct full convolution back to n_times samples.
                start = (W.size - 1) // 2
                end = len(ret) - (W.size // 2)
                ret = ret[start:end]
                tfr[ii, :] = ret[decim]
            else:
                # 'same' (and FFT-based 'full'): center-crop to n_times.
                if use_fft:
                    ret = _centered(ret, n_times)
                tfr[ii, :] = ret[decim]
        yield tfr
# Loop of convolution: single trial
def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
                 n_cycles=7.0, zero_mean=None, time_bandwidth=None,
                 use_fft=True, decim=1, output='complex', n_jobs=1,
                 verbose=None):
    """Compute time-frequency transforms.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    freqs : array-like of floats, shape (n_freqs)
        The frequencies.
    sfreq : float | int, default 1.0
        Sampling frequency of the data.
    method : 'multitaper' | 'morlet', default 'morlet'
        The time-frequency method. 'morlet' convolves a Morlet wavelet.
        'multitaper' uses complex exponentials windowed with multiple DPSS
        tapers.
    n_cycles : float | array of float, default 7.0
        Number of cycles in the wavelet. Fixed number
        or one per frequency.
    zero_mean : bool | None, default None
        None means True for method='multitaper' and False for method='morlet'.
        If True, make sure the wavelets have a mean of zero.
    time_bandwidth : float, default None
        If None and method=multitaper, will be set to 4.0 (3 tapers).
        Time x (Full) Bandwidth product. Only applies if
        method == 'multitaper'. The number of good tapers (low-bias) is
        chosen automatically based on this to equal floor(time_bandwidth - 1).
    use_fft : bool, default True
        Use the FFT for convolutions or not.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.

    output : str, default 'complex'

        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The parallelization
        is implemented across channels.
    %(verbose)s

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc
    """
    # Check data
    epoch_data = np.asarray(epoch_data)
    if epoch_data.ndim != 3:
        raise ValueError('epoch_data must be of shape (n_epochs, n_chans, '
                         'n_times), got %s' % (epoch_data.shape,))

    # Check params
    freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
        _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
                         time_bandwidth, use_fft, decim, output)

    decim = _check_decim(decim)
    if (freqs > sfreq / 2.).any():
        raise ValueError('Cannot compute freq above Nyquist freq of the data '
                         '(%0.1f Hz), got %0.1f Hz'
                         % (sfreq / 2., freqs.max()))

    # We decimate *after* decomposition, so we need to create our kernels
    # for the original sfreq
    if method == 'morlet':
        W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
        Ws = [W]  # to have same dimensionality as the 'multitaper' case

    elif method == 'multitaper':
        Ws = _make_dpss(sfreq, freqs, n_cycles=n_cycles,
                        time_bandwidth=time_bandwidth, zero_mean=zero_mean)

    # Check wavelets
    if len(Ws[0][0]) > epoch_data.shape[2]:
        raise ValueError('At least one of the wavelets is longer than the '
                         'signal. Use a longer signal or shorter wavelets.')

    # Initialize output
    n_freqs = len(freqs)
    n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
    if output in ('power', 'phase', 'avg_power', 'itc'):
        dtype = np.float64
    elif output in ('complex', 'avg_power_itc'):
        # avg_power_itc is stored as power + 1i * itc to keep a
        # simple dimensionality
        dtype = np.complex128

    # Averaged outputs collapse the epoch dimension; channel-first layout
    # here so the parallel loop can fill one channel at a time.
    if ('avg_' in output) or ('itc' in output):
        out = np.empty((n_chans, n_freqs, n_times), dtype)
    else:
        out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)

    # Parallel computation
    all_Ws = sum([list(W) for W in Ws], list())
    # Called for its side effect only: warn/raise early (before spawning
    # workers) if any wavelet is longer than the signal.
    _get_nfft(all_Ws, epoch_data, use_fft)
    parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)

    # Parallelization is applied across channels.
    tfrs = parallel(
        my_cwt(channel, Ws, output, use_fft, 'same', decim)
        for channel in epoch_data.transpose(1, 0, 2))

    # FIXME: to avoid overheads we should use np.array_split()
    for channel_idx, tfr in enumerate(tfrs):
        out[channel_idx] = tfr

    if ('avg_' not in output) and ('itc' not in output):
        # This is to enforce that the first dimension is for epochs
        out = out.transpose(1, 0, 2, 3)
    return out
def _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
                     time_bandwidth, use_fft, decim, output):
    """Aux. function to _compute_tfr to check the params validity.

    Parameters mirror those of ``_compute_tfr``; each is validated and,
    where needed, coerced: ``freqs`` to a 1D float array, ``sfreq`` to
    float, a ``None`` ``zero_mean`` to the method-dependent default, an
    integer ``decim`` to a slice.

    Returns
    -------
    freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim
        The validated (and possibly coerced) parameters.

    Raises
    ------
    ValueError
        If any parameter has an invalid type or value.
    NotImplementedError
        If output='phase' is requested with method='multitaper'.
    """
    # Check freqs
    if not isinstance(freqs, (list, np.ndarray)):
        raise ValueError('freqs must be an array-like, got %s '
                         'instead.' % type(freqs))
    freqs = np.asarray(freqs, dtype=float)
    if freqs.ndim != 1:
        raise ValueError('freqs must be of shape (n_freqs,), got %s '
                         'instead.' % np.array(freqs.shape))

    # Check sfreq
    if not isinstance(sfreq, (float, int)):
        raise ValueError('sfreq must be a float or an int, got %s '
                         'instead.' % type(sfreq))
    sfreq = float(sfreq)

    # Default zero_mean = True if multitaper else False
    zero_mean = method == 'multitaper' if zero_mean is None else zero_mean
    if not isinstance(zero_mean, bool):
        raise ValueError('zero_mean should be of type bool, got %s. instead'
                         % type(zero_mean))
    # (a redundant second np.asarray(freqs) coercion was removed here;
    # freqs is already a float ndarray at this point)

    if (method == 'multitaper') and (output == 'phase'):
        raise NotImplementedError(
            'This function is not optimized to compute the phase using the '
            'multitaper method. Use np.angle of the complex output instead.')

    # Check n_cycles: either one global value or one value per frequency.
    if isinstance(n_cycles, (int, float)):
        n_cycles = float(n_cycles)
    elif isinstance(n_cycles, (list, np.ndarray)):
        n_cycles = np.array(n_cycles)
        if len(n_cycles) != len(freqs):
            raise ValueError('n_cycles must be a float or an array of length '
                             '%i frequencies, got %i cycles instead.' %
                             (len(freqs), len(n_cycles)))
    else:
        raise ValueError('n_cycles must be a float or an array, got %s '
                         'instead.' % type(n_cycles))

    # Check time_bandwidth (multitaper-only; default gives 3 tapers)
    if (method == 'morlet') and (time_bandwidth is not None):
        raise ValueError('time_bandwidth only applies to "multitaper" method.')
    elif method == 'multitaper':
        time_bandwidth = (4.0 if time_bandwidth is None
                          else float(time_bandwidth))

    # Check use_fft
    if not isinstance(use_fft, bool):
        raise ValueError('use_fft must be a boolean, got %s '
                         'instead.' % type(use_fft))

    # Check decim: normalize an integer factor to a slice.
    if isinstance(decim, int):
        decim = slice(None, None, decim)
    if not isinstance(decim, slice):
        raise ValueError('decim must be an integer or a slice, '
                         'got %s instead.' % type(decim))

    # Check output
    _check_option('output', output, ['complex', 'power', 'phase',
                                    'avg_power_itc', 'avg_power', 'itc'])
    _check_option('method', method, ['multitaper', 'morlet'])

    return freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):
    """Aux. function to _compute_tfr.

    Loops time-frequency transform across wavelets and epochs.

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        The epochs data of a single channel.
    Ws : list, shape (n_tapers, n_wavelets, n_times)
        The wavelets.
    output : str

        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    use_fft : bool
        Use the FFT for convolutions or not.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : slice
        The decimation slice: e.g. power[:, decim]

    Returns
    -------
    tfrs : array, shape (n_freqs, n_times) or (n_epochs, n_freqs, n_times)
        2D for averaged/ITC outputs, 3D otherwise; accumulated over tapers
        and normalized by the number of tapers (and epochs when averaging).
    """
    # Set output type
    dtype = np.float64
    if output in ['complex', 'avg_power_itc']:
        dtype = np.complex128

    # Init outputs
    decim = _check_decim(decim)
    n_epochs, n_times = X[:, decim].shape
    n_freqs = len(Ws[0])
    if ('avg_' in output) or ('itc' in output):
        tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
    else:
        tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)

    # Loops across tapers.
    for W in Ws:
        # No need to check here, it's done earlier (outside parallel part)
        nfft = _get_nfft(W, X, use_fft, check=False)
        # _cwt_gen is lazy: it yields one (n_freqs, n_times) array per epoch.
        coefs = _cwt_gen(
            X, W, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft)

        # Inter-trial phase locking is apparently computed per taper...
        if 'itc' in output:
            plf = np.zeros((n_freqs, n_times), dtype=np.complex128)

        # Loop across epochs
        for epoch_idx, tfr in enumerate(coefs):
            # Transform complex values
            if output in ['power', 'avg_power']:
                tfr = (tfr * tfr.conj()).real  # power
            elif output == 'phase':
                tfr = np.angle(tfr)
            elif output == 'avg_power_itc':
                tfr_abs = np.abs(tfr)
                plf += tfr / tfr_abs  # phase
                tfr = tfr_abs ** 2  # power
            elif output == 'itc':
                plf += tfr / np.abs(tfr)  # phase
                continue  # not need to stack anything else than plf

            # Stack or add
            if ('avg_' in output) or ('itc' in output):
                tfrs += tfr
            else:
                # += (not =) accumulates across tapers in the multitaper case
                tfrs[epoch_idx] += tfr

        # Compute inter trial coherence
        if output == 'avg_power_itc':
            # Pack ITC into the imaginary part (see _compute_tfr docstring).
            tfrs += 1j * np.abs(plf)
        elif output == 'itc':
            tfrs += np.abs(plf)

    # Normalization of average metrics
    if ('avg_' in output) or ('itc' in output):
        tfrs /= n_epochs

    # Normalization by number of taper
    tfrs /= len(Ws)
    return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute the continuous wavelet transform of several signals.

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals to decompose.
    Ws : list of array
        The wavelet time series (one per frequency).
    use_fft : bool
        If True (default), perform the convolutions in the frequency
        domain via the FFT.
    mode : 'same' | 'valid' | 'full'
        Convention for convolution. 'full' is currently not implemented with
        ``use_fft=False``. Defaults to ``'same'``.
    decim : int | slice
        Decimation applied after the time-frequency decomposition to
        reduce memory usage. If `int`, returns tfr[..., ::decim];
        if `slice`, returns tfr[..., decim]. Defaults to 1.

        .. note:: Decimation may create aliasing artifacts.

    Returns
    -------
    tfr : array, shape (n_signals, n_freqs, n_times)
        The time-frequency decompositions.

    See Also
    --------
    mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
        with Morlet wavelets.
    """
    # Pick an FFT-friendly convolution length, then materialize the result.
    fft_len = _get_nfft(Ws, X, use_fft)
    return _cwt_array(X, Ws, fft_len, mode, decim, use_fft)
def _cwt_array(X, Ws, nfft, mode, decim, use_fft):
    """Collect the per-signal output of _cwt_gen into one 3D array."""
    decim = _check_decim(decim)
    n_signals, n_times_out = X[:, decim].shape
    out = np.empty((n_signals, len(Ws), n_times_out), dtype=np.complex128)
    coef_gen = _cwt_gen(X, Ws, fsize=nfft, mode=mode, decim=decim,
                        use_fft=use_fft)
    # The generator reuses its buffer, so copy each yield into `out`.
    for sig_idx, coefs in enumerate(coef_gen):
        out[sig_idx] = coefs
    return out
def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average,
             output=None, **tfr_params):
    """Help reduce redundancy between tfr_morlet and tfr_multitaper.

    Runs _compute_tfr on the data of ``inst`` (Epochs or Evoked) and wraps
    the result in AverageTFR (average=True; tuple of power and ITC when
    return_itc) or EpochsTFR (average=False).

    NOTE: the docstring was previously placed *after* the ``from ..epochs``
    import below, which made it a discarded string constant instead of the
    function's ``__doc__``; it is now the first statement as required.
    """
    from ..epochs import BaseEpochs
    decim = _check_decim(decim)
    data = _get_data(inst, return_itc)
    info = inst.info.copy()  # make a copy as sfreq can be altered
    info, data = _prepare_picks(info, data, picks, axis=1)
    del picks

    # Resolve the low-level `output` mode from the user-facing options.
    if average:
        if output == 'complex':
            raise ValueError('output must be "power" if average=True')
        if return_itc:
            output = 'avg_power_itc'
        else:
            output = 'avg_power'
    else:
        output = 'power' if output is None else output
        if return_itc:
            raise ValueError('Inter-trial coherence is not supported'
                             ' with average=False')

    out = _compute_tfr(data, freqs, info['sfreq'], method=method,
                       output=output, decim=decim, **tfr_params)
    times = inst.times[decim].copy()
    info['sfreq'] /= decim.step  # account for the decimation

    if average:
        if return_itc:
            # avg_power_itc packs power in .real and ITC in .imag.
            power, itc = out.real, out.imag
        else:
            power = out
        nave = len(data)
        out = AverageTFR(info, power, times, freqs, nave,
                         method='%s-power' % method)
        if return_itc:
            out = (out, AverageTFR(info, itc, times, freqs, nave,
                                   method='%s-itc' % method))
    else:
        power = out
        # Carry epoch bookkeeping through to the EpochsTFR when available.
        if isinstance(inst, BaseEpochs):
            meta = deepcopy(inst._metadata)
            evs = deepcopy(inst.events)
            ev_id = deepcopy(inst.event_id)
            selection = deepcopy(inst.selection)
            drop_log = deepcopy(inst.drop_log)
        else:
            # if the input is of class Evoked
            meta = evs = ev_id = selection = drop_log = None
        out = EpochsTFR(info, power, times, freqs, method='%s-power' % method,
                        events=evs, event_id=ev_id, selection=selection,
                        drop_log=drop_log, metadata=meta)
    return out
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
               n_jobs=1, picks=None, zero_mean=True, average=True,
               output='power', verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.

    Same computation as `~mne.time_frequency.tfr_array_morlet`, but
    operates on `~mne.Epochs` objects instead of
    :class:`NumPy arrays <numpy.ndarray>`.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
    use_fft : bool, default False
        The fft based convolution or not.
    return_itc : bool, default True
        Return inter-trial coherence (ITC) as well as averaged power.
        Must be ``False`` for evoked data.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.

    %(n_jobs)s
    picks : array-like of int | None, default None
        The indices of the channels to decompose. If None, all available
        good data channels are decomposed.
    zero_mean : bool, default True
        Make sure the wavelet has a mean of zero.

        .. versionadded:: 0.13.0
    %(tfr_average)s
    output : str
        Can be "power" (default) or "complex". If "complex", then
        average must be False.

        .. versionadded:: 0.15.0
    %(verbose)s

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    """
    # Bundle the method-specific options and delegate to the shared
    # Epochs/Evoked TFR pipeline.
    tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
                      zero_mean=zero_mean, output=output)
    return _tfr_aux('morlet', inst, freqs, decim, return_itc, picks,
                    average, **tfr_params)
@verbose
def tfr_array_morlet(epoch_data, sfreq, freqs, n_cycles=7.0,
                     zero_mean=False, use_fft=True, decim=1, output='complex',
                     n_jobs=1, verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.

    Same computation as `~mne.time_frequency.tfr_morlet`, but operates on
    :class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    sfreq : float | int
        Sampling frequency of the data.
    freqs : array-like of float, shape (n_freqs,)
        The frequencies.
    n_cycles : float | array of float, default 7.0
        Number of cycles in the Morlet wavelet. Fixed number or one per
        frequency.
    zero_mean : bool | False
        If True, make sure the wavelets have a mean of zero. default False.
    use_fft : bool
        Use the FFT for convolutions or not. default True.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition. default 1
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.

    output : str, default 'complex'

        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The parallelization
        is implemented across channels. Default 1.
    %(verbose)s

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc.

    See Also
    --------
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell

    Notes
    -----
    .. versionadded:: 0.14.0
    """
    # Thin wrapper: fix method='morlet' (so time_bandwidth does not apply)
    # and delegate to the generic low-level implementation.
    return _compute_tfr(epoch_data=epoch_data, freqs=freqs,
                        sfreq=sfreq, method='morlet', n_cycles=n_cycles,
                        zero_mean=zero_mean, time_bandwidth=None,
                        use_fft=use_fft, decim=decim, output=output,
                        n_jobs=n_jobs, verbose=verbose)
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
                   use_fft=True, return_itc=True, decim=1,
                   n_jobs=1, picks=None, average=True, verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS tapers.

    Same computation as `~mne.time_frequency.tfr_array_multitaper`, but
    operates on `~mne.Epochs` objects instead of
    :class:`NumPy arrays <numpy.ndarray>`.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
        The time-window length is thus T = n_cycles / freq.
    time_bandwidth : float, (optional), default 4.0 (n_tapers=3)
        Time x (Full) Bandwidth product. Should be >= 2.0.
        Choose this along with n_cycles to get desired frequency resolution.
        The number of good tapers (least leakage from far away frequencies)
        is chosen automatically based on this to floor(time_bandwidth - 1).
        E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool, default True
        The fft based convolution or not.
    return_itc : bool, default True
        Return inter-trial coherence (ITC) as well as averaged (or
        single-trial) power.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.

    %(n_jobs)s
    %(picks_good_data)s
    %(tfr_average)s
    %(verbose)s

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Bundle the method-specific options (multitaper implies zero-mean
    # wavelets) and delegate to the shared Epochs/Evoked TFR pipeline.
    tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
                      zero_mean=True, time_bandwidth=time_bandwidth)
    return _tfr_aux('multitaper', inst, freqs, decim, return_itc, picks,
                    average, **tfr_params)
# TFR(s) class
class _BaseTFR(ContainsMixin, UpdateChannelsMixin, SizeMixin):
"""Base TFR class."""
    @property
    def data(self):
        """The time-frequency data array (stored on ``self._data``)."""
        return self._data

    @data.setter
    def data(self, data):
        # No validation here; callers are responsible for keeping the shape
        # consistent with self.times / self.freqs / the channel info.
        self._data = data

    @property
    def ch_names(self):
        """Channel names."""
        return self.info['ch_names']
    @fill_doc
    def crop(self, tmin=None, tmax=None, fmin=None, fmax=None,
             include_tmax=True):
        """Crop data to a given time interval in place.

        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.
        fmin : float | None
            Lowest frequency of selection in Hz.

            .. versionadded:: 0.18.0
        fmax : float | None
            Highest frequency of selection in Hz.

            .. versionadded:: 0.18.0
        %(include_tmax)s

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """
        # Build boolean masks only for the dimensions actually cropped;
        # slice(None) keeps the other dimension untouched.
        if tmin is not None or tmax is not None:
            time_mask = _time_mask(
                self.times, tmin, tmax, sfreq=self.info['sfreq'],
                include_tmax=include_tmax)
        else:
            time_mask = slice(None)
        if fmin is not None or fmax is not None:
            freq_mask = _freq_mask(self.freqs, sfreq=self.info['sfreq'],
                                   fmin=fmin, fmax=fmax)
        else:
            freq_mask = slice(None)

        self.times = self.times[time_mask]
        self.freqs = self.freqs[freq_mask]
        # Deal with broadcasting (boolean arrays do not broadcast, but indices
        # do, so we need to convert freq_mask to make use of broadcasting)
        if isinstance(time_mask, np.ndarray) and \
                isinstance(freq_mask, np.ndarray):
            # integer column vector broadcasts against the time mask
            freq_mask = np.where(freq_mask)[0][:, np.newaxis]
        self.data = self.data[..., freq_mask, time_mask]
        return self
def copy(self):
"""Return a copy of the instance.
Returns
-------
copy : instance of EpochsTFR | instance of AverageTFR
A copy of the instance.
"""
return deepcopy(self)
    @verbose
    def apply_baseline(self, baseline, mode='mean', verbose=None):
        """Baseline correct the data.

        Parameters
        ----------
        baseline : array-like, shape (2,)
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by

            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        %(verbose_meth)s

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """  # noqa: E501
        # rescale() mutates self.data in place (copy=False); self is
        # returned to allow method chaining.
        rescale(self.data, self.times, baseline, mode, copy=False)
        return self
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save TFR object to hdf5 file.
Parameters
----------
fname : str
The file name, which should end with ``-tfr.h5``.
%(overwrite)s
%(verbose)s
See Also
--------
read_tfrs, write_tfrs
"""
write_tfrs(fname, self, overwrite=overwrite)
@fill_doc
def to_data_frame(self, picks=None, index=None, long_format=False,
time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Channels are converted to columns in the DataFrame. By default,
additional columns ``'time'``, ``'freq'``, ``'epoch'``, and
``'condition'`` (epoch event description) are added, unless ``index``
is not ``None`` (in which case the columns specified in ``index`` will
be used to form the DataFrame's index instead). ``'epoch'``, and
``'condition'`` are not supported for ``AverageTFR``.
Parameters
----------
%(picks_all)s
%(df_index_epo)s
Valid string values are ``'time'``, ``'freq'``, ``'epoch'``, and
``'condition'`` for ``EpochsTFR`` and ``'time'`` and ``'freq'``
for ``AverageTFR``.
Defaults to ``None``.
%(df_longform_epo)s
%(df_time_format)s
.. versionadded:: 0.23
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'freq']
if isinstance(self, EpochsTFR):
valid_index_args.extend(['epoch', 'condition'])
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
times = self.times
picks = _picks_to_idx(self.info, picks, 'all', exclude=())
if isinstance(self, EpochsTFR):
data = self.data[:, picks, :, :]
else:
data = self.data[np.newaxis, picks] # add singleton "epochs" axis
n_epochs, n_picks, n_freqs, n_times = data.shape
# reshape to (epochs*freqs*times) x signals
data = np.moveaxis(data, 1, -1)
data = data.reshape(n_epochs * n_freqs * n_times, n_picks)
# prepare extra columns / multiindex
mindex = list()
times = np.tile(times, n_epochs * n_freqs)
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
freqs = self.freqs
freqs = np.tile(np.repeat(freqs, n_times), n_epochs)
mindex.append(('freq', freqs))
if isinstance(self, EpochsTFR):
mindex.append(('epoch', np.repeat(self.selection,
n_times * n_freqs)))
rev_event_id = {v: k for k, v in self.event_id.items()}
conditions = [rev_event_id[k] for k in self.events[:, 2]]
mindex.append(('condition', np.repeat(conditions,
n_times * n_freqs)))
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
# build DataFrame
if isinstance(self, EpochsTFR):
default_index = ['condition', 'epoch', 'freq', 'time']
else:
default_index = ['freq', 'time']
df = _build_data_frame(self, data, picks, long_format, mindex, index,
default_index=default_index)
return df
@fill_doc
class AverageTFR(_BaseTFR):
"""Container for Time-Frequency data.
Can for example store induced power at sensor level or inter-trial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None, default None
Comment on the data, e.g., the experimental condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
%(verbose)s
Attributes
----------
info : instance of Info
Measurement info.
ch_names : list
The names of the channels.
nave : int
Number of averaged epochs.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data array.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
comment : str
Comment on dataset. Can be the condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None): # noqa: D102
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.nave = nave
self.comment = comment
self.method = method
self.preload = True
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
axes=None, layout=None, yscale='auto', mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=0.1, combine=None,
exclude=[], verbose=None):
"""Plot TFRs as a two-dimensional image(s).
Parameters
----------
%(picks_good_data)s
baseline : None (default) or tuple, shape (2,)
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maximum value an the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | 'interactive' | (colormap, bool)
The colormap to use. If tuple, the first value indicates the
colormap to use and the second value is a boolean defining
interactivity. In interactive mode the colors are adjustable by
clicking and dragging the colorbar with left and right mouse
button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range.
Up and down arrows can be used to change the colormap. If
'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of images.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | 'auto' | None
String for title. Defaults to None (blank/no title). If 'auto',
automatically create a title that lists up to 6 of the channels
used in the figure.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
.. versionadded:: 0.14.0
mask : ndarray | None
An array of booleans of the same shape as the data. Entries of the
data that correspond to False in the mask are plotted
transparently. Useful for, e.g., masking for statistical
significance.
.. versionadded:: 0.16.0
mask_style : None | 'both' | 'contour' | 'mask'
If ``mask`` is not None: if ``'contour'``, a contour line is drawn
around the masked areas (``True`` in ``mask``). If ``'mask'``,
entries not ``True`` in ``mask`` are shown transparently. If
``'both'``, both a contour and transparency are used.
If ``None``, defaults to ``'both'`` if ``mask`` is not None, and is
ignored otherwise.
.. versionadded:: 0.17
mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
The colormap chosen for masked parts of the image (see below), if
``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
``'Greys'``. Not interactive. Otherwise, as ``cmap``.
.. versionadded:: 0.17
mask_alpha : float
A float between 0 and 1. If ``mask`` is not None, this sets the
alpha level (degree of transparency) for the masked-out segments.
I.e., if 0, masked-out segments are not visible at all.
Defaults to 0.1.
.. versionadded:: 0.16.0
combine : 'mean' | 'rms' | None
Type of aggregation to perform across selected channels. If
None, plot one figure per selected channel.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to an empty list.
%(verbose_meth)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
return self._plot(picks=picks, baseline=baseline, mode=mode,
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=colorbar, show=show, title=title,
axes=axes, layout=layout, yscale=yscale, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap,
mask_alpha=mask_alpha, combine=combine,
exclude=exclude, verbose=verbose)
@verbose
def _plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
axes=None, layout=None, yscale='auto', mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=.25,
combine=None, exclude=None, copy=True,
source_plot_joint=False, topomap_args=dict(), ch_type=None,
verbose=None):
"""Plot TFRs as a two-dimensional image(s).
See self.plot() for parameters description.
"""
import matplotlib.pyplot as plt
from ..viz.topo import _imshow_tfr
# channel selection
# simply create a new tfr object(s) with the desired channel selection
tfr = _preproc_tfr_instance(
self, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB, mode,
baseline, exclude, copy)
del picks
data = tfr.data
n_picks = len(tfr.ch_names) if combine is None else 1
if combine == 'mean':
data = data.mean(axis=0, keepdims=True)
elif combine == 'rms':
data = np.sqrt((data ** 2).mean(axis=0, keepdims=True))
elif combine is not None:
raise ValueError('combine must be None, mean or rms.')
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError('There must be an axes for each picked '
'channel.')
tmin, tmax = tfr.times[[0, -1]]
if vmax is None:
vmax = np.abs(data).max()
if vmin is None:
vmin = -np.abs(data).max()
if isinstance(axes, plt.Axes):
axes = [axes]
cmap = _setup_cmap(cmap)
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(
tfr._onselect, cmap=cmap, source_plot_joint=source_plot_joint,
topomap_args={k: v for k, v in topomap_args.items()
if k not in {"vmin", "vmax", "cmap", "axes"}})
_imshow_tfr(
ax, 0, tmin, tmax, vmin, vmax, onselect_callback, ylim=None,
tfr=data[idx: idx + 1], freq=tfr.freqs, x_label='Time (s)',
y_label='Frequency (Hz)', colorbar=colorbar, cmap=cmap,
yscale=yscale, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha)
if title is None:
if combine is None or len(tfr.info['ch_names']) == 1:
title = tfr.info['ch_names'][0]
else:
title = _set_title_multiple_electrodes(
title, combine, tfr.info["ch_names"], all=True,
ch_type=ch_type)
if title:
fig.suptitle(title)
plt_show(show)
# XXX This is inside the loop, guaranteeing a single iter!
# Also there is no None-contingent behavior here so the docstring
# was wrong (saying it would be collapsed)
return fig
@verbose
def plot_joint(self, timefreqs=None, picks=None, baseline=None,
mode='mean', tmin=None, tmax=None, fmin=None, fmax=None,
vmin=None, vmax=None, cmap='RdBu_r', dB=False,
colorbar=True, show=True, title=None,
yscale='auto', combine='mean', exclude=[],
topomap_args=None, image_args=None, verbose=None):
"""Plot TFRs as a two-dimensional image with topomaps.
Parameters
----------
timefreqs : None | list of tuple | dict of tuple
The time-frequency point(s) for which topomaps will be plotted.
See Notes.
%(picks_good_data)s
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None, the beginning of the data is used.
If b is None, then b is set to the end of the interval.
If baseline is equal to (None, None), the entire time
interval is used.
mode : None | str
If str, must be one of 'ratio', 'zscore', 'mean', 'percent',
'logratio' and 'zlogratio'.
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value of the color scale for the image (for
topomaps, see ``topomap_args``). If vmin is None, the data
absolute minimum value is used.
vmax : float | None
The maximum value of the color scale for the image (for
topomaps, see ``topomap_args``). If vmax is None, the data
absolute maximum value is used.
cmap : matplotlib colormap
The colormap to use.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot (relating to the
topomaps). For user defined axes, the colorbar cannot be drawn.
Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
combine : 'mean' | 'rms'
Type of aggregation to perform across selected channels.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to an empty list, i.e., ``[]``.
topomap_args : None | dict
A dict of ``kwargs`` that are forwarded to
:func:`mne.viz.plot_topomap` to style the topomaps. ``axes`` and
``show`` are ignored. If ``times`` is not in this dict, automatic
peak detection is used. Beyond that, if ``None``, no customizable
arguments will be passed.
Defaults to ``None``.
image_args : None | dict
A dict of ``kwargs`` that are forwarded to :meth:`AverageTFR.plot`
to style the image. ``axes`` and ``show`` are ignored. Beyond that,
if ``None``, no customizable arguments will be passed.
Defaults to ``None``.
%(verbose_meth)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
Notes
-----
``timefreqs`` has three different modes: tuples, dicts, and auto.
For (list of) tuple(s) mode, each tuple defines a pair
(time, frequency) in s and Hz on the TFR plot. For example, to
look at 10 Hz activity 1 second into the epoch and 3 Hz activity
300 msec into the epoch, ::
timefreqs=((1, 10), (.3, 3))
If provided as a dictionary, (time, frequency) tuples are keys and
(time_window, frequency_window) tuples are the values - indicating the
width of the windows (centered on the time and frequency indicated by
the key) to be averaged over. For example, ::
timefreqs={(1, 10): (0.1, 2)}
would translate into a window that spans 0.95 to 1.05 seconds, as
well as 9 to 11 Hz. If None, a single topomap will be plotted at the
absolute peak across the time-frequency representation.
.. versionadded:: 0.16.0
""" # noqa: E501
from ..viz.topomap import (_set_contour_locator, plot_topomap,
_get_pos_outlines, _find_topomap_coords)
import matplotlib.pyplot as plt
#####################################
# Handle channels (picks and types) #
#####################################
# it would be nicer to let this happen in self._plot,
# but we need it here to do the loop over the remaining channel
# types in case a user supplies `picks` that pre-select only one
# channel type.
# Nonetheless, it should be refactored for code reuse.
copy = any(var is not None for var in (exclude, picks, baseline))
tfr = _pick_inst(self, picks, exclude, copy=copy)
del picks
ch_types = _get_channel_types(tfr.info, unique=True)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
logger.info("Multiple channel types selected, returning one "
"figure per type.")
figs = list()
for this_type in ch_types: # pick corresponding channel type
type_picks = [idx for idx in range(tfr.info['nchan'])
if channel_type(tfr.info, idx) == this_type]
tf_ = _pick_inst(tfr, type_picks, None, copy=True)
if len(_get_channel_types(tf_.info, unique=True)) > 1:
raise RuntimeError(
'Possibly infinite loop due to channel selection '
'problem. This should never happen! Please check '
'your channel types.')
figs.append(
tf_.plot_joint(
timefreqs=timefreqs, picks=None, baseline=baseline,
mode=mode, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=colorbar, show=False, title=title,
yscale=yscale, combine=combine,
exclude=None, topomap_args=topomap_args,
verbose=verbose))
return figs
else:
ch_type = ch_types.pop()
# Handle timefreqs
timefreqs = _get_timefreqs(tfr, timefreqs)
n_timefreqs = len(timefreqs)
if topomap_args is None:
topomap_args = dict()
topomap_args_pass = {k: v for k, v in topomap_args.items() if
k not in ('axes', 'show', 'colorbar')}
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass["contours"] = topomap_args.get('contours', 6)
topomap_args_pass['ch_type'] = ch_type
##############
# Image plot #
##############
fig, tf_ax, map_ax, cbar_ax = _prepare_joint_axes(n_timefreqs)
cmap = _setup_cmap(cmap)
# image plot
# we also use this to baseline and truncate (times and freqs)
# (a copy of) the instance
if image_args is None:
image_args = dict()
fig = tfr._plot(
picks=None, baseline=baseline, mode=mode, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=False, show=False, title=title, axes=tf_ax,
yscale=yscale, combine=combine, exclude=None, copy=False,
source_plot_joint=True, topomap_args=topomap_args_pass,
ch_type=ch_type, **image_args)
# set and check time and freq limits ...
# can only do this after the tfr plot because it may change these
# parameters
tmax, tmin = tfr.times.max(), tfr.times.min()
fmax, fmin = tfr.freqs.max(), tfr.freqs.min()
for time, freq in timefreqs.keys():
if not (tmin <= time <= tmax):
error_value = "time point (" + str(time) + " s)"
elif not (fmin <= freq <= fmax):
error_value = "frequency (" + str(freq) + " Hz)"
else:
continue
raise ValueError("Requested " + error_value + " exceeds the range"
"of the data. Choose different `timefreqs`.")
############
# Topomaps #
############
titles, all_data, all_pos, vlims = [], [], [], []
# the structure here is a bit complicated to allow aggregating vlims
# over all topomaps. First, one loop over all timefreqs to collect
# vlims. Then, find the max vlims and in a second loop over timefreqs,
# do the actual plotting.
timefreqs_array = np.array([np.array(keys) for keys in timefreqs])
order = timefreqs_array[:, 0].argsort() # sort by time
for ii, (time, freq) in enumerate(timefreqs_array[order]):
avg = timefreqs[(time, freq)]
# set up symmetric windows
time_half_range, freq_half_range = avg / 2.
if time_half_range == 0:
time = tfr.times[np.argmin(np.abs(tfr.times - time))]
if freq_half_range == 0:
freq = tfr.freqs[np.argmin(np.abs(tfr.freqs - freq))]
if (time_half_range == 0) and (freq_half_range == 0):
sub_map_title = '(%.2f s,\n%.1f Hz)' % (time, freq)
else:
sub_map_title = \
'(%.1f \u00B1 %.1f s,\n%.1f \u00B1 %.1f Hz)' % \
(time, time_half_range, freq, freq_half_range)
tmin = time - time_half_range
tmax = time + time_half_range
fmin = freq - freq_half_range
fmax = freq + freq_half_range
data = tfr.data
# merging grads here before rescaling makes ERDs visible
sphere = topomap_args.get('sphere')
if ch_type == 'grad':
picks = _pair_grad_sensors(tfr.info, topomap_coords=False)
pos = _find_topomap_coords(
tfr.info, picks=picks[::2], sphere=sphere)
method = combine or 'rms'
data, _ = _merge_ch_data(data[picks], ch_type, [],
method=method)
del picks, method
else:
pos, _ = _get_pos_outlines(tfr.info, None, sphere)
del sphere
all_pos.append(pos)
data, times, freqs, _, _ = _preproc_tfr(
data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, None, tfr.info['sfreq'])
vlims.append(np.abs(data).max())
titles.append(sub_map_title)
all_data.append(data)
new_t = tfr.times[np.abs(tfr.times - np.median([times])).argmin()]
new_f = tfr.freqs[np.abs(tfr.freqs - np.median([freqs])).argmin()]
timefreqs_array[ii] = (new_t, new_f)
# passing args to the topomap calls
max_lim = max(vlims)
topomap_args_pass["vmin"] = vmin = topomap_args.get('vmin', -max_lim)
topomap_args_pass["vmax"] = vmax = topomap_args.get('vmax', max_lim)
locator, contours = _set_contour_locator(
vmin, vmax, topomap_args_pass["contours"])
topomap_args_pass['contours'] = contours
for ax, title, data, pos in zip(map_ax, titles, all_data, all_pos):
ax.set_title(title)
plot_topomap(data.mean(axis=(-1, -2)), pos,
cmap=cmap[0], axes=ax, show=False,
**topomap_args_pass)
#############
# Finish up #
#############
if colorbar:
from matplotlib import ticker
cbar = plt.colorbar(ax.images[0], cax=cbar_ax)
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
plt.subplots_adjust(left=.12, right=.925, bottom=.14,
top=1. if title is not None else 1.2)
# draw the connection lines between time series and topoplots
lines = [_connection_line(time_, fig, tf_ax, map_ax_, y=freq_,
y_source_transform="transData")
for (time_, freq_), map_ax_ in zip(timefreqs_array, map_ax)]
fig.lines.extend(lines)
plt_show(show)
return fig
    @verbose
    def _onselect(self, eclick, erelease, baseline=None, mode=None,
                  cmap=None, source_plot_joint=False, topomap_args=None,
                  verbose=None):
        """Handle rubber band selector in channel tfr.

        ``eclick``/``erelease`` are the press/release mouse events from the
        matplotlib rectangle selector over a TFR image.
        """
        from ..viz.topomap import plot_tfr_topomap, plot_topomap, _add_colorbar
        # Ignore tiny drags (``x``/``y`` are figure pixel coordinates here).
        if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
            return
        # Data coordinates of the selection: time on x, frequency on y.
        tmin = round(min(eclick.xdata, erelease.xdata), 5)  # s
        tmax = round(max(eclick.xdata, erelease.xdata), 5)
        fmin = round(min(eclick.ydata, erelease.ydata), 5)  # Hz
        fmax = round(max(eclick.ydata, erelease.ydata), 5)
        # Snap the selection to the nearest sampled time/frequency values.
        tmin = min(self.times, key=lambda x: abs(x - tmin))  # find closest
        tmax = min(self.times, key=lambda x: abs(x - tmax))
        fmin = min(self.freqs, key=lambda x: abs(x - fmin))
        fmax = min(self.freqs, key=lambda x: abs(x - fmax))
        # A degenerate (single-sample) window cannot be averaged sensibly.
        if tmin == tmax or fmin == fmax:
            logger.info('The selected area is too small. '
                        'Select a larger time-frequency window.')
            return
        # Collect the channel types for which a topomap can be drawn.
        types = list()
        if 'eeg' in self:
            types.append('eeg')
        if 'mag' in self:
            types.append('mag')
        if 'grad' in self:
            if len(_pair_grad_sensors(self.info, topomap_coords=False,
                                      raise_error=False)) >= 2:
                types.append('grad')
            elif len(types) == 0:
                # NOTE(review): this early return only covers the
                # grad-present-but-unpairable case; when 'grad' is absent and
                # no other types were found, execution falls through and an
                # empty figure is created — confirm intended.
                return  # Don't draw a figure for nothing.
        fig = figure_nobar()
        fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(
            tmin, tmax, fmin, fmax), y=0.04)
        if source_plot_joint:
            # Called from plot_joint: draw a single topomap of the window
            # average, styled like the other joint-plot topomaps.
            ax = fig.add_subplot(111)
            data = _preproc_tfr(
                self.data, self.times, self.freqs, tmin, tmax, fmin, fmax,
                None, None, None, None, None, self.info['sfreq'])[0]
            # Average over the (freq, time) window to one value per channel.
            data = data.mean(-1).mean(-1)
            vmax = np.abs(data).max()
            im, _ = plot_topomap(data, self.info, vmin=-vmax, vmax=vmax,
                                 cmap=cmap[0], axes=ax, show=False,
                                 **topomap_args)
            _add_colorbar(ax, im, cmap, title="AU", pad=.1)
            fig.show()
        else:
            # Standalone TFR plot: one topomap per available channel type.
            for idx, ch_type in enumerate(types):
                ax = fig.add_subplot(1, len(types), idx + 1)
                plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
                                 fmin=fmin, fmax=fmax,
                                 baseline=baseline, mode=mode, cmap=None,
                                 title=ch_type, vmin=None, vmax=None, axes=ax)
    @verbose
    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
                  layout=None, cmap='RdBu_r', title=None, dB=False,
                  colorbar=True, layout_scale=0.945, show=True,
                  border='none', fig_facecolor='k', fig_background=None,
                  font_color='w', yscale='auto', verbose=None):
        """Plot TFRs in a topography with images.
        Parameters
        ----------
        %(picks_good_data)s
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
        layout : Layout | None
            Layout instance specifying sensor positions. If possible, the
            correct layout is inferred from the data.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        title : str
            Title of the figure.
        dB : bool
            If True, 10*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot.
        layout_scale : float
            Scaling factor for adjusting the relative size of the layout
            on the canvas.
        show : bool
            Call pyplot.show() at the end.
        border : str
            Matplotlib borders style to be used for each sensor plot.
        fig_facecolor : color
            The figure face color. Defaults to black.
        fig_background : None | array
            A background image for the figure. This must be a valid input to
            `matplotlib.pyplot.imshow`. Defaults to None.
        font_color : color
            The color of tick labels in the colorbar. Defaults to white.
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
        %(verbose)s
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
        from ..viz import add_background_image
        # Work on a copy of the time vector; data/info are referenced until
        # _prepare_picks/_preproc_tfr below.
        times = self.times.copy()
        freqs = self.freqs
        data = self.data
        info = self.info
        # Restrict info and data to the requested channels (axis 0).
        info, data = _prepare_picks(info, data, picks, axis=0)
        del picks
        # _preproc_tfr applies mode/baseline, time-frequency cropping, dB
        # scaling and resolves the color limits (see its argument list).
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
                         mode, baseline, vmin, vmax, dB, info['sfreq'])
        if layout is None:
            from mne import find_layout
            layout = find_layout(self.info)
        # Rectangle selection in any sensor image pops up a topomap.
        onselect_callback = partial(self._onselect, baseline=baseline,
                                    mode=mode)
        # ``click_fun`` draws an enlarged single-sensor TFR when a sensor is
        # clicked; ``imshow`` renders the miniature image at each position.
        click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, yscale=yscale,
                            cmap=(cmap, True), onselect=onselect_callback)
        imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
                         onselect=onselect_callback)
        fig = _plot_topo(info=info, times=times, show_func=imshow,
                         click_func=click_fun, layout=layout,
                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                         layout_scale=layout_scale, title=title, border=border,
                         x_label='Time (s)', y_label='Frequency (Hz)',
                         fig_facecolor=fig_facecolor, font_color=font_color,
                         unified=True, img=True)
        add_background_image(fig, fig_background)
        plt_show(show)
        return fig
@fill_doc
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
                 ch_type=None, baseline=None, mode='mean',
                 vmin=None, vmax=None, cmap=None, sensors=True,
                 colorbar=True, unit=None, res=64, size=2,
                 cbar_fmt='%1.1e', show_names=False, title=None,
                 axes=None, show=True, outlines='head',
                 contours=6, sphere=None):
    """Plot topographic maps of time-frequency intervals of TFR data.

    Parameters
    ----------
    tmin, tmax : float | None
        First and last time instants to display. If None the first /
        last available time point is used.
    fmin, fmax : float | None
        First and last frequencies to display. If None the first /
        last available frequency is used.
    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
        The channel type to plot. For 'grad', the gradiometers are
        collected in pairs and the RMS for each pair is plotted.
        If None, the first available channel type from the order given
        above is used. Defaults to None.
    baseline : tuple or list of length 2
        The time interval (a, b), in seconds, used for rescaling /
        baseline correction. None endpoints extend to the start / end of
        the data; None disables correction entirely.
    mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
        Baseline-correction mode: subtract the baseline mean ('mean'),
        divide by it ('ratio'), divide and take the log ('logratio'),
        subtract then divide ('percent'), z-score against the baseline
        ('zscore'), or z-score the log values ('zlogratio').
    vmin, vmax : float | callable | None
        Lower / upper bound of the color range. If None, -vmax / the data
        maximum is used. A callable receives the data and returns the
        bound. Defaults to None.
    cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
        Colormap. A (colormap, bool) tuple enables interactive colorbar
        adjustment with the mouse; 'interactive' translates to
        (None, True). If None, 'Reds' is used for all-positive data,
        otherwise 'RdBu_r'.
    sensors : bool | str
        Add markers for sensor locations. Accepts a matplotlib plot
        format string (e.g., 'r+'); True draws a circle. Defaults to
        True.
    colorbar : bool
        Plot a colorbar.
    unit : dict | str | None
        Unit of the channel type for the colorbar label; determined
        automatically when scale is None.
    res : int
        Resolution of the topomap image (pixels per side).
    size : float
        Side length per topomap in inches.
    cbar_fmt : str
        String format for colorbar values.
    show_names : bool | callable
        If True, show channel names on top of the map; a callable
        formats the names (e.g. ``lambda x: x.replace('MEG ', '')``).
        With a non-None ``mask`` only significant sensors are shown.
    title : str | None
        Title. If None (default), no title is displayed.
    axes : instance of Axes | None
        The axes to plot to. If None the axes is defined automatically.
    show : bool
        Call pyplot.show() at the end.
    %(topomap_outlines)s
    contours : int | array of float
        Number of contour lines, or the explicit contour levels. 0
        disables contours; with colorbar=True the colorbar ticks follow
        the contour levels. Defaults to 6.
    %(topomap_sphere_auto)s

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure containing the topography.
    """  # noqa: E501
    from ..viz import plot_tfr_topomap

    # Pure delegation: forward every argument unchanged to the viz layer.
    return plot_tfr_topomap(
        self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, ch_type=ch_type,
        baseline=baseline, mode=mode, vmin=vmin, vmax=vmax, cmap=cmap,
        sensors=sensors, colorbar=colorbar, unit=unit, res=res, size=size,
        cbar_fmt=cbar_fmt, show_names=show_names, title=title, axes=axes,
        show=show, outlines=outlines, contours=contours, sphere=sphere)
def _check_compat(self, tfr):
"""Check that self and tfr have the same time-frequency ranges."""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr): # noqa: D105
"""Add instances."""
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr): # noqa: D105
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr): # noqa: D105
"""Subtract instances."""
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr): # noqa: D105
self._check_compat(tfr)
self.data -= tfr.data
return self
def __truediv__(self, a): # noqa: D105
"""Divide instances."""
out = self.copy()
out /= a
return out
def __itruediv__(self, a): # noqa: D105
self.data /= a
return self
def __mul__(self, a):
"""Multiply source instances."""
out = self.copy()
out *= a
return out
def __imul__(self, a): # noqa: D105
self.data *= a
return self
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<AverageTFR | %s>" % s
@fill_doc
class EpochsTFR(_BaseTFR, GetEpochsMixin):
    """Container for Time-Frequency data on epochs.

    Can for example store induced power at sensor level.

    Parameters
    ----------
    info : Info
        The measurement info.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : str | None, default None
        Comment on the data, e.g., the experimental condition.
    method : str | None, default None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    events : ndarray, shape (n_events, 3) | None
        The events as stored in the Epochs class. If None (default), all event
        values are set to 1 and event time-samples are set to range(n_epochs).
    event_id : dict | None
        Example: dict(auditory=1, visual=3). They keys can be used to access
        associated events. If None, all events will be used and a dict is
        created with string integer names corresponding to the event id
        integers.
    selection : iterable | None
        Iterable of indices of selected epochs. If ``None``, will be
        automatically generated, corresponding to all non-zero events.

        .. versionadded:: 0.23
    drop_log : tuple | None
        Tuple of tuple of strings indicating which epochs have been marked to
        be ignored.

        .. versionadded:: 0.23
    metadata : instance of pandas.DataFrame | None
        A :class:`pandas.DataFrame` containing pertinent information for each
        trial. See :class:`mne.Epochs` for further details.
    %(verbose)s

    Attributes
    ----------
    info : instance of Info
        Measurement info.
    ch_names : list
        The names of the channels.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data array.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : string
        Comment on dataset. Can be the condition.
    method : str | None, default None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    events : ndarray, shape (n_events, 3) | None
        Array containing sample information as event_id
    event_id : dict | None
        Names of conditions correspond to event_ids
    selection : array
        List of indices of selected events (not dropped or ignored etc.). For
        example, if the original event array had 4 events and the second event
        has been dropped, this attribute would be np.array([0, 2, 3]).
    drop_log : tuple of tuple
        A tuple of the same length as the event array used to initialize the
        ``EpochsTFR`` object. If the i-th original event is still part of the
        selection, drop_log[i] will be an empty tuple; otherwise it will be
        a tuple of the reasons the event is not longer in the selection, e.g.:

        - ``'IGNORED'``
            If it isn't part of the current subset defined by the user
        - ``'NO_DATA'`` or ``'TOO_SHORT'``
            If epoch didn't contain enough data names of channels that
            exceeded the amplitude threshold
        - ``'EQUALIZED_COUNTS'``
            See :meth:`~mne.Epochs.equalize_event_counts`
        - ``'USER'``
            For user-defined reasons (see :meth:`~mne.Epochs.drop`).
    metadata : pandas.DataFrame, shape (n_events, n_cols) | None
        DataFrame containing pertinent information for each trial

    Notes
    -----
    .. versionadded:: 0.13.0
    """

    @verbose
    def __init__(self, info, data, times, freqs, comment=None, method=None,
                 events=None, event_id=None, selection=None,
                 drop_log=None, metadata=None, verbose=None):
        # noqa: D102
        self.info = info
        # Validate shape consistency before storing anything.
        if data.ndim != 4:
            raise ValueError('data should be 4d. Got %d.' % data.ndim)
        n_epochs, n_channels, n_freqs, n_times = data.shape
        if n_channels != len(info['chs']):
            raise ValueError("Number of channels and data size don't match"
                             " (%d != %d)." % (n_channels, len(info['chs'])))
        if n_freqs != len(freqs):
            raise ValueError("Number of frequencies and data size don't match"
                             " (%d != %d)." % (n_freqs, len(freqs)))
        if n_times != len(times):
            raise ValueError("Number of times and data size don't match"
                             " (%d != %d)." % (n_times, len(times)))
        if events is None:
            # Synthesize trivial events (all id 1, samples = range).
            events = _gen_events(n_epochs)
        if selection is None:
            selection = np.arange(n_epochs)
        if drop_log is None:
            # Epochs missing from ``selection`` were dropped before this
            # object was built; mark them as IGNORED in the rebuilt log.
            # (Previously had a dead ``else: drop_log = drop_log`` branch.)
            n_epochs_prerejection = max(len(events), max(selection) + 1)
            drop_log = tuple(
                () if k in selection else ('IGNORED',)
                for k in range(n_epochs_prerejection))
        # check consistency:
        assert len(selection) == len(events)
        assert len(drop_log) >= len(events)
        assert len(selection) == sum(
            (len(dl) == 0 for dl in drop_log))
        event_id = _check_event_id(event_id, events)
        self.data = data
        self.times = np.array(times, dtype=float)
        self.freqs = np.array(freqs, dtype=float)
        self.events = events
        self.event_id = event_id
        self.selection = selection
        self.drop_log = drop_log
        self.comment = comment
        self.method = method
        self.preload = True
        self.metadata = metadata

    @property
    def _detrend_picks(self):
        # No detrending applies to TFR data; return an empty pick list.
        return list()

    def __repr__(self):  # noqa: D105
        s = "time : [%f, %f]" % (self.times[0], self.times[-1])
        s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
        s += ", epochs : %d" % self.data.shape[0]
        s += ', channels : %d' % self.data.shape[1]
        s += ', ~%s' % (sizeof_fmt(self._size),)
        return "<EpochsTFR | %s>" % s

    def __abs__(self):
        """Take the absolute value."""
        epochs = self.copy()
        epochs.data = np.abs(self.data)
        return epochs

    def average(self, method='mean'):
        """Average the data across epochs.

        Parameters
        ----------
        method : str | callable
            How to combine the data. If "mean"/"median", the mean/median
            are returned. Otherwise, must be a callable which, when passed
            an array of shape (n_epochs, n_channels, n_freqs, n_time)
            returns an array of shape (n_channels, n_freqs, n_time).
            Note that due to file type limitations, the kind for all
            these will be "average".

        Returns
        -------
        ave : instance of AverageTFR
            The averaged data.

        Notes
        -----
        Passing in ``np.median`` is considered unsafe when there is complex
        data because NumPy doesn't compute the marginal median. Numpy currently
        sorts the complex values by real part and return whatever value is
        computed. Use with caution. We use the marginal median in the
        complex case (i.e. the median of each component separately) if
        one passes in ``median``. See a discussion in scipy:

        https://github.com/scipy/scipy/pull/12676#issuecomment-783370228
        """
        # Resolve ``method`` into a callable that collapses the epochs axis.
        func = _check_combine(mode=method)
        data = func(self.data)
        if data.shape != self._data.shape[1:]:
            raise RuntimeError(
                'You passed a function that resulted in data of shape {}, '
                'but it should be {}.'.format(
                    data.shape, self._data.shape[1:]))
        return AverageTFR(info=self.info.copy(), data=data,
                          times=self.times.copy(), freqs=self.freqs.copy(),
                          nave=self.data.shape[0], method=self.method,
                          comment=self.comment)
def combine_tfr(all_tfr, weights='nave'):
    """Merge AverageTFR data by weighted addition.

    Create a new AverageTFR instance, using a combination of the supplied
    instances as its data. By default, the mean (weighted by trials) is used.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
    Data must have the same channels and the same time instants.

    Parameters
    ----------
    all_tfr : list of AverageTFR
        The tfr datasets.
    weights : list of float | str
        The weights to apply to the data of each AverageTFR instance.
        Can also be ``'nave'`` to weight according to tfr.nave,
        or ``'equal'`` to use equal weighting (each weighted as ``1/N``).

    Returns
    -------
    tfr : AverageTFR
        The new TFR data.

    Raises
    ------
    ValueError
        If ``weights`` is malformed, or if the instances do not share the
        same channels or time instants.

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    tfr = all_tfr[0].copy()
    if isinstance(weights, str):
        if weights not in ('nave', 'equal'):
            raise ValueError('Weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            weights = np.array([e.nave for e in all_tfr], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_tfr)] * len(all_tfr)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_tfr):
        raise ValueError('Weights must be the same size as all_tfr')

    ch_names = tfr.ch_names
    for t_ in all_tfr[1:]:
        # BUG FIX: these checks used ``assert cond, ValueError(...)``, which
        # is silently stripped under ``python -O`` and raises AssertionError
        # (with a ValueError instance as message) instead of ValueError.
        if t_.ch_names != ch_names:
            raise ValueError("%s and %s do not contain the same channels"
                             % (tfr, t_))
        if np.max(np.abs(t_.times - tfr.times)) >= 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (tfr, t_))

    # use union of bad channels
    bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
                                              for t_ in all_tfr[1:])))
    tfr.info['bads'] = bads

    # XXX : should be refactored with combined_evoked function
    tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
    # Effective nave of a weighted combination; floored at 1.
    tfr.nave = max(int(1. / sum(w ** 2 / e.nave
                                for w, e in zip(weights, all_tfr))), 1)
    return tfr
# Utils
def _get_data(inst, return_itc):
    """Get data from Epochs or Evoked instance as epochs x ch x time."""
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    if not isinstance(inst, (BaseEpochs, Evoked)):
        raise TypeError('inst must be Epochs or Evoked')
    if isinstance(inst, BaseEpochs):
        return inst.get_data()
    # Evoked: ITC is undefined for a single average, so forbid it, and add
    # a singleton epochs axis so the caller sees a uniform 3D layout.
    if return_itc:
        raise ValueError('return_itc must be False for evoked data')
    return inst.data[np.newaxis].copy()
def _prepare_picks(info, data, picks, axis):
    """Resolve ``picks``, subset ``info``, and slice ``data`` along ``axis``."""
    picks = _picks_to_idx(info, picks, exclude='bads')
    info = pick_info(info, picks)
    indexer = [slice(None)] * data.ndim
    indexer[axis] = picks
    return info, data[tuple(indexer)]
def _centered(arr, newsize):
"""Aux Function to center data."""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB, sfreq, copy=None):
    """Aux Function to prepare tfr computation.

    Baseline-rescales ``data`` (n_channels, n_freqs, n_times), crops it to
    the requested time/frequency window, optionally converts to dB, and
    resolves the color limits. Returns (data, times, freqs, vmin, vmax).
    """
    if copy is None:
        # Only copy when a baseline correction would otherwise mutate the
        # caller's array in place.
        copy = baseline is not None
    data = rescale(data, times, baseline, mode, copy=copy)

    # crop time: None endpoints leave that side of the slice open.
    itmin, itmax = None, None
    idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
    if tmin is not None:
        itmin = idx[0]
    if tmax is not None:
        itmax = idx[-1] + 1

    times = times[itmin:itmax]

    # crop freqs
    # NOTE(review): _time_mask is reused on the frequency axis here —
    # presumably sfreq only affects endpoint rounding; confirm intended.
    ifmin, ifmax = None, None
    idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
    if fmin is not None:
        ifmin = idx[0]
    if fmax is not None:
        ifmax = idx[-1] + 1

    freqs = freqs[ifmin:ifmax]

    # crop data (axis order: channels x freqs x times)
    data = data[:, ifmin:ifmax, itmin:itmax]

    if dB:
        # 10*log10 of power; data * conj keeps this valid for complex input.
        data = 10 * np.log10((data * data.conj()).real)

    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
def _check_decim(decim):
    """Validate ``decim`` and normalize it to a slice with an explicit step."""
    _validate_type(decim, ('int-like', slice), 'decim')
    if isinstance(decim, slice):
        # Make `decim.step` usable downstream by defaulting it to 1.
        if decim.step is None:
            decim = slice(decim.start, decim.stop, 1)
    else:
        decim = slice(None, None, int(decim))
    return decim
# i/o
@verbose
def write_tfrs(fname, tfr, overwrite=False, *, verbose=None):
    """Write a TFR dataset to hdf5.

    Parameters
    ----------
    fname : str
        The file name, which should end with ``-tfr.h5``.
    tfr : AverageTFR | list of AverageTFR | EpochsTFR
        The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is not None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
    %(overwrite)s
    %(verbose)s

    See Also
    --------
    read_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    tfrs = tfr if isinstance(tfr, (list, tuple)) else [tfr]
    # Fall back to the positional index as the condition name when a
    # dataset has no comment.
    payload = [
        _prepare_write_tfr(
            one, condition=one.comment if one.comment is not None else idx)
        for idx, one in enumerate(tfrs)
    ]
    write_hdf5(fname, payload, overwrite=overwrite, title='mnepython',
               slash='replace')
def _prepare_write_tfr(tfr, condition):
"""Aux function."""
attributes = dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data,
info=tfr.info, comment=tfr.comment, method=tfr.method)
if hasattr(tfr, 'nave'): # if AverageTFR
attributes['nave'] = tfr.nave
elif hasattr(tfr, 'events'): # if EpochsTFR
attributes['events'] = tfr.events
attributes['event_id'] = tfr.event_id
attributes['selection'] = tfr.selection
attributes['drop_log'] = tfr.drop_log
attributes['metadata'] = _prepare_write_metadata(tfr.metadata)
return condition, attributes
def read_tfrs(fname, condition=None):
    """Read TFR datasets from hdf5 file.

    Parameters
    ----------
    fname : str
        The file name, which should end with -tfr.h5 .
    condition : int or str | list of int or str | None
        The condition to load. If None, all conditions will be returned.
        Defaults to None.

    Returns
    -------
    tfr : AverageTFR | list of AverageTFR | EpochsTFR
        Depending on ``condition`` either the TFR object or a list of multiple
        TFR objects.

    See Also
    --------
    write_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    check_fname(fname, 'tfr', ('-tfr.h5', '_tfr.h5'))

    logger.info('Reading %s ...' % fname)
    tfr_data = read_hdf5(fname, title='mnepython', slash='replace')
    for _, tfr in tfr_data:
        tfr['info'] = Info(tfr['info'])
        tfr['info']._check_consistency()
        if 'metadata' in tfr:
            tfr['metadata'] = _prepare_read_metadata(tfr['metadata'])
    # NOTE(review): ``tfr`` is the last loop item here; presumably all
    # entries in one file share the same kind — confirm for mixed files.
    is_average = 'nave' in tfr
    if condition is not None:
        if not is_average:
            raise NotImplementedError('condition not supported when reading '
                                      'EpochsTFR.')
        tfr_dict = dict(tfr_data)
        if condition not in tfr_dict:
            keys = ['%s' % k for k in tfr_dict]
            # BUG FIX: the message previously ended with a stray extra
            # double quote ('... contains "{}""').
            raise ValueError('Cannot find condition ("{}") in this file. '
                             'The file contains "{}".'
                             .format(condition, " or ".join(keys)))
        out = AverageTFR(**tfr_dict[condition])
    else:
        inst = AverageTFR if is_average else EpochsTFR
        out = [inst(**d) for d in list(zip(*tfr_data))[1]]
    return out
def _get_timefreqs(tfr, timefreqs):
    """Find and/or setup timefreqs for `tfr.plot_joint`.

    Validates user-supplied (time, frequency) pairs, or — when ``timefreqs``
    is None — picks the strongest local maximum in ``tfr.data`` automatically.
    Returns a dict keyed by (time, freq) tuples.
    """
    # Input check
    timefreq_error_msg = (
        "Supplied `timefreqs` are somehow malformed. Please supply None, "
        "a list of tuple pairs, or a dict of such tuple pairs, not: ")
    if isinstance(timefreqs, dict):
        # Both keys and values must be numeric pairs.
        for k, v in timefreqs.items():
            for item in (k, v):
                if len(item) != 2 or any((not _is_numeric(n) for n in item)):
                    raise ValueError(timefreq_error_msg, item)
    elif timefreqs is not None:
        if not hasattr(timefreqs, "__len__"):
            raise ValueError(timefreq_error_msg, timefreqs)
        if len(timefreqs) == 2 and all((_is_numeric(v) for v in timefreqs)):
            timefreqs = [tuple(timefreqs)]  # stick a pair of numbers in a list
        else:
            # A sequence of (time, freq) pairs; each must be a numeric 2-tuple.
            for item in timefreqs:
                if (hasattr(item, "__len__") and len(item) == 2 and
                        all((_is_numeric(n) for n in item))):
                    pass
                else:
                    raise ValueError(timefreq_error_msg, item)

    # If None, automatic identification of max peak
    else:
        from scipy.signal import argrelmax
        # Peak-detection window scales with the number of time points.
        order = max((1, tfr.data.shape[2] // 30))
        peaks_idx = argrelmax(tfr.data, order=order, axis=2)
        if peaks_idx[0].size == 0:
            # No local maxima: fall back to the global maximum.
            _, p_t, p_f = np.unravel_index(tfr.data.argmax(), tfr.data.shape)
            timefreqs = [(tfr.times[p_t], tfr.freqs[p_f])]
        else:
            # Keep the (time, freq) of the largest local maximum.
            # NOTE(review): indexes channel 0 only — presumably tfr.data has
            # a single channel here; confirm against plot_joint's caller.
            peaks = [tfr.data[0, f, t] for f, t in
                     zip(peaks_idx[1], peaks_idx[2])]
            peakmax_idx = np.argmax(peaks)
            peakmax_time = tfr.times[peaks_idx[2][peakmax_idx]]
            peakmax_freq = tfr.freqs[peaks_idx[1][peakmax_idx]]
            timefreqs = [(peakmax_time, peakmax_freq)]

    # Normalize to a dict keyed by (time, freq) tuples.
    timefreqs = {
        tuple(k): np.asarray(timefreqs[k]) if isinstance(timefreqs, dict)
        else np.array([0, 0]) for k in timefreqs}
    return timefreqs
def _preproc_tfr_instance(tfr, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB,
                          mode, baseline, exclude, copy=True):
    """Baseline and truncate (times and freqs) a TFR instance.

    Returns the (optionally copied) instance with channels picked/excluded,
    data baseline-corrected via ``_preproc_tfr``, and the time/frequency
    axes cropped in place.
    """
    tfr = tfr.copy() if copy else tfr

    # ``exclude`` is only honored when no explicit picks were given.
    exclude = None if picks is None else exclude
    picks = _picks_to_idx(tfr.info, picks, exclude='bads')
    pick_names = [tfr.info['ch_names'][pick] for pick in picks]
    tfr.pick_channels(pick_names)

    if exclude == 'bads':
        # Resolve the sentinel into the concrete bad-channel names present.
        exclude = [ch for ch in tfr.info['bads']
                   if ch in tfr.info['ch_names']]
    if exclude is not None:
        tfr.drop_channels(exclude)

    # vmin/vmax from _preproc_tfr are discarded; only data/axes are kept.
    data, times, freqs, _, _ = _preproc_tfr(
        tfr.data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax, mode,
        baseline, vmin, vmax, dB, tfr.info['sfreq'], copy=False)

    tfr.times = times
    tfr.freqs = freqs
    tfr.data = data
    return tfr
| bsd-3-clause |
yanboliang/spark | examples/src/main/python/sql/arrow.py | 16 | 5034 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
    """Round-trip a pandas DataFrame through Spark using Arrow transfers."""
    # $example on:dataframe_with_arrow$
    import numpy as np
    import pandas as pd

    # Enable Arrow-based columnar data transfers
    spark.conf.set("spark.sql.execution.arrow.enabled", "true")

    # Generate a Pandas DataFrame
    pdf = pd.DataFrame(np.random.rand(100, 3))

    # Create a Spark DataFrame from a Pandas DataFrame using Arrow
    df = spark.createDataFrame(pdf)

    # Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
    result_pdf = df.select("*").toPandas()
    # $example off:dataframe_with_arrow$
    print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
    """Demonstrate a scalar pandas UDF applied element-wise to a column."""
    # $example on:scalar_pandas_udf$
    import pandas as pd

    from pyspark.sql.functions import col, pandas_udf
    from pyspark.sql.types import LongType

    # Declare the function and create the UDF
    def multiply_func(a, b):
        return a * b

    multiply = pandas_udf(multiply_func, returnType=LongType())

    # The function for a pandas_udf should be able to execute with local Pandas data
    x = pd.Series([1, 2, 3])
    print(multiply_func(x, x))
    # 0    1
    # 1    4
    # 2    9
    # dtype: int64

    # Create a Spark DataFrame, 'spark' is an existing SparkSession
    df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))

    # Execute function as a Spark vectorized UDF
    df.select(multiply(col("x"), col("x"))).show()
    # +-------------------+
    # |multiply_func(x, x)|
    # +-------------------+
    # |                  1|
    # |                  4|
    # |                  9|
    # +-------------------+
    # $example off:scalar_pandas_udf$
def grouped_map_pandas_udf_example(spark):
    """Demonstrate a grouped-map pandas UDF (split-apply-combine per group)."""
    # $example on:grouped_map_pandas_udf$
    from pyspark.sql.functions import pandas_udf, PandasUDFType

    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ("id", "v"))

    @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
    def subtract_mean(pdf):
        # pdf is a pandas.DataFrame
        v = pdf.v
        return pdf.assign(v=v - v.mean())

    df.groupby("id").apply(subtract_mean).show()
    # +---+----+
    # | id|   v|
    # +---+----+
    # |  1|-0.5|
    # |  1| 0.5|
    # |  2|-3.0|
    # |  2|-1.0|
    # |  2| 4.0|
    # +---+----+
    # $example off:grouped_map_pandas_udf$
def grouped_agg_pandas_udf_example(spark):
    """Demonstrate a grouped-aggregate pandas UDF, in groupby and over a window."""
    # $example on:grouped_agg_pandas_udf$
    from pyspark.sql.functions import pandas_udf, PandasUDFType
    from pyspark.sql import Window

    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ("id", "v"))

    @pandas_udf("double", PandasUDFType.GROUPED_AGG)
    def mean_udf(v):
        return v.mean()

    df.groupby("id").agg(mean_udf(df['v'])).show()
    # +---+-----------+
    # | id|mean_udf(v)|
    # +---+-----------+
    # |  1|        1.5|
    # |  2|        6.0|
    # +---+-----------+

    w = Window \
        .partitionBy('id') \
        .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
    df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
    # +---+----+------+
    # | id|   v|mean_v|
    # +---+----+------+
    # |  1| 1.0|   1.5|
    # |  1| 2.0|   1.5|
    # |  2| 3.0|   6.0|
    # |  2| 5.0|   6.0|
    # |  2|10.0|   6.0|
    # +---+----+------+
    # $example off:grouped_agg_pandas_udf$
if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("Python Arrow-in-Spark example") \
        .getOrCreate()

    print("Running Pandas to/from conversion example")
    dataframe_with_arrow_example(spark)
    print("Running pandas_udf scalar example")
    scalar_pandas_udf_example(spark)
    print("Running pandas_udf grouped map example")
    grouped_map_pandas_udf_example(spark)
    # BUG FIX: grouped_agg_pandas_udf_example was defined but never invoked.
    print("Running pandas_udf grouped agg example")
    grouped_agg_pandas_udf_example(spark)

    spark.stop()
| apache-2.0 |
r-mart/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # Fitting must not depend on sample order; regression test for sample
    # weights not being sorted together with x and y.
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    weights = [1, 2, 3, 4, 5, 6, 7]
    x_shuf, y_shuf, w_shuf = shuffle(x, y, weights, random_state=0)

    direct = ir.fit_transform(x, y, sample_weight=weights)
    via_shuffled = ir.fit(x_shuf, y_shuf, sample_weight=w_shuf).transform(x)

    assert_array_equal(direct, via_shuffled)
def test_check_increasing_up():
    # Mostly-increasing data: expect increasing=True and no warnings.
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, 1.5, 2.77, 8.99, 8.99, 50]
    assert_true(assert_no_warnings(check_increasing, xs, ys))
def test_check_increasing_up_extreme():
    # Perfectly linear increasing data: expect increasing=True, no warnings.
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, 1, 2, 3, 4, 5]
    assert_true(assert_no_warnings(check_increasing, xs, ys))
def test_check_increasing_down():
    # Mostly-decreasing data: expect increasing=False and no warnings.
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1.5, -2.77, -8.99, -8.99, -50]
    assert_false(assert_no_warnings(check_increasing, xs, ys))
def test_check_increasing_down_extreme():
    # Perfectly linear decreasing data: expect increasing=False, no warnings.
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1, -2, -3, -4, -5]
    assert_false(assert_no_warnings(check_increasing, xs, ys))
def test_check_ci_warn():
    # Oscillating data: expect increasing=False plus a confidence-interval
    # warning mentioning "interval".
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1, 2, -3, 4, -5]
    verdict = assert_warns_message(UserWarning, "interval",
                                   check_increasing, xs, ys)
    assert_false(verdict)
def test_isotonic_regression():
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    expected = np.array([3, 6, 6, 8, 8, 8, 10])
    assert_array_equal(expected, isotonic_regression(y))

    x = np.arange(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    ir.fit(x, y)
    # fit+transform must agree with fit_transform, and transform with predict.
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(ir.transform(x), ir.predict(x))

    # Fitting must be invariant under a permutation of the samples.
    perm = np.random.permutation(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    assert_array_equal(ir.fit_transform(x[perm], y[perm]),
                       ir.fit_transform(x, y)[perm])
    assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])

    # All-equal x must not crash; the fit collapses to the mean of y.
    ir = IsotonicRegression()
    assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
    # Tied x values at the minimum get averaged together.
    x = [0, 1, 1, 2, 3, 4, 5]
    y = [0, 1, 2, 3, 4, 5, 6]
    expected = [0, 1.5, 1.5, 3, 4, 5, 6]

    ir = IsotonicRegression()
    ir.fit(x, y)
    # fit/transform and fit_transform must agree, and match the expectation.
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(expected, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
    # Tied x values at the maximum get averaged together.
    x = [1, 2, 3, 4, 5, 5]
    y = [1, 2, 3, 4, 5, 6]
    expected = [1, 2, 3, 4, 5.5, 5.5]

    ir = IsotonicRegression()
    ir.fit(x, y)
    # fit/transform and fit_transform must agree, and match the expectation.
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(expected, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
    """
    Test isotonic regression fit, transform  and fit_transform
    against the "secondary" ties method and "pituitary" data from R
     "isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
     Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
    (PAVA) and Active Set Methods

    Set values based on pituitary example and
     the following R command detailed in the paper above:
    > library("isotone")
    > data("pituitary")
    > res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
    > res1$x

    `isotone` version: 1.0-2, 2014-09-07
    R version: R version 3.1.1 (2014-07-10)
    """
    x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
    y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
    # Reference values computed by R's isotone::gpava (see docstring).
    y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
              22.22222, 22.22222, 22.22222, 24.25, 24.25]

    # Check fit, transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    # 4-decimal tolerance matches the precision of the R reference output.
    assert_array_almost_equal(ir.transform(x), y_true, 4)
    assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
    # With increasing=False the fitted values must be non-increasing.
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    fitted = IsotonicRegression(increasing=False).fit_transform(
        np.arange(len(y)), y)
    assert_array_equal(np.ones(fitted[:-1].shape),
                       ((fitted[:-1] - fitted[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
    # increasing='auto' must detect a decreasing trend without warning.
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    x = np.arange(len(y))

    ir = IsotonicRegression(increasing='auto')
    fitted = assert_no_warnings(ir.fit_transform, x, y)

    # The fitted relationship must decrease end-to-end.
    assert_false(fitted[0] < fitted[-1])
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
    """IsotonicRegression must reject malformed or mismatched inputs."""
    ir = IsotonicRegression()
    rng = np.random.RandomState(42)
    # sample_weight length does not match x/y.
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
    # x and y lengths differ.
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
    # 2-D inputs are not supported for fit or transform.
    assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
    assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
    """Omitting sample_weight must be equivalent to passing unit weights."""
    # check if default value of sample_weight parameter is one
    ir = IsotonicRegression()
    # random test data
    rng = np.random.RandomState(42)
    n = 100
    x = np.arange(n)
    y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
    # check if value is correctly used
    weights = np.ones(n)
    y_set_value = ir.fit_transform(x, y, sample_weight=weights)
    y_default_value = ir.fit_transform(x, y)
    assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    """Fitted values must be clipped into [y_min, y_max] = [2, 4]."""
    model = IsotonicRegression(y_min=2, y_max=4)
    xs = np.arange(6)
    ys = np.arange(6)
    expected = [2, 2, 2, 3, 4, 4]
    clipped = np.round(model.fit_transform(xs, ys))
    assert_array_equal(clipped, expected)
def test_isotonic_sample_weight():
    """Weighted PAVA must pool violating blocks by their weighted mean."""
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    # 13.95 is the weighted mean of the pooled block y[1:6]:
    # (41*2 + 51*3 + 1*4 + 2*5 + 5*6) / (2+3+4+5+6) = 279 / 20 = 13.95
    expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
    received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
    assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
    """out_of_bounds='raise' must raise ValueError for x outside the range."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    ir.fit(x, y)
    # Check that an exception is thrown
    assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
    """out_of_bounds='clip' must clamp predictions to the fitted range."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    # Predict from  training and test x and check that min/max match.
    y1 = ir.predict([min(x) - 10, max(x) + 10])
    y2 = ir.predict(x)
    assert_equal(max(y1), max(y2))
    assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
    """out_of_bounds='nan' must return NaN for x outside the fitted range."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
    ir.fit(x, y)
    # Predict from training and test x and check that we have two NaNs.
    y1 = ir.predict([min(x) - 10, max(x) + 10])
    assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
    """An unknown out_of_bounds value must raise at fit time."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
    # Make sure that we throw an error for bad out_of_bounds value
    assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
    """An out_of_bounds value corrupted after fit must raise at transform."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    # Make sure that we throw an error for bad out_of_bounds value in transform
    ir.fit(x, y)
    ir.out_of_bounds = "xyz"
    assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
    """A fitted model must survive a pickle round-trip with equal predictions."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
    ir2 = pickle.loads(ir_ser)
    np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
    """Duplicated minimum x entries must not produce non-finite predictions."""
    x = [0, 0, 1]
    y = [0, 0, 1]
    ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
    ir.fit(x, y)
    all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
    assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
    """Zero sample weights must not send the solver into an infinite loop.

    Regression test from @ogrisel's issue:
    https://github.com/scikit-learn/scikit-learn/issues/4297
    """
    # Get deterministic RNG with seed
    rng = np.random.RandomState(42)
    # Create regression and samples
    regression = IsotonicRegression()
    n_samples = 50
    x = np.linspace(-3, 3, n_samples)
    y = x + rng.uniform(size=n_samples)
    # Get some random weights and zero out
    w = rng.uniform(size=n_samples)
    w[5:8] = 0
    regression.fit(x, y, sample_weight=w)
    # This will hang in failure case.
    regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
aditiiyer/CERR | CERR_core/ModelImplementationLibrary/SegmentationModels/ModelDependencies/CT_HeartStructure_DeepLab/dataloaders/utils.py | 4 | 3279 | import matplotlib.pyplot as plt
import numpy as np
import torch
def decode_seg_map_sequence(label_masks, dataset='heart'):
    """Decode a batch of integer label masks into a colour tensor.

    Each mask is decoded with :func:`decode_segmap`; the resulting
    (N, H, W, 3) stack is transposed to NCHW and returned as a torch
    tensor.
    """
    decoded = [decode_segmap(mask, dataset) for mask in label_masks]
    stacked = np.array(decoded).transpose([0, 3, 1, 2])
    return torch.from_numpy(stacked)
def decode_segmap(label_mask, dataset, plot=False):
    """Decode segmentation class labels into a color image.

    Args:
        label_mask (np.ndarray): an (M,N) array of integer values denoting
            the class label at each spatial location.
        dataset (str): which label/colour table to use; 'heart' and
            'validation' use the 10-class tables, while the '*_struct',
            '*_peri', '*_ventricles' and '*_atria' variants are binary
            (2-class).
        plot (bool, optional): whether to show the resulting color image
            in a figure instead of returning it.

    Returns:
        (np.ndarray, optional): the decoded (M,N,3) color image with
        channel values scaled into [0, 1]; None when ``plot`` is True.

    Raises:
        NotImplementedError: for an unrecognised ``dataset`` name.
    """
    if dataset == 'heart':
        n_classes = 10
        label_colours = get_heart_labels()
    elif dataset == 'validation':
        n_classes = 10
        label_colours = get_heart_struct_labels()
    elif dataset == 'heart_struct' or dataset == 'heart_peri' or dataset == 'heart_ventricles' or dataset == 'heart_atria':
        n_classes = 2
        label_colours = get_heart_labels()
    elif dataset == 'validation_struct' or dataset == 'validation_peri' or dataset == 'validation_ventricles' or dataset == 'validation_atria':
        n_classes = 2
        label_colours = get_heart_struct_labels()
    else:
        raise NotImplementedError
    # Paint one channel at a time from the colour table, class id by class id.
    r = label_mask.copy()
    g = label_mask.copy()
    b = label_mask.copy()
    for ll in range(0, n_classes):
        r[label_mask == ll] = label_colours[ll, 0]
        g[label_mask == ll] = label_colours[ll, 1]
        b[label_mask == ll] = label_colours[ll, 2]
    rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
    rgb[:, :, 0] = r / 255.0
    rgb[:, :, 1] = g / 255.0
    rgb[:, :, 2] = b / 255.0
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def encode_segmap(mask):
    """Encode a colour-coded segmentation image back into class indices.

    Args:
        mask (np.ndarray): raw segmentation label image of dimension
            (M, N, 3), in which classes are encoded as the RGB colours
            returned by ``get_heart_labels()``.

    Returns:
        (np.ndarray): class map with dimensions (M,N), where the value at
        a given location is the integer denoting the class index.
    """
    mask = mask.astype(int)
    label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
    # A pixel matches a class when all three channels equal that class colour.
    for ii, label in enumerate(get_heart_labels()):
        label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
    label_mask = label_mask.astype(int)
    return label_mask
def get_heart_labels():
    """Return the RGB colour table for the heart classes, shape (11, 3).

    Row index is the class id:
    ['unlabelled', 'HEART', 'AORTA', 'LA', 'LV', 'RA', 'RV', 'IVC',
     'SVC', 'PA'] plus one extra trailing colour.
    (The original comment claimed shape (10, 3); the table actually has
    11 rows.)
    """
    palette = [
        [0, 0, 0],
        [128, 0, 0], [0, 128, 0], [128, 128, 0],
        [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
        [64, 0, 0], [192, 0, 0], [64, 128, 0],
    ]
    return np.asarray(palette)
def get_heart_struct_labels():
    """Return the binary heart-structure colour table, shape (2, 3).

    Row index is the class id: 0 -> 'unlabelled' (black),
    1 -> 'HEART' (dark red).
    """
    background = [0, 0, 0]
    heart = [128, 0, 0]
    return np.asarray([background, heart])
| lgpl-2.1 |
siutanwong/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
    from nose.tools import assert_less
    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as the scikit's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
    pass
try:
    from nose.tools import assert_greater
    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's (comment fixed: said assert_less)
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
    pass
def test_assert_less_equal():
    """<= comparisons pass; a strictly greater pair must fail."""
    assert_less_equal(0, 1)
    assert_less_equal(1, 1)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    """>= comparisons pass; a strictly smaller pair must fail."""
    assert_greater_equal(1, 0)
    assert_greater_equal(1, 1)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    """set_random_state must set random_state where it exists and be a
    harmless no-op where it does not."""
    lda = LDA()
    tree = DecisionTreeClassifier()
    # LDA doesn't have random state: smoke test
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    """assert_raise_message must check both the exception type and message."""
    def _raise_ValueError(message):
        raise ValueError(message)
    def _no_raise():
        pass
    assert_raise_message(ValueError, "test",
                         _raise_ValueError, "test")
    # Wrong message -> AssertionError raised by the helper itself.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")
    # Wrong expected type -> the original exception propagates unchanged.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")
    # Nothing raised at all -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)
    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    def test_warn(self):
        """assert_warns must catch the warning and reset global filters."""
        def f():
            warnings.warn("yo")
            return 3
        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)
        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])
        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
    def test_warn_wrong_warning(self):
        """assert_warns must fail when a different warning class is raised."""
        def f():
            warnings.warn("yo", DeprecationWarning)
        failed = False
        # Save the global filters so they can be restored afterwards.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
Natetempid/nearfield | analyze_nfdata_tk/analyze_nfdata_tk/data_interpreter.py | 1 | 8669 | from __future__ import print_function
import sys
import Tkinter as tk
import ttk
import numpy as np
import os
import datetime
from scipy import interpolate
import tkFileDialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import datetime
class data_interpreter(tk.Frame):
    """Tk frame that loads one instrument's CSV time series, plots it, and
    lets the user pick time intervals by clicking on the plot.

    NOTE(review): the flat-text source this was recovered from lost its
    indentation; nesting below is reconstructed to match the control flow
    implied by the comments.
    """
    #single instrument
    def __init__(self, master, root, name, file, plot_index):
        """Build the frame, read the data file and draw the initial plot.

        master: parent Tk container; root: application object that owns the
        shared selection state (selected_interpreter_list, use_intervals);
        name: plot title; file: an already-open file object (not a path);
        plot_index: where to plot the data.
        """
        tk.Frame.__init__(self, master)
        self.config(border = 2)
        self.master = master
        self.root = root
        self.plot_frame_row = -1
        self.plot_frame_column = -1
        self.grid_rowconfigure(0,weight = 1)
        self.grid_columnconfigure(0, weight = 1)
        self.name = name #this will be plot title
        self.file = file #open file not file path
        self.plot_index = plot_index #where to plot the data
        self.time = []
        self.data = []
        self.units = []
        #read data
        self.read_data()
        #plot_data
        self.fig = plt.Figure()#figsize=(10,10))
        self.ax = self.fig.add_subplot(1,1,1)
        self.line, = self.ax.plot(self.time, self.data)
        self.ax.set_title(self.name)
        self.canvas = FigureCanvasTkAgg(self.fig,self)
        self.canvas.show()
        self.canvas_widget = self.canvas.get_tk_widget()
        self.canvas_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.canvas._tkcanvas.grid(row = 0, column = 0, sticky = 'nsew')
        self.selected = False
        self.selected_intervals = [] #times for the beginning and end of each interval
        self.selected_vlines = [] #lines to place at selected intervals
        self.selected_time = [] #times within interval to plot
        self.selected_data = [] #cooresponding data to plot
        self.mean_time = []
        self.mean_data = []
        self.std_dev = []
        self.getting_coordinates = False
        self.connectid = None
        #select button
        self.select_btn = ttk.Button(self, text = 'Select Graph', command = lambda: self.select_graph())
        self.select_btn.grid(row = 1, column = 0, sticky = 'nsew')
    def reinit_toselectframe(self, master):
        """Re-parent this widget into an interval-selection layout: plot plus
        navigation toolbar plus 'Select Intervals' button."""
        self.master = master #overwrite previous master frame
        tk.Frame.__init__(self, self.master)
        self.grid_rowconfigure(0, weight = 1)
        self.grid_columnconfigure(0, weight = 1)
        self.config(border = 2, relief = tk.GROOVE)
        self.fig = plt.Figure()
        self.ax = self.fig.add_subplot(1,1,1)
        self.line, = self.ax.plot(self.time, self.data)
        self.ax.set_title(self.name)
        self.canvas = FigureCanvasTkAgg(self.fig, self)
        self.canvas.show()
        self.canvas_widget = self.canvas.get_tk_widget()
        self.canvas_widget.grid(row = 0, column = 0, sticky = 'nsew')#pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.canvas._tkcanvas.grid(row = 0, column = 0, sticky = 'nsew')
        self.navigator_frame = tk.Frame(self)
        self.navigator_frame.grid(row = 1, column = 0, sticky = 'nsew')
        self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.navigator_frame)
        self.toolbar.update()
        self.get_coordinates_btn = ttk.Button(self, text = 'Select Intervals', command = lambda: self.get_coordinates())
        self.get_coordinates_btn.grid(row = 2, column = 0, sticky = 'nsew')
    def reinit_toplotframe(self, master):
        """Re-parent this widget into a plain plotting layout: plot plus
        'Select Graph' button (no toolbar)."""
        self.master = master #overwrite previous master frame
        tk.Frame.__init__(self, self.master)
        self.grid_rowconfigure(0, weight = 1)
        self.grid_columnconfigure(0, weight = 1)
        self.config(border = 2, relief = tk.GROOVE)
        self.fig = plt.Figure()
        self.ax = self.fig.add_subplot(1,1,1)
        self.line, = self.ax.plot(self.time, self.data)
        self.ax.set_title(self.name)
        self.canvas = FigureCanvasTkAgg(self.fig, self)
        self.canvas.show()
        self.canvas_widget = self.canvas.get_tk_widget()
        self.canvas_widget.grid(row = 0, column = 0, sticky = 'nsew')#pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.canvas._tkcanvas.grid(row = 0, column = 0, sticky = 'nsew')
        self.select_btn = ttk.Button(self, text = 'Select Graph', command = lambda: self.select_graph())
        self.select_btn.grid(row = 1, column = 0, sticky = 'nsew')
    def read_data(self):
        """Parse self.file as CSV rows 'timestamp,value[,units]'.

        Timestamps are converted to seconds since the Unix epoch; rows that
        parse with neither the fractional-second nor the whole-second
        format are silently skipped (but their units column, if any, is
        still appended -- NOTE(review): this can leave self.units longer
        than self.time/self.data).
        """
        for line in self.file:
            datalist = line.split(',')
            if len(datalist) > 1:
                try:
                    # Timestamp with fractional seconds -> epoch seconds.
                    self.time.append(float((datetime.datetime.strptime(datalist[0], '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime(1970,1,1)).total_seconds()))
                    self.data.append(float(datalist[1]))
                except ValueError:
                    try:
                        # Fallback: timestamp without fractional seconds.
                        self.time.append(float((datetime.datetime.strptime(datalist[0], '%Y-%m-%d %H:%M:%S') - datetime.datetime(1970,1,1)).total_seconds()))
                        self.data.append(float(datalist[1]))
                    except ValueError:
                        pass
                if len(datalist) > 2:
                    self.units.append(datalist[2])
                else:
                    self.units.append(None)
    def select_graph(self):
        """Toggle this graph's selected state and update button/border."""
        if self.selected:
            self.select_btn.config(text = 'Select Graph')
            self.config(relief = tk.FLAT)
        else:
            self.select_btn.config(text = 'Unselect Graph')
            self.config(relief = tk.GROOVE)
        self.selected = not self.selected
    def remove_plot(self):
        """Remove this interpreter's data line from its axes."""
        self.ax.lines.remove(self.line)
    def get_coordinates(self):
        """Toggle interval-selection mode on the canvas.

        NOTE(review): self.getting_coordinates is initialized to False and
        only ever flips between True/False, so the `is None` branch below
        is dead code; selection actually starts from the inner else branch.
        """
        if self.getting_coordinates is None: #then button has been pressed just after initilization and need to start getting interals
            self.get_coordinates_btn.config(text = 'Stop Selecting Intervals... ')
            self.connectid = self.canvas.mpl_connect('button_press_event', self.on_click)
            self.getting_coordinates = True
        else:
            if self.getting_coordinates:
                #then stop getting coordinates
                self.get_coordinates_btn.config(text = 'Updating Curves...')
                #send selected coordinates to root
                self.root.selected_intervals = self.selected_intervals
                try:
                    self.canvas.mpl_disconnect(self.connectid)
                    self.root.use_intervals()
                    self.get_coordinates_btn.config(text = 'Select Intervals')
                except SystemError:
                    pass
            else:
                #then something went wrong. Logic shouldn't allow this code to run
                self.get_coordinates_btn.config(text = 'Stop Selecting Intervals... ')
                self.connectid = self.canvas.mpl_connect('button_press_event', self.on_click)
            self.getting_coordinates = not self.getting_coordinates
    def on_click(self, event):
        """Matplotlib mouse handler: left-click adds an interval boundary
        (and mirrors a red vline onto all other selected interpreters);
        right-click removes the most recent one everywhere."""
        #get the x and y coords, flip y from top to bottom
        x, y = event.x, event.y
        if event.button == 1:
            if event.inaxes is not None:
                print('data coords %f %f' % (event.xdata, event.ydata))
                #self.root.selected_times.append(event.xdata)
                self.selected_intervals.append(event.xdata)
                #plot vertical lines on this interpreter
                line = self.ax.axvline(x = event.xdata, color = 'r')
                self.selected_vlines.append(line)
                #plot vertical lines on other selected interpreters
                for interpreter in self.root.selected_interpreter_list:
                    if interpreter.name != self.name: #only do this for other selected interpreters
                        other_line = interpreter.ax.axvline(x = event.xdata, color = 'r')
                        interpreter.selected_vlines.append(other_line)
                        interpreter.canvas.draw()
        if event.button == 3:
            if event.inaxes is not None:
                self.selected_intervals = self.selected_intervals[:-1]
                self.ax.lines.remove(self.selected_vlines[-1])
                #remove vertical lines from other selected interpreters
                for interpreter in self.root.selected_interpreter_list:
                    if interpreter.name != self.name: #only do this for other selected interpreters
                        interpreter.ax.lines.remove(interpreter.selected_vlines[-1])
                        interpreter.canvas.draw()
        self.canvas.draw()
| gpl-3.0 |
doctornerdis/index_translationum | visualization.py | 1 | 1451 | import pandas as pd
import numpy as np
import glob as glob
import matplotlib.pyplot as plt
import seaborn as sns
# Aggregating all the data
# NOTE(review): hardcoded absolute path -- only works on the author's machine.
file_pattern = "/Users/nicholascifuentes-goodbody/Documents/GitHub/index_translationum/data/*.csv"
list_data = []
csv_files = glob.glob(file_pattern)
for filename in csv_files:
    df = pd.read_csv(filename, parse_dates=["TT_YEAR"], infer_datetime_format=True)
    list_data.append(df)
results = pd.concat(list_data, ignore_index = True)
# Pulling out columns of interest and filtering by three languages
results = results[["ST_LANG", "TT_LANG", "TT_YEAR", "TT_PUB_COUNTRY", "BIRTH_COUNTRY"]]
indices = (results["ST_LANG"] == "French") | (results["ST_LANG"] == "Spanish") | (results["ST_LANG"] == "Portuguese")
clean_df = results.loc[indices,:]
#counts = clean_df.set_index("TT_YEAR")
#counts = counts.groupby('ST_LANG').ST_LANG.count()
#print(counts.head())
# Plotting: two stacked strip plots over the same year range.
ymin = pd.to_datetime("1920")
ymax = pd.to_datetime("2015")
plt.subplot(2,1,1)
sns.stripplot(x="ST_LANG", y="TT_YEAR", data=clean_df, size=3, jitter=True)
plt.ylim(ymin, ymax)
plt.ylabel("Year of Publication")
plt.xlabel("Source Language")
# NOTE(review): the title claims "Translations into Arabic" but the code only
# filters the SOURCE language (French/Spanish/Portuguese); the target language
# (TT_LANG) is never filtered here -- presumably the CSVs are Arabic-only,
# verify against the dataset.
plt.title("Translations into Arabic, 1920 - 2015")
plt.subplot(2,1,2)
sns.stripplot(x="TT_PUB_COUNTRY", y="TT_YEAR", data=clean_df, size=3, jitter=True)
plt.xticks(rotation='vertical')
plt.ylabel("Year of Publication")
plt.xlabel("Country of Publication")
plt.ylim(ymin, ymax)
plt.show()
| mit |
chawins/aml | parameters_yolo.py | 1 | 1410 | # Import packages for all files
import os
import pickle
import random
import threading
import time
from os import listdir
import cv2
import keras
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from pandas.io.parsers import read_csv
from scipy import misc
from tensorflow.contrib.opt import ScipyOptimizerInterface
# Set constants
NUM_LABELS = 43 # Number of labels
BATCH_SIZE = 32 # Size of batch
HEIGHT = 608 # Input image height in pixels
WIDTH = 608 # Input image width in pixels
N_CHANNEL = 3 # Number of channels
# NOTE(review): 19 = 608/32, the YOLO output grid for a 608x608 input;
# 425 presumably encodes 5 anchors x 85 values per cell -- confirm.
OUTPUT_DIM = (19, 19, 425) # Number of output dimension
NUM_EPOCH = 100 # Number of epoch to train
LR = 0.0001 # Learning rate
L2_LAMBDA = 0.0001 # Lambda for l2 regularization
# Set paths
# Path to saved weights
# NOTE(review): "WEIGTHS" is misspelled, but other modules may import this
# name -- do not rename without checking callers.
WEIGTHS_PATH = "./keras_weights/weights_mltscl_dataaug.hdf5"
# Path to directory containing dataset
DATA_DIR = "./input_data/"
INPUT_SHAPE = (1, HEIGHT, WIDTH, N_CHANNEL) # Input shape of model (batch of one)
IMG_SHAPE = (HEIGHT, WIDTH, N_CHANNEL) # Image shape
IMAGE_SIZE = (HEIGHT, WIDTH) # Height and width of resized image
N_FEATURE = HEIGHT * WIDTH * N_CHANNEL # Number of input dimension
| mit |
simonsfoundation/CaImAn | demos/obsolete/demo_caiman_patches.py | 2 | 10389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 18:39:45 2016
@author: Andrea Giovannucci
For explanation consult at https://github.com/agiovann/Constrained_NMF/releases/download/v0.4-alpha/Patch_demo.zip
and https://github.com/agiovann/Constrained_NMF
"""
from __future__ import division
from __future__ import print_function
#%%
from builtins import str
from builtins import range
from past.utils import old_div
import caiman.source_extraction.cnmf.params
try:
if __IPYTHON__:
# this is used for debugging purposes only. allows to reload classes when changed
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
print('Not launched under iPython')
import sys
import numpy as np
from time import time
from scipy.sparse import coo_matrix
import psutil
import glob
import os
import scipy
from ipyparallel import Client
import matplotlib as mpl
# mpl.use('Qt5Agg')
import pylab as pl
pl.ion()
#%%
import caiman as cm
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.base.rois import extract_binary_masks_blob
import caiman.source_extraction.cnmf as cnmf
#%%
# frame rate in Hz
final_frate = 10
#%%
# backend='SLURM'
backend = 'local'
if backend == 'SLURM':
n_processes = np.int(os.environ.get('SLURM_NPROCS'))
else:
# roughly number of cores on your machine minus 1
n_processes = np.maximum(np.int(psutil.cpu_count()), 1)
print(('using ' + str(n_processes) + ' processes'))
#%% start cluster for efficient computation
single_thread = False
if single_thread:
dview = None
else:
try:
c.close()
except:
print('C was not existing, creating one')
print("Stopping cluster to avoid unnencessary use of memory....")
sys.stdout.flush()
if backend == 'SLURM':
try:
cm.stop_server(is_slurm=True)
except:
print('Nothing to stop')
slurm_script = '/mnt/xfs1/home/agiovann/SOFTWARE/Constrained_NMF/SLURM/slurmStart.sh'
cm.start_server(slurm_script=slurm_script)
pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
c = Client(ipython_dir=pdir, profile=profile)
else:
cm.stop_server()
cm.start_server()
c = Client()
print(('Using ' + str(len(c)) + ' processes'))
dview = c[:len(c)]
#%% FOR LOADING ALL TIFF FILES IN A FILE AND SAVING THEM ON A SINGLE MEMORY MAPPABLE FILE
fnames = []
base_folder = './example_movies/' # folder containing the demo files
for file in glob.glob(os.path.join(base_folder, '*.tif')):
if file.endswith("ie.tif"):
fnames.append(os.path.abspath(file))
fnames.sort()
if len(fnames) == 0:
raise Exception("Could not find any tiff file")
print(fnames)
fnames = fnames
#%%
# idx_x=slice(12,500,None)
# idx_y=slice(12,500,None)
# idx_xy=(idx_x,idx_y)
downsample_factor = 1 # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name, resize_fact=(
1, 1, downsample_factor), remove_init=0, idx_xy=idx_xy)
name_new.sort()
print(name_new)
#%%
name_new = cm.save_memmap_each(fnames, dview=dview, base_name='Yr', resize_fact=(
1, 1, 1), remove_init=0, idx_xy=None)
name_new.sort()
#%%
fname_new = cm.save_memmap_join(
name_new, base_name='Yr', n_chunks=12, dview=dview)
#%%
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
Y = np.reshape(Yr, dims + (T,), order='F')
#%% visualize correlation image
Cn = cm.local_correlations(Y)
pl.imshow(Cn, cmap='gray')
#%%
rf = 10 # half-size of the patches in pixels. rf=25, patches are 50x50
stride = 2 # amount of overlap between the patches in pixels (comment fixed: was garbled "amounpl.it")
K = 3 # number of neurons expected per patch
gSig = [7, 7] # expected half size of neurons
merge_thresh = 0.8 # merging threshold, max correlation allowed
p = 2 # order of the autoregressive system
memory_fact = 1 # unitless number accounting how much memory should be used. You will need to try different values to see which one would work the default is OK for a 16 GB system
save_results = True
#%% RUN ALGORITHM ON PATCHES
# Note: p=0 here (no deconvolution during the patch pass); p is applied later.
options_patch = caiman.source_extraction.cnmf.params.CNMFParams(dims, K=K, gSig=gSig, ssub=1, tsub=4, p=0, thr=merge_thresh)
A_tot, C_tot, YrA_tot, b, f, sn_tot, optional_outputs = cnmf.map_reduce.run_CNMF_patches(fname_new, (d1, d2, T), options_patch, rf=rf, stride=stride,
                                                                                         dview=dview, memory_fact=memory_fact, gnb=1)
print(('Number of components:' + str(A_tot.shape[-1])))
#%%
if save_results:
    np.savez('results_analysis_patch.npz', A_tot=A_tot.todense(),
             C_tot=C_tot, sn_tot=sn_tot, d1=d1, d2=d2, b=b, f=f)
#%% if you have many components this might take long!
pl.figure()
crd = plot_contours(A_tot, Cn, thr=0.9)
# %% set parameters for full field of view analysis
options = caiman.source_extraction.cnmf.params.CNMFParams(dims, K=A_tot.shape[-1], gSig=gSig, p=0, thr=merge_thresh)
pix_proc = np.minimum(np.int((d1 * d2) / n_processes / (old_div(T, 2000.))),
np.int(old_div((d1 * d2), n_processes))) # regulates the amount of memory used
options['spatial_params']['n_pixels_per_process'] = pix_proc
options['temporal_params']['n_pixels_per_process'] = pix_proc
#%% merge spatially overlaping and temporally correlated components
A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = cnmf.merging.merge_components(Yr, A_tot, [], np.array(C_tot), [], np.array(
C_tot), [], options['temporal_params'], options['spatial_params'], dview=dview, thr=options['merging']['thr'], mx=np.Inf)
#%% update temporal to get Y_r
options['temporal_params']['p'] = 0
# change ifdenoised traces time constant is wrong
options['temporal_params']['fudge_factor'] = 0.96
options['temporal_params']['backend'] = 'ipyparallel'
C_m, A_m, b, f_m, S_m, bl_m, c1_m, neurons_sn_m, g2_m, YrA_m = cnmf.temporal.update_temporal_components(
Yr, A_m, b, C_m, f, dview=dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
#%% get rid of evenrually noisy components.
# But check by visual inspection to have a feeling fot the threshold. Try to be loose, you will be able to get rid of more of them later!
tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
Npeaks = 10
traces = C_m + YrA_m
# traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
# traces_b=np.diff(traces,axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples =\
evaluate_components(Y, traces, A_m, C_m, b, f_m,
remove_baseline=True, N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, tB=tB, tA=tA, thresh_C=0.3)
idx_components_r = np.where(r_values >= .5)[0]
idx_components_raw = np.where(fitness_raw < -20)[0]
idx_components_delta = np.where(fitness_delta < -10)[0]
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(' ***** ')
print((len(traces)))
print((len(idx_components)))
#%%
A_m = A_m[:, idx_components]
C_m = C_m[idx_components, :]
#%% display components DO NOT RUN IF YOU HAVE TOO MANY COMPONENTS
pl.figure()
crd = plot_contours(A_m, Cn, thr=0.9)
#%%
print(('Number of components:' + str(A_m.shape[-1])))
#%% UPDATE SPATIAL OCMPONENTS
t1 = time()
A2, b2, C2, f = cnmf.spatial.update_spatial_components(
Yr, C_m, f, A_m, sn=sn_tot, dview=dview, dims=dims, **options['spatial_params'])
print((time() - t1))
#%% UPDATE TEMPORAL COMPONENTS
options['temporal_params']['p'] = p
# change ifdenoised traces time constant is wrong
options['temporal_params']['fudge_factor'] = 0.96
C2, A2, b2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = cnmf.temporal.update_temporal_components(
Yr, A2, b2, C2, f, dview=dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
#%% stop server and remove log files
log_files = glob.glob('Yr*_LOG_*')
for log_file in log_files:
os.remove(log_file)
#%% order components according to a quality threshold and only select the ones with quality larger than quality_threshold.
# BUG FIX: this assignment was previously bound to `B`, a name that is never
# read; the evaluate_components call below passes `tB=tB`, which silently
# reused the stale value computed earlier in the script. Both expressions are
# identical, so renaming `B` -> `tB` preserves behavior while making this
# recomputation actually take effect if the formula is ever changed.
tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
Npeaks = 10
traces = C2 + YrA  # denoised temporal components plus their residuals
# traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
# traces_b=np.diff(traces,axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = evaluate_components(
    Y, traces, A2, C2, b2, f2, remove_baseline=True, N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, tB=tB, tA=tA, thresh_C=0.3)
idx_components_r = np.where(r_values >= .6)[0]
idx_components_raw = np.where(fitness_raw < -60)[0]
idx_components_delta = np.where(fitness_delta < -20)[0]
min_radius = gSig[0] - 2
masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
A2.tocsc(), min_radius, dims, num_std_threshold=1,
minCircularity=0.6, minInertiaRatio=0.2, minConvexity=.8)
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_blobs = np.intersect1d(idx_components, idx_blobs)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(' ***** ')
print((len(traces)))
print((len(idx_components)))
print((len(idx_blobs)))
#%% visualize components
# pl.figure();
pl.subplot(1, 3, 1)
crd = plot_contours(A2.tocsc()[:, idx_components], Cn, thr=0.9)
pl.subplot(1, 3, 2)
crd = plot_contours(A2.tocsc()[:, idx_blobs], Cn, thr=0.9)
pl.subplot(1, 3, 3)
crd = plot_contours(A2.tocsc()[:, idx_components_bad], Cn, thr=0.9)
#%%
view_patches_bar(Yr, scipy.sparse.coo_matrix(A2.tocsc()[
:, idx_components]), C2[idx_components, :], b2, f2, dims[0], dims[1], YrA=YrA[idx_components, :], img=Cn)
#%%
view_patches_bar(Yr, scipy.sparse.coo_matrix(A2.tocsc()[
:, idx_components_bad]), C2[idx_components_bad, :], b2, f2, dims[0], dims[1], YrA=YrA[idx_components_bad, :], img=Cn)
#%% STOP CLUSTER
pl.close()
if not single_thread:
c.close()
cm.stop_server()
| gpl-2.0 |
WEP11/NEXpy | level3.py | 1 | 6752 | #######################################################
# #
# PYTHON NEXRAD PLOT GENERATION #
# #
# Level-3, Single Site #
# #
# Warren Pettee (@wpettee) #
# #
# #
#######################################################
import argparse
import sys
import time
from datetime import datetime, timedelta
import threading
from threading import Thread
from siphon.radarserver import RadarServer
from siphon.cdmr import Dataset
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.animation as animation
mpl.rcParams['toolbar'] = 'None'
import cartopy
from metpy.plots import ctables
import validation
#------------------------------------------------------
def update(frame):
    """Animation callback: fetch and draw one radar frame.

    Maps `frame` onto a 9-frame cycle (frame % 9), each step 10 minutes
    older than now, queries the matching Level-3 product from the UCAR
    THREDDS radar server, and draws it on the module-level axes.

    NOTE(review): this mixes the globals SITE/PRODUCT (used for validation)
    with the module-level `args.site`/`args.product` (used for the query and
    title) -- they hold the same values today, but one source should be used
    consistently. `ax`, `cmap` and `norm` are assumed to be defined at module
    level later in the file -- confirm. `colorTable` is assigned but unused
    in this function.
    """
    global SITE
    global PRODUCT
    print("Building Frame:",frame)
    #What is the age of this frame?
    frameIndex = frame % 9
    frameAge = frameIndex * 10 # minutes old
    # WHAT TIME WILL THE FRAME BE??
    date = datetime.utcnow() - timedelta(minutes=frameAge)
    year = date.year
    month = date.month
    day = date.day
    hour = date.hour
    minute = date.minute
    # What type of radar site is this?..
    siteType = validation.checkRadarType(SITE)
    ncfVar = validation.checkProduct(PRODUCT)
    colorTable = validation.checkColorTable(PRODUCT)
    if siteType=='88D':
        rs = RadarServer('http://thredds.ucar.edu/thredds/radarServer/nexrad/level3/IDD/')
    elif siteType=='TDWR':
        rs = RadarServer('http://thredds.ucar.edu/thredds/radarServer/terminal/level3/IDD/')
    else:
        print('INVALID SITE IDENTIFIER')
        sys.exit()
    # ACQUIRE DATA ----------------------------------------
    query = rs.query()
    query.stations(args.site).time(datetime(year,month,day,hour,minute)).variables(args.product)
    rs.validate_query(query)
    catalog = rs.get_catalog(query)
    catalog.datasets
    ds = list(catalog.datasets.values())[0]
    ds.access_urls
    # READ DATA ------------------------------------------
    data = Dataset(ds.access_urls['CdmRemote'])
    rng = data.variables['gate'][:]
    az = data.variables['azimuth'][:]
    ref = data.variables[ncfVar][:]
    # Polar (range, azimuth) -> Cartesian x/y for pcolormesh.
    x = (rng * np.sin(np.deg2rad(az))[:, None])
    y = (rng * np.cos(np.deg2rad(az))[:, None])
    ref = np.ma.array(ref, mask=np.isnan(ref))
    plot = ax.pcolormesh(x, y, ref, cmap=cmap, norm=norm, zorder=2)
    title_line1 = '%s %s - %i:%i' % (args.site,args.product,hour,minute)
    plt.title(title_line1,color='k',fontsize=18,fontweight='bold',style='italic')
    return plot
# ---------------------------------------------------------------------------
# Top-level script: parse CLI arguments, fetch the latest Level-3 product
# for the requested site, draw it over county/interstate shapefiles, and
# optionally build an animated GIF of the last ~90 minutes.
# ---------------------------------------------------------------------------

# Command Line Functions:
parser = argparse.ArgumentParser(description='NEXRAD/TDWR Site Information')
parser.add_argument('site',
                    help='3-Letter Site Identifier (WSR-88D or TDWR)')
parser.add_argument('product',
                    help='3-Letter Product Identifier (See RPCCDS list at NWS)\nExample:Reflectivity(N0R or TR0)\nVelocity(N0V or TV0)')
parser.add_argument('animate',
                    help='Build animated image? "true" or "false"')
args = parser.parse_args()

# Globals read by the animation callback update().
SITE=args.site
PRODUCT=args.product

# WHAT TIME IS IT? ------------------------------------
date = datetime.utcnow()
year = datetime.utcnow().year
month = datetime.utcnow().month
day = datetime.utcnow().day
hour = datetime.utcnow().hour
minute = datetime.utcnow().minute

# What type of radar site is this?..
siteType = validation.checkRadarType(args.site)
ncfVar = validation.checkProduct(args.product)
colorTable = validation.checkColorTable(args.product)

# Pick the matching THREDDS radar server (WSR-88D vs terminal radar).
if siteType=='88D':
    rs = RadarServer('http://thredds.ucar.edu/thredds/radarServer/nexrad/level3/IDD/')
elif siteType=='TDWR':
    rs = RadarServer('http://thredds.ucar.edu/thredds/radarServer/terminal/level3/IDD/')
else:
    print('INVALID SITE IDENTIFIER')
    sys.exit()

# ACQUIRE DATA ----------------------------------------
query = rs.query()
query.stations(args.site).time(datetime(year,month,day,hour,minute)).variables(args.product)
rs.validate_query(query)
catalog = rs.get_catalog(query)
catalog.datasets  # no-op attribute access kept from original
ds = list(catalog.datasets.values())[0]
ds.access_urls  # no-op attribute access kept from original

# READ DATA ------------------------------------------
print("--- AQUIRING DATA ---")
data = Dataset(ds.access_urls['CdmRemote'])
print("Aquisition Complete!!")
#print (data.variables) ### DEBUG
rng = data.variables['gate'][:]
az = data.variables['azimuth'][:]
ref = data.variables[ncfVar][:]
# Polar (range, azimuth) -> cartesian x/y relative to the radar site.
x = (rng * np.sin(np.deg2rad(az))[:, None])
y = (rng * np.cos(np.deg2rad(az))[:, None])
ref = np.ma.array(ref, mask=np.isnan(ref))

# BEGIN PLOTTING --------------------------------------------------
### TODO: Losing loading time in county rendering.. fix me
# Create projection centered on the radar. This allows us to use x
# and y relative to the radar.
proj = cartopy.crs.LambertConformal(central_longitude=data.RadarLongitude,
                                    central_latitude=data.RadarLatitude)

# New figure with specified projection
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, projection=proj)
print("Loading geography overlays...")

# Grab state borders
#state_borders = cartopy.feature.NaturalEarthFeature(
#    category='cultural', name='admin_1_states_provinces_lines',
#    scale='50m', facecolor='none')
#ax.add_feature(state_borders, edgecolor='black', linewidth=2, zorder=2)

# Counties (local shapefile under data/)
counties = cartopy.io.shapereader.Reader('data/counties')
ax.add_geometries(counties.geometries(), cartopy.crs.PlateCarree(),
                  facecolor='#C2A385', edgecolor='grey', zorder=1)
# Interstates
interstate = cartopy.io.shapereader.Reader('data/interstates')
ax.add_geometries(interstate.geometries(), cartopy.crs.PlateCarree(),
                  facecolor='none', edgecolor='#B20000', zorder=1)
# Hydrography
#hydro = cartopy.io.shapereader.Reader('data/hydro')
#ax.add_geometries(hydro.geometries(), cartopy.crs.PlateCarree(),
#                  facecolor='none', edgecolor='#ADD6FF', zorder=1)

# Set limits in lat/lon space
# LonW, LonE, LatN, LatS
#ax.set_extent([-81.8, -80, 36, 34.5])

print("Building Figure...")
norm, cmap = ctables.registry.get_with_steps(colorTable, 5, 5)
plot = ax.pcolormesh(x, y, ref, cmap=cmap, norm=norm, zorder=2)
#ax.contourf(x, y, ref, cmap=cmap, norm=norm, zorder=2)
title_line1 = '%s %s - %i:%i' % (args.site,args.product,hour,minute)
plt.title(title_line1,color='k',fontsize=18,fontweight='bold',style='italic')

if args.animate == 'true':
    # NOTE(review): rebinding `animation` here shadows the imported
    # matplotlib.animation module name for the rest of the script.
    animation = animation.FuncAnimation(fig, update, interval=15, blit=False, frames=10)
    animation.save('nexpy.gif', writer='imagemagick', fps=15, dpi=40)

plt.show()
| gpl-3.0 |
deepchem/deepchem | examples/low_data/toxcast_maml.py | 4 | 3781 | from __future__ import print_function
import deepchem as dc
import numpy as np
import tensorflow as tf
from sklearn.metrics import accuracy_score
# Load the data.
tasks, datasets, transformers = dc.molnet.load_toxcast()
(train_dataset, valid_dataset, test_dataset) = datasets
x = train_dataset.X  # molecule features, shape (n_molecules, n_features)
y = train_dataset.y  # task labels
w = train_dataset.w  # sample weights; w == 0 marks missing labels
n_features = x.shape[1]
n_molecules = y.shape[0]
n_tasks = y.shape[1]

# Toxcast has data on 6874 molecules and 617 tasks. However, the data is very
# sparse: most tasks do not include data for most molecules. It also is very
# unbalanced: there are many more negatives than positives. For each task,
# create a list of alternating positives and negatives so each batch will have
# equal numbers of both.
task_molecules = []
for i in range(n_tasks):
  positives = [j for j in range(n_molecules) if w[j, i] > 0 and y[j, i] == 1]
  negatives = [j for j in range(n_molecules) if w[j, i] > 0 and y[j, i] == 0]
  np.random.shuffle(positives)
  np.random.shuffle(negatives)
  # Interleave positives and negatives; zip truncates to the shorter
  # list, which keeps the interleaving balanced.
  mols = sum((list(m) for m in zip(positives, negatives)), [])
  task_molecules.append(mols)
# Define a MetaLearner describing the learning problem.
class ToxcastLearner(dc.metalearning.MetaLearner):
  """MetaLearner defining the Toxcast learning problem for MAML.

  The model is a one-hidden-layer (1000 unit) sigmoid classifier over
  the module-level feature matrix ``x``; the first 80% of tasks are
  used for meta-training and batches alternate positives/negatives via
  the module-level ``task_molecules`` lists.
  """

  def __init__(self):
    # Number of tasks used for meta-training (the rest are validation).
    self.n_training_tasks = int(n_tasks * 0.8)
    self.batch_size = 10
    # Per-task cursor into task_molecules for sequential batching.
    self.batch_start = [0] * n_tasks
    self.set_task_index(0)
    # Model weights: input->hidden, hidden->output, plus biases.
    self.w1 = tf.Variable(
        np.random.normal(size=[n_features, 1000], scale=0.02), dtype=tf.float32)
    self.w2 = tf.Variable(
        np.random.normal(size=[1000, 1], scale=0.02), dtype=tf.float32)
    self.b1 = tf.Variable(np.ones(1000), dtype=tf.float32)
    self.b2 = tf.Variable(np.zeros(1), dtype=tf.float32)

  def compute_model(self, inputs, variables, training):
    """Run the model on (features, labels); return (loss, [outputs])."""
    x, y = [tf.cast(i, tf.float32) for i in inputs]
    w1, w2, b1, b2 = variables
    dense1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    logits = tf.matmul(dense1, w2) + b2
    output = tf.sigmoid(logits)
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
    return loss, [output]

  @property
  def variables(self):
    """Trainable variables, in the order compute_model unpacks them."""
    return [self.w1, self.w2, self.b1, self.b2]

  def set_task_index(self, index):
    self.task = index

  def select_task(self):
    # Cycle round-robin through the meta-training tasks only.
    self.set_task_index((self.task + 1) % self.n_training_tasks)

  def get_batch(self):
    """Return [features, labels] for the next batch of the current task."""
    task = self.task
    start = self.batch_start[task]
    mols = task_molecules[task][start:start + self.batch_size]
    labels = np.zeros((self.batch_size, 1))
    labels[np.arange(self.batch_size), 0] = y[mols, task]
    # Wrap back to the start when a full next batch would run past the
    # end of this task's molecule list.
    if start + 2 * self.batch_size > len(task_molecules[task]):
      self.batch_start[task] = 0
    else:
      self.batch_start[task] += self.batch_size
    return [x[mols, :], labels]
# Run meta-learning on 80% of the tasks.
n_epochs = 20
learner = ToxcastLearner()
maml = dc.metalearning.MAML(learner)
# One MAML step consumes meta_batch_size tasks, so scale the step count
# to cover every training task n_epochs times.
steps = n_epochs * learner.n_training_tasks // maml.meta_batch_size
maml.fit(steps)
# Validate on the remaining tasks.
def compute_scores(optimize):
  """Evaluate the meta-learned model on the held-out validation tasks.

  Restores the meta-trained weights, optionally fine-tunes on each
  validation task (``optimize=True``), then prints cross-entropy loss,
  accuracy and ROC AUC aggregated over one batch per task.
  """
  maml.restore()
  y_true = []
  y_pred = []
  losses = []
  for task in range(learner.n_training_tasks, n_tasks):
    learner.set_task_index(task)
    if optimize:
      # Fine-tune from the restored meta-parameters for this task.
      maml.train_on_current_task(restore=True)
    inputs = learner.get_batch()
    loss, prediction = maml.predict_on_batch(inputs)
    y_true.append(inputs[1])
    y_pred.append(prediction[0][:, 0])
    losses.append(loss)
  y_true = np.concatenate(y_true)
  y_pred = np.concatenate(y_pred)
  print()
  print('Cross entropy loss:', np.mean(losses))
  print('Prediction accuracy:', accuracy_score(y_true, y_pred > 0.5))
  print('ROC AUC:', dc.metrics.roc_auc_score(y_true, y_pred))
  print()
# Report validation metrics with and without per-task fine tuning.
print('Before fine tuning:')
compute_scores(False)
print('After fine tuning:')
compute_scores(True)
| mit |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/util/doctools.py | 11 | 6612 | import numpy as np
import pandas as pd
import pandas.compat as compat
class TablePlotter(object):
    """
    Layout some DataFrames in vertical/horizontal layout for explanation.
    Used in merging.rst
    """

    def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
        # Per-cell geometry (figure inches) and table label font size.
        self.cell_width = cell_width
        self.cell_height = cell_height
        self.font_size = font_size

    def _shape(self, df):
        """Calculate table shape (rows, cols) including index/column levels"""
        row, col = df.shape
        return row + df.columns.nlevels, col + df.index.nlevels

    def _get_cells(self, left, right, vertical):
        """Calculate required (horizontal, vertical) cell counts for the layout"""
        if vertical:
            # calculate required number of cells
            vcells = max(sum([self._shape(l)[0] for l in left]), self._shape(right)[0])
            hcells = max([self._shape(l)[1] for l in left]) + self._shape(right)[1]
        else:
            vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]])
            hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]])
        return hcells, vcells

    def plot(self, left, right, labels=None, vertical=True):
        """
        Plot left / right DataFrames in specified layout.

        Parameters
        ----------
        left : list of DataFrames before operation is applied
        right : DataFrame of operation result
        labels : list of str to be drawn as titles of left DataFrames
        vertical : bool
            If True, use vertical layout. If False, use horizontal layout.

        Returns
        -------
        matplotlib.figure.Figure
        """
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec

        if not isinstance(left, list):
            left = [left]
        left = [self._conv(l) for l in left]
        right = self._conv(right)

        hcells, vcells = self._get_cells(left, right, vertical)

        # NOTE(review): both branches compute the same figsize; the
        # "margin for titles" mentioned below is not actually applied.
        if vertical:
            figsize = self.cell_width * hcells, self.cell_height * vcells
        else:
            # include margin for titles
            figsize = self.cell_width * hcells, self.cell_height * vcells
        fig = plt.figure(figsize=figsize)

        if vertical:
            gs = gridspec.GridSpec(len(left), hcells)
            # left
            max_left_cols = max([self._shape(l)[1] for l in left])
            max_left_rows = max([self._shape(l)[0] for l in left])
            for i, (l, label) in enumerate(zip(left, labels)):
                ax = fig.add_subplot(gs[i, 0:max_left_cols])
                self._make_table(ax, l, title=label, height=1.0/max_left_rows)
            # right
            ax = plt.subplot(gs[:, max_left_cols:])
            self._make_table(ax, right, title='Result', height=1.05/vcells)
            fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
        else:
            max_rows = max([self._shape(df)[0] for df in left + [right]])
            height = 1.0 / np.max(max_rows)
            gs = gridspec.GridSpec(1, hcells)
            # left
            i = 0
            for l, label in zip(left, labels):
                sp = self._shape(l)
                ax = fig.add_subplot(gs[0, i:i+sp[1]])
                self._make_table(ax, l, title=label, height=height)
                i += sp[1]
            # right
            ax = plt.subplot(gs[0, i:])
            self._make_table(ax, right, title='Result', height=height)
            fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
        return fig

    def _conv(self, data):
        """Convert each input to a DataFrame suitable for table output"""
        if isinstance(data, pd.Series):
            if data.name is None:
                data = data.to_frame(name='')
            else:
                data = data.to_frame()
        data = data.fillna('NaN')
        return data

    def _insert_index(self, data):
        """Move index (and multi-level columns) into the table body so
        they render as ordinary cells."""
        # insert is destructive
        data = data.copy()
        idx_nlevels = data.index.nlevels
        if idx_nlevels == 1:
            data.insert(0, 'Index', data.index)
        else:
            for i in range(idx_nlevels):
                data.insert(i, 'Index{0}'.format(i), data.index.get_level_values(i))

        col_nlevels = data.columns.nlevels
        if col_nlevels > 1:
            # Flatten MultiIndex columns: keep level 0 as the header and
            # prepend the remaining levels as extra body rows.
            col = data.columns.get_level_values(0)
            values = [data.columns.get_level_values(i).values for i in range(1, col_nlevels)]
            col_df = pd.DataFrame(values)
            data.columns = col_df.columns
            data = pd.concat([col_df, data])
            data.columns = col
        return data

    def _make_table(self, ax, df, title, height=None):
        """Draw ``df`` as a matplotlib table on ``ax`` (hidden if None)."""
        if df is None:
            ax.set_visible(False)
            return

        import pandas.tools.plotting as plotting

        idx_nlevels = df.index.nlevels
        col_nlevels = df.columns.nlevels
        # must be convert here to get index levels for colorization
        df = self._insert_index(df)
        tb = plotting.table(ax, df, loc=9)
        tb.set_fontsize(self.font_size)

        if height is None:
            height = 1.0 / (len(df) + 1)
        props = tb.properties()
        for (r, c), cell in compat.iteritems(props['celld']):
            if c == -1:
                # Hide matplotlib's automatic row-label column.
                cell.set_visible(False)
            elif r < col_nlevels and c < idx_nlevels:
                cell.set_visible(False)
            elif r < col_nlevels or c < idx_nlevels:
                # Shade header and index cells.
                cell.set_facecolor('#AAAAAA')
            cell.set_height(height)

        ax.set_title(title, size=self.font_size)
        ax.axis('off')
if __name__ == "__main__":
    # Visual smoke test: render vertical, horizontal and MultiIndex
    # layouts interactively.
    import pandas as pd
    import matplotlib.pyplot as plt

    p = TablePlotter()

    df1 = pd.DataFrame({'A': [10, 11, 12],
                        'B': [20, 21, 22],
                        'C': [30, 31, 32]})
    df2 = pd.DataFrame({'A': [10, 12],
                        'C': [30, 32]})
    p.plot([df1, df2], pd.concat([df1, df2]),
           labels=['df1', 'df2'], vertical=True)
    plt.show()

    df3 = pd.DataFrame({'X': [10, 12],
                        'Z': [30, 32]})
    p.plot([df1, df3], pd.concat([df1, df3], axis=1),
           labels=['df1', 'df2'], vertical=False)
    plt.show()

    # MultiIndex rows and columns.
    idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
                                     (2, 'A'), (2, 'B'), (2, 'C')])
    col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
    df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
                        'v2': [5, 6, 7, 8, 9, 10]},
                       index=idx)
    df3.columns = col
    p.plot(df3, df3, labels=['df3'])
    plt.show()
shamrt/jsPASAT | scripts/compile_data.py | 1 | 17970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A script that parses raw data output by jsPASAT (jsPsych), compiles each
participant's data and creates/updates a file in jsPASAT's ``data``
directory.
"""
import os
import glob
import json
import pandas as pd
import numpy as np
from scipy import stats
PROJECT_DIR = os.path.abspath(os.path.join(__file__, '..', '..'))
DATA_DIR = os.path.join(PROJECT_DIR, 'data')
ROUND_NDIGITS = 9
def get_csv_paths(basedir, exp_stage):
    """Return the paths of every CSV file stored under
    ``<basedir>/<exp_stage>/``.
    """
    return glob.glob(os.path.join(basedir, exp_stage, '*.csv'))
def get_csv_as_dataframe(path):
    """Take CSV path. Return pandas dataframe indexed by the
    ``trial_index_global`` column.

    Uses ``pd.read_csv``: the original ``DataFrame.from_csv`` was
    deprecated in pandas 0.21 and removed in pandas 1.0.
    """
    return pd.read_csv(path, index_col='trial_index_global')
def compile_practice_data(df):
    """Take pandas dataframe of raw practice-stage data and compile key
    variables. Return dict with the participant id, whether the second
    practice block was passed, and block/total timings in milliseconds.

    Assumes the frame is indexed by ``trial_index_global`` with the
    practice block boundaries at fixed labels 1/7 and 10/26 -- TODO
    confirm against the jsPsych output format.
    """
    compiled_data = {}

    # participant ID (constant per file; take the first row's value)
    participant_id_col = df['participant_id'].values
    compiled_data['id'] = participant_id_col[0]

    # was the second practice block completed successfully?
    passed_practice = ('0-0.5-0' in df['internal_chunk_id'].values)
    compiled_data['passed_practice'] = passed_practice

    # time taken to complete practice blocks
    # (.loc replaces the long-deprecated .ix indexer, removed in pandas 1.0)
    time_block_1_start_ms = int(df.loc[1]['time_elapsed'])
    time_block_1_end_ms = int(df.loc[7]['time_elapsed'])
    time_practice_blk1_ms = time_block_1_end_ms - time_block_1_start_ms
    compiled_data['time_practice_blk1_ms'] = time_practice_blk1_ms

    time_block_2_start_ms = int(df.loc[10]['time_elapsed'])
    time_block_2_end_ms = int(df.loc[26]['time_elapsed'])
    time_practice_blk2_ms = time_block_2_end_ms - time_block_2_start_ms
    compiled_data['time_practice_blk2_ms'] = time_practice_blk2_ms

    # time taken to complete entire practice
    time_practice_ms = int(df.loc[df.last_valid_index()]['time_elapsed'])
    compiled_data['time_practice_ms'] = time_practice_ms
    return compiled_data
def get_response_from_json(string, question_number=0):
    """Take JSON string representing a survey response and decode.
    Return the answer string for question ``Q<question_number>``, or
    ``None`` when that question is absent.
    """
    # json.loads + dict.get replaces the manual JSONDecoder and the
    # membership-test/conditional-expression dance of the original.
    responses = json.loads(string)
    return responses.get("Q{}".format(question_number))
def summarize_pasat_chunk(df):
    """Take pandas dataframe representing raw PASAT chunk data and summarize.

    Returns a dict with the block type, accuracy over the real trials
    (the leading fixation trial is dropped), the effort/discomfort
    ratings parsed from the closing survey row, and the survey's elapsed
    time in minutes (consumed later by the slope regressions).
    """
    summary = {}

    block_type_col = df['block_type'].dropna().values
    summary['block_type'] = block_type_col[0]

    # summarize performance over the stimulus/response trials
    raw_trials = df.loc[df['trial_type'] == 'multi-stim-multi-response']
    trials = list(raw_trials['correct'].values)
    trials.pop(0)  # remove fixation data
    accuracy = float(trials.count(True)) / len(trials)
    summary['accuracy'] = round(accuracy, ROUND_NDIGITS)

    # affective ratings from the final (survey) row
    # (.loc replaces the long-deprecated .ix indexer, removed in pandas 1.0)
    ratings_json = df.loc[df.last_valid_index()]['responses']
    raw_effort_rating = get_response_from_json(ratings_json)
    summary['effort'] = int(raw_effort_rating[0])
    raw_discomfort_rating = get_response_from_json(ratings_json, 1)
    summary['discomfort'] = int(raw_discomfort_rating[0])

    # get time elapsed (in minutes) at ratings for later slope calculations
    ratings_time_ms = df.loc[df.last_valid_index()]['time_elapsed']
    summary['ratings_time_min'] = round(
        ratings_time_ms / 1000 / 60.0, ROUND_NDIGITS)
    return summary
def _calculate_ratings_proportions(ratings):
    """Given a list of ratings integers, classify each consecutive
    transition. Return dict with the proportions of transitions that
    were increases ('ups'), decreases ('downs') and no-changes
    ('sames'), each relative to the number of possible transitions.
    """
    possible_changes = len(ratings) - 1

    def as_proportion(count):
        # Share of possible transitions, rounded like the module's
        # other statistics.
        return round(float(count) / possible_changes, ROUND_NDIGITS)

    n_ups = 0
    n_downs = 0
    n_sames = 0
    for previous, current in zip(ratings, ratings[1:]):
        # Mirror the original's truthiness guard: a falsy previous
        # rating does not start a counted transition.
        if not previous:
            continue
        if current > previous:
            n_ups += 1
        elif current < previous:
            n_downs += 1
        else:
            n_sames += 1

    return {
        'ups': as_proportion(n_ups),
        'downs': as_proportion(n_downs),
        'sames': as_proportion(n_sames),
    }
def compile_experiment_data(df):
    """Take pandas dataframe and compile key variables. Return dict.

    Summarizes the experimental PASAT blocks: per-block accuracy and
    effort/discomfort ratings, min/max/avg/start/end aggregates,
    per-measure regression slopes over time, proportions of rating
    increases/decreases, and area-under-curve values.

    NOTE(review): uses the .ix indexer, which was removed in pandas 1.0;
    migrating to .loc would be required on modern pandas.
    """
    compiled_data = {}

    # condition
    condition_col = df['condition'].values
    compiled_data['condition'] = condition_col[0]

    # blocks and block order (comma-separated list of block types)
    block_order_col = df['block_order'].values
    block_order = block_order_col[0]
    blocks = block_order.split(',')
    compiled_data['block_order'] = block_order
    compiled_data['num_blocks'] = len(blocks)

    # hard and easy block positions (1-based index)
    compiled_data['block_hard'] = blocks.index('hard') + 1
    compiled_data['block_easy'] = blocks.index('easy') + 1

    # anticipated questions
    anticipated_questions_index = [
        ('anticipated_enjoyment', 1),
        ('anticipated_performance', 2),
        ('anticipated_effort', 3),
        ('anticipated_discomfort', 4),
        ('anticipated_fatigue', 5)
    ]
    for label, i in anticipated_questions_index:
        response = get_response_from_json(df.ix[i]['responses'])
        compiled_data[label] = int(response[0])

    # PASAT accuracy and affective reports
    hard_accuracy = None
    medium_accuracy = None
    easy_accuracy = None
    hard_effort = None
    medium_effort = None
    easy_effort = None
    hard_discomfort = None
    medium_discomfort = None
    easy_discomfort = None

    effort_ratings = []
    discomfort_ratings = []
    accuracies = []
    rating_times = []
    blocks_order = []
    medium_effort_ratings = []
    medium_discomfort_ratings = []
    medium_accuracies = []
    medium_block_rating_times = []

    # collect and organize experiment data from experimental blocks
    for i, block in enumerate(blocks, start=1):
        # note: PASAT chunks start at chunk_id 0-0.3-0
        block_chunk_id = '0-0.{}-0'.format(i + 2)
        block = df.loc[df['internal_chunk_id'] == block_chunk_id]
        block_summary = summarize_pasat_chunk(block)
        blocks_order.append(i)

        # add block summary rating times to list for later slope
        # calculations
        ratings_time_min = block_summary.pop('ratings_time_min')
        rating_times.append(ratings_time_min)

        # add block summaries to compiled data
        compiled_data['effort_{}'.format(i)] = block_summary['effort']
        discomfort_key = 'discomfort_{}'.format(i)
        compiled_data[discomfort_key] = block_summary['discomfort']
        accuracy_key = 'accuracy_{}'.format(i)
        compiled_data[accuracy_key] = block_summary['accuracy']

        # identify and organize data by block type; medium blocks may
        # occur several times, hard/easy are single occurrences
        effort_ratings.append(block_summary['effort'])
        discomfort_ratings.append(block_summary['discomfort'])
        accuracies.append(block_summary['accuracy'])
        if block_summary['block_type'] == 'medium':
            medium_block_rating_times.append(ratings_time_min)
            medium_accuracies.append(block_summary['accuracy'])
            medium_effort_ratings.append(block_summary['effort'])
            medium_discomfort_ratings.append(
                block_summary['discomfort'])
        elif block_summary['block_type'] == 'hard':
            hard_accuracy = block_summary['accuracy']
            hard_effort = block_summary['effort']
            hard_discomfort = block_summary['discomfort']
        elif block_summary['block_type'] == 'easy':
            easy_accuracy = block_summary['accuracy']
            easy_effort = block_summary['effort']
            easy_discomfort = block_summary['discomfort']

    # minimum/maximum discomfort and effort ratings
    compiled_data['min_effort'] = min(effort_ratings)
    compiled_data['max_effort'] = max(effort_ratings)
    compiled_data['min_discomfort'] = min(discomfort_ratings)
    compiled_data['max_discomfort'] = max(discomfort_ratings)

    # compute medium block averages
    medium_accuracy = np.mean(medium_accuracies)
    compiled_data['medium_accuracy'] = round(medium_accuracy, ROUND_NDIGITS)
    medium_effort = np.mean(medium_effort_ratings)
    compiled_data['medium_effort'] = round(medium_effort, ROUND_NDIGITS)
    medium_discomfort = np.mean(medium_discomfort_ratings)
    compiled_data['medium_discomfort'] = round(
        medium_discomfort, ROUND_NDIGITS)

    # compute regression variables for blocks (measure vs. elapsed time)
    block_measures = [
        ('accuracy', accuracies, rating_times),
        ('effort', effort_ratings, rating_times),
        ('discomfort', discomfort_ratings, rating_times),
        ('medium_accuracy', medium_accuracies,
         medium_block_rating_times),
        ('medium_effort', medium_effort_ratings,
         medium_block_rating_times),
        ('medium_discomfort', medium_discomfort_ratings,
         medium_block_rating_times)
    ]
    for measure_name, measure_y_val, measure_x_val in block_measures:
        measure_regress = stats.linregress(
            measure_x_val, measure_y_val)
        compiled_data['{}_slope'.format(measure_name)] = round(
            measure_regress.slope, ROUND_NDIGITS)
        compiled_data['{}_intercept'.format(measure_name)] = round(
            measure_regress.intercept, ROUND_NDIGITS)

    # proportion of effort and discomfort ratings that increase or decrease
    discomfort_props = _calculate_ratings_proportions(
        discomfort_ratings)
    compiled_data['prop_discomfort_ups'] = discomfort_props['ups']
    compiled_data['prop_discomfort_downs'] = discomfort_props['downs']
    compiled_data['prop_discomfort_sames'] = discomfort_props['sames']
    effort_props = _calculate_ratings_proportions(effort_ratings)
    compiled_data['prop_effort_ups'] = effort_props['ups']
    compiled_data['prop_effort_downs'] = effort_props['downs']
    compiled_data['prop_effort_sames'] = effort_props['sames']

    # assign other variables
    compiled_data['hard_accuracy'] = hard_accuracy
    compiled_data['hard_effort'] = hard_effort
    compiled_data['hard_discomfort'] = hard_discomfort
    compiled_data['easy_accuracy'] = easy_accuracy
    compiled_data['easy_effort'] = easy_effort
    compiled_data['easy_discomfort'] = easy_discomfort

    compiled_data['start_effort'] = effort_ratings[0]
    compiled_data['peak_effort'] = max(effort_ratings)
    compiled_data['end_effort'] = effort_ratings[-1]
    avg_effort = np.mean(effort_ratings)
    compiled_data['avg_effort'] = round(avg_effort, ROUND_NDIGITS)

    compiled_data['start_discomfort'] = discomfort_ratings[0]
    compiled_data['peak_discomfort'] = max(discomfort_ratings)
    compiled_data['end_discomfort'] = discomfort_ratings[-1]
    avg_discomfort = np.mean(discomfort_ratings)
    compiled_data['avg_discomfort'] = round(avg_discomfort, ROUND_NDIGITS)

    average_accuracy = np.mean(accuracies)
    compiled_data['avg_accuracy'] = round(average_accuracy, ROUND_NDIGITS)
    compiled_data['max_accuracy'] = max(accuracies)
    compiled_data['min_accuracy'] = min(accuracies)
    compiled_data['start_accuracy'] = accuracies[0]
    compiled_data['end_accuracy'] = accuracies[-1]

    # area under the curve calculations (trapezoidal rule over blocks)
    compiled_data['auc_accuracy'] = round(
        np.trapz(accuracies), ROUND_NDIGITS)
    compiled_data['auc_effort'] = round(
        np.trapz(effort_ratings), ROUND_NDIGITS)
    compiled_data['auc_discomfort'] = round(
        np.trapz(discomfort_ratings), ROUND_NDIGITS)

    # time taken to complete working memory task
    time_experiment_ms = int(df.ix[df.last_valid_index()]['time_elapsed'])
    compiled_data['time_experiment_ms'] = time_experiment_ms
    return compiled_data
def compile_demographic_data(df):
    """Take pandas dataframe of follow-up survey data and compile key
    variables. Return dict of demographic answers, behavioural-survey
    answers, and follow-up timing fields (ms).

    Assumes the frame is indexed by ``trial_index_global`` with survey
    responses at fixed labels 1-46 -- TODO confirm against the jsPsych
    output format.
    """
    compiled_data = {}
    # (removed an unused `responses` local that dropna'd the whole
    # column but was never read)

    demographics_index = [
        # demographics questions
        ('age', 1),
        ('dob', 2),
        ('sex', 3),
        ('edu_year', 4),
        ('edu_plan', 5),
        ('first_lang', 6),
        ('years_eng', 7),
        ('mother_edu', 8),
        ('mother_job', 9),
        ('father_edu', 10),
        ('father_job', 11),
        ('high_school_avg', 12),
        ('uni_avg', 13),
        ('num_uni_stats', 14),
        ('num_hs_stats', 15),
        ('num_hs_math', 16),
        ('num_uni_math', 17),
        ('math_enjoy', 18),
        ('adhd_diag', 19),
        ('uni_major', 20),

        # electronics and Internet survey
        ('elect_survey_1', 21),
        ('elect_survey_2', 22),
        ('elect_survey_3', 23),
        ('elect_survey_4', 24),
        ('elect_survey_5', 25),
        ('elect_survey_6', 26),
        ('elect_survey_7', 27),
    ]
    for label, i in demographics_index:
        # .loc replaces the long-deprecated .ix indexer (removed in pandas 1.0)
        response = get_response_from_json(df.loc[i]['responses'])
        compiled_data[label] = response.strip()

    behavioural_survey = [
        # behavioural survey
        ('behav_survey_1', 29),
        ('behav_survey_2', 30),
        ('behav_survey_3', 31),
        ('behav_survey_4', 32),
        ('behav_survey_5', 33),
        ('behav_survey_6', 34),
        ('behav_survey_7', 35),
        ('behav_survey_8', 36),
        ('behav_survey_9', 37),
        ('behav_survey_10', 38),
        ('behav_survey_11', 39),
        ('behav_survey_12', 40),
        ('behav_survey_13', 41),
        ('behav_survey_14', 42),
        ('behav_survey_15', 43),
        ('behav_survey_16', 44),
        ('behav_survey_17', 45),
        ('behav_survey_18', 46),
    ]
    for label, i in behavioural_survey:
        response = get_response_from_json(df.loc[i]['responses'])
        # keep only the numeric prefix for Likert-style answers
        if response[0].isdigit():
            response = response[0]
        compiled_data[label] = response

    # post-working memory task delay (only present for some participants)
    if 47 in df.index.values:
        compiled_data['time_pwmt_delay_ms'] = int(df.loc[47]['time_elapsed'])

    # time taken for post-working memory task follow-up
    time_follow_up_ms = int(df.loc[df.last_valid_index()]['time_elapsed'])
    compiled_data['time_follow_up_ms'] = time_follow_up_ms
    return compiled_data
def compile_retrospective_data(df):
    """Take pandas dataframe of follow-up data and compile the
    retrospective ratings about the working memory task. Return dict of
    integer ratings keyed ``pwmt_*``.

    Assumes the retrospective questions live at fixed index labels
    48-54 -- TODO confirm against the jsPsych output format.
    """
    compiled_data = {}
    # (removed an unused `responses` local that dropna'd the whole
    # column but was never read)

    # retrospective questions
    retrospective_index = [
        ('pwmt_effort', 48),
        ('pwmt_discomfort', 49),
        ('pwmt_enjoyment', 50),
        ('pwmt_performance', 51),
        ('pwmt_fatigue', 52),
        ('pwmt_satisfaction', 53),
        ('pwmt_willingtodowmt', 54),
    ]
    for label, i in retrospective_index:
        # .loc replaces the long-deprecated .ix indexer (removed in pandas 1.0)
        response = get_response_from_json(df.loc[i]['responses'])
        compiled_data[label] = int(response[0])
    return compiled_data
def main():
    """Compile every participant's raw jsPASAT output into
    ``data/compiled.csv`` plus the block-wise ``data/alt_compiled.csv``.

    For each practice CSV found, the matching experiment/follow-up CSVs
    (named ``<participant_id>.csv``) are compiled if present; otherwise
    the participant is flagged with ``missing_data``.
    """
    # collect lists of raw data CSVs
    raw_data_csvs = {}
    for exp_stage in ['practice', 'experiment', 'follow_up']:
        raw_data_csvs[exp_stage] = get_csv_paths(DATA_DIR, exp_stage)

    # create list of compiled participant data
    compiled_participants = []
    for practice_csv in raw_data_csvs['practice']:
        participant = {
            'missing_data': False
        }

        # compile practice data
        practice_df = get_csv_as_dataframe(practice_csv)
        compiled_practice_data = compile_practice_data(practice_df)
        participant.update(compiled_practice_data)

        # compile experimental and follow up data
        # note: checks to ensure that assumed CSV files exist
        for exp_stage in ['experiment', 'follow_up']:
            assumed_csv_path = os.path.join(
                DATA_DIR, exp_stage, '{}.csv'.format(participant['id']))
            if assumed_csv_path in raw_data_csvs[exp_stage] and \
                    os.path.exists(assumed_csv_path):
                stage_df = get_csv_as_dataframe(assumed_csv_path)
                if exp_stage == 'experiment':
                    experiment_data = compile_experiment_data(stage_df)
                    participant.update(experiment_data)
                elif exp_stage == 'follow_up':
                    demographics = compile_demographic_data(stage_df)
                    participant.update(demographics)
                    # retrospective ratings only exist for participants
                    # who actually did the working memory task
                    if participant['passed_practice']:
                        retrospective = compile_retrospective_data(stage_df)
                        participant.update(retrospective)
            elif (exp_stage == 'experiment' and
                  participant['passed_practice']) or \
                    exp_stage == 'follow_up':
                participant['missing_data'] = True

        # append compiled participant data to master list
        compiled_participants.append(participant)

    # export complete data set to CSV
    participants_df = pd.DataFrame.from_dict(compiled_participants)
    compiled_csv_path = os.path.join(DATA_DIR, 'compiled.csv')
    participants_df.to_csv(compiled_csv_path, encoding='utf-8')

    # create list of columns for alternative analysis
    first_columns = ['id', 'num_blocks']
    block_columns = []
    for i in range(1, 10):
        block_columns.append('accuracy_{}'.format(i))
        block_columns.append('discomfort_{}'.format(i))
        block_columns.append('effort_{}'.format(i))
    columns = (first_columns + sorted(block_columns))

    # export data for alternative analysis to CSV
    alt_analysis_df = participants_df[columns].copy()
    # DataFrame.sort() was deprecated in pandas 0.17 and removed in
    # 0.20; sort_values is the drop-in replacement.
    alt_analysis_df.sort_values(by='num_blocks', inplace=True)
    alt_analysis_csv_path = os.path.join(DATA_DIR, 'alt_compiled.csv')
    alt_analysis_df.to_csv(alt_analysis_csv_path, encoding='utf-8')
| mit |
VladimirTyrin/urbansim | urbansim/urbanchoice/tests/test_interaction.py | 7 | 1734 | import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from .. import interaction as inter
@pytest.fixture
def choosers():
    """Five choosers with a numeric attribute and their current choice."""
    data = {
        'var1': list(range(5, 10)),
        'thing_id': list('acegi'),
    }
    return pd.DataFrame(data)
@pytest.fixture
def alternatives():
    """Ten alternatives indexed by thing_id with two numeric attributes."""
    idx = pd.Index(list('abcdefghij'), name='thing_id')
    data = {
        'var2': list(range(10, 20)),
        'var3': list(range(20, 30)),
    }
    return pd.DataFrame(data, index=idx)
def test_interaction_dataset_sim(choosers, alternatives):
    """With sample_size == len(alternatives), mnl_interaction_dataset
    should pair every chooser with every alternative, mark each
    chooser's chosen alternative in the first column of `chosen`, and
    merge chooser columns (repeated) with alternative columns (tiled).
    """
    sample, merged, chosen = inter.mnl_interaction_dataset(
        choosers, alternatives, len(alternatives))

    # chosen should be len(choosers) rows * len(alternatives) cols
    assert chosen.shape == (len(choosers), len(alternatives))
    # exactly one choice per chooser, always in column 0
    assert chosen[:, 0].sum() == len(choosers)
    assert chosen[:, 1:].sum() == 0

    # every chooser samples the full alternative index, in order
    npt.assert_array_equal(
        sample, list(alternatives.index.values) * len(choosers))

    assert len(merged) == len(choosers) * len(alternatives)
    npt.assert_array_equal(merged.index.values, sample)
    assert list(merged.columns) == [
        'var2', 'var3', 'join_index', 'thing_id', 'var1']

    # chooser columns are repeated across the alternatives axis...
    npt.assert_array_equal(
        merged['var1'].values,
        choosers['var1'].values.repeat(len(alternatives)))
    npt.assert_array_equal(
        merged['thing_id'].values,
        choosers['thing_id'].values.repeat(len(alternatives)))
    npt.assert_array_equal(
        merged['join_index'], choosers.index.values.repeat(len(alternatives)))
    # ...and alternative columns are tiled across the choosers axis
    npt.assert_array_equal(
        merged['var2'].values,
        np.tile(alternatives['var2'].values, len(choosers)))
    npt.assert_array_equal(
        merged['var3'].values,
        np.tile(alternatives['var3'].values, len(choosers)))
| bsd-3-clause |
VasLem/KinectPainting | construct_actions_table.py | 1 | 8161 | import os
from matplotlib import pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
import class_objects as co
import cv2
import action_recognition_alg as ara
from textwrap import wrap
def extract_valid_action_utterance(action, testing=False, *args, **kwargs):
    '''
    Load the frames of an action (or of the testing dataset) from the
    predefined locations in config.yaml, crop every frame around the largest
    hand-mask bounding box and return the crops plus synchronisation and
    ground-truth information.

    Returns (cropped_imgs, sync, ground_truth, breakpoints, labels); entries
    of cropped_imgs are None for frames without a usable hand mask.
    '''
    dataset_loc = '/media/vassilis/Thesis/Datasets/PersonalFarm/'
    ground_truth, breakpoints, labels = co.gd_oper.load_ground_truth(
        action, ret_labs=True, ret_breakpoints=True)
    images_base_loc = os.path.join(dataset_loc, 'actions',
                                   'sets' if not testing else 'whole_result')
    images_loc = os.path.join(images_base_loc, action.replace('_', ' ').title())
    imgs, masks, sync, angles, centers, samples_indices = \
        co.imfold_oper.load_frames_data(images_loc, masks_needed=True)
    # Pass 1: find the mask center of every frame and the largest box size.
    masks_centers = []
    xdim = 0
    ydim = 0
    for mask, img in zip(masks, imgs):
        # Frames whose masked area is too small are treated as invalid.
        if np.sum(mask * img > 0) < 500:
            masks_centers.append(None)
            continue
        conts = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_NONE)[1]
        conts_areas = [cv2.contourArea(cont) for cont in conts]
        x, y, w, h = cv2.boundingRect(conts[np.argmax(conts_areas)])
        if w == 0 or h == 0:
            masks_centers.append(None)
        else:
            # Stored as [row, col]; '//' keeps integer semantics on py2 and py3.
            masks_centers.append([y + h // 2, x + w // 2])
            xdim = max(w, xdim)
            ydim = max(h, ydim)
    # Pass 2: crop each valid frame around its center into a ydim x xdim patch.
    cropped_imgs = []
    for img, center in zip(imgs, masks_centers):
        if center is None:
            cropped_imgs.append(None)
            continue
        cropped_img = img[max(0, center[0] - ydim // 2):
                          min(img.shape[0], center[0] + ydim // 2),
                          max(0, center[1] - xdim // 2):
                          # BUGFIX: column bound must use shape[1] (width),
                          # not shape[0] (height) as the original did.
                          min(img.shape[1], center[1] + xdim // 2)]
        inp_img = np.zeros((ydim, xdim))
        inp_img[:cropped_img.shape[0], :cropped_img.shape[1]] = cropped_img
        cropped_imgs.append(inp_img)
    return cropped_imgs, sync, ground_truth, breakpoints, labels
def construct_table(action_type):
    """Render a table figure with SHOWN_IMS sample frames per gesture of the
    given type ('dynamic' or 'passive') and save it as
    <results_fold>/Classification/Total/<action_type>actions_vocabulary.pdf.
    """
    fil = os.path.join(co.CONST['rosbag_location'],
                       'gestures_type.csv')
    if os.path.exists(fil):
        with open(fil, 'r') as inp:
            for line in inp:
                if line.split(':')[0].lower() == action_type.lower():
                    used_actions = line.split(
                        ':')[1].rstrip('\n').split(',')
    else:
        # NOTE(review): bare Exception with no message; also, if no line
        # matches action_type, used_actions stays unbound (NameError below).
        raise Exception()
    SHOWN_IMS = 10
    actions = [action for action in os.listdir(co.CONST['actions_path'])
               if action in used_actions]
    print actions
    images = []
    for action in actions:
        print 'Processing', action
        whole = os.path.join(co.CONST['actions_path'], action)
        cnt = 0
        (frames, frames_sync,
         ground_truth, breakpoints, labels) = \
            extract_valid_action_utterance(action.replace(' ', '_').lower())
        # Pick the first utterance long enough and with <10% invalid frames.
        for (start, end) in zip(breakpoints[action][0],
                                breakpoints[action][1]):
            if (start in frames_sync
                    and end in frames_sync and
                    end - start > SHOWN_IMS):
                rat_of_nans = sum([img is None for img
                                   in frames[frames_sync.index(start):
                                             frames_sync.index(end)]]) / float(
                    end - start + 1)
                if rat_of_nans < 0.1:
                    break
            cnt += 1
        masks = os.path.join(whole, co.CONST['hnd_mk_fold_name'], str(cnt))
        data = os.path.join(whole, co.CONST['mv_obj_fold_name'], str(cnt))
        start = breakpoints[action][0][cnt]
        end = breakpoints[action][1][cnt]
        angles = []
        with open(os.path.join(data, 'angles.txt'), 'r') as inp:
            angles += map(float, inp)  # py2: map returns a list
        centers = []
        with open(os.path.join(data, 'centers.txt'), 'r') as inp:
            for line in inp:
                center = [
                    float(num) for num
                    in line.split(' ')]
                centers += [center]
        fils = sorted(os.listdir(masks))
        # py2-only: filter on a str returns a str of the digit characters.
        inds = np.array([int(filter(str.isdigit, fil)) for fil in fils])
        imgset = []
        prev_size = 0
        # Sample SHOWN_IMS frames evenly, skipping masks that shrank too much.
        for ind in (np.linspace(0, len(fils) - 1, SHOWN_IMS)).astype(int):
            count = ind
            while True:
                # NOTE(review): reads fils[ind] while 'count' advances —
                # presumably fils[count] was intended; confirm before fixing.
                mask = cv2.imread(os.path.join(masks, fils[ind]), 0) > 0
                if np.sum(mask) > 0.6 * (prev_size) or count == len(inds) - 1:
                    prev_size = np.sum(mask)
                    break
                else:
                    count += 1
            img = (cv2.imread(os.path.join(data, fils[count]), -1) *
                   mask)
            processed_img = co.pol_oper.derotate(
                img,
                angles[count], centers[count])
            img, _, _ = ara.prepare_im(processed_img, square=True)
            # Pad 5px left/right with grey (155) to separate table cells.
            img = np.pad(cv2.equalizeHist(img.astype(np.uint8)), [[0, 0], [5, 5]],
                         mode='constant', constant_values=155)
            imgset.append(img)
        images.append(imgset)
    images = np.array(images)
    images = list(images)
    im_indices = np.arange(SHOWN_IMS)
    # Normalized text-placement coordinates inside each label cell.
    left, width = .25, .5
    bottom, height = .25, .5
    right = left + width
    top = bottom + height
    # One row per gesture: one label cell + SHOWN_IMS image cells.
    gs = gridspec.GridSpec(len(images), 1 + SHOWN_IMS)
    gs.update(wspace=0.0, hspace=0.0)
    fig = plt.figure(figsize=(1 + SHOWN_IMS, len(images)))
    fig_axes = fig.add_subplot(gs[:, :], adjustable='box-forced')
    fig_axes.set_xticklabels([])
    fig_axes.set_yticklabels([])
    fig_axes.set_xticks([])
    fig_axes.set_yticks([])
    fig_axes.set_aspect('auto')
    fig.subplots_adjust(wspace=0, hspace=0)
    im_inds = np.arange(len(images) * (1 + SHOWN_IMS)).reshape(
        len(images), 1 + SHOWN_IMS)[:, 1:].ravel()
    txt_inds = np.arange(len(images) * (1 + SHOWN_IMS)).reshape(
        len(images), 1 + SHOWN_IMS)[:, :1].ravel()
    axes = [fig.add_subplot(gs[i]) for i in range(len(images) *
                                                  (1 + SHOWN_IMS))]
    im_axes = list(np.array(axes)[im_inds])
    for axis in axes:
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_xticks([])
        axis.set_yticks([])
    ax_count = 0
    for im_set_count in range(len(images)):
        for im_count in list(im_indices):
            im_shape = list(images[im_set_count])[im_count].shape
            axes[im_inds[ax_count]].imshow(list(images[im_set_count])[
                im_count], aspect='auto', cmap='gray')
            axes[im_inds[ax_count]].set_xlim((0, max(im_shape)))
            axes[im_inds[ax_count]].set_ylim((0, max(im_shape)))
            ax_count += 1
    ax_count = 0
    info = np.array(actions)
    for im_count in range(len(images)):
        # Wrap long gesture names to at most 10 chars per line.
        text = ('\n').join(wrap(info[im_count], 10))
        axes[txt_inds[ax_count]].text(0.5 * (left + right), 0.5 * (bottom + top),
                                      text,
                                      horizontalalignment='center',
                                      verticalalignment='center',
                                      fontsize=9)
        ax_count += 1
    cellText = [['Gesture'] + [str(i) for i in range(SHOWN_IMS)]]
    col_table = fig_axes.table(cellText=cellText,
                               cellLoc='center',
                               loc='top')
    save_fold = os.path.join(co.CONST['results_fold'],
                             'Classification',
                             'Total')
    co.makedir(save_fold)
    plt.savefig(os.path.join(save_fold, action_type + 'actions_vocabulary.pdf'))
# Build one vocabulary figure per gesture family.
construct_table('dynamic')
construct_table('passive')
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
# Shared fixtures: iris shuffled with a fixed seed for reproducibility;
# a sorted CSR copy additionally exercises the sparse code path.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
    """Reference perceptron trained with the classic mistake-driven rule.

    Expects labels in {+1, -1}; learns a weight vector ``w`` and bias ``b``.
    """

    def __init__(self, n_iter=1):
        # Number of full passes over the training set.
        self.n_iter = n_iter

    def fit(self, X, y):
        """Learn ``w`` and ``b`` from samples X and +1/-1 labels y."""
        _, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0
        for _ in range(self.n_iter):
            for xi, yi in zip(X, y):
                # Update only when the current sample is misclassified.
                if self.predict(xi)[0] != yi:
                    self.w += yi * xi
                    self.b += yi

    def project(self, X):
        """Return the signed decision value(s) X.w + b."""
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        """Return sign predictions (+1/-1, or 0 on the boundary)."""
        return np.sign(self.project(np.atleast_2d(X)))
def test_perceptron_accuracy():
    # Dense and sparse inputs should both reach well-above-chance accuracy.
    for data in (X, X_csr):
        clf = Perceptron(n_iter=30, shuffle=False)
        clf.fit(data, y)
        score = clf.score(data, y)
        assert_true(score >= 0.7)
def test_perceptron_correctness():
    # With shuffling disabled, sklearn's Perceptron must reproduce the
    # reference MyPerceptron weights on a binary (+1/-1) relabelled iris.
    y_bin = y.copy()
    y_bin[y != 1] = -1
    clf1 = MyPerceptron(n_iter=2)
    clf1.fit(X, y_bin)
    clf2 = Perceptron(n_iter=2, shuffle=False)
    clf2.fit(X, y_bin)
    assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
    # Probabilistic predictions are undefined for a plain perceptron.
    clf = Perceptron()
    for meth in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
zorroblue/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 81 | 2525 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
# Shear the blobs so the classes are not axis-aligned.
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)

for multi_class in ('multinomial', 'ovr'):
    clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
                             multi_class=multi_class).fit(X, y)

    # print the training scores
    print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))

    # create a mesh to plot in
    h = .02  # step size in the mesh
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
    plt.axis('tight')

    # Plot also the training points
    colors = "bry"
    for i, color in zip(clf.classes_, colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired,
                    edgecolor='black', s=20)

    # Plot the three one-against-all classifiers
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    coef = clf.coef_
    intercept = clf.intercept_

    def plot_hyperplane(c, color):
        # Draw the line coef[c,0]*x + coef[c,1]*y + intercept[c] = 0.
        def line(x0):
            return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
        plt.plot([xmin, xmax], [line(xmin), line(xmax)],
                 ls="--", color=color)

    for i, color in zip(clf.classes_, colors):
        plot_hyperplane(i, color)

plt.show()
| bsd-3-clause |
poine/rosmip | rosmip/rosmip_control/scripts/test_sfb_ctl_on_ann.py | 1 | 6165 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np, matplotlib.pyplot as plt
import scipy.signal, scipy.optimize, control, control.matlab
import keras
import pdb
import julie_misc.plot_utils as jpu
import ident_plant
# https://programtalk.com/vs2/?source=python/10610/python-control/external/yottalab.py
def d2c(sys, method='zoh'):
    """Discrete to continuous conversion (ZOH method by default).

    Call:
    sysc = d2c(sys, method='zoh')

    Parameters
    ----------
    sys : System in statespace or Tf form
    method : 'zoh' or 'bi'

    Returns
    -------
    sysc : continuous system ss or tf

    NOTE(review): the 'foh' and 'bi' branches reference names that are not
    imported in this file (mat, eye, logm, real, around, eigvals, inv, sqrt,
    sp, tf2ss, ss2tf) and would raise NameError — only 'zoh' is usable as-is.
    """
    flag = 0
    if isinstance(sys, control.TransferFunction):
        sys = tf2ss(sys)
        flag = 1
    a = sys.A
    b = sys.B
    c = sys.C
    d = sys.D
    Ts = sys.dt
    n = np.shape(a)[0]
    nb = np.shape(b)[1]
    nc = np.shape(c)[0]  # unused
    tol = 1e-12  # unused
    if method == 'zoh':
        if n == 1:
            if b[0, 0] == 1:
                A = 0
                B = b / sys.dt
                C = c
                D = d
        else:
            # Matrix logarithm of the augmented [[A, B], [0, I]] block,
            # divided by the sample time, recovers the continuous A and B.
            tmp1 = np.hstack((a, b))
            tmp2 = np.hstack((np.zeros((nb, n)), np.eye(nb)))
            tmp = np.vstack((tmp1, tmp2))
            s = scipy.linalg.logm(tmp)
            s = s / Ts
            if np.linalg.norm(np.imag(s), np.inf) > np.sqrt(np.finfo(float).eps):
                print "Warning: accuracy may be poor"
            s = np.real(s)
            A = s[0:n, 0:n]
            B = s[0:n, n:n + nb]
            C = c
            D = d
    elif method == 'foh':
        a = mat(a)
        b = mat(b)
        c = mat(c)
        d = mat(d)
        Id = mat(eye(n))
        A = logm(a) / Ts
        A = real(around(A, 12))
        Amat = mat(A)
        B = (a - Id) ** (-2) * Amat ** 2 * b * Ts
        B = real(around(B, 12))
        Bmat = mat(B)
        C = c
        D = d - C * (Amat ** (-2) / Ts * (a - Id) - Amat ** (-1)) * Bmat
        D = real(around(D, 12))
    elif method == 'bi':
        a = mat(a)
        b = mat(b)
        c = mat(c)
        d = mat(d)
        poles = eigvals(a)
        if any(abs(poles - 1) < 200 * sp.finfo(float).eps):
            print "d2c: some poles very close to one. May get bad results."
        I = mat(eye(n, n))
        tk = 2 / sqrt(Ts)
        A = (2 / Ts) * (a - I) * inv(a + I)
        iab = inv(I + a) * b
        B = tk * iab
        C = tk * (c * inv(I + a))
        D = d - (c * iab)
    else:
        print "Method not supported"
        return
    sysc = control.StateSpace(A, B, C, D)
    if flag == 1:
        sysc = ss2tf(sysc)
    return sysc
def dlqr(A, B, Q, R):
    """Linear quadratic regulator design for discrete systems

    Usage
    =====
    [K, S, E] = dlqr(A, B, Q, R)

    The dlqr() function computes the optimal state feedback controller
    that minimizes the quadratic cost
    J = \sum_0^\infty x' Q x + u' R u + 2 x' N u

    Inputs
    ------
    A, B: 2-d arrays with dynamics and input matrices
    Q, R: 2-d array with state and input weight matrices

    Outputs
    -------
    K: 2-d array with state feedback gains
    S: 2-d array with solution to Riccati equation
    E: 1-d array with eigenvalues of the closed loop system
    """
    # Check dimensions for consistency
    nstates = B.shape[0];
    ninputs = B.shape[1];
    if (A.shape[0] != nstates or A.shape[1] != nstates):
        # NOTE(review): ControlDimension is not defined/imported in this
        # file — hitting either branch raises NameError instead.
        raise ControlDimension("inconsistent system dimensions")
    elif (Q.shape[0] != nstates or Q.shape[1] != nstates or
          R.shape[0] != ninputs or R.shape[1] != ninputs):
        raise ControlDimension("incorrect weighting matrix dimensions")
    Ao = A
    Qo = Q
    # Solve the riccati equation
    (X, L, G) = control.dare(Ao, B, Qo, R)
    # Now compute the return value
    Phi = np.mat(A)
    H = np.mat(B)
    K = np.linalg.inv(H.T * X * H + R) * (H.T * X * Phi)
    # L is rebound here: (eigenvalues, eigenvectors) of the closed loop.
    L = np.linalg.eig(Phi - H * K)
    return K, X, L
def sim_cl_ann(ann, K, dt=0.01):
    """Simulate the closed loop (ANN plant model + state feedback u = -K x)
    for 15 s from a small initial disturbance and plot phi, gamma, theta.

    State layout: [phi, gamma, phi_dot, gamma_dot, theta, theta_dot].
    """
    time = np.arange(0, 15, dt)
    X, U = np.zeros((len(time), 6)), np.zeros((len(time), 2))
    phi0, gamma0, theta0 = np.deg2rad(1), np.deg2rad(1), np.deg2rad(1)
    X[0] = [phi0, gamma0, 0, 0, theta0, 0]
    for k in range(1, len(time)):
        U[k - 1] = -np.dot(K, X[k - 1])
        # The ANN predicts the next state from [state, input] at k-1.
        _in_km1 = np.hstack((X[k - 1], U[k - 1]))[np.newaxis, :]
        X[k] = ann.predict(_in_km1)
    ax = plt.subplot(3, 1, 1)
    plt.plot(time, np.rad2deg(X[:, 0]))
    jpu. decorate(ax, title='phi', xlab='time in s', ylab='deg', legend=True)
    ax = plt.subplot(3, 1, 2)
    plt.plot(time, np.rad2deg(X[:, 1]))
    jpu. decorate(ax, title='gamma', xlab='time in s', ylab='deg', legend=True)
    ax = plt.subplot(3, 1, 3)
    plt.plot(time, np.rad2deg(X[:, 4]))
    jpu. decorate(ax, title='theta', xlab='time in s', ylab='deg', legend=True)
    plt.show()
def main(dt=0.01):
    """Load the identified ANN plant model, design a state-feedback gain
    (discrete LQR by default; the if 0/if 1 switches select the method)
    and simulate the closed loop."""
    ann = ident_plant.ANN('/tmp/rosmip_ann.h5')
    Ad, Bd = ann.report()
    # phi, gamma, phi_dot, gamma_dot, theta, theta_dot
    if 0:  # continuous time LQR (disabled)
        C, D = [1, 0, 0, 0, 0, 0], [0, 0]
        ss_d = control.StateSpace(Ad, Bd, C, D, dt)
        ss_c = d2c(ss_d)
        #Ac = scipy.linalg.logm(Ad)/dt
        #tmp = np.linalg.inv(Ad-np.eye(Ad.shape[0]))
        #Bc = np.dot(tmp, np.dot(Ac, Bd))
        Q = np.diag([1., 1., 0.1, 0.1, 0.1, 0.01])
        R = np.diag([6, 6])
        (K, X, E) = control.matlab.lqr(ss_c.A, ss_c.B, Q, R)
        print('gain\n{}'.format(K))
        Acl = ss_c.A - np.dot(ss_c.B, K)
        eva, eve = np.linalg.eig(Acl)
        print('continuous time closed loop poles\n{}'.format(eva))
        #pdb.set_trace()
    if 1:  # discrete time LQR (active branch)
        Q = np.diag([20., 20., 0.1, 0.5, 0.1, 0.01])
        R = np.diag([6, 6])
        (K, X, E) = dlqr(Ad, Bd, Q, R)
        print('gain\n{}'.format(K))
        print('closed loop discrete time poles {}'.format(E[0]))
        # Map discrete poles z to continuous poles s = ln(z)/dt.
        cl_polesc = np.log(E[0]) / dt
        print('closed loop continuous time poles {}'.format(cl_polesc))
        #pdb.set_trace()
    if 0:  # discrete time pole placement (disabled)
        poles = [-12 + 12j, -12 - 12j, -6.5 + 1j, -6.5 - 1j, -1 - 1j, -1 + 1j]
        K = control.matlab.place(Ad, Bd, poles)
    sim_cl_ann(ann, K)


if __name__ == "__main__":
    #logging.basicConfig(level=logging.INFO)
    # float64 throughout so the ANN matches the numpy/control computations.
    keras.backend.set_floatx('float64')
    np.set_printoptions(precision=2, linewidth=300)
    main()
| gpl-3.0 |
graphistry/pygraphistry | graphistry/tests/test_hypergraph.py | 1 | 9926 | # -*- coding: utf-8 -*-
import datetime as dt, logging, pandas as pd, pyarrow as pa
import graphistry, graphistry.plotter
from common import NoAuthTestCase
logger = logging.getLogger(__name__)
# Default node-id column name used by the plotter (not used below directly).
nid = graphistry.plotter.Plotter._defaultNodeId

# Simple three-node fixture with unicode column and value names.
triangleNodesDict = {
    'id': ['a', 'b', 'c'],
    'a1': [1, 2, 3],
    'a2': ['red', 'blue', 'green'],
    '🙈': ['æski ēˈmōjē', '😋', 's']
}
triangleNodes = pd.DataFrame(triangleNodesDict)
# Small mixed-type frame for direct-hypergraph tests.
hyper_df = pd.DataFrame({'aa': [0, 1, 2], 'bb': ['a', 'b', 'c'], 'cc': ['b', 0, 1]})
# A deliberately "evil" edge table covering many dtypes (lists, dates,
# unicode, timedeltas) used to stress-test hypergraph serialization.
squareEvil = pd.DataFrame({
    'src': [0, 1, 2, 3],
    'dst': [1, 2, 3, 0],
    'colors': [1, 1, 2, 2],
    'list_int': [[1], [2, 3], [4], []],
    'list_str': [['x'], ['1', '2'], ['y'], []],
    'list_bool': [[True], [True, False], [False], []],
    'list_date_str': [['2018-01-01 00:00:00'], ['2018-01-02 00:00:00', '2018-01-03 00:00:00'], ['2018-01-05 00:00:00'], []],
    'list_date': [[pd.Timestamp('2018-01-05')], [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')], [], []],
    'list_mixed': [[1], ['1', '2'], [False, None], []],
    'bool': [True, False, True, True],
    'char': ['a', 'b', 'c', 'd'],
    'str': ['a', 'b', 'c', 'd'],
    'ustr': [u'a', u'b', u'c', u'd'],
    'emoji': ['😋', '😋😋', '😋', '😋'],
    'int': [0, 1, 2, 3],
    'num': [0.5, 1.5, 2.5, 3.5],
    'date_str': ['2018-01-01 00:00:00', '2018-01-02 00:00:00', '2018-01-03 00:00:00', '2018-01-05 00:00:00'],
    # API 1 BUG: Try with https://github.com/graphistry/pygraphistry/pull/126
    'date': [dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1)],
    'time': [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')],
    # API 2 BUG: Need timedelta in https://github.com/graphistry/pygraphistry/blob/master/graphistry/vgraph.py#L108
    'delta': [pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day')]
})
# Add a categorical twin ('<col>_cat') for every column that supports it.
for c in squareEvil.columns:
    try:
        squareEvil[c + '_cat'] = squareEvil[c].astype('category')
    except TypeError:
        # List-valued columns are unhashable and cannot be categorical.
        # (The original bare `except:` with a stray `1` silently swallowed
        # *all* errors; narrow it to the expected TypeError.)
        pass
def assertFrameEqual(df1, df2, **kwds):
    """Assert that two dataframes are equal, ignoring the order of columns."""
    from pandas.testing import assert_frame_equal
    # Sorting columns lexicographically makes the comparison order-insensitive.
    left = df1.sort_index(axis=1)
    right = df2.sort_index(axis=1)
    return assert_frame_equal(left, right, check_names=True, **kwds)
class TestHypergraphPlain(NoAuthTestCase):
    # End-to-end shape checks of graphistry.hypergraph() output
    # (entities/nodes/edges/events) in hyper and direct modes, plus
    # Arrow round-trip smoke tests.

    def test_hyperedges(self):
        h = graphistry.hypergraph(triangleNodes, verbose=False)
        self.assertEqual(len(h.keys()), len(['entities', 'nodes', 'edges', 'events', 'graph']))
        # One hyperedge per (row, attribute) pair: 3 rows x 4 columns = 12.
        edges = pd.DataFrame({
            'a1': [1, 2, 3] * 4,
            'a2': ['red', 'blue', 'green'] * 4,
            'id': ['a', 'b', 'c'] * 4,
            '🙈': ['æski ēˈmōjē', '😋', 's'] * 4,
            'edgeType': ['a1', 'a1', 'a1', 'a2', 'a2', 'a2', 'id', 'id', 'id', '🙈', '🙈', '🙈'],
            'attribID': [
                'a1::1', 'a1::2', 'a1::3',
                'a2::red', 'a2::blue', 'a2::green',
                'id::a', 'id::b', 'id::c',
                '🙈::æski ēˈmōjē', '🙈::😋', '🙈::s'],
            'EventID': ['EventID::0', 'EventID::1', 'EventID::2', 'EventID::0', 'EventID::1', 'EventID::2', 'EventID::0', 'EventID::1', 'EventID::2', 'EventID::0', 'EventID::1', 'EventID::2']})
        assertFrameEqual(h['edges'], edges)
        for (k, v) in [('entities', 12), ('nodes', 15), ('edges', 12), ('events', 3)]:
            self.assertEqual(len(h[k]), v)

    def test_hyperedges_direct(self):
        h = graphistry.hypergraph(hyper_df, verbose=False, direct=True)
        self.assertEqual(len(h['edges']), 9)
        self.assertEqual(len(h['nodes']), 9)

    def test_hyperedges_direct_categories(self):
        # Merging all columns into one category collapses duplicate values.
        h = graphistry.hypergraph(hyper_df, verbose=False, direct=True, opts={'CATEGORIES': {'n': ['aa', 'bb', 'cc']}})
        self.assertEqual(len(h['edges']), 9)
        self.assertEqual(len(h['nodes']), 6)

    def test_hyperedges_direct_manual_shaping(self):
        h1 = graphistry.hypergraph(hyper_df, verbose=False, direct=True, opts={'EDGES': {'aa': ['cc'], 'cc': ['cc']}})
        self.assertEqual(len(h1['edges']), 6)
        h2 = graphistry.hypergraph(hyper_df, verbose=False, direct=True, opts={'EDGES': {'aa': ['cc', 'bb', 'aa'], 'cc': ['cc']}})
        self.assertEqual(len(h2['edges']), 12)

    def test_drop_edge_attrs(self):
        # With drop_edge_attrs=True the original row columns are omitted.
        h = graphistry.hypergraph(triangleNodes, ['id', 'a1', '🙈'], verbose=False, drop_edge_attrs=True)
        self.assertEqual(len(h.keys()), len(['entities', 'nodes', 'edges', 'events', 'graph']))
        edges = pd.DataFrame({
            'edgeType': ['a1', 'a1', 'a1', 'id', 'id', 'id', '🙈', '🙈', '🙈'],
            'attribID': [
                'a1::1', 'a1::2', 'a1::3',
                'id::a', 'id::b', 'id::c',
                '🙈::æski ēˈmōjē', '🙈::😋', '🙈::s'],
            'EventID': ['EventID::0', 'EventID::1', 'EventID::2', 'EventID::0', 'EventID::1', 'EventID::2', 'EventID::0', 'EventID::1', 'EventID::2']})
        assertFrameEqual(h['edges'], edges)
        for (k, v) in [('entities', 9), ('nodes', 12), ('edges', 9), ('events', 3)]:
            self.assertEqual(len(h[k]), v)

    def test_drop_edge_attrs_direct(self):
        h = graphistry.hypergraph(triangleNodes,
                                  ['id', 'a1', '🙈'],
                                  verbose=False, direct=True, drop_edge_attrs=True,
                                  opts={
                                      'EDGES': {
                                          'id': ['a1'],
                                          'a1': ['🙈']
                                      }
                                  })
        logger.debug('h.nodes: %s', h['graph']._nodes)
        logger.debug('h.edges: %s', h['graph']._edges)
        self.assertEqual(len(h.keys()), len(['entities', 'nodes', 'edges', 'events', 'graph']))
        edges = pd.DataFrame({
            'edgeType': ['a1::🙈', 'a1::🙈', 'a1::🙈', 'id::a1', 'id::a1', 'id::a1'],
            'src': [
                'a1::1', 'a1::2', 'a1::3',
                'id::a', 'id::b', 'id::c'],
            'dst': [
                '🙈::æski ēˈmōjē', '🙈::😋', '🙈::s',
                'a1::1', 'a1::2', 'a1::3'],
            'EventID': [
                'EventID::0', 'EventID::1', 'EventID::2',
                'EventID::0', 'EventID::1', 'EventID::2']})
        assertFrameEqual(h['edges'], edges)
        for (k, v) in [('entities', 9), ('nodes', 9), ('edges', 6), ('events', 0)]:
            # NOTE(review): logger.error used for plain progress output —
            # looks like leftover debugging; consider logger.debug.
            logger.error('testing: %s', k)
            logger.error('actual: %s', h[k])
            self.assertEqual(len(h[k]), v)

    def test_drop_na_hyper(self):
        df = pd.DataFrame({
            'a': ['a', None, 'c'],
            'i': [1, 2, None]
        })
        hg = graphistry.hypergraph(df, drop_na=True)
        assert len(hg['graph']._nodes) == 7
        assert len(hg['graph']._edges) == 4

    def test_drop_na_direct(self):
        df = pd.DataFrame({
            'a': ['a', None, 'a'],
            'i': [1, 1, None]
        })
        hg = graphistry.hypergraph(df, drop_na=True, direct=True)
        assert len(hg['graph']._nodes) == 2
        assert len(hg['graph']._edges) == 1

    def test_skip_na_hyperedge(self):
        # NaN cells should be skipped whether or not edge attrs are dropped.
        nans_df = pd.DataFrame({
            'x': ['a', 'b', 'c'],
            'y': ['aa', None, 'cc']
        })
        expected_hits = ['a', 'b', 'c', 'aa', 'cc']
        skip_attr_h_edges = graphistry.hypergraph(nans_df, drop_edge_attrs=True)['edges']
        self.assertEqual(len(skip_attr_h_edges), len(expected_hits))
        default_h_edges = graphistry.hypergraph(nans_df)['edges']
        self.assertEqual(len(default_h_edges), len(expected_hits))

    def test_hyper_evil(self):
        # Smoke test: must not raise on the mixed-dtype frame.
        graphistry.hypergraph(squareEvil)

    def test_hyper_to_pa_vanilla(self):
        df = pd.DataFrame({
            'x': ['a', 'b', 'c'],
            'y': ['d', 'e', 'f']
        })
        hg = graphistry.hypergraph(df)
        nodes_arr = pa.Table.from_pandas(hg['graph']._nodes)
        assert len(nodes_arr) == 9
        edges_err = pa.Table.from_pandas(hg['graph']._edges)
        assert len(edges_err) == 6

    def test_hyper_to_pa_mixed(self):
        df = pd.DataFrame({
            'x': ['a', 'b', 'c'],
            'y': [1, 2, 3]
        })
        hg = graphistry.hypergraph(df)
        nodes_arr = pa.Table.from_pandas(hg['graph']._nodes)
        assert len(nodes_arr) == 9
        edges_err = pa.Table.from_pandas(hg['graph']._edges)
        assert len(edges_err) == 6

    def test_hyper_to_pa_na(self):
        # Nodes/edges with NaNs must still convert to Arrow without loss.
        df = pd.DataFrame({
            'x': ['a', None, 'c'],
            'y': [1, 2, None]
        })
        hg = graphistry.hypergraph(df, drop_na=False)
        nodes_arr = pa.Table.from_pandas(hg['graph']._nodes)
        assert len(hg['graph']._nodes) == 9
        assert len(nodes_arr) == 9
        edges_err = pa.Table.from_pandas(hg['graph']._edges)
        assert len(hg['graph']._edges) == 6
        assert len(edges_err) == 6

    def test_hyper_to_pa_all(self):
        hg = graphistry.hypergraph(triangleNodes, ['id', 'a1', '🙈'])
        nodes_arr = pa.Table.from_pandas(hg['graph']._nodes)
        assert len(hg['graph']._nodes) == 12
        assert len(nodes_arr) == 12
        edges_err = pa.Table.from_pandas(hg['graph']._edges)
        assert len(hg['graph']._edges) == 9
        assert len(edges_err) == 9

    def test_hyper_to_pa_all_direct(self):
        hg = graphistry.hypergraph(triangleNodes, ['id', 'a1', '🙈'], direct=True)
        nodes_arr = pa.Table.from_pandas(hg['graph']._nodes)
        assert len(hg['graph']._nodes) == 9
        assert len(nodes_arr) == 9
        edges_err = pa.Table.from_pandas(hg['graph']._edges)
        assert len(hg['graph']._edges) == 9
        assert len(edges_err) == 9
| bsd-3-clause |
yonglehou/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15  # NOTE(review): unused in this example; leftover from KNN demo

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target

h = .02  # step size in the mesh

# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

for shrinkage in [None, 0.1]:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = NearestCentroid(shrink_threshold=shrinkage)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # Training accuracy for this shrink threshold.
    print(shrinkage, np.mean(y == y_pred))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.title("3-Class classification (shrink_threshold=%r)"
              % shrinkage)
    plt.axis('tight')

plt.show()
| bsd-3-clause |
dpaiton/OpenPV | pv-core/analysis/python/rate_estimation.py | 1 | 2913 | #!/usr/bin/python
# script to estimate the firing rate in L1 versus the background rate in retina
# copy this script to the location of your sandbox (STDP, marian, etc)
import os
import re # regular expression module
import time
import sys
import numpy as np
#import matplotlib.pyplot as plt
#import matplotlib.mlab as mlab
sys.path.append('/Users/manghel/Documents/workspace/PetaVision/analysis/python/')
import PVReadWeights as rw
import PVReadSparse as rs
path = '/Users/manghel/Documents/workspace/STDP/'
if len(sys.argv) < 4:
print "usage: python rate_estimation timeSteps rateSteps dT"
exit()
def modify_input(p):
    """Create input/params.stdp from input/params.base, overriding the
    retina background-noise parameter noiseOffFreq with p (in Hz).

    Returns 0 on success (kept for backward compatibility with callers).
    """
    # 'with' guarantees both files are closed even if an error occurs
    # (the original opened/closed them manually and leaked on exceptions).
    with open(path + 'input/params.base', 'r') as inp, \
         open(path + 'input/params.stdp', 'w') as output:
        for line in inp:
            if line.find('noiseOffFreq') >= 0:
                output.write('   noiseOffFreq = ' + str(p) + ';\n')
            else:
                output.write(line)
    return 0
# end modify_input
def compute_rate(p, timeSteps, rateSteps, dT):
    """Average the L1 firing rate over the final rateSteps simulation steps
    (read from output/a1.pvp) and append '<p> <rate>' to output/rate.stdp.

    Returns the computed rate.
    """
    infile = path + 'output/' + 'a1.pvp'
    output = open(path + 'output/rate.stdp', 'a')
    # Only the asymptotic tail [beginTime, endTime] of the run is averaged.
    beginTime = (timeSteps - rateSteps) * dT
    endTime = timeSteps * dT
    s = rs.PVReadSparse(infile);
    rate = s.average_rate(beginTime, endTime)
    output.write(str(p) + ' ' + str(rate) + '\n')
    output.close()
    return rate
# end compute_rate
def compute_histogram(p):
    """Write the histogram of the final weight distribution (from
    output/w0_last.pvp) to output/w0_last_hist_<p>.dat, one bin per line."""
    infile = path + 'output/' + 'w0_last.pvp'
    output = open(path + 'output/w0_last_hist_' + str(p) + '.dat', 'w')
    w = rw.PVReadWeights(infile)
    h = w.histogram()
    for i in range(len(h)):
        output.write(str(h[i]) + '\n')
    output.close()
# end compute_histogram
"""
Main code:
- modifies the params.stdp file to set the retina
background noise.
- compute time-averaged firing rate in L1.
- compute the histogram of the weights distribution.
"""
p = 10 # starting background retina noise (Hz)
timeSteps = sys.argv[1] # length of simulation (timeSteps)
rateSteps = sys.argv[2] # asymptotic time steps used to compute rate
dT = sys.argv[3] # simulation time step interval
print '\ntimeSteps = %s rateSteps = %s dT = %s \n' % (timeSteps,rateSteps,dT)
while p <= 100:
print 'run model for noiseOffFreq = %f' % p
modify_input(p)
#time.sleep(10)
cmd = path + '/Debug/stdp -n ' + timeSteps + ' -p ' + path + '/input/params.stdp'
#print cmd
os.system(cmd)
rate = compute_rate(p, float(timeSteps), float(rateSteps), float(dT) )
print ' p = %f rate = %f \n' % (p,rate)
# compute histogram
compute_histogram(p)
# remove files
cmd = 'rm ' + path + '/output/images/*'
os.system(cmd)
cmd = 'rm ' + path + '/output/*.pvp'
os.system(cmd)
p += 100
| epl-1.0 |
snnn/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
    """Multi-output tests."""

    def testMultiRegression(self):
        # Fit a 2-output linear regressor on noisy pi*sin / pi*cos targets
        # and check the training MSE stays under a loose bound.
        random.seed(42)
        rng = np.random.RandomState(1)
        x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
        y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
        regressor = learn.LinearRegressor(
            feature_columns=learn.infer_real_valued_columns_from_input(x),
            label_dimension=2)
        regressor.fit(x, y, steps=100)
        score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
        self.assertLess(score, 10, "Failed with score = {0}".format(score))


if __name__ == "__main__":
    test.main()
| apache-2.0 |
uwescience/pulse2percept | pulse2percept/implants/base.py | 1 | 8549 | """`ProsthesisSystem`"""
import numpy as np
from copy import deepcopy
from .electrodes import Electrode
from .electrode_arrays import ElectrodeArray
from ..stimuli import Stimulus
from ..utils import PrettyPrint
class ProsthesisSystem(PrettyPrint):
"""Visual prosthesis system
A visual prosthesis combines an electrode array and (optionally) a
stimulus. This is the base class for prosthesis systems such as
:py:class:`~pulse2percept.implants.ArgusII` and
:py:class:`~pulse2percept.implants.AlphaIMS`.
.. versionadded:: 0.6
Parameters
----------
earray : :py:class:`~pulse2percept.implants.ElectrodeArray` or
:py:class:`~pulse2percept.implants.Electrode`
The electrode array used to deliver electrical stimuli to the retina.
stim : :py:class:`~pulse2percept.stimuli.Stimulus` source type
A valid source type for the :py:class:`~pulse2percept.stimuli.Stimulus`
object (e.g., scalar, NumPy array, pulse train).
eye : 'LE' or 'RE'
A string indicating whether the system is implanted in the left ('LE')
or right eye ('RE')
Examples
--------
A system in the left eye made from a single
:py:class:`~pulse2percept.implants.DiskElectrode` with radius
r=100um sitting at x=200um, y=-50um, z=10um:
>>> from pulse2percept.implants import DiskElectrode, ProsthesisSystem
>>> implant = ProsthesisSystem(DiskElectrode(200, -50, 10, 100), eye='LE')
.. note::
A stimulus can also be assigned later (see
:py:attr:`~pulse2percept.implants.ProsthesisSystem.stim`).
"""
# Frozen class: User cannot add more class attributes
__slots__ = ('_earray', '_stim', '_eye')
    def __init__(self, earray, stim=None, eye='RE'):
        # Assignments go through the property setters, which validate and
        # convert each argument (e.g. a single Electrode becomes an array).
        self.earray = earray
        self.stim = stim
        self.eye = eye
    def _pprint_params(self):
        """Return dict of class attributes to pretty-print"""
        # Presumably consumed by the PrettyPrint base class for repr output.
        return {'earray': self.earray, 'stim': self.stim, 'eye': self.eye}
    def check_stim(self, stim):
        """Quality-check the stimulus

        This method is executed every time a new value is assigned to ``stim``.
        No checks are performed by default, but the user can define their own
        checks in implants that inherit from
        :py:class:`~pulse2percept.implants.ProsthesisSystem`.

        Parameters
        ----------
        stim : :py:class:`~pulse2percept.stimuli.Stimulus` source type
            A valid source type for the
            :py:class:`~pulse2percept.stimuli.Stimulus` object (e.g., scalar,
            NumPy array, pulse train).
        """
        # Intentionally a no-op: subclasses override this to enforce
        # device-specific safety limits.
        pass
    def plot(self, annotate=False, autoscale=True, ax=None):
        """Plot

        Parameters
        ----------
        annotate : bool, optional
            Whether to annotate the plot (forwarded to the electrode array's
            own ``plot``; presumably labels the electrodes — the original
            docstring duplicated the ``autoscale`` description here).
        autoscale : bool, optional
            Whether to adjust the x,y limits of the plot to fit the implant
        ax : matplotlib.axes._subplots.AxesSubplot, optional
            A Matplotlib axes object. If None, will either use the current axes
            (if exists) or create a new Axes object.

        Returns
        -------
        ax : ``matplotlib.axes.Axes``
            Returns the axis object of the plot
        """
        # Delegates directly to the electrode array's plotting routine.
        return self.earray.plot(annotate=annotate, autoscale=autoscale, ax=ax)
@property
def earray(self):
"""Electrode array
"""
return self._earray
@earray.setter
def earray(self, earray):
"""Electrode array setter (called upon ``self.earray = earray``)"""
# Assign the electrode array:
if isinstance(earray, Electrode):
# For convenience, build an array from a single electrode:
earray = ElectrodeArray(earray)
if not isinstance(earray, ElectrodeArray):
raise TypeError("'earray' must be an ElectrodeArray object, not "
"%s." % type(earray))
self._earray = earray
@property
def stim(self):
"""Stimulus
A stimulus can be created from many source types, such as scalars,
NumPy arrays, and dictionaries (see
:py:class:`~pulse2percept.stimuli.Stimulus` for a complete list).
A stimulus can be assigned either in the
:py:class:`~pulse2percept.implants.ProsthesisSystem` constructor
or later by assigning a value to `stim`.
.. note::
Unless when using dictionary notation, the number of stimuli must
equal the number of electrodes in ``earray``.
Examples
--------
Send a biphasic pulse (30uA, 0.45ms phase duration) to an implant made
from a single :py:class:`~pulse2percept.implants.DiskElectrode`:
>>> from pulse2percept.implants import DiskElectrode, ProsthesisSystem
>>> from pulse2percept.stimuli import BiphasicPulse
>>> implant = ProsthesisSystem(DiskElectrode(0, 0, 0, 100))
>>> implant.stim = BiphasicPulse(30, 0.45)
Stimulate Electrode B7 in Argus II with 13 uA:
>>> from pulse2percept.implants import ArgusII
>>> implant = ArgusII(stim={'B7': 13})
"""
return self._stim
@stim.setter
def stim(self, data):
"""Stimulus setter (called upon ``self.stim = data``)"""
if data is None:
self._stim = None
else:
if isinstance(data, Stimulus):
# Already a stimulus object:
stim = Stimulus(data, extrapolate=True)
elif isinstance(data, dict):
# Electrode names already provided by keys:
stim = Stimulus(data, extrapolate=True)
else:
# Use electrode names as stimulus coordinates:
stim = Stimulus(data, electrodes=list(self.earray.keys()),
extrapolate=True)
# Make sure all electrode names are valid:
for electrode in stim.electrodes:
# Invalid index will return None:
if not self.earray[electrode]:
raise ValueError("Electrode '%s' not found in "
"implant." % electrode)
# Perform safety checks, etc.:
self.check_stim(stim)
# Store stimulus:
self._stim = deepcopy(stim)
@property
def eye(self):
"""Implanted eye
A :py:class:`~pulse2percept.implants.ProsthesisSystem` can be implanted
either in a left eye ('LE') or right eye ('RE'). Models such as
:py:class:`~pulse2percept.models.AxonMapModel` will treat left and
right eyes differently (for example, adjusting the location of the
optic disc).
Examples
--------
Implant Argus II in a left eye:
>>> from pulse2percept.implants import ArgusII
>>> implant = ArgusII(eye='LE')
"""
return self._eye
@eye.setter
def eye(self, eye):
"""Eye setter (called upon `self.eye = eye`)"""
if not isinstance(eye, str):
raise TypeError("'eye' must be a string, not %s." % type(eye))
eye = eye.upper()
if eye != 'LE' and eye != 'RE':
raise ValueError("'eye' must be either 'LE' or 'RE', not "
"%s." % eye)
self._eye = eye
@property
def n_electrodes(self):
"""Number of electrodes in the array
This is equivalent to calling ``earray.n_electrodes``.
"""
return self.earray.n_electrodes
def __getitem__(self, item):
return self.earray[item]
def __iter__(self):
return iter(self.earray)
def keys(self):
"""Return all electrode names in the electrode array"""
return self.earray.keys()
def values(self):
"""Return all electrode objects in the electrode array"""
return self.earray.values()
def items(self):
"""Return all electrode names and objects in the electrode array
Internally, electrodes are stored in a dictionary in
``earray.electrodes``. For convenience, electrodes can also be accessed
via ``items``.
Examples
--------
Save the x-coordinates of all electrodes of Argus I in a dictionary:
>>> from pulse2percept.implants import ArgusI
>>> xcoords = {}
>>> for name, electrode in ArgusI().items():
... xcoords[name] = electrode.x
"""
return self.earray.items()
| bsd-3-clause |
kambysese/mne-python | tutorials/machine-learning/plot_sensors_decoding.py | 4 | 17548 | r"""
===============
Decoding (MVPA)
===============
.. include:: ../../links.inc
Design philosophy
=================
Decoding (a.k.a. MVPA) in MNE largely follows the machine
learning API of the scikit-learn package.
Each estimator implements ``fit``, ``transform``, ``fit_transform``, and
(optionally) ``inverse_transform`` methods. For more details on this design,
visit scikit-learn_. For additional theoretical insights into the decoding
framework in MNE, see :footcite:`KingEtAl2018`.
For ease of comprehension, we will denote instantiations of the class using
the same name as the class but in small caps instead of camel cases.
Let's start by loading data for a simple two-class problem:
"""
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,
cross_val_multiscore, LinearModel, get_coef,
Vectorizer, CSP)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax = -0.200, 0.500
event_id = {'Auditory/Left': 1, 'Visual/Left': 3} # just use two
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# The subsequent decoding analyses only capture evoked responses, so we can
# low-pass the MEG data. Usually a value more like 40 Hz would be used,
# but here low-pass at 20 so we can more heavily decimate, and allow
# the example to run faster. The 2 Hz high-pass helps improve CSP.
raw.filter(2, 20)
events = mne.find_events(raw, 'STI 014')
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0.), preload=True,
reject=dict(grad=4000e-13, eog=150e-6), decim=10)
epochs.pick_types(meg=True, exclude='bads') # remove stim and EOG
del raw
X = epochs.get_data() # MEG signals: n_epochs, n_meg_channels, n_times
y = epochs.events[:, 2] # target: auditory left vs visual left
###############################################################################
# Transformation classes
# ======================
#
# Scaler
# ^^^^^^
# The :class:`mne.decoding.Scaler` will standardize the data based on channel
# scales. In the simplest modes ``scalings=None`` or ``scalings=dict(...)``,
# each data channel type (e.g., mag, grad, eeg) is treated separately and
# scaled by a constant. This is the approach used by e.g.,
# :func:`mne.compute_covariance` to standardize channel scales.
#
# If ``scalings='mean'`` or ``scalings='median'``, each channel is scaled using
# empirical measures. Each channel is scaled independently by the mean and
# standard deviation, or median and interquartile range, respectively, across
# all epochs and time points during :class:`~mne.decoding.Scaler.fit`
# (during training). The :meth:`~mne.decoding.Scaler.transform` method is
# called to transform data (training or test set) by scaling all time points
# and epochs on a channel-by-channel basis. To perform both the ``fit`` and
# ``transform`` operations in a single call, the
# :meth:`~mne.decoding.Scaler.fit_transform` method may be used. To invert the
# transform, :meth:`~mne.decoding.Scaler.inverse_transform` can be used. For
# ``scalings='median'``, scikit-learn_ version 0.17+ is required.
#
# .. note:: Using this class is different from directly applying
# :class:`sklearn.preprocessing.StandardScaler` or
# :class:`sklearn.preprocessing.RobustScaler` offered by
# scikit-learn_. These scale each *classification feature*, e.g.
# each time point for each channel, with mean and standard
# deviation computed across epochs, whereas
# :class:`mne.decoding.Scaler` scales each *channel* using mean and
# standard deviation computed across all of its time points
# and epochs.
#
# Vectorizer
# ^^^^^^^^^^
# Scikit-learn API provides functionality to chain transformers and estimators
# by using :class:`sklearn.pipeline.Pipeline`. We can construct decoding
# pipelines and perform cross-validation and grid-search. However scikit-learn
# transformers and estimators generally expect 2D data
# (n_samples * n_features), whereas MNE transformers typically output data
# with a higher dimensionality
# (e.g. n_samples * n_channels * n_frequencies * n_times). A Vectorizer
# therefore needs to be applied between the MNE and the scikit-learn steps
# like:
# Uses all MEG sensors and time points as separate classification
# features, so the resulting filters used are spatio-temporal
clf = make_pipeline(Scaler(epochs.info),
Vectorizer(),
LogisticRegression(solver='lbfgs'))
scores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
score = np.mean(scores, axis=0)
print('Spatio-temporal: %0.1f%%' % (100 * score,))
###############################################################################
# PSDEstimator
# ^^^^^^^^^^^^
# The :class:`mne.decoding.PSDEstimator`
# computes the power spectral density (PSD) using the multitaper
# method. It takes a 3D array as input, converts it into 2D and computes the
# PSD.
#
# FilterEstimator
# ^^^^^^^^^^^^^^^
# The :class:`mne.decoding.FilterEstimator` filters the 3D epochs data.
#
# Spatial filters
# ===============
#
# Just like temporal filters, spatial filters provide weights to modify the
# data along the sensor dimension. They are popular in the BCI community
# because of their simplicity and ability to distinguish spatially-separated
# neural activity.
#
# Common spatial pattern
# ^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`mne.decoding.CSP` is a technique to analyze multichannel data based
# on recordings from two classes :footcite:`Koles1991` (see also
# https://en.wikipedia.org/wiki/Common_spatial_pattern).
#
# Let :math:`X \in R^{C\times T}` be a segment of data with
# :math:`C` channels and :math:`T` time points. The data at a single time point
# is denoted by :math:`x(t)` such that :math:`X=[x(t), x(t+1), ..., x(t+T-1)]`.
# Common spatial pattern (CSP) finds a decomposition that projects the signal
# in the original sensor space to CSP space using the following transformation:
#
# .. math:: x_{CSP}(t) = W^{T}x(t)
# :label: csp
#
# where each column of :math:`W \in R^{C\times C}` is a spatial filter and each
# row of :math:`x_{CSP}` is a CSP component. The matrix :math:`W` is also
# called the de-mixing matrix in other contexts. Let
# :math:`\Sigma^{+} \in R^{C\times C}` and :math:`\Sigma^{-} \in R^{C\times C}`
# be the estimates of the covariance matrices of the two conditions.
# CSP analysis is given by the simultaneous diagonalization of the two
# covariance matrices
#
# .. math:: W^{T}\Sigma^{+}W = \lambda^{+}
# :label: diagonalize_p
# .. math:: W^{T}\Sigma^{-}W = \lambda^{-}
# :label: diagonalize_n
#
# where :math:`\lambda^{C}` is a diagonal matrix whose entries are the
# eigenvalues of the following generalized eigenvalue problem
#
# .. math:: \Sigma^{+}w = \lambda \Sigma^{-}w
# :label: eigen_problem
#
# Large entries in the diagonal matrix corresponds to a spatial filter which
# gives high variance in one class but low variance in the other. Thus, the
# filter facilitates discrimination between the two classes.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_eeg.py`
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_timefreq.py`
#
# .. note::
#
# The winning entry of the Grasp-and-lift EEG competition in Kaggle used
# the :class:`~mne.decoding.CSP` implementation in MNE and was featured as
# a `script of the week <sotw_>`_.
#
# .. _sotw: http://blog.kaggle.com/2015/08/12/july-2015-scripts-of-the-week/
#
# We can use CSP with these data with:
csp = CSP(n_components=3, norm_trace=False)
clf_csp = make_pipeline(csp, LinearModel(LogisticRegression(solver='lbfgs')))
scores = cross_val_multiscore(clf_csp, X, y, cv=5, n_jobs=1)
print('CSP: %0.1f%%' % (100 * scores.mean(),))
###############################################################################
# Source power comodulation (SPoC)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Source Power Comodulation (:class:`mne.decoding.SPoC`)
# :footcite:`DahneEtAl2014` identifies the composition of
# orthogonal spatial filters that maximally correlate with a continuous target.
#
# SPoC can be seen as an extension of the CSP where the target is driven by a
# continuous variable rather than a discrete variable. Typical applications
# include extraction of motor patterns using EMG power or audio patterns using
# sound envelope.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_spoc_CMC.py`
#
# xDAWN
# ^^^^^
# :class:`mne.preprocessing.Xdawn` is a spatial filtering method designed to
# improve the signal to signal + noise ratio (SSNR) of the ERP responses
# :footcite:`RivetEtAl2009`. Xdawn was originally
# designed for P300 evoked potential by enhancing the target response with
# respect to the non-target response. The implementation in MNE-Python is a
# generalization to any type of ERP.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_preprocessing_plot_xdawn_denoising.py`
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_xdawn_eeg.py`
#
# Effect-matched spatial filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The result of :class:`mne.decoding.EMS` is a spatial filter at each time
# point and a corresponding time course :footcite:`SchurgerEtAl2013`.
# Intuitively, the result gives the similarity between the filter at
# each time point and the data vector (sensors) at that time point.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_ems_filtering.py`
#
# Patterns vs. filters
# ^^^^^^^^^^^^^^^^^^^^
#
# When interpreting the components of the CSP (or spatial filters in general),
# it is often more intuitive to think about how :math:`x(t)` is composed of
# the different CSP components :math:`x_{CSP}(t)`. In other words, we can
# rewrite Equation :eq:`csp` as follows:
#
# .. math:: x(t) = (W^{-1})^{T}x_{CSP}(t)
# :label: patterns
#
# The columns of the matrix :math:`(W^{-1})^T` are called spatial patterns.
# This is also called the mixing matrix. The example
# :ref:`sphx_glr_auto_examples_decoding_plot_linear_model_patterns.py`
# discusses the difference between patterns and filters.
#
# These can be plotted with:
# Fit CSP on full data and plot
csp.fit(X, y)
csp.plot_patterns(epochs.info)
csp.plot_filters(epochs.info, scalings=1e-9)
###############################################################################
# Decoding over time
# ==================
#
# This strategy consists in fitting a multivariate predictive model on each
# time instant and evaluating its performance at the same instant on new
# epochs. The :class:`mne.decoding.SlidingEstimator` will take as input a
# pair of features :math:`X` and targets :math:`y`, where :math:`X` has
# more than 2 dimensions. For decoding over time the data :math:`X`
# is the epochs data of shape n_epochs x n_channels x n_times. As the
# last dimension of :math:`X` is the time, an estimator will be fit
# on every time instant.
#
# This approach is analogous to SlidingEstimator-based approaches in fMRI,
# where here we are interested in when one can discriminate experimental
# conditions and therefore figure out when the effect of interest happens.
#
# When working with linear models as estimators, this approach boils
# down to estimating a discriminative spatial filter for each time instant.
#
# Temporal decoding
# ^^^^^^^^^^^^^^^^^
#
# We'll use a Logistic Regression for a binary classification as machine
# learning model.
# We will train the classifier on all left visual vs auditory trials on MEG
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC') # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
###############################################################################
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
clf = make_pipeline(StandardScaler(),
LinearModel(LogisticRegression(solver='lbfgs')))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
time_decod.fit(X, y)
coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
evoked_time_gen = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
evoked_time_gen.plot_joint(times=np.arange(0., .500, .100), title='patterns',
**joint_kwargs)
###############################################################################
# Temporal generalization
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# Temporal generalization is an extension of the decoding over time approach.
# It consists in evaluating whether the model estimated at a particular
# time instant accurately predicts any other time instant. It is analogous to
# transferring a trained model to a distinct learning problem, where the
# problems correspond to decoding the patterns of brain activity recorded at
# distinct time instants.
#
# The object for temporal generalization is
# :class:`mne.decoding.GeneralizingEstimator`. It expects as input :math:`X`
# and :math:`y` (similarly to :class:`~mne.decoding.SlidingEstimator`) but
# generates predictions from each model for all time instants. The class
# :class:`~mne.decoding.GeneralizingEstimator` is generic and will treat the
# last dimension as the one to be used for generalization testing. For
# convenience, here, we refer to it as different tasks. If :math:`X`
# corresponds to epochs data then the last dimension is time.
#
# This runs the analysis used in :footcite:`KingEtAl2014` and further detailed
# in :footcite:`KingDehaene2014`:
# define the Temporal generalization object
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc',
verbose=True)
scores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot the diagonal (it's exactly the same as the time-by-time decoding above)
fig, ax = plt.subplots()
ax.plot(epochs.times, np.diag(scores), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time')
###############################################################################
# Plot the full (generalization) matrix:
fig, ax = plt.subplots(1, 1)
im = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',
extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal generalization')
ax.axvline(0, color='k')
ax.axhline(0, color='k')
plt.colorbar(im, ax=ax)
###############################################################################
# Projecting sensor-space patterns to source space
# ================================================
# If you use a linear classifier (or regressor) for your data, you can also
# project these to source space. For example, using our ``evoked_time_gen``
# from before:
cov = mne.compute_covariance(epochs, tmax=0.)
del epochs
fwd = mne.read_forward_solution(
data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif')
inv = mne.minimum_norm.make_inverse_operator(
evoked_time_gen.info, fwd, cov, loose=0.)
stc = mne.minimum_norm.apply_inverse(evoked_time_gen, inv, 1. / 9., 'dSPM')
del fwd, inv
###############################################################################
# And this can be visualized using :meth:`stc.plot <mne.SourceEstimate.plot>`:
brain = stc.plot(hemi='split', views=('lat', 'med'), initial_time=0.1,
subjects_dir=subjects_dir)
###############################################################################
# Source-space decoding
# =====================
#
# Source space decoding is also possible, but because the number of features
# can be much larger than in the sensor space, univariate feature selection
# using ANOVA f-test (or some other metric) can be done to reduce the feature
# dimension. Interpreting decoding results might be easier in source space as
# compared to sensor space.
#
# .. topic:: Examples
#
# * :ref:`tut_dec_st_source`
#
# Exercise
# ========
#
# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
# Face vs. Scrambled)
#
# References
# ==========
# .. footbibliography::
| bsd-3-clause |
andyh616/mne-python | tutorials/plot_raw_objects.py | 15 | 5335 | """
.. _tut_raw_objects:
The :class:`Raw <mne.io.RawFIF>` data structure: continuous data
================================================================
"""
from __future__ import print_function
import mne
import os.path as op
from matplotlib import pyplot as plt
###############################################################################
# Continuous data is stored in objects of type :class:`Raw <mne.io.RawFIF>`.
# The core data structure is simply a 2D numpy array (channels × samples,
# `._data`) combined with an :class:`Info <mne.io.meas_info.Info>` object
# (`.info`) (:ref:`tut_info_objects`.
#
# The most common way to load continuous data is from a .fif file. For more
# information on :ref:`loading data from other formats <ch_raw>`, or creating
# it :ref:`from scratch <tut_creating_data_structures>`.
###############################################################################
# Loading continuous data
# -----------------------
# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.RawFIF(data_path, preload=True, verbose=False)
# Give the sample rate
print('sample rate:', raw.info['sfreq'], 'Hz')
# Give the size of the data matrix
print('channels x samples:', raw._data.shape)
###############################################################################
# Information about the channels contained in the :class:`Raw <mne.io.RawFIF>`
# object is contained in the :class:`Info <mne.io.meas_info.Info>` attribute.
# This is essentially a dictionary with a number of relevant fields (see
# :ref:`tut_info_objects`).
###############################################################################
# Indexing data
# -------------
#
# There are two ways to access the data stored within :class:`Raw
# <mne.io.RawFIF>` objects. One is by accessing the underlying data array, and
# the other is to index the :class:`Raw <mne.io.RawFIF>` object directly.
#
# To access the data array of :class:`Raw <mne.io.Raw>` objects, use the
# `_data` attribute. Note that this is only present if `preload==True`.
print('Shape of data array:', raw._data.shape)
array_data = raw._data[0, :1000]
_ = plt.plot(array_data)
###############################################################################
# You can also pass an index directly to the :class:`Raw <mne.io.RawFIF>`
# object. This will return an array of times, as well as the data representing
# those timepoints. This may be used even if the data is not preloaded:
# Extract data from the first 5 channels, from 1 s to 3 s.
sfreq = raw.info['sfreq']
data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]
_ = plt.plot(times, data.T)
_ = plt.title('Sample channels')
###############################################################################
# -----------------------------------------
# Selecting subsets of channels and samples
# -----------------------------------------
#
# It is possible to use more intelligent indexing to extract data, using
# channel names, types or time ranges.
# Pull all MEG gradiometer channels:
# Make sure to use copy==True or it will overwrite the data
meg_only = raw.pick_types(meg=True, copy=True)
eeg_only = raw.pick_types(meg=False, eeg=True, copy=True)
# The MEG flag in particular lets you specify a string for more specificity
grad_only = raw.pick_types(meg='grad', copy=True)
# Or you can use custom channel names
pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
specific_chans = raw.pick_channels(pick_chans, copy=True)
print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')
###############################################################################
# Notice the different scalings of these types
f, (a1, a2) = plt.subplots(2, 1)
eeg, times = eeg_only[0, :int(sfreq * 2)]
meg, times = meg_only[0, :int(sfreq * 2)]
a1.plot(times, meg[0])
a2.plot(times, eeg[0])
###############################################################################
# You can restrict the data to a specific time range
restricted = raw.crop(5, 7) # in seconds
print('New time range from', restricted.times.min(), 's to',
restricted.times.max(), 's')
###############################################################################
# And drop channels by name
restricted = restricted.drop_channels(['MEG 0241', 'EEG 001'])
print('Number of channels reduced from', raw.info['nchan'], 'to',
restricted.info['nchan'])
###############################################################################
# --------------------------------------------------
# Concatenating :class:`Raw <mne.io.RawFIF>` objects
# --------------------------------------------------
#
# :class:`Raw <mne.io.RawFIF>` objects can be concatenated in time by using the
# :func:`append <mne.io.RawFIF.append>` function. For this to work, they must
# have the same number of channels and their :class:`Info
# <mne.io.meas_info.Info>` structures should be compatible.
# Create multiple :class:`Raw <mne.io.RawFIF>` objects
raw1 = raw.copy().crop(0, 10)
raw2 = raw.copy().crop(10, 20)
raw3 = raw.copy().crop(20, 100)
# Concatenate in time (also works without preloading)
raw1.append([raw2, raw3])
print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
| bsd-3-clause |
mMPI-MRI/TractLAS | utils.py | 1 | 74376 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 23, 2017
Compute large connectomes using mrtrix and sparse matrices
@author: Christopher Steele
"""
from __future__ import division # to allow floating point calcs of number of voxels
from pandas import read_csv
def natural_sort(l):
    """
    Return the input sorted alphanumerically ("natural" order), so that e.g.
    'roi2' sorts before 'roi10'.

    Based on http://stackoverflow.com/questions/11150239/python-natural-sorting
    """
    import re

    def _coerce(chunk):
        # Digit runs compare numerically; everything else case-insensitively.
        return int(chunk) if chunk.isdigit() else chunk.lower()

    def _natural_key(item):
        return [_coerce(chunk) for chunk in re.split('([0-9]+)', item)]

    return sorted(l, key=_natural_key)
def mask2voxelList(mask_img, out_file = None, coordinate_space = 'scanner', mask_threshold = 0, decimals = 2):
    """
    Calculate coordinates for all voxels greater than mask threshold and save
    them to a comma-separated text file.

    :param mask_img: filename of a mask image (any format nibabel can load)
    :param out_file: output .csv filename; defaults to the mask filename (up to
                     the first ".") plus "_<coordinate_space>_coords.csv"
    :param coordinate_space: {'scanner', 'voxel'} - default is 'scanner', where each voxel center is returned after applying the affine matrix
    :param mask_threshold: values lower or equal to this are set to 0, greater set to 1
    :param decimals: decimals for rounding scanner-space coordinates
    :return: out_file (filename of the saved coordinate list)
    """
    from nibabel.loadsave import load as imgLoad
    from nibabel.affines import apply_affine
    import os
    import numpy as np

    # Fail early on an unsupported space instead of silently writing nothing
    # (the original compared strings with "is", which relies on CPython string
    # interning and is not guaranteed to work):
    if coordinate_space not in ("scanner", "voxel"):
        raise ValueError("coordinate_space must be 'scanner' or 'voxel', "
                         "not %s" % coordinate_space)

    if out_file is None:
        out_dir = os.path.dirname(mask_img)
        out_name = os.path.basename(mask_img).split(".")[0] + "_" + coordinate_space + "_coords.csv" #take up to the first "."
        out_file = os.path.join(out_dir, out_name)

    #import the data
    img = imgLoad(mask_img)
    d = img.get_data()
    aff = img.affine

    #binarise the data
    d[d <= mask_threshold] = 0
    d[d > mask_threshold] = 1

    vox_coord = np.array(np.where(d == 1)).T  # nx3 array of voxel indices

    if coordinate_space == "voxel":
        np.savetxt(out_file, vox_coord, delimiter=",", fmt="%d")
    else:  # "scanner": map voxel indices to world coordinates via the affine
        scanner_coord = np.round(apply_affine(aff, vox_coord), decimals=decimals)
        np.savetxt(out_file, scanner_coord, delimiter=",",
                   fmt="%." + str(decimals) + "f")
    return out_file
def generate_connectome_nodes(mask_img, include_mask_img = None, cubed_subset_dim = None,
max_num_labels_per_mask = None, out_sub_dir ="cnctm", start_idx = 1,
out_file_base = None, zfill_num = 4, coordinate_space = "scanner",
include_mask_subset_dim = None, coordinate_precision = 4, VERBOSE = True,
skip_all_computations = False):
"""
Generate cubes of unique indices to cover the entire volume, multiply them by your binary mask_img, then split up
into multiple mask node files of no more than max_num_labels_per_mask (for mem preservation) and re-index each to
start at start_idx (default = 1, best to stick with this).
Appx 4900 nodes creates appx 44mb connectome file (text) and file grows as the square of node number, so 1.8x larger (8820) should be just under 1GB
- but this will likely break pd.read_csv unless you write a line by line reader :-/
- 20,000 node limits appear to be reasonable for 16gb computer, yet to see how the connectomes can be combined, since it will be large.
Saves each combination of subsets of rois as *index_label_?_?.nii.gz, along with
:param mask_img:
:param include_mask_img: must be in the same space as mask_img, indices of this mask will come at the end and they will supersede those of the original mask_img XXX
:param cubed_subset_dim:
:param max_num_labels_per_mask:
:param out_sub_dir:
:param start_idx: don't set this to anything other than 1 unless you have gone through the code. mrtrix does not like this and I haven't made sure that it works properly throuhgout all functions
:param out_file_base:
:param zfill_num: number of 0s to pad indices with so that filenames look nice (always use natural sort, however!)
:param coordinate_space: coordinate space for output of voxel locations (either apply transform {"scanner"} or voxel space {"voxel"}
:param include_mask_subset_dim; different subsetting for include mask, None provides same subsetting dimensions as set by cubed_subset_dim (1 should give voxel-wise, but will be slow-ish)
:param coordinate_precision: number of decimals to retain in LUT coordinates
:param VERBOSE: blahblahblah
:return:
"""
# TODO: skip computation on flag, but still generate all of the correct filenames for later stages...
import nibabel as nb
import numpy as np
if out_file_base is None:
out_file_base = mask_img.split(".")[0]+"_index_label"
if out_sub_dir is not None:
import os
if include_mask_img is not None:
out_sub_dir = out_sub_dir + "_includeMask"
if include_mask_subset_dim is not None:
out_sub_dir = out_sub_dir + "_{}".format(str(include_mask_subset_dim))
if cubed_subset_dim is not None:
out_sub_dir = out_sub_dir + "_cubed_" + str(cubed_subset_dim)
if max_num_labels_per_mask is not None:
out_sub_dir = out_sub_dir + "_maxLabelsPmask_" + str(max_num_labels_per_mask)
out_dir = os.path.join(os.path.dirname(out_file_base), out_sub_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_file_base = os.path.join(out_dir, os.path.basename(out_file_base))
print(out_file_base)
img = nb.loadsave.load(mask_img)
d = img.get_data().astype(np.uint64)
aff = img.affine
header = img.header
if include_mask_img is not None:
img2 = nb.loadsave.load(include_mask_img)
d2 = img2.get_data().astype(np.uint64)
wm_label_mapping_file = None
if cubed_subset_dim is not None:
print("Generating labels and LUT file for cubed indices. This may take a while if you have many indices.") # TODO: make this faster
cubed_3d = get_cubed_array_labels_3d(np.shape(d), cubed_subset_dim).astype(np.uint32)
d = np.multiply(d, cubed_3d) # apply the cube to the data
#extremely fast way to replace values, suggested here: http://stackoverflow.com/questions/13572448/change-values-in-a-numpy-array
palette = np.unique(d) #INCLUDES 0
key = np.arange(0,len(palette))
index = np.digitize(d.ravel(), palette, right=True)
d = key[index].reshape(d.shape)
if include_mask_img is not None:
apply_new_cubed_dim = True
if include_mask_subset_dim is not None:
print("Alternative subsetting for additional included mask image.")
if include_mask_subset_dim == 1:
all_vox_locs_d2 = np.array(np.where(d2 == 1)).T
wm_first_label = np.max(d) + 1
idx = wm_first_label
for vox in all_vox_locs_d2: # increment the second mask (wm) from where we left off
d2[vox[0], vox[1], vox[2]] = idx
idx += 1
wm_label_count = len(np.unique(d2)) - 1
# d[d2 > 0] = d2[d2 > 0] # again, overwriting the label if we also have it in the wm mask
apply_new_cubed_dim = False #flag so we don't apply the cubed dimensions
else:
cubed_3d = get_cubed_array_labels_3d(np.shape(d), include_mask_subset_dim).astype(np.uint32) # changed the subset dim as required by user
if apply_new_cubed_dim:
d2 = np.multiply(d2, cubed_3d)
palette2 = np.unique(d2) # INCLUDES 0
key = np.arange(0, len(palette2))
key[1:] = key[1:] + np.max(d) #create the offset in the labels
wm_first_label = np.max(d)+1
wm_label_count = len(palette2)-1
index = np.digitize(d2.ravel(), palette2, right=True)
d2 = key[index].reshape(d2.shape)
d[d2 > 0] = d2[d2 > 0] #overwrite the d value with second mask -makes assumptions about what the WM mask will look like, which may not be well founded.
#need to do this again in case we overwrote an index TODO: better way to do this?
palette = np.unique(d) #INCLUDES 0
key = np.arange(0,len(palette))
wm_remapped_label = key[palette == wm_first_label]
#save the new first label and number of labels for the second mask (wm)
wm_label_mapping_file = out_file_base + "_subset_" + str(0).zfill(zfill_num) + "_" + str(0).zfill(zfill_num) + "_labels_lut_all_labels_wm_start_val_num.txt"
np.savetxt(wm_label_mapping_file, np.array([wm_remapped_label,wm_label_count]), fmt = "%i")
index = np.digitize(d.ravel(), palette, right=True)
d = key[index].reshape(d.shape)
print("Second mask image incorporated")
lut = np.zeros((len(palette)-1, 4)) #non-zero LUT
all_vox_locs = np.array(np.where(d>0)).T
all_vox_idx_locs = np.zeros((len(all_vox_locs),4)) # will contain lut_idx_val, x, y, z
all_vox_idx_locs[:,1:] = all_vox_locs
print("Determining the value of non-zero voxels in the combined mask.")
idx = 0
for vox in all_vox_locs:
all_vox_idx_locs[idx,0]=d[vox[0], vox[1], vox[2]]
idx += 1
print("Calculating lut and coordinates for centroid in each subset (slow).")
print(" there are {} individual labels that need to have their centroids calculated... ".format(len(np.unique(all_vox_idx_locs[:,0]))))
idx = 0
#TODO: make this faster. It is possibly the slowest part of the code.
for lut_idx in np.unique(all_vox_idx_locs[:,0]):
vox_subset = all_vox_idx_locs[np.where(all_vox_idx_locs[:,0] == lut_idx),:]
if vox_subset.ndim == 1:
this_lut = vox_subset[1:]
else:
this_lut = np.mean(vox_subset, axis = 1)
# if VERBOSE:
# print(this_lut)
lut[idx, :] = this_lut
idx += 1
print("Completed generating LUT file for cubed indices.")
else:
all_vox_locs = np.array(np.where(d == 1)).T
idx = start_idx
for vox in all_vox_locs:
d[vox[0], vox[1], vox[2]] = idx
idx += 1
lut = np.zeros((np.shape(all_vox_locs)[0], np.shape(all_vox_locs)[1] + 1))
lut[:, 1:] = all_vox_locs
lut[:, 0] = np.arange(1, np.shape(all_vox_locs)[0] + 1)
if include_mask_img is not None: #update the original node file with the second image that was included, update the lut and index too!
all_vox_locs_d2 = np.array(np.where(d2 == 1)).T
wm_first_label = idx
for vox in all_vox_locs_d2: #increment the second mask (wm) from where we left off
d2[vox[0], vox[1], vox[2]] = idx
idx += 1
wm_label_count = len(np.unique(d2)) - 1
d[d2>0] = d2[d2>0] #again, overwriting the label if we also have it in the wm mask
#need to do this again in case we overwrote an index TODO: better way to do this?
palette = np.unique(d) #INCLUDES 0
key = np.arange(0,len(palette))
wm_remapped_label = key[palette == wm_first_label]
wm_label_mapping_file = out_file_base + "_subset_" + str(0).zfill(zfill_num) + "_" + str(0).zfill(zfill_num) + "_labels_lut_all_labels_wm_start_val_num.txt"
np.savetxt(wm_label_mapping_file, np.array([wm_remapped_label,wm_label_count]), fmt = "%i")
index = np.digitize(d.ravel(), palette, right=True)
d = key[index].reshape(d.shape)
all_vox_locs = np.array(np.where(d > 0)).T
lut = np.zeros((np.shape(all_vox_locs)[0], np.shape(all_vox_locs)[1] + 1))
lut[:, 1:] = all_vox_locs
idx = 0
for vox in all_vox_locs:
lut[idx, 0] = d[vox[0], vox[1], vox[2]]
idx += 1
# TODO: check whether all_vox_locs is correct in every case, otherwise may need to do inside the if statements
if coordinate_space == "scanner":
lut[:, 1:] = nb.affines.apply_affine(aff, lut[:, 1:])
lut_header = "index_label,x_coord_scan,y_coord_scan,z_coord_scan"
elif coordinate_space == "voxel":
#lut[:, 1:] = all_vox_locs
lut[:, 1:] = np.round(lut[:, 1:]).astype(int) #round them and convert to integers, since we may be between slices for geometric mean if we used the cubed option
lut_header = "index_label,x_coord_vox,y_coord_vox,z_coord_vox"
out_file_lut = out_file_base + "_subset_" + str(0).zfill(zfill_num) + "_" + str(0).zfill(zfill_num) + "_labels_lut_all.txt"
np.savetxt(out_file_lut, lut, header=lut_header, delimiter=",", fmt="%." + str(coordinate_precision) + "f")
lut_fname = out_file_lut #TODO: clean up the language
out_file_lut = out_file_base + "_subset_" + str(0).zfill(zfill_num) + "_" + str(0).zfill(zfill_num) + "_labels_lut_all_labels.txt"
np.savetxt(out_file_lut, lut[:,0].astype(int), delimiter=",", fmt="%d",header="index_label")
img = nb.Nifti1Image(d, aff, header)
img.set_data_dtype("uint64")
master_label_index_file = out_file_base + "_subset_" + str(0).zfill(zfill_num) + "_" + str(0).zfill(zfill_num) + "_all.nii.gz"
nb.save(img, master_label_index_file)
print(out_file_base + "_subset_" + str(0).zfill(zfill_num) + "_" + str(0).zfill(zfill_num) + "_all.nii.gz")
unique = np.unique(d)
non_zero_labels = unique[np.nonzero(unique)]
print(str(len(non_zero_labels))+ " unique labels (excluding 0)")
if (max_num_labels_per_mask is not None) and (max_num_labels_per_mask < len(non_zero_labels)): #we cut things up
import itertools
all_out_files = []
all_out_files_luts = []
num_sub_arrays = int(np.ceil(len(non_zero_labels) / (max_num_labels_per_mask / 2)))
cube_labels_split = np.array_split(non_zero_labels, num_sub_arrays)
all_sets = list(itertools.combinations(np.arange(0, num_sub_arrays), 2))
print("There are {} mask combinations to be created.".format(len(all_sets)))
idx = 0
for set in all_sets:
idx += 1
print("\nGenerating set {0} of {1} sets".format(idx,len(all_sets)))
superset = np.concatenate((cube_labels_split[set[0]], cube_labels_split[set[1]]), axis=0) #contains labels
d_temp = np.zeros(d.shape)
# this has been checked, and returns identical indices as with a simple loop
all_idxs = np.copy(non_zero_labels)
all_idxs[np.logical_not(np.in1d(non_zero_labels, superset))] = 0 #where our indices are not in the superset, set to 0
all_idxs[np.in1d(non_zero_labels, superset)] = np.arange(0,len(superset)) + start_idx #where they are in the index, reset them to increasing
key = all_idxs #this is the vector of values that will be populated into the matrix
palette = non_zero_labels
index = np.digitize(d.ravel(), palette, right=True)
d_temp = key[index].reshape(d.shape)
d_temp[d==0] = 0 #0 ends up being set to 1 because of edge case, so set them all back to 0
tail = "_subset_" + str(set[0]).zfill(zfill_num) + "_" + str(set[1]).zfill(zfill_num)
out_file = out_file_base + tail + ".nii.gz"
out_file_lut = out_file_base + tail + "_labels.txt"
print("Subset includes {} non-zero labels.".format(len(superset)))
print("Set combination: {}".format(set))
if VERBOSE:
print(out_file)
print(out_file_lut)
img_out = nb.Nifti1Image(d_temp.astype(np.uint64), aff, header=header)
img_out.set_data_dtype("uint64")
nb.loadsave.save(img_out, out_file)
np.savetxt(out_file_lut, superset, delimiter=",", fmt="%d",header="index_label") #not the voxel locations, just the LUT TODO:change? requires change in matrix combination
all_out_files.append(out_file)
all_out_files_luts.append(out_file_lut)
else:
master_label_index_file = out_file_base + "_subset_" + str(0).zfill(zfill_num) + "_" + str(0).zfill(zfill_num) + "_all.nii.gz"
all_out_files = master_label_index_file
all_out_files_luts = out_file_lut
return all_out_files, all_out_files_luts, out_sub_dir, lut_fname, wm_label_mapping_file, master_label_index_file
def do_it_all(tck_file, mask_img, include_mask_img = None, tck_weights_file = None, cubed_subset_dim = 3,
              include_mask_subset_dim = None, max_num_labels_per_mask = 20000, out_mat_file=None,
              coordinate_space = "scanner", intermediate_mat_format = "mmap", split_gm_wm = False, CLOBBER = False):
    """
    Generate node files, lut, label index lookups, extract connectome from subsets of node files (as necessary), and
    recombine connectome and save.

    Pipeline: 1) generate_connectome_nodes builds the label/node files, LUT and (optionally) the
    wm label mapping; 2) tck2connectome_collection extracts streamline-count and mean-length
    sub-matrices for each node sub-mask; 3) combine_connectome_matrices_sparse stitches the
    sub-matrices back into full matrices (and writes them itself).

    :param tck_file: streamlines file passed through to tck2connectome_collection
    :param mask_img: mask image defining the primary connectome nodes
    :param include_mask_img: optional second mask (e.g. wm) whose voxels are appended as extra labels;
                             when given, both assignEnd and assignAll matrices are produced
    :param tck_weights_file: optional per-streamline weights file (count matrices only)
    :param cubed_subset_dim: edge length (voxels) of the cubes used to subset node labels
    :param include_mask_subset_dim: alternative cube edge length for include_mask_img (None -> same behavior as primary)
    :param max_num_labels_per_mask: max labels per node sub-mask before splitting into pairwise combinations
    :param out_mat_file: output matrix base name (no extension); derived from mask_img when None
    :param coordinate_space: {"scanner", "voxel"} coordinate space used for the LUT
    :param intermediate_mat_format: intermediate matrix format; any value other than "mmap" triggers an
                                    explicit .mtx/.mat write of the combined matrix in this function
    :param split_gm_wm: passed through; store gm/wm/gm2wm sub-matrices separately in the hdf5 output
    :param CLOBBER: currently unused here; kept for interface compatibility
    :return: combined matrix (and the assignAll matrix when include_mask_img is given) when
             intermediate_mat_format != "mmap"; otherwise None (combine_connectome_matrices_sparse
             has already written the results to disk)
    """
    # appx 5 hrs for dim=3, max labels=5k (without combining the connectome)
    from scipy import io
    import os
    # 1) node/label generation
    node_masks, node_mask_luts, out_dir, lut_fname, wm_label_mapping_file, master_label_index_file = generate_connectome_nodes(mask_img, include_mask_img = include_mask_img,
                                                                         cubed_subset_dim=cubed_subset_dim, include_mask_subset_dim = include_mask_subset_dim,
                                                                         max_num_labels_per_mask=max_num_labels_per_mask,
                                                                         coordinate_space=coordinate_space)
    # 2) per-subset connectome extraction: streamline counts, then mean streamline lengths
    connectome_files = tck2connectome_collection(tck_file, node_masks, tck_weights_file=tck_weights_file,
                                                 assign_all_mask_img = include_mask_img)
    connectome_files_length = tck2connectome_collection(tck_file, node_masks, tck_weights_file=None,
                                                        assign_all_mask_img = include_mask_img, stat_mean_length=True)
    # 3) recombination into full matrices
    if include_mask_img is not None:
        # we expect two sets of connectome files back (endpoint assignment vs. all-voxel assignment)
        connectome_files_assignEnd = cnctm_mat2sparse_pickle(connectome_files[0]) #TODO: this may not end up being necessary, see what you think on performance?
        connectome_files_assignAll = cnctm_mat2sparse_pickle(connectome_files[1])
        mat = combine_connectome_matrices_sparse(connectome_files_assignEnd, node_mask_luts,
                                                 template_img_file=master_label_index_file, lut_file=lut_fname,
                                                 intermediate_mat_format=None,
                                                 wm_label_mapping_file=wm_label_mapping_file,
                                                 coordinate_space=coordinate_space, split_gm_wm= split_gm_wm)
        mat_assignAll = combine_connectome_matrices_sparse(connectome_files_assignAll, node_mask_luts,
                                                           template_img_file=master_label_index_file, lut_file=lut_fname,
                                                           intermediate_mat_format=None,
                                                           wm_label_mapping_file= wm_label_mapping_file,
                                                           coordinate_space=coordinate_space, split_gm_wm= split_gm_wm)
        # combined for its side effect of writing the length matrix (writing handled inside combine)
        #and for the lengths, which should probably also have two? TODO: figure out if it makes sense to have only one length param?
        mat_length = combine_connectome_matrices_sparse(connectome_files_length, node_mask_luts,
                                                        template_img_file=master_label_index_file, lut_file=lut_fname,
                                                        intermediate_mat_format=None,
                                                        wm_label_mapping_file=wm_label_mapping_file,
                                                        coordinate_space=coordinate_space, split_gm_wm=split_gm_wm)
        # mat_assignAll_length = combine_connectome_matrices_sparse(connectome_files_length[1], node_mask_luts,
        #                                                 template_img_file=master_label_index_file, lut_file=lut_fname,
        #                                                 intermediate_mat_format=None,
        #                                                 wm_label_mapping_file=wm_label_mapping_file,
        #                                                 coordinate_space=coordinate_space, split_gm_wm=split_gm_wm)
    else:
        connectome_files = cnctm_mat2sparse_pickle(connectome_files)
        mat = combine_connectome_matrices_sparse(connectome_files,node_mask_luts,template_img_file=master_label_index_file,
                                                 lut_file=lut_fname,intermediate_mat_format=None,
                                                 wm_label_mapping_file=wm_label_mapping_file,
                                                 coordinate_space=coordinate_space, split_gm_wm= False)
    # writing of data now taken care of in combine_connectome_matrices
    # BUGFIX: was `is not "mmap"` — identity comparison against a str literal is
    # interning-dependent and a SyntaxWarning on modern CPython; use inequality.
    if intermediate_mat_format != "mmap":
        if out_mat_file is None:
            out_mat_file = os.path.join(out_dir,os.path.basename(mask_img).split(".")[0] + "_all_cnctm_mat_complete")
        print("\nFull matrix stored in: {} .mtx/.mat".format(out_mat_file + "_{tail}"))
        if include_mask_img is not None:
            io.mmwrite(out_mat_file + "_assignEnd" + ".mtx", mat)
            io.savemat(out_mat_file + "_assignEnd" + ".mat", {'mat': mat})
            io.mmwrite(out_mat_file + "_assignAll" + ".mtx", mat_assignAll)
            io.savemat(out_mat_file + "_assignAll" + ".mat", {'mat': mat_assignAll})
            return mat, mat_assignAll
        else:
            io.mmwrite(out_mat_file + ".mtx", mat)
            io.savemat(out_mat_file + ".mat", {'mat':mat})
            return mat
def cnctm_mat2sparse_pickle(connectome_files_list, CLOBBER = False, delete_intermediate = True):
    """Convert connectome matrix text file(s) to pickled scipy.sparse.lil_matrix file(s).

    Each input is read as a whitespace-delimited float32 matrix, converted to a
    sparse lil_matrix, and dumped to ``<input basename>_sparse.pickle``.

    :param connectome_files_list: a single filename or a list of filenames
    :param CLOBBER: when True, regenerate a pickle even if it already exists
    :param delete_intermediate: when True, remove the input text file once its pickle exists
    :return: list of output pickle filenames
    """
    import scipy.sparse as sparse
    import numpy as np
    from pandas import read_csv
    import time
    import os
    try:
        import cPickle as pickle  # python 2 fast pickle
    except ImportError:
        import pickle
    print("Lets get pickling! (https://en.wikipedia.org/wiki/Pickling)")
    if not isinstance(connectome_files_list,list):
        connectome_files_list = [connectome_files_list]
    out_files = []
    for in_file in connectome_files_list:
        out_file = in_file.split(".")[0] + "_sparse.pickle"
        # BUGFIX: CLOBBER was previously ignored — an existing pickle was never
        # regenerated even with CLOBBER=True, despite the message saying otherwise.
        if os.path.exists(out_file) and not CLOBBER:
            print("Pickle file exists, not recreating it unless you set CLOBBER to True.")
        else:
            start_t=time.time()
            print("\n Loading data and preparing pickle:\n{}".format(out_file))
            # float32 keeps memory use down; HIGHEST_PROTOCOL for compact/fast binary output
            pickle.dump(sparse.lil_matrix(read_csv(in_file, sep = " ", header = None, dtype=np.float32).values), open(out_file,'wb'), protocol = pickle.HIGHEST_PROTOCOL)
            print("Elapsed time for conversion to sparse matrix and pickle dump: {:.2f} s".format(time.time()-start_t))
        if delete_intermediate:
            try:
                # only delete the source once the pickle is confirmed to exist
                if os.path.exists(out_file):
                    os.remove(in_file)
                    print(" ... removed intermediate file: {}".format(in_file))
            except OSError:
                print("--- Could not remove the intermediate file {}---".format(in_file))
        out_files.append(out_file)
    return out_files
# streamlined version of this is below, without the _complete suffix
def combine_connectome_matrices_sparse_complete(connectome_files_list, connectome_files_index_list, label_max = None,
                                       template_img_file = None, lut_file = None,
                                       intermediate_mat_format = None, compression_level = 6, shuffle = False,
                                       fletcher32 = True, CLOBBER = False,
                                       delete_intermediate = True, wm_label_mapping_file = None,
                                       coordinate_space = None, split_gm_wm = False):
    """
    Combine mrtrix3 tck2connectome generated submatrices into a single matrix, store as an hdf5 file with useful metadata
    or as a matrix file if the overall matrix is small enought to be loaded into memory.

    NOTE (review): legacy "work in progress" variant kept for reference; the streamlined
    combine_connectome_matrices_sparse defined later in this file is the one in active use.

    :param connectome_files_list: list of connectome files, each a portion of the full matrix
    :param connectome_files_index_list: list of files that contain the label indices of each submatrix to the full matrix
    :param label_max: <not implemented>
    :param template_img_file: the master node file for the full matrix
    :param lut_file: lut for the full matrix, (label_id, x_coord, y_coord, z_coord)
    :param intermediate_mat_format: {None, 'pickle','mtx','csv','hdf5','mmap'} matrix format to store intermediate files,
                                    use None, all others are only suitable for relatlvely small matrices and/or not efficient
    :param compression_level: compression level for gzip of hdf5 {0...9}, default = 6
    :param shuffle: shuffle filter for hdf5, seems to increase disk usage rather than decrease with this data
    :param fletcher32: checksum implementation for hdf5
    :param CLOBBER: <not fully implemented yet>
    :param delete_intermediate: {True, False} remove intermediate files
    :param wm_label_mapping_file: file that contains the mapping of the first "wm label" and the number of wm labels (i.e., the start label_id for the include_mask file)
    :param coordinate_space: {'voxel','scanner'} whether or not the lut is in voxel or scanner space (voxel preferred), only for metadata in hdf5
    :return: matrix or matrix file name (hdf5)
    """
    print(template_img_file)
    import pandas as pd
    import numpy as np
    from scipy import io, sparse
    import os
    # python2/3 pickle compatibility shim
    try:
        import cPickle as pickle
    except:
        import pickle
    # normalize single-file inputs to lists so the loops below work uniformly
    if not isinstance(connectome_files_index_list, list):
        connectome_files_index_list = [connectome_files_index_list]
    if not isinstance(connectome_files_list, list):
        connectome_files_list = [connectome_files_list]
    print("\n[Combining connectome files]\nConnectome files include: ")
    #first check the indices so you know how large things are
    # first pass over the label-index files: the full matrix is square with side = max label id
    if label_max is None:
        label_max = 0
        for idx, file in enumerate(connectome_files_index_list):
            print(file)
            label_idx = np.ndarray.flatten(pd.read_csv(file, header = 0, dtype=np.uint32).values) #read quickly, then break out of the array of arrays of dimension 1
            if np.max(label_idx) > label_max:
                label_max = np.max(label_idx)
    # sparse accumulator for the small-matrix formats ('pickle'/'mtx'/'csv')
    mat = sparse.lil_matrix((label_max,label_max))
    print("--------------------------------------------------------------\nConnectome combination from {0} files in progress:".format(len(connectome_files_list)))
    print("    Attempting to construct a {0}x{0} sparse matrix from {1} connectome files".format(label_max,len(connectome_files_list)))
    import time
    #assume that the file list and the index list are in the same order, now we can build the matrix - USE NATURAL SORT!
    # try:
    skip_mat_update = False # confusing, but don't have time to rewrite how CLOBBER is handling things
    start_time = time.time()
    # NOTE(review): 'tag' is only assigned when the filename contains assignAll/assignEnd/length;
    # any other filename raises NameError when the hdf5/mmap filename is built below — confirm
    # that upstream filenames always carry one of these tags.
    for idx, file in enumerate(connectome_files_list):
        if "assignAll" in file:
            tag = "_assignAll"
        elif "assignEnd" in file:
            tag = "_assignEnd"
        elif "length" in file:
            tag = "_length"
        # subset id, e.g. "0000_0001", recovered from the filename
        subset_txt = "_".join(file.split("_subset_")[1].split("_")[0:2])
        print("[{0}/{3}]:\n  matrix: {1}\n  index : {2}".format(idx+1,
                                                              file.split('/')[-1],
                                                              connectome_files_index_list[idx].split('/')[-1],
                                                              len(connectome_files_list)))
        label_idx = np.ndarray.flatten(pd.read_csv(connectome_files_index_list[idx], header = 0, dtype=np.uint32).values)
        # convert 1-based labels to 0-based matrix indices
        lookup_col = label_idx - 1 #assuming that the start index is 1, which is a bad assumption?
        lookup_row = lookup_col.T
        # there is extra information at the self-self (eg, subset 0000 with itself) conncetome because it is stored in
        # multiple subsets, no known way to get around this duplication, so it is ignored (and overwritten in the matrix
        # file so it ends up having no effect
        print(" ... subset {0} ({1}x{1} subset)".format(subset_txt,len(lookup_row)))
        if intermediate_mat_format == "pickle":
            #data = sparse.lil_matrix(pickle.load(open(file,'rb')))
            mat[np.ix_(lookup_row,lookup_col)] = sparse.lil_matrix(pickle.load(open(file,'rb')))
        elif intermediate_mat_format == "mtx": #about 5x slower for saving and loading than pickled sparse matrix
            #data = sparse.lil_matrix(io.mmread(file))
            mat[np.ix_(lookup_row,lookup_col)] = sparse.lil_matrix(io.mmread(file))
        elif intermediate_mat_format == "csv":
            mat[np.ix_(lookup_row,lookup_col)] = pd.read_csv(file, sep = " ", header = None, dtype=np.float32).values #this works (tested on small sub-matrices) but not sure if all cases are covered?
        # NOTE(review): '== None' works here but 'is None' is the conventional comparison.
        elif intermediate_mat_format == None: #go straight to the HDF5 file, this is now the preferred format, since it saves time and memory!
            if idx == 0: #create the hdf file
                old_mat_file_exists = False
                hdf5_fname = os.path.join(os.path.dirname(file),os.path.basename(file).split("_")[0] + tag + "_cnctm_mat_single_None.hdf5")
                import h5py
                f = h5py.File(hdf5_fname,'a')
                print(" ... created hdf5 file with compression level {0} ({1})".format(compression_level,hdf5_fname))
                # NOTE(review): bare except treats "dataset already exists" as non-fatal,
                # but it also hides any other h5py failure.
                try:
                    dset = f.create_dataset('mat', shape=(label_max,label_max),dtype=np.float32,
                                            compression='gzip', compression_opts=compression_level, shuffle=shuffle,
                                            fletcher32=fletcher32)
                except:
                    print(" ... hdf5 file already exits, not changing ({})".format(os.path.basename(hdf5_fname)))
                    old_mat_file_exists = True
            if not old_mat_file_exists:
                # determine where we switch from one continuous subset of labels another (breakpoint)
                brk = np.nonzero(np.logical_not(np.diff(label_idx)==1))
                brk = np.ndarray.flatten(np.array(brk)) #flatten from tuple
                print(" ... loading the data from the subset matrix file"),
                time_inner = time.time()
                # NOTE(review): this rebinds 'mat' (the sparse accumulator above) to the dense
                # submatrix; harmless in this branch because the hdf5 filename, not 'mat',
                # is returned for intermediate_mat_format == None — but confusing.
                if file.split(".")[-1] == "pickle":  # we just need to read the binary blob
                    mat = sparse.lil_matrix(pickle.load(open(file, 'rb'))).toarray()
                elif file.split(".")[-1] == "txt":  # we need to pull from the raw txt ouptut from mrtrix
                    mat = pd.read_csv(file, sep=" ", header=None, dtype=np.float32).values
                print(" (took {0:.2f} sec ({1:.2f} min))".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
                if len(brk) > 1: # 2x check that there is only a single change from continuous index
                    print("Oh shit.... indices are not correct!")
                    print("brk: {}".format(brk))
                    break
                elif len(brk) == 0: #we don't need to split because they are continuous (e.g., 0000_0001)
                    print(" ... updating"),
                    time_inner = time.time()
                    dset[label_idx[0]-1:label_idx[-1], label_idx[0]-1:label_idx[-1]] = mat
                    print("took {0:.2f} sec ({1:.2f} min)".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
                else:
                    brk = brk[0]
                    sub1 = label_idx[[0, brk]] - 1 #index into large 0-based matrix, first subset
                    sub2 = label_idx[[brk+1,-1]] - 1 #index into large 0-based matrix, second subset
                    print(" ... updating"),
                    time_inner = time.time()
                    #update the hdf5 file with the four sets portions of the submatrix
                    dset[sub1[0]:sub1[1]+1,sub1[0]:sub1[1]+1] = mat[0:brk+1,0:brk+1] #some of this is redundant, can we be smarter?
                    dset[sub1[0]:sub1[1]+1,sub2[0]:sub2[1]+1] = mat[0:brk+1,brk+1:]
                    dset[sub2[0]:sub2[1]+1,sub1[0]:sub1[1]+1] = mat[brk+1:,0:brk+1]
                    dset[sub2[0]:sub2[1]+1,sub2[0]:sub2[1]+1] = mat[brk+1:,brk+1:]
                    print("took {0:.2f} sec ({1:.2f} min)".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
        ## ---------------------------------------------------------------------------------------------------------------------------- ##
        elif intermediate_mat_format == "hdf5": #TODO: this is currently a hack on top of the code here, to test hdf5 with an existing dataset
            if idx == 0: #create the hdf file
                hdf5_fname = os.path.join(os.path.dirname(file),os.path.basename(file).split("_")[0] + tag + "_cnctm_mat_subsets.hdf5")
                import h5py
                f = h5py.File(hdf5_fname,'a')
                print(" ... created hdf5 file with compression level {0} ({1})".format(compression_level,hdf5_fname))
            print(" ... updating")
            # one dataset per subset; skip if this subset was already stored
            if subset_txt in f:
                print("  {} dataset already exists".format(subset_txt))
            else:
                dset = f.create_dataset(subset_txt, data = sparse.lil_matrix(pickle.load(open(file,'rb'))).toarray(),
                                        compression = 'gzip', compression_opts = compression_level, shuffle=shuffle,
                                        fletcher32=fletcher32)
        elif intermediate_mat_format == "mmap": #TODO: cleanup messy if statements
            # consider switching to open_memmap in np.lib.format http://stackoverflow.com/questions/36749082/load-np-memmap-without-knowing-shape
            mmap_fname = os.path.join(os.path.dirname(file),
                                      os.path.basename(file).split("_")[0] + tag + "_cnctm_mat_single.dat")
            if CLOBBER and os.path.exists(mmap_fname) and idx == 0:
                os.remove(mmap_fname)
            if os.path.exists(mmap_fname) and idx == 0:
                print("  intermediate mmap file already exists, skipping creation (set CLOBBER=True to overwrite) {}".format(mmap_fname))
                skip_mat_update = True
            else:
                if idx == 0:
                    mmap_mat = np.memmap(mmap_fname,mode = 'w+', shape=(label_max,label_max),dtype=np.float32)
                    print(" ... created np.memmap file ({})".format(mmap_fname))
            if not skip_mat_update:
                print(" ... updating"),
                time_inner = time.time()
                if file.split(".")[-1] == "pickle": #we just need to read the binary blob
                    mmap_mat[np.ix_((lookup_row),(lookup_col))] = sparse.lil_matrix(pickle.load(open(file,'rb'))).toarray()
                elif file.split(".")[-1] == "txt": #we need to pull from the raw txt ouptut from mrtrix
                    mmap_mat[np.ix_((lookup_row), (lookup_col))] = pd.read_csv(file, sep=" ", header=None, dtype=np.float32).values
                print("took {0:.2f} sec ({1:.2f} min)".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
    print("[All files mapped to intermediate file in {0:.2f} seconds ({1:.2f} minutes)]\n".format(time.time() - start_time,
                                                                                               (time.time() - start_time) / 60.))
    # post-processing per intermediate format: write the combined result to disk
    if intermediate_mat_format == "pickle" or intermediate_mat_format == "mtx":
        mtx_fname = os.path.join(os.path.dirname(file),
                                 os.path.basename(file).split("_")[0] + tag + "_cnctm_mat_sparse_single.mtx")
        io.mmwrite(mtx_fname,mat)
    elif intermediate_mat_format == "mmap":
        # convert the dense memmap into a compressed hdf5 file
        import h5py
        if CLOBBER and os.path.exists(hdf5_fname):
            os.remove(hdf5_fname)
            print(" ... old hdf5 file removed ({})".format(hdf5_fname))
        time_inner = time.time()
        print(" ... reading memmapped data from file ({})".format(mmap_fname))
        mmap_mat = np.memmap(mmap_fname, mode='r', shape=(label_max,label_max), dtype=np.float32)
        print("     Matrix shape/type: {}/{}".format(mmap_mat.shape,mmap_mat.dtype))
        #TODO: these fail if the file already exists
        if split_gm_wm and wm_label_mapping_file is not None:
            # split the full matrix into gm, wm, and gm2wm blocks using the wm start label
            hdf5_fname = mmap_fname.split(".")[0].split("_single")[0] + "_gm_wm" + ".hdf5"
            f = h5py.File(hdf5_fname, 'a')
            print(" ... creating hdf5 file with compression level {0} ({1})".format(compression_level, hdf5_fname))
            try:
                wm_start_num = np.ndarray.flatten(pd.read_csv(wm_label_mapping_file, sep=",", header=None).values)
                gm = f.create_dataset("gm", data = mmap_mat[0:wm_start_num[0]-1,0:wm_start_num[0]-1],dtype=mmap_mat.dtype,
                                      shape=(mmap_mat[0:wm_start_num[0]-1,0:wm_start_num[0]-1].shape), compression='gzip',
                                      compression_opts=compression_level, shuffle=shuffle, fletcher32=fletcher32)
                wm = f.create_dataset("wm", data = mmap_mat[wm_start_num[0]-1:,wm_start_num[0]-1:],dtype=mmap_mat.dtype,
                                      shape=(mmap_mat[wm_start_num[0]-1:,wm_start_num[0]-1:].shape), compression='gzip',
                                      compression_opts=compression_level, shuffle=shuffle, fletcher32=fletcher32)
                gm2wm = f.create_dataset("gm2wm", data = mmap_mat[0:wm_start_num[0]-1,wm_start_num[0]-1:],dtype=mmap_mat.dtype,
                                      shape=(mmap_mat[0:wm_start_num[0]-1,wm_start_num[0]-1:].shape), compression='gzip',
                                      compression_opts=compression_level, shuffle=shuffle, fletcher32=fletcher32)
                print(" ... Datasets 'gm','wm', and 'gm2wm' created")
            except:
                print(" ... Did not change datasets 'gm' and 'wm'")
        else:
            hdf5_fname = mmap_fname.split(".")[0] + ".hdf5"
            f = h5py.File(hdf5_fname, 'a')
            print(" ... creating hdf5 file with compression level {0} ({1})".format(compression_level, hdf5_fname))
            try:
                dset = f.create_dataset("mat", data=mmap_mat, dtype=mmap_mat.dtype, shape=(label_max, label_max),
                                        compression='gzip', compression_opts=compression_level,shuffle=shuffle,
                                        fletcher32=fletcher32)
                if wm_label_mapping_file is not None:
                    wm_start_num = np.ndarray.flatten(pd.read_csv(wm_label_mapping_file, sep=",", header=None).values)
                    dset.attrs['wm_start_val_num'] = wm_start_num
                print(" ... Dataset 'mat' created")
            except:
                print(" ... Did not change the dataset: 'mat'")
        if 'mmap_mat' in locals():
            print(" ... deleting mmap variable")
            del mmap_mat
        print("[Conversion to hdf5 took {0:.2f} sec ({1:.2f} min)]".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
        if delete_intermediate:
            try:
                os.remove(mmap_fname)
                print(" ... removed intermediate file: {}".format(mmap_fname))
            except:
                print("--- Could not remove the mmap file {}---".format(mmap_fname))
    if intermediate_mat_format == "hdf5" \
            or intermediate_mat_format == "hdf5_all_in_one" \
            or intermediate_mat_format == "mmap" \
            or intermediate_mat_format == None:
        # add more data to the hdf5 file
        # attach the node template image, affine, header, and LUT as metadata datasets
        if template_img_file is not None:
            print(" ... adding the template image to the hdf5 file in group 'template_img' ('/data' '/affine' '/header')")
            import nibabel as nb
            img = nb.load(template_img_file)
            try:
                grp = f.create_group('template_img')
                grp.create_dataset('data',data=img.get_data(), shape=img.shape, dtype=img.get_data().dtype,
                                   compression='gzip', compression_opts=compression_level, shuffle=shuffle,
                                   fletcher32=fletcher32)
                grp.create_dataset('affine', data=img.affine, dtype=img.affine.dtype)
            except:
                print(" ... Did not change the dataset: 'template_img/data' or 'template_img/affine'")
            try:
                grp2 = grp.create_group('header')
                for name in img.header:
                    grp2.create_dataset(name, data=img.header[name], dtype=img.header[name].dtype)
            except:
                # NOTE(review): missing print() — this line is a no-op string expression.
                (" ... Did not change the dataset: 'template_img/header'")
        if lut_file is not None:
            try:
                lut_d = pd.read_csv(lut_file, sep=",", header=0).values
                lut = f.create_dataset('lut', data=lut_d, dtype=lut_d.dtype,
                                       compression = 'gzip', compression_opts = compression_level, shuffle=shuffle,
                                       fletcher32=fletcher32)
                if coordinate_space is not None:
                    lut.attrs['coordinate_space'] = coordinate_space
                if wm_label_mapping_file is not None:
                    try:
                        wm_start_num = np.ndarray.flatten(pd.read_csv(wm_label_mapping_file, sep=",", header=None).values)
                        lut.attrs['wm_start_num'] = wm_start_num
                    except:
                        print(" ... Did not change the wm_start_num attribute of 'lut'")
            except:
                print(" ... Did not change the dataset: 'lut'")
        f.flush()
        f.close()
    # NOTE(review): the "mmap" path is absent from this list, so it returns the (empty)
    # sparse accumulator 'mat' rather than the hdf5 filename it just wrote — confirm intent.
    if intermediate_mat_format == "hdf5" \
            or intermediate_mat_format == "hdf5_all_in_one" \
            or intermediate_mat_format == None:
        return hdf5_fname
    else:
        return mat
# streamlined version of the function, takes out the "work in progress" code for different types of storage
# this is the one to use...
def combine_connectome_matrices_sparse(connectome_files_list, connectome_files_index_list, label_max = None,
template_img_file = None, lut_file = None,
compression_level = 6, shuffle = False,
fletcher32 = True, CLOBBER = False,
delete_intermediate = True, wm_label_mapping_file = None,
coordinate_space = None, split_gm_wm = False):
"""
Combine mrtrix3 tck2connectome generated submatrices into a single matrix, store as an hdf5 file with useful metadata
or as a matrix file if the overall matrix is small enought to be loaded into memory.
:param connectome_files_list: list of connectome files, each a portion of the full matrix
:param connectome_files_index_list: list of files that contain the label indices of each submatrix to the full matrix
:param label_max: <not implemented>
:param template_img_file: the master node file for the full matrix
:param lut_file: lut for the full matrix, (label_id, x_coord, y_coord, z_coord)
:param compression_level: compression level for gzip of hdf5 {0...9}, default = 6
:param shuffle: shuffle filter for hdf5, seems to increase disk usage rather than decrease with this data
:param fletcher32: checksum implementation for hdf5
:param CLOBBER: <not fully implemented yet>
:param delete_intermediate: <not implemented here> {True, False} remove intermediate files
:param wm_label_mapping_file: file that contains the mapping of the first "wm label" and the number of wm labels (i.e., the start label_id for the include_mask file)
:param coordinate_space: {'voxel','scanner'} whether or not the lut is in voxel or scanner space (voxel preferred), only for metadata in hdf5
:return: matrix or matrix file name (hdf5)
"""
print(template_img_file)
import pandas as pd
import numpy as np
from scipy import io, sparse
import os
try:
import cPickle as pickle
except:
import pickle
if not isinstance(connectome_files_index_list, list):
connectome_files_index_list = [connectome_files_index_list]
if not isinstance(connectome_files_list, list):
connectome_files_list = [connectome_files_list]
print("\n[Combining connectome files]\nConnectome files include: ")
#first check the indices so you know how large things are
if label_max is None:
label_max = 0
for idx, file in enumerate(connectome_files_index_list):
print(file)
label_idx = np.ndarray.flatten(pd.read_csv(file, header = 0, dtype=np.uint32).values) #read quickly, then break out of the array of arrays of dimension 1
if np.max(label_idx) > label_max:
label_max = np.max(label_idx)
mat = sparse.lil_matrix((label_max,label_max))
print("--------------------------------------------------------------\nConnectome combination from {0} files in progress:".format(len(connectome_files_list)))
print(" Attempting to construct a {0}x{0} sparse matrix from {1} connectome files".format(label_max,len(connectome_files_list)))
import time
#assume that the file list and the index list are in the same order, now we can build the matrix - USE NATURAL SORT!
# try:
skip_mat_update = False # confusing, but don't have time to rewrite how CLOBBER is handling things
start_time = time.time()
for idx, file in enumerate(connectome_files_list):
if "assignAll" in file:
tag = "_assignAll"
elif "assignEnd" in file:
tag = "_assignEnd"
if "length" in file:
tag = "_length" + tag
subset_txt = "_".join(file.split("_subset_")[1].split("_")[0:2])
print("[{0}/{3}]:\n matrix: {1}\n index : {2}".format(idx+1,
file.split('/')[-1],
connectome_files_index_list[idx].split('/')[-1],
len(connectome_files_list)))
label_idx = np.ndarray.flatten(pd.read_csv(connectome_files_index_list[idx], header = 0, dtype=np.uint32).values)
lookup_col = label_idx - 1 #assuming that the start index is 1, which is a bad assumption?
lookup_row = lookup_col.T
# there is extra information at the self-self (eg, subset 0000 with itself) conncetome because it is stored in
# multiple subsets, no known way to get around this duplication, so it is ignored (and overwritten in the matrix
# file so it ends up having no effect
print(" ... subset {0} ({1}x{1} subset)".format(subset_txt,len(lookup_row)))
if idx == 0: #create the hdf file
old_mat_file_exists = False
hdf5_fname = os.path.join(os.path.dirname(file),os.path.basename(file).split("_")[0] + tag + "_cnctm_mat_single_None.hdf5")
import h5py
f = h5py.File(hdf5_fname,'a')
print(" ... created hdf5 file with compression level {0} ({1})".format(compression_level,hdf5_fname))
try:
dset = f.create_dataset('mat', shape=(label_max,label_max),dtype=np.float32,
compression='gzip', compression_opts=compression_level, shuffle=shuffle,
fletcher32=fletcher32)
except:
print(" ... hdf5 file already exits, not changing ({})".format(os.path.basename(hdf5_fname)))
old_mat_file_exists = True
if not old_mat_file_exists:
# determine where we switch from one continuous subset of labels another (breakpoint)
brk = np.nonzero(np.logical_not(np.diff(label_idx)==1))
brk = np.ndarray.flatten(np.array(brk)) #flatten from tuple
print(" ... loading the data from the subset matrix file"),
time_inner = time.time()
if file.split(".")[-1] == "pickle": # we just need to read the binary blob
mat = sparse.lil_matrix(pickle.load(open(file, 'rb'))).toarray()
elif file.split(".")[-1] == "txt": # we need to pull from the raw txt ouptut from mrtrix
mat = pd.read_csv(file, sep=" ", header=None, dtype=np.float32).values
print(" (took {0:.2f} sec ({1:.2f} min))".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
if len(brk) > 1: # 2x check that there is only a single change from continuous index
print("Oh shit.... indices are not correct!")
print("brk: {}".format(brk))
break
elif len(brk) == 0: #we don't need to split because they are continuous (e.g., 0000_0001)
print(" ... updating"),
time_inner = time.time()
dset[label_idx[0]-1:label_idx[-1], label_idx[0]-1:label_idx[-1]] = mat
print("took {0:.2f} sec ({1:.2f} min)".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
else:
brk = brk[0]
sub1 = label_idx[[0, brk]] - 1 #index into large 0-based matrix, first subset
sub2 = label_idx[[brk+1,-1]] - 1 #index into large 0-based matrix, second subset
print(" ... updating"),
time_inner = time.time()
#update the hdf5 file with the four portions of the submatrix
dset[sub1[0]:sub1[1]+1,sub1[0]:sub1[1]+1] = mat[0:brk+1,0:brk+1] #some of this is redundant, can we be smarter?
dset[sub1[0]:sub1[1]+1,sub2[0]:sub2[1]+1] = mat[0:brk+1,brk+1:]
dset[sub2[0]:sub2[1]+1,sub1[0]:sub1[1]+1] = mat[brk+1:,0:brk+1]
dset[sub2[0]:sub2[1]+1,sub2[0]:sub2[1]+1] = mat[brk+1:,brk+1:]
print("took {0:.2f} sec ({1:.2f} min)".format(time.time() - time_inner, (time.time() - time_inner) / 60.))
print("[All files mapped to intermediate file in {0:.2f} seconds ({1:.2f} minutes)]\n".format(time.time() - start_time,
(time.time() - start_time) / 60.))
## add metadata to the file
if template_img_file is not None:
print(" ... adding the template image to the hdf5 file in group 'template_img' ('/data' '/affine' '/header')")
import nibabel as nb
img = nb.load(template_img_file)
try:
grp = f.create_group('template_img')
grp.create_dataset('data',data=img.get_data(), shape=img.shape, dtype=img.get_data().dtype,
compression='gzip', compression_opts=compression_level, shuffle=shuffle,
fletcher32=fletcher32)
grp.create_dataset('affine', data=img.affine, dtype=img.affine.dtype)
except:
print(" ... Did not change the dataset: 'template_img/data' or 'template_img/affine'")
try:
grp2 = grp.create_group('header')
for name in img.header:
grp2.create_dataset(name, data=img.header[name], dtype=img.header[name].dtype)
except:
(" ... Did not change the dataset: 'template_img/header'")
if lut_file is not None:
try:
lut_d = pd.read_csv(lut_file, sep=",", header=0).values
lut = f.create_dataset('lut', data=lut_d, dtype=lut_d.dtype,
compression = 'gzip', compression_opts = compression_level, shuffle=shuffle,
fletcher32=fletcher32)
if coordinate_space is not None:
lut.attrs['coordinate_space'] = coordinate_space
if wm_label_mapping_file is not None:
try:
wm_start_num = np.ndarray.flatten(pd.read_csv(wm_label_mapping_file, sep=",", header=None).values)
lut.attrs['wm_start_num'] = wm_start_num
except:
print(" ... Did not change the wm_start_num attribute of 'lut'")
except:
print(" ... Did not change the dataset: 'lut'")
f.flush()
f.close()
return hdf5_fname
def tck2connectome_collection(tck_file, node_files, tck_weights_file = None, assign_all_mask_img = None, nthreads = 8,
                              stat_mean_length = False, CLOBBER = False):
    """
    Run mrtrix's tck2connectome once per node (label) file and collect the output names.
    :param tck_file: tractogram (.tck) to map onto the node files
    :param node_files: single label image or list of label images
    :param tck_weights_file: optional per-streamline weights passed via -tck_weights_in
    :param assign_all_mask_img: if not None, then we change the call to include all voxels in the mask rather than just endpoints (useful for inclusion of wm mask)
                                automatically runs -assignment_all_voxels AND -assignment_end_voxels (files in 0, 1 of returned variable respectively)
    :param nthreads: number of threads for tck2connectome
    :param stat_mean_length: return the mean length between nodes, rather than the streamline/weighted streamline count
    :param CLOBBER: if False, existing output files are left untouched
    :return: cnctm_files, or [assignEnd_files, assignAll_files] when assign_all_mask_img is set (and not stat_mean_length)
    """
    import subprocess
    import os
    out_files = []
    out_files_assignEnd = []
    if not isinstance(node_files, list):
        node_files = [node_files]  # make iterable
    for idx, node_file in enumerate(node_files):
        out_file = node_file.split(".")[0]
        out_file_assignEnd = node_file.split(".")[0]
        # TODO: hard-coded, machine-specific binary location; consider making this a parameter
        cmd = ["/home/chris/Documents/code/mrtrix3_devel/bin/tck2connectome", tck_file, node_file,
               "-nthreads", str(nthreads), "-force"]
        cmd_assignEnd = list(cmd) #need to copy the list, otherwise we share it :-(
        if tck_weights_file is not None:
            # NOTE(review): cmd_assignEnd was copied before this flag is added, so the
            # end-voxel run never receives the weights -- confirm this is intended
            out_file = out_file + "_weights"
            cmd.extend(["-tck_weights_in", tck_weights_file])
        if assign_all_mask_img is not None:
            # we also need to run the assignEnd, so create another variable to hold this and the command
            if stat_mean_length:
                out_file = out_file + "_length"
                out_file_assignEnd = out_file_assignEnd + "_length" #these next three lines don't end up doing anything
                cmd_assignEnd.extend(["-stat_edge", "mean", "-scale_length"])
                cmd.extend(["-stat_edge", "mean", "-scale_length"])
            out_file_assignEnd = out_file_assignEnd + "_assignEnd" + "_cnctm_mat.txt"
            cmd_assignEnd.extend(["-assignment_end_voxels", out_file_assignEnd])
            out_file = out_file + "_assignAll" + "_cnctm_mat.txt"
            cmd.extend(["-assignment_all_voxels", out_file])
            out_files_assignEnd.append(out_file_assignEnd)
        else:
            if stat_mean_length:
                out_file = out_file + "_length"
                cmd.extend(["-stat_edge", "mean", "-scale_length"])
            out_file = out_file + "_assignEnd" + "_cnctm_mat.txt"
            cmd.extend(["-assignment_end_voxels", out_file])
        print("Generating connectome file: {}".format(out_file))
        print("")
        print(" ".join(cmd))
        if assign_all_mask_img is not None and not(stat_mean_length):
            print(" ".join(cmd_assignEnd))
        if os.path.exists(out_file) and not CLOBBER:
            # bug fix: previously this message was printed but the commands were
            # executed anyway (with -force), overwriting the existing output
            print("The file already exists, not recreating it. (set CLOBBER=True if you want to overwrite)")
        else:
            subprocess.call(cmd)
            if assign_all_mask_img is not None and not(stat_mean_length): #we don't need to call it twice if we are looking for the length parameter
                subprocess.call(cmd_assignEnd)
        out_files.append(out_file)
    if assign_all_mask_img is not None and not(stat_mean_length):
        return [out_files_assignEnd, out_files]
    else:
        return out_files
def mask2labels_multifile(mask_img, out_file_base = None, max_num_labels_per_mask = 1000, output_lut_file = False,
                          decimals = 2, start_idx = 1, coordinate_space = "scanner", cubed_subset_dim = None):
    """
    Convert a simple binary mask to voxels labeled 1..n, split across pairwise
    "subset" files so that no single label file holds more than roughly
    max_num_labels_per_mask labels. Output images are uint64.
    :param mask_img: any 3d image format that nibabel can read
    :param out_file_base: base output name; each file is appended with _label_subset_?_?.nii.gz
    :param max_num_labels_per_mask: approximate cap on labels per output file
    :param output_lut_file: currently unused -- lut csv files are always written (TODO confirm intent)
    :param decimals: number of decimals for output lut file
    :param start_idx: value to start index at, normally =1, unless you are combining masks...?
    :param coordinate_space: {'voxel','scanner'} space for the lut coordinate files
    :param cubed_subset_dim: if >1, label cubes of this side length instead of single voxels
    :return: (out_file_names, out_file_lut_names, sub_vox_locs); sub_vox_locs is
             None when cubed_subset_dim was used
    """
    import nibabel as nb
    import numpy as np
    import itertools
    if out_file_base is None:
        import os
        out_file_base = os.path.join(os.path.dirname(mask_img),os.path.basename(mask_img).split(".")[0]+"_index_label")
    img = nb.loadsave.load(mask_img)
    d = img.get_data().astype(np.uint64)
    aff = img.affine
    header = img.header
    all_vox_locs = np.array(np.where(d==1)).T
    num_vox = np.shape(all_vox_locs)[0]
    # bug fix: previously unbound in the cubed branch, crashing at the return below
    sub_vox_locs = None
    if cubed_subset_dim is not None and cubed_subset_dim > 1:
        print("Generating cubed subsets of your binary input mask")
        cubed_3d = get_cubed_array_labels_3d(np.shape(d), cubed_subset_dim)
        d = np.multiply(d, cubed_3d).astype(np.uint32) #apply the cube to the data
        #extremely fast way of re-assigning values to consecutive labels
        palette = np.unique(d)
        key = np.arange(0, len(palette)) + start_idx - 1 #offset as required
        key[0] = 0 #retain 0 as the first index, since this is background (was duplicated)
        index = np.digitize(d.ravel(), palette, right=True)
        d = key[index].reshape(d.shape)
        num_sub_arrays = int(np.ceil(max_num_labels_per_mask / 2)) #just use this value, since we will use the sub-arrays not individual voxels
        cube_label_idxs = np.array_split(np.unique(d)[np.nonzero(np.unique(d))],num_sub_arrays)
        d_orig = np.copy(d)
    else: #we are doing this voxel-wise, go for whole hog!
        num_sub_arrays = int(np.ceil(num_vox / (max_num_labels_per_mask / 2)))
        sub_vox_locs = np.array_split(all_vox_locs, num_sub_arrays)
    out_file_names = []
    out_file_lut_names = []
    # every unordered pair of sub-arrays becomes one output label file
    all_sets = list(itertools.combinations(np.arange(0,num_sub_arrays),2))
    print("Total number of combinations: {}".format(len(all_sets)))
    for subset in all_sets: #TODO: fix for cubed subsets, since it does not work :-(
        fir = subset[0]
        sec = subset[1]
        tail = "_label_subset_" + str(fir) + "_" + str(sec)
        out_file = out_file_base + tail + ".nii.gz"
        out_file_lut = out_file_base + tail + "_coords.csv"
        print(out_file)
        print(out_file_lut)
        d[d > 0] = 0 # don't need this data array anymore, so zero it and re-use
        label_idx = start_idx
        if cubed_subset_dim is not None and cubed_subset_dim > 1:
            #we asked for cubes, so use them but have to refer to the volumetric cube data rather than the voxel locations
            superset = np.concatenate((cube_label_idxs[fir], cube_label_idxs[sec]), axis = 0)
            for superset_idx in superset:
                d[d_orig == superset_idx] = label_idx
                label_idx += 1
            d = d.astype(np.uint64)
        else:
            superset = np.concatenate((sub_vox_locs[fir], sub_vox_locs[sec]), axis = 0)
            for vox in superset:
                d[vox[0], vox[1], vox[2]] = label_idx
                label_idx += 1
        img_out = nb.Nifti1Image(d, aff, header=header)
        img_out.set_data_dtype("uint64")
        nb.loadsave.save(img_out, out_file)
        # bug fix: string identity checks ("is") replaced with equality checks
        if coordinate_space == "voxel":
            np.savetxt(out_file_lut, superset, delimiter=",", fmt="%d")
        elif coordinate_space == "scanner":
            scanner_coord = np.round(nb.affines.apply_affine(aff, superset), decimals=decimals)
            np.savetxt(out_file_lut, scanner_coord, delimiter=",", fmt="%." + str(decimals) + "f")
        out_file_names.append(out_file)
        out_file_lut_names.append(out_file_lut)
    return out_file_names, out_file_lut_names, sub_vox_locs
def get_cubed_array_labels_3d(shape, cube_subset_dim = 10):
    """
    Break a 3d array into labeled cubes of ~cube_subset_dim voxels per side.
    Works in a supercube of the largest dimension; extras beyond the requested
    shape are thrown away at the end if the array is not a perfect cube.
    :param shape: - 3d matrix shape
    :param cube_subset_dim: - size, in voxels, of one dimension of cube
    :return: - uint64 matrix of labeled cubes (1..n_cubes), trimmed to `shape`
    """
    import numpy as np
    #we make the matrix cubic to make the calculations easier, toss out the extras at the end
    max_dim = np.max(shape)
    # bug fix: float() guards against integer floor division under Python 2,
    # where max_dim / cube_subset_dim truncates before the ceil (miscounting cubes)
    num_cubes_per_dim = np.ceil(max_dim / float(cube_subset_dim)).astype(int)
    d = np.zeros((max_dim, max_dim, max_dim))
    #determine the size of each cube based on the number of cubes that we will cut the supercube into (yes, this is basically the reverse of above)
    x_span = np.ceil(max_dim / float(num_cubes_per_dim)).astype(int)
    y_span = x_span
    z_span = x_span
    print("Voxel span for single cube dimension: {}".format(x_span))
    cube_idx = 0
    # label the cubes in x-major order, starting from 1 (0 stays background)
    for ix in np.arange(0, num_cubes_per_dim):
        for iy in np.arange(0, num_cubes_per_dim):
            for iz in np.arange(0, num_cubes_per_dim):
                cube_idx += 1
                x0 = ix * x_span
                y0 = iy * y_span
                z0 = iz * z_span
                d[x0 : x0 + x_span, y0 : y0 + y_span, z0 : z0 + z_span] = cube_idx
    return (d[0:shape[0], 0:shape[1], 0:shape[2]]).astype(np.uint64) #return only the dims that we requested, discard the extras at the edges
def gmwmvox2mesh(mask_img, mesh_format = "obj"):
    """
    Extract an isosurface mesh (level 0) from a mask image via marching cubes.
    :param mask_img: any image format that nibabel can read
    :param mesh_format: currently unused; kept for interface compatibility (TODO: implement export)
    :return: (verts, faces) as returned by skimage.measure.marching_cubes
    """
    from skimage import measure
    import nibabel as nb
    img = nb.load(mask_img)
    d = img.get_data()
    # unused affine/header locals removed; only the raw data is meshed here,
    # so the vertices are in voxel coordinates
    verts, faces = measure.marching_cubes(d, 0)
    return verts, faces
def mask2labels(mask_img, out_file = None, output_lut_file = False, decimals = 2, start_idx = 1):
    """
    Convert simple binary mask to voxels that are labeled from start_idx..start_idx+n-1.
    Outputs as uint64 in the hopes that you don't have over the max (4294967295)
    (i don't check, that is a crazy number of voxels!)
    :param mask_img: any 3d image format that nibabel can read
    :param out_file: nifti1 format output; auto-named next to mask_img when None
    :param output_lut_file: output a lut csv file for all voxels True/False (this could be large!)
    :param decimals: number of decimals for output lut file
    :param start_idx: value to start index at, normally =1, unless you are combining masks...?
    :return: (out_file, next_label_idx) where next_label_idx is one past the last label used
    """
    # bug fix: os was imported only inside the "out_file is None" branch, so the
    # lut branch below raised UnboundLocalError when out_file was given explicitly
    import os
    import nibabel as nb
    import numpy as np
    if out_file is None:
        out_file = os.path.join(os.path.dirname(mask_img),os.path.basename(mask_img).split(".")[0]+"_index_label.nii.gz")
    img = nb.loadsave.load(mask_img)
    d = img.get_data().astype(np.uint64)
    aff = img.affine
    header = img.header
    vox_locs = np.array(np.where(d==1)).T
    # label each nonzero voxel with consecutive integers
    for vox in vox_locs:
        d[vox[0], vox[1], vox[2]] = start_idx
        start_idx += 1
    if output_lut_file:
        lut_file = os.path.join(os.path.dirname(mask_img),
                                os.path.basename(mask_img).split(".")[0] + "_index_label_lut.csv")
        lut = np.zeros((np.shape(vox_locs)[0], np.shape(vox_locs)[1] + 1))
        lut[:,1:] = nb.affines.apply_affine(aff,vox_locs) # voxel -> scanner coordinates
        lut[:, 0] = np.arange(1, np.shape(vox_locs)[0] + 1)
        np.savetxt(lut_file, lut, header = "index,x_coord,y_coord,z_coord",delimiter=",",fmt="%." + str(decimals) +"f")
    img_out = nb.Nifti1Image(d, aff, header=header)
    img_out.set_data_dtype("uint64")
    # bug fix: start_idx is now one past the last label; report the true max label
    print("Max label value/num voxels: {}".format(str(start_idx - 1)))
    nb.loadsave.save(img_out,out_file)
    return out_file, start_idx
def combine_and_label_2masks(mask1,mask2, out_file1 = None, out_file2 = None, output_lut_files = False, decimals = 2, start_idx = 1):
    """
    Label two binary masks with one continuous index (mask2's labels continue
    where mask1's end) and write the merged label image next to mask1.
    :param mask1: first binary mask image (labels start at start_idx; wins where masks overlap)
    :param mask2: second binary mask image (labels continue after mask1's)
    :param out_file1: optional output name for mask1's individual label file
    :param out_file2: optional output name for mask2's individual label file
    :param output_lut_files: also write per-mask voxel lut csv files
    :param decimals: decimals for the lut files
    :param start_idx: first label value for mask1
    :return: filename of the joined label image
    """
    import os
    import nibabel as nb
    out_file1, end_idx = mask2labels(mask1, out_file = out_file1 , output_lut_file = output_lut_files , decimals = decimals, start_idx = start_idx)
    out_file2, end_idx = mask2labels(mask2, out_file = out_file2 , output_lut_file = output_lut_files , decimals = decimals, start_idx = end_idx)
    f1 = nb.load(out_file1)
    f2 = nb.load(out_file2)
    d_f1 = f1.get_data()
    d_f2 = f2.get_data()
    # mask1 labels take precedence where the two masks overlap
    d_f2[d_f1>0] = d_f1[d_f1>0]
    out_img = nb.Nifti1Image(d_f2,f2.affine,header=f2.header)
    out_file = os.path.join(os.path.dirname(mask1),os.path.basename(mask1).split(".")[0]+"_joined_index_label.nii.gz")
    nb.save(out_img,out_file)
    # bug fix: previously returned None, making the output path hard to recover
    return out_file
def plot_coo_matrix(m):
    """Plot the sparsity pattern of a sparse matrix.

    Nonzero entries are drawn as white squares on a black background with the
    row axis pointing down (matrix orientation).
    Taken from: http://stackoverflow.com/questions/22961541/python-matplotlib-plot-sparse-matrix-pattern
    :param m: scipy sparse matrix (or anything coo_matrix() accepts)
    :return: the matplotlib figure
    """
    import matplotlib.pyplot as plt
    from scipy.sparse import coo_matrix
    if not isinstance(m, coo_matrix):
        m = coo_matrix(m)
    fig = plt.figure()
    ax = fig.add_subplot(111, facecolor='black')
    ax.plot(m.col, m.row, 's', color='white', ms=1)
    ax.set_xlim(0, m.shape[1])
    ax.set_ylim(0, m.shape[0])
    ax.set_aspect('equal')  # was set twice before; once is enough
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.invert_yaxis()  # row 0 at the top, matching matrix layout
    ax.set_xticks([])
    ax.set_yticks([])
    return fig
def matrix2voxel_map(label_idxs, matrix_file, lut_file = None, template_node_img=None, out_file_base = None, apply_inv_affine = False):
    """
    Create visitation map or maps for single or multiple seed labels from template_node_img. Uses the information available in the .hdf5 matrix
    file if properly formatted .hdf5 file provided
    :param label_idxs: label or labels (from template_node_img / lut) to visualise connectivity for, output will be in separate files
    :param matrix_file: the matrix that we are to pull the data from (.hdf5/.dat, .pickle, or .mtx)
    :param lut_file: look up table of indices and voxel locations (only currently used to confirm that sparse matrix is the correct size, not checking location at the moment); ignored for hdf5 input
    :param template_node_img: the full image that was used to create the matrix that the label is being extracted from; ignored for hdf5 input
    :param out_file_base: output filename base, leave blank for auto-generated filename in same location as the input
    :param apply_inv_affine: apply the inverse of the affine to remap the lut values into voxel space
    :return: list of output filenames, one per requested label
    """
    import nibabel as nb
    from scipy import io, sparse
    import numpy as np
    from pandas import read_csv
    mat_format = matrix_file.split(".")[-1]
    if mat_format == "dat" or mat_format == "hdf5":
        # metadata (lut, template image, affine) is embedded in the hdf5 file
        import h5py
        f = h5py.File(matrix_file, 'r')
        mat = f['mat']
        lut = f['lut'][:]
        coordinate_space = f['lut'].attrs['coordinate_space']
        d_orig = f['/template_img/data'][:]
        #header_orig = f['/template_img/header']
        aff = f['template_img/affine'][:]
        print(coordinate_space)  # bug fix: was a Python-2 print statement (SyntaxError on py3)
        if coordinate_space != "voxel":  # bug fix: was a string identity test ("is not")
            lut[:, 1:] = nb.affines.apply_affine(np.linalg.inv(aff), lut[:, 1:]).astype(int)
        header = None #can't reconstruct header as of yet, possibly change the way it is stored in the hdf5 file?
        out_file_base = matrix_file.split(".")[0] + "_cnctm_label_"
    else:
        if out_file_base is None:
            out_file_base = template_node_img.split(".")[0] + "_cnctm_label_"
        img = nb.load(template_node_img)
        d_orig = img.get_data()
        aff = img.affine
        header = img.header
        lut = read_csv(lut_file, sep=",", header=0).values
        if apply_inv_affine:
            lut[:, 1:] = nb.affines.apply_affine(np.linalg.inv(aff), lut[:, 1:])
            lut = lut.astype(int)
        if mat_format == "pickle":
            try:
                import cPickle as pickle
            except:
                import pickle
            mat = pickle.load(open(matrix_file, 'rb'))
        elif mat_format == "mtx":
            mat = sparse.lil_matrix(io.mmread(matrix_file))
    if not np.iterable(label_idxs):
        label_idxs = np.array([label_idxs])
    out_files = []
    print("Re-labeling indices in template file (1-based indexing): ")
    for label_idx in label_idxs:
        print(" label index: {}".format(label_idx))
        d = np.copy(d_orig)
        out_file = out_file_base + str(label_idx) + "_map.nii.gz"
        res = np.zeros(mat.shape[0])
        # order the vector so that it follows the ordering of the lut (0..n);
        # only the upper triangle is stored, so stitch column + row together
        if mat_format == "dat" or mat_format == "hdf5":
            d_col = mat[0:label_idx -1,label_idx -1]
            d_row = mat[label_idx - 1, label_idx- 1 :]
        else:
            d_col = mat[0:label_idx -1,label_idx -1].toarray().ravel()
            d_row = mat[label_idx - 1, label_idx- 1 :].toarray().ravel()
        res[0:label_idx-1] = d_col # top of the column, not including diag
        res[label_idx-1:] = d_row # end of the row, including the diagonal
        if not (lut.shape[0] == len(res)):
            print("Shit, something went wrong! Your lut and matrix don't seem to match")
        palette= np.unique(d) # INCLUDES 0
        key = np.zeros(palette.shape)
        key[1:] = res #leave the 0 for the first index, i.e., background
        index = np.digitize(d.ravel(), palette, right=True)
        d = key[index].reshape(d.shape)
        img_out = nb.Nifti1Image(d,aff,header = header)
        img_out.set_data_dtype('float32')
        nb.save(img_out,out_file)
        out_files.append(out_file)
        print(" {}\n".format(out_file))
    # bug fix: the handle was previously closed only when mat_format == "mmap",
    # which never matched the formats that actually open a file (handle leak)
    if mat_format == "dat" or mat_format == "hdf5":
        f.close()
    return out_files
def map_values_to_label_file(values_label_lut_csv_fname, label_img_fname,
                             out_mapped_label_fname=None,
                             value_colName="Value",
                             label_idx_colName="Index",
                             SKIP_ZERO_IDX=True,
                             MATCH_VALUE_TO_LABEL_VIA_MATRIX=False,
                             VERBOSE=False):
    """
    Map from values/index dataframe to labels in label_fname (for visualising results in label space)
    #TODO: confirm behavior when the .nii file has MORE indices than expected given the matrix
    :param values_label_lut_csv_fname: csv file mapping values to index in label_img_fname
    :param label_img_fname: label file (nii or other)
    :param out_mapped_label_fname: output file name (nii/nii.gz only); auto-derived from label_img_fname when None
    :param value_colName: name of column with values (default: Value)
    :param label_idx_colName: name of column with index numbers (default: Index)
    :param SKIP_ZERO_IDX: skips 0 (usually background) {True, False}
    :param MATCH_VALUE_TO_LABEL_VIA_MATRIX: if true, values_label_lut_csv_fname is a matrix with first column = labels, 2nd = values
    :param VERBOSE: print each (index, value) pair as it is mapped
    :return: out_mapped_label_fname
    """
    import numpy as np
    import pandas as pd
    import os
    if out_mapped_label_fname is None:
        out_mapped_label_fname = os.path.splitext(os.path.splitext(label_img_fname)[0])[
                                     0] + "_value_mapped.nii.gz"  # takes care of two . extensions if necessary
    if not MATCH_VALUE_TO_LABEL_VIA_MATRIX:  # we expect a csv file
        df = pd.read_csv(values_label_lut_csv_fname)
        values = df[value_colName].values
        indices = df[label_idx_colName].values
    else:  # otherwise just a matrix of values
        indices = values_label_lut_csv_fname[:, 0]
        values = values_label_lut_csv_fname[:, 1]
    if SKIP_ZERO_IDX and 0 in indices:
        # bug fix: previously only `indices` was trimmed, which shifted every
        # subsequent value/index pair out of alignment in the loop below
        zero_locs = np.where(indices == 0)
        indices = np.delete(indices, zero_locs)
        values = np.delete(values, zero_locs)
    d, a, h = imgLoad(label_img_fname, RETURN_HEADER=True)
    d_out = np.zeros_like(d).astype(np.float32)
    for idx, index in enumerate(indices):
        if VERBOSE:
            print(index, values[idx])  # bug fix: Python-2 print statement was a py3 SyntaxError
        d_out[d == index] = values[idx]
    niiSave(out_mapped_label_fname, d_out, a, header=h)
    return out_mapped_label_fname
| gpl-3.0 |
ray-project/ray | python/ray/tune/examples/xgboost_example.py | 1 | 3306 | import sklearn.datasets
import sklearn.metrics
import os
from ray.tune.schedulers import ASHAScheduler
from sklearn.model_selection import train_test_split
import xgboost as xgb
from ray import tune
from ray.tune.integration.xgboost import TuneReportCheckpointCallback
def train_breast_cancer(config: dict):
    """Tune trainable: fit an XGBoost classifier on the breast-cancer dataset.

    Evaluation metrics and a ``model.xgb`` checkpoint are reported back to Tune
    through ``TuneReportCheckpointCallback``.
    """
    # Load the dataset and hold out a quarter of it for evaluation.
    features, targets = sklearn.datasets.load_breast_cancer(return_X_y=True)
    x_tr, x_ev, y_tr, y_ev = train_test_split(
        features, targets, test_size=0.25)
    # XGBoost consumes its own DMatrix container.
    dtrain = xgb.DMatrix(x_tr, label=y_tr)
    deval = xgb.DMatrix(x_ev, label=y_ev)
    # Train; the Tune callback reports "eval" metrics and saves the checkpoint.
    xgb.train(
        config,
        dtrain,
        evals=[(deval, "eval")],
        verbose_eval=False,
        callbacks=[TuneReportCheckpointCallback(filename="model.xgb")])
def get_best_model_checkpoint(analysis):
    """Load the best booster found by a Tune run and print a short summary."""
    checkpoint_path = os.path.join(analysis.best_checkpoint, "model.xgb")
    booster = xgb.Booster()
    booster.load_model(checkpoint_path)
    accuracy = 1. - analysis.best_result["eval-error"]
    print(f"Best model parameters: {analysis.best_config}")
    print(f"Best model total accuracy: {accuracy:.4f}")
    return booster
def tune_xgboost():
    """Run an ASHA-scheduled hyperparameter search over XGBoost.

    Returns the ``tune.run`` analysis object for the completed experiment.
    """
    # Constant settings mixed with tunable distributions in one search space.
    search_space = {
        "objective": "binary:logistic",
        "eval_metric": ["logloss", "error"],
        "max_depth": tune.randint(1, 9),
        "min_child_weight": tune.choice([1, 2, 3]),
        "subsample": tune.uniform(0.5, 1.0),
        "eta": tune.loguniform(1e-4, 1e-1),
    }
    # ASHA aggressively terminates under-performing trials early.
    scheduler = ASHAScheduler(
        max_t=10,  # 10 training iterations
        grace_period=1,
        reduction_factor=2)
    return tune.run(
        train_breast_cancer,
        metric="eval-logloss",
        mode="min",
        # You can add "gpu": 0.1 to allocate GPUs
        resources_per_trial={"cpu": 1},
        config=search_space,
        num_samples=10,
        scheduler=scheduler)
# Script entry point: optionally connect to a remote Ray cluster via Ray
# Client, run the hyperparameter search, then load the best model checkpoint.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--server-address",
        type=str,
        default=None,
        required=False,
        help="The address of server to connect to if using "
        "Ray Client.")
    args, _ = parser.parse_known_args()
    # With a server address, all subsequent Tune work runs on the remote cluster.
    if args.server_address:
        import ray
        ray.util.connect(args.server_address)
    analysis = tune_xgboost()
    # Load the best model checkpoint.
    if args.server_address:
        # If connecting to a remote server with Ray Client, checkpoint loading
        # should be wrapped in a task so it will execute on the server.
        # We have to make sure it gets executed on the same node that
        # ``tune.run`` is called on.
        from ray.tune.utils import force_on_current_node
        remote_fn = force_on_current_node(
            ray.remote(get_best_model_checkpoint))
        best_bst = ray.get(remote_fn.remote(analysis))
    else:
        best_bst = get_best_model_checkpoint(analysis)
    # You could now do further predictions with
    # best_bst.predict(...)
| apache-2.0 |
wujinjun/TFbook | chapter5/models-master/autoencoder/VariationalAutoencoderRunner.py | 12 | 1653 | import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
    """Scale features into [0, 1] using statistics fit on the training set only."""
    scaler = prep.MinMaxScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous slice of ``batch_size`` rows from ``data``."""
    offset = np.random.randint(0, len(data) - batch_size)
    return data[offset:offset + batch_size]
# Scale pixel values into [0, 1] using training-set statistics only.
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
# Training hyperparameters.
training_epochs = 20
batch_size = 128
display_step = 1  # print the loss every `display_step` epochs
# n_input=784 matches the flattened MNIST images loaded above
# (presumably 28x28 pixels -- confirm against the dataset).
autoencoder = VariationalAutoencoder(n_input = 784,
                                     n_hidden = 200,
                                     optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches (randomly sampled, so not an exact epoch)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
# Final reconstruction cost on the held-out test set.
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
| gpl-3.0 |
loli/semisupervisedforests | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)

# NOTE(review): `centers` is never used below -- likely leftover from a template.
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Three K-means configurations: good (k=3), over-clustered (k=8), and a
# deliberately bad single random initialisation (n_init=1).
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}

# One 3D scatter figure per estimator, colored by cluster assignment.
fignum = 1
for name, est in estimators.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

    plt.cla()
    est.fit(X)
    labels = est.labels_

    # Axes: petal width / sepal length / petal length (features 3, 0, 2).
    # NOTE(review): np.float is removed in modern NumPy; use float if updating.
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()

# Annotate each species at the centroid of its points.
for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
    # Check BaseEnsemble methods.
    ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
    iris = load_iris()
    ensemble.fit(iris.data, iris.target)

    # Rebuild the estimator list by hand through the protected factory helper:
    # three appended estimators plus one that is created but not appended.
    ensemble.estimators_ = []
    for _ in range(3):
        ensemble._make_estimator()
    ensemble._make_estimator(append=False)

    assert_equal(3, len(ensemble))
    assert_equal(3, len(ensemble.estimators_))
    assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
    # Fitting a BaseEnsemble configured with n_estimators<=0 must raise
    # a ValueError with a specific message.
    iris = load_iris()
    ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
    assert_raise_message(ValueError,
                         "n_estimators must be greater than zero, got 0.",
                         ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
joshbohde/scikit-learn | examples/linear_model/plot_ridge_path.py | 2 | 1436 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
.. currentmodule:: sklearn.linear_model
Shows the effect of collinearity in the coefficients or the
:class:`Ridge`. At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD Style.
print __doc__  # Python 2 print statement: echoes the module docstring
import numpy as np
import pylab as pl
from sklearn import linear_model
# X is the 10x10 Hilbert matrix (entries 1/(i+j-1)), a classically
# ill-conditioned matrix, so the unregularized coefficients are unstable.
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
################################################################################
# Compute paths
# Fit ridge regression at 200 regularization strengths, log-spaced in
# [1e-10, 1e-2], reusing a single estimator and collecting the coefficients.
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
    clf.set_params(alpha=a)
    clf.fit(X, y)
    coefs.append(clf.coef_)  # one length-10 coefficient vector per alpha
################################################################################
# Display results
ax = pl.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis: strong regularization first
pl.xlabel('alpha')
pl.ylabel('weights')
pl.title('Ridge coefficients as a function of the regularization')
pl.axis('tight')
pl.show()
| bsd-3-clause |
shanot/imp | modules/pmi/pyext/src/analysis.py | 1 | 106818 | #!/usr/bin/env python
"""@namespace IMP.pmi.analysis
Tools for clustering and cluster analysis
"""
from __future__ import print_function
import IMP
import IMP.algebra
import IMP.em
import IMP.pmi
import IMP.pmi.tools
import IMP.pmi.output
import IMP.rmf
import RMF
import IMP.pmi.analysis
from operator import itemgetter
from copy import deepcopy
from math import log,sqrt
import itertools
import numpy as np
class Alignment(object):
    """Performs alignment and RMSD calculation for two sets of coordinates

    The class also takes into account non-equal stoichiometry of the proteins.
    If this is the case, the protein names of proteins in multiple copies
    should be specified in the following form:
    nameA..1, nameA..2 (note two dots).
    """
    def __init__(self, template, query, weights=None):
        """Constructor.
        @param template {'p1':coords(L,3), 'p2':coords(L,3)}
        @param query {'p1':coords(L,3), 'p2':coords(L,3)}
        @param weights optional weights for each set of coordinates
        """
        self.query = query
        self.template = template
        self.weights = weights
        # template and query must describe the same number of proteins
        if len(self.query.keys()) != len(self.template.keys()):
            raise ValueError('''the number of proteins
                               in template and query does not match!''')
    def permute(self):
        """Enumerate every copy-to-copy mapping between template and query.

        Sets self.P (per-protein permutations of its copies) and
        self.Product (the cartesian product of those permutations over
        all proteins).
        """
        # get unique protein names for each protein
        # this is, unfortunately, expecting that names are all 'Molname..X'
        # where X is different for each copy.
        self.proteins = sorted(self.query.keys())
        prots_uniq = [i.split('..')[0] for i in self.proteins]
        # for each unique name, store list of permutations
        # e.g. for keys A..1,A..2 store P[A] = [[A..1,A..2],[A..2,A..1]]
        # then store the product: [[[A1,A2],[B1,B2]],[[A1,A2],[B2,B1]],
        # [[A2,A1],[B1,B2]],[[A2,A1],[B2,B1]]]
        # (the original code also bound prots_uniq.count(p) to a local
        # named 'np', shadowing the module-level numpy alias; the value
        # was never used, so the assignment has been removed)
        P = {}
        for p in prots_uniq:
            copies = [i for i in self.proteins if i.split('..')[0] == p]
            P[p] = list(itertools.permutations(copies, len(copies)))
        self.P = P
        self.Product = list(itertools.product(*P.values()))
    def get_rmsd(self):
        """Return the minimum RMSD over all copy assignments (no alignment)."""
        self.permute()
        # flatten the template coordinates in a fixed reference order
        template_xyz = []
        weights = []
        torder = sum([list(i) for i in self.Product[0]], [])
        for t in torder:
            template_xyz += [IMP.algebra.Vector3D(i) for i in self.template[t]]
            if self.weights is not None:
                weights += [i for i in self.weights[t]]
        self.rmsd = 10000000000.
        # evaluate every permutation and keep the smallest RMSD
        for comb in self.Product:
            order = sum([list(i) for i in comb], [])
            query_xyz = []
            for p in order:
                query_xyz += [IMP.algebra.Vector3D(i) for i in self.query[p]]
            if self.weights is not None:
                dist = IMP.algebra.get_weighted_rmsd(template_xyz, query_xyz, weights)
            else:
                dist = IMP.algebra.get_rmsd(template_xyz, query_xyz)
            if dist < self.rmsd:
                self.rmsd = dist
        return self.rmsd
    def align(self):
        """Rigidly align query onto template over all copy assignments.
        @return (best_rmsd, transformation) where transformation maps the
                query coordinates onto the template frame
        """
        from scipy.spatial.distance import cdist
        self.permute()
        # create flat coordinate list from template in standard order
        # then loop through the permutations and try to align and get RMSD
        # should allow you to try all mappings within each protein
        template_xyz = []
        torder = sum([list(i) for i in self.Product[0]], [])
        for t in torder:
            template_xyz += [IMP.algebra.Vector3D(i) for i in self.template[t]]
        self.rmsd, Transformation = 10000000000., ''
        # then for each permutation, get flat list of coords and get RMSD
        for comb in self.Product:
            order = sum([list(i) for i in comb], [])
            query_xyz = []
            for p in order:
                query_xyz += [IMP.algebra.Vector3D(i) for i in self.query[p]]
            if len(template_xyz) != len(query_xyz):
                raise ValueError('''the number of coordinates
                               in template and query does not match!''')
            transformation = IMP.algebra.get_transformation_aligning_first_to_second(
                query_xyz,
                template_xyz)
            query_xyz_tr = [transformation.get_transformed(n)
                            for n in query_xyz]
            dist = sqrt(
                sum(np.diagonal(cdist(template_xyz, query_xyz_tr) ** 2)) / len(template_xyz))
            if dist < self.rmsd:
                self.rmsd = dist
                Transformation = transformation
        # return the transformation together with the best RMSD
        return (self.rmsd, Transformation)
# TEST for the alignment ###
"""
Proteins = {'a..1':np.array([np.array([-1.,1.])]),
'a..2':np.array([np.array([1.,1.,])]),
'a..3':np.array([np.array([-2.,1.])]),
'b':np.array([np.array([0.,-1.])]),
'c..1':np.array([np.array([-1.,-1.])]),
'c..2':np.array([np.array([1.,-1.])]),
'd':np.array([np.array([0.,0.])]),
'e':np.array([np.array([0.,1.])])}
Ali = Alignment(Proteins, Proteins)
Ali.permute()
if Ali.get_rmsd() == 0.0: print 'successful test!'
else: print 'ERROR!'; exit()
"""
# ----------------------------------
class Violations(object):
    """Count restraint violations against per-restraint thresholds.

    The threshold file contains one 'restraint_name threshold' pair per
    line (whitespace-separated).
    """
    def __init__(self, filename):
        """Read the per-restraint violation thresholds from filename."""
        self.violation_thresholds = {}
        # cumulative number of times each restraint was seen violated
        self.violation_counts = {}
        # context manager guarantees the file is closed even on parse errors
        with open(filename) as data:
            for line in data:
                fields = line.strip().split()
                self.violation_thresholds[fields[0]] = float(fields[1])
    def get_number_violated_restraints(self, rsts_dict):
        """Return how many known restraints in rsts_dict exceed their
        threshold, updating the cumulative per-restraint counts.

        @param rsts_dict mapping restraint name -> score (anything float()
               accepts); restraints without a threshold are ignored
        """
        num_violated = 0
        for rst in self.violation_thresholds:
            if rst not in rsts_dict:
                continue
            if float(rsts_dict[rst]) > self.violation_thresholds[rst]:
                num_violated += 1
                # dict.get replaces the original if/else counting idiom
                self.violation_counts[rst] = self.violation_counts.get(rst, 0) + 1
        return num_violated
# ----------------------------------
class Clustering(object):
    """A class to cluster structures.
    Uses scipy's cdist function to compute distance matrices
    and sklearn's kmeans clustering module.
    """
    def __init__(self,rmsd_weights=None):
        """Constructor.
        @param rmsd_weights Flat list of weights for each particle
               (if they're coarse)
        """
        # Run under MPI when mpi4py is importable, otherwise serially.
        try:
            from mpi4py import MPI
            self.comm = MPI.COMM_WORLD
            self.rank = self.comm.Get_rank()
            self.number_of_processes = self.comm.size
        except ImportError:
            self.number_of_processes = 1
            self.rank = 0
        self.all_coords = {}               # frame id -> {protein name: coordinates}
        self.structure_cluster_ids = None  # per-structure cluster label, set by do_cluster()
        self.tmpl_coords = None            # optional template used for alignment
        self.rmsd_weights=rmsd_weights
    def set_template(self, part_coords):
        """Set the coordinates every structure is aligned to before RMSD."""
        self.tmpl_coords = part_coords
    def fill(self, frame, Coords):
        """Add coordinates for a single model."""
        self.all_coords[frame] = Coords
    def dist_matrix(self):
        """Compute the full symmetric pairwise distance matrix.

        Unique (i, j) pairs are split across MPI ranks; the partial
        results (and alignment transformations) are gathered on all ranks.
        """
        self.model_list_names = list(self.all_coords.keys())
        self.model_indexes = list(range(len(self.model_list_names)))
        self.model_indexes_dict = dict(
            list(zip(self.model_list_names, self.model_indexes)))
        model_indexes_unique_pairs = list(itertools.combinations(self.model_indexes, 2))
        # each rank works only on its chunk of the unique pairs
        my_model_indexes_unique_pairs = IMP.pmi.tools.chunk_list_into_segments(
            model_indexes_unique_pairs,
            self.number_of_processes)[self.rank]
        print("process %s assigned with %s pairs" % (str(self.rank), str(len(my_model_indexes_unique_pairs))))
        (raw_distance_dict, self.transformation_distance_dict) = self.matrix_calculation(self.all_coords,
                                                                                        self.tmpl_coords,
                                                                                        my_model_indexes_unique_pairs)
        if self.number_of_processes > 1:
            # gather the partial results on every rank; transformations
            # must first be converted to a picklable tuple form
            raw_distance_dict = IMP.pmi.tools.scatter_and_gather(
                raw_distance_dict)
            pickable_transformations = self.get_pickable_transformation_distance_dict(
            )
            pickable_transformations = IMP.pmi.tools.scatter_and_gather(
                pickable_transformations)
            self.set_transformation_distance_dict_from_pickable(
                pickable_transformations)
        # fill both halves of the symmetric matrix
        self.raw_distance_matrix = np.zeros(
            (len(self.model_list_names), len(self.model_list_names)))
        for item in raw_distance_dict:
            (f1, f2) = item
            self.raw_distance_matrix[f1, f2] = raw_distance_dict[item]
            self.raw_distance_matrix[f2, f1] = raw_distance_dict[item]
    def get_dist_matrix(self):
        """Return the raw pairwise distance matrix (run dist_matrix first)."""
        return self.raw_distance_matrix
    def do_cluster(self, number_of_clusters,seed=None):
        """Run K-means clustering
        @param number_of_clusters Num means
        @param seed the random seed
        """
        from sklearn.cluster import KMeans
        if seed is not None:
            np.random.seed(seed)
        try:
            # check whether we have the right version of sklearn
            kmeans = KMeans(n_clusters=number_of_clusters)
        except TypeError:
            # sklearn older than 0.12
            kmeans = KMeans(k=number_of_clusters)
        # NOTE(review): K-means is applied to rows of the distance matrix
        # (distances used as feature vectors), not to raw coordinates.
        kmeans.fit_predict(self.raw_distance_matrix)
        self.structure_cluster_ids = kmeans.labels_
    def get_pickable_transformation_distance_dict(self):
        """Return transformations as (quaternion, translation) tuples,
        which (unlike IMP objects) can be pickled / sent over MPI."""
        pickable_transformations = {}
        for label in self.transformation_distance_dict:
            tr = self.transformation_distance_dict[label]
            trans = tuple(tr.get_translation())
            rot = tuple(tr.get_rotation().get_quaternion())
            pickable_transformations[label] = (rot, trans)
        return pickable_transformations
    def set_transformation_distance_dict_from_pickable(
            self,
            pickable_transformations):
        """Rebuild IMP Transformation3D objects from their pickled tuples."""
        self.transformation_distance_dict = {}
        for label in pickable_transformations:
            tr = pickable_transformations[label]
            trans = IMP.algebra.Vector3D(tr[1])
            rot = IMP.algebra.Rotation3D(tr[0])
            self.transformation_distance_dict[
                label] = IMP.algebra.Transformation3D(rot, trans)
    def save_distance_matrix_file(self, file_name='cluster.rawmatrix.pkl'):
        """Save cluster ids, model names, transformations (pickle) and the
        distance matrix (numpy .npy) to a pair of files."""
        import pickle
        outf = open(file_name + ".data", 'wb')
        # to pickle the transformation dictionary
        # you have to save the arrays corresponding to
        # the transformations
        pickable_transformations = self.get_pickable_transformation_distance_dict(
        )
        pickle.dump(
            (self.structure_cluster_ids,
             self.model_list_names,
             pickable_transformations),
            outf)
        np.save(file_name + ".npy", self.raw_distance_matrix)
    def load_distance_matrix_file(self, file_name='cluster.rawmatrix.pkl'):
        """Restore the state written by save_distance_matrix_file()."""
        import pickle
        inputf = open(file_name + ".data", 'rb')
        (self.structure_cluster_ids, self.model_list_names,
         pickable_transformations) = pickle.load(inputf)
        inputf.close()
        self.raw_distance_matrix = np.load(file_name + ".npy")
        self.set_transformation_distance_dict_from_pickable(
            pickable_transformations)
        self.model_indexes = list(range(len(self.model_list_names)))
        self.model_indexes_dict = dict(
            list(zip(self.model_list_names, self.model_indexes)))
    def plot_matrix(self, figurename="clustermatrix.pdf"):
        """Plot a dendrogram plus the leaf-ordered distance matrix heat map."""
        import matplotlib as mpl
        mpl.use('Agg')  # select a non-interactive backend before pylab import
        import matplotlib.pylab as pl
        from scipy.cluster import hierarchy as hrc
        fig = pl.figure(figsize=(10,8))
        ax = fig.add_subplot(212)
        dendrogram = hrc.dendrogram(
            hrc.linkage(self.raw_distance_matrix),
            color_threshold=7,
            no_labels=True)
        leaves_order = dendrogram['leaves']
        ax.set_xlabel('Model')
        ax.set_ylabel('RMSD [Angstroms]')
        ax2 = fig.add_subplot(221)
        # reorder both matrix axes to match the dendrogram leaves
        cax = ax2.imshow(
            self.raw_distance_matrix[leaves_order,
                                     :][:,
                                        leaves_order],
            interpolation='nearest')
        cb = fig.colorbar(cax)
        cb.set_label('RMSD [Angstroms]')
        ax2.set_xlabel('Model')
        ax2.set_ylabel('Model')
        pl.savefig(figurename, dpi=300)
        pl.close(fig)
    def get_model_index_from_name(self, name):
        """Return the integer index for a model name."""
        return self.model_indexes_dict[name]
    def get_cluster_labels(self):
        # this list contains the set of distinct cluster ids
        return list(set(self.structure_cluster_ids))
    def get_number_of_clusters(self):
        """Return the number of distinct clusters found."""
        return len(self.get_cluster_labels())
    def get_cluster_label_indexes(self, label):
        """Return the indexes of all structures in the given cluster."""
        return (
            [i for i, l in enumerate(self.structure_cluster_ids) if l == label]
        )
    def get_cluster_label_names(self, label):
        """Return the model names of all structures in the given cluster."""
        return (
            [self.model_list_names[i]
                for i in self.get_cluster_label_indexes(label)]
        )
    def get_cluster_label_average_rmsd(self, label):
        """Return the mean off-diagonal pairwise distance within a cluster
        (0.0 for singleton clusters)."""
        indexes = self.get_cluster_label_indexes(label)
        if len(indexes) > 1:
            sub_distance_matrix = self.raw_distance_matrix[
                indexes, :][:, indexes]
            # divide by the number of off-diagonal entries (n^2 - n)
            average_rmsd = np.sum(sub_distance_matrix) / \
                (len(sub_distance_matrix)
                 ** 2 - len(sub_distance_matrix))
        else:
            average_rmsd = 0.0
        return average_rmsd
    def get_cluster_label_size(self, label):
        """Return the number of structures in the given cluster."""
        return len(self.get_cluster_label_indexes(label))
    def get_transformation_to_first_member(
            self,
            cluster_label,
            structure_index):
        """Return the transformation mapping a structure onto the first
        member of its cluster."""
        reference = self.get_cluster_label_indexes(cluster_label)[0]
        return self.transformation_distance_dict[(reference, structure_index)]
    def matrix_calculation(self, all_coords, template_coords, list_of_pairs):
        """Compute RMSD (and alignment transformation) for each index pair.
        @return (raw_distance_dict, transformation_distance_dict), both keyed
                by (i, j) and (j, i)
        """
        model_list_names = list(all_coords.keys())
        rmsd_protein_names = list(all_coords[model_list_names[0]].keys())
        raw_distance_dict = {}
        transformation_distance_dict = {}
        if template_coords is None:
            do_alignment = False
        else:
            do_alignment = True
            alignment_template_protein_names = list(template_coords.keys())
        for (f1, f2) in list_of_pairs:
            if not do_alignment:
                # here we only get the rmsd,
                # we need that for instance when you want to cluster conformations
                # globally, eg the EM map is a reference
                transformation = IMP.algebra.get_identity_transformation_3d()
                coords_f1 = dict([(pr, all_coords[model_list_names[f1]][pr])
                                 for pr in rmsd_protein_names])
                coords_f2 = {}
                for pr in rmsd_protein_names:
                    coords_f2[pr] = all_coords[model_list_names[f2]][pr]
                Ali = Alignment(coords_f1, coords_f2, self.rmsd_weights)
                rmsd = Ali.get_rmsd()
            elif do_alignment:
                # here we actually align the conformations first
                # and then calculate the rmsd. We need that when the
                # protein(s) is the reference
                coords_f1 = dict([(pr, all_coords[model_list_names[f1]][pr])
                                 for pr in alignment_template_protein_names])
                coords_f2 = dict([(pr, all_coords[model_list_names[f2]][pr])
                                 for pr in alignment_template_protein_names])
                Ali = Alignment(coords_f1, coords_f2)
                template_rmsd, transformation = Ali.align()
                # here we calculate the rmsd
                # we will align two models based on the number of subunits provided
                # and transform coordinates of model 2 to model 1
                coords_f1 = dict([(pr, all_coords[model_list_names[f1]][pr])
                                 for pr in rmsd_protein_names])
                coords_f2 = {}
                for pr in rmsd_protein_names:
                    coords_f2[pr] = [transformation.get_transformed(
                        i) for i in all_coords[model_list_names[f2]][pr]]
                Ali = Alignment(coords_f1, coords_f2, self.rmsd_weights)
                rmsd = Ali.get_rmsd()
            # store both orientations of the pair
            raw_distance_dict[(f1, f2)] = rmsd
            raw_distance_dict[(f2, f1)] = rmsd
            transformation_distance_dict[(f1, f2)] = transformation
            transformation_distance_dict[(f2, f1)] = transformation
        return raw_distance_dict, transformation_distance_dict
class RMSD(object):
    """Compute the RMSD (without alignment) taking into account the copy ambiguity.
    To be used with pmi2 hierarchies. Can be used for instance as follows:
    rmsd=IMP.pmi.analysis.RMSD(hier,hier,[mol.get_name() for mol in mols],dynamic0=True,dynamic1=False)
    output_objects.append(rmsd)
    before shuffling the coordinates
    """
    def __init__(self,hier0,hier1,molnames,label="None",dynamic0=True,dynamic1=True,metric=IMP.algebra.get_rmsd):
        """
        @param hier0 first input hierarchy
        @param hier1 second input hierarchy
        @param molnames the names of the molecules used for the RMSD
        @param label string used to tag the entries in get_output()
        @param dynamic0 if True the XYZ decorators of hier0 are stored so
               coordinates can be updated; if False coordinates are static
               (stored as Vector3Ds) and will never be updated
        @param dynamic1 same as dynamic0, for hier1
        @param metric the metric used for the final per-molecule distance
        """
        self.moldict0,self.molcoords0,self.mol_XYZs0=self.get_moldict_coorddict(hier0,molnames)
        self.moldict1,self.molcoords1,self.mol_XYZs1=self.get_moldict_coorddict(hier1,molnames)
        self.dynamic0=dynamic0
        self.dynamic1=dynamic1
        self.metric=metric
        self.label=label
    def get_moldict_coorddict(self,hier,molnames):
        """return data structure for the RMSD calculation"""
        moldict={}     # molecule name -> list of molecule hierarchies (copies)
        mol_coords={}  # molecule -> static coordinates (resolution-1 beads)
        mol_XYZs={}    # molecule -> XYZ decorators (for dynamic updates)
        for mol in IMP.pmi.tools.get_molecules(hier):
            name=mol.get_name()
            if name not in molnames:
                continue
            parts=True
            mol_coords[mol]=[]
            mol_XYZs[mol]=[]
            i=1
            # walk residue indexes until a selection comes back empty
            while parts:
                sel=IMP.atom.Selection(mol,residue_index=i,representation_type=IMP.atom.BALLS,resolution=1)
                parts=sel.get_selected_particles()
                if parts:
                    mol_coords[mol].append(IMP.core.XYZ(parts[0]).get_coordinates())
                    mol_XYZs[mol].append(IMP.core.XYZ(parts[0]))
                i=i+1
            if name in moldict:
                moldict[name].append(mol)
            else:
                moldict[name]=[mol]
        return moldict, mol_coords, mol_XYZs
    def get_rmsd_and_assigments(self):
        """Return (total_rmsd, best_assignments): the copy-ambiguity-resolved
        RMSD and the list of (hier0_mol, hier1_mol) pairings that achieve it.
        (Method name typo kept for API compatibility.)
        """
        best_orders=[]
        total_rmsd=0
        total_N=0
        best_assignments=[]
        rmsd_dict={}
        for molname, ref_mols in self.moldict1.items():
            # flatten the reference coordinates over all copies of molname
            ref_coords=[]
            for ref_mol in ref_mols:
                if self.dynamic1:
                    coord1=[XYZ.get_coordinates() for XYZ in self.mol_XYZs1[ref_mol]]
                else:
                    coord1=self.molcoords1[ref_mol]
                ref_coords+=coord1
            # try every ordering of the hier0 copies, keep the best match
            rmsd=[]
            rmf_mols_list=[]
            for rmf_mols in itertools.permutations(self.moldict0[molname]):
                rmf_coords=[]
                for rmf_mol in rmf_mols:
                    if self.dynamic0:
                        coord0=[XYZ.get_coordinates() for XYZ in self.mol_XYZs0[rmf_mol]]
                    else:
                        coord0=self.molcoords0[rmf_mol]
                    rmf_coords+=coord0
                # NOTE(review): the permutation search always uses
                # IMP.algebra.get_rmsd here rather than self.metric,
                # while the final per-pair distance below uses self.metric
                # — confirm this asymmetry is intended.
                rmsd.append(IMP.algebra.get_rmsd(ref_coords, rmf_coords))
                rmf_mols_list.append(rmf_mols)
            m=min(rmsd)
            rmf_mols_best_order=rmf_mols_list[rmsd.index(m)]
            # accumulate the weighted squared RMSD for the best assignment
            for n, (ref_mol,rmf_mol) in enumerate(zip(ref_mols,rmf_mols_best_order)):
                best_assignments.append((rmf_mol,ref_mol))
                if self.dynamic0:
                    coord0=[XYZ.get_coordinates() for XYZ in self.mol_XYZs0[rmf_mol]]
                else:
                    coord0=self.molcoords0[rmf_mol]
                if self.dynamic1:
                    coord1=[XYZ.get_coordinates() for XYZ in self.mol_XYZs1[ref_mol]]
                else:
                    coord1=self.molcoords1[ref_mol]
                rmsd_pair=self.metric(coord1, coord0)
                N=len(self.molcoords1[ref_mol])
                total_N+=N
                total_rmsd+=rmsd_pair*rmsd_pair*N
                rmsd_dict[ref_mol]=rmsd_pair
        # combine per-molecule RMSDs, weighted by bead count
        total_rmsd = sqrt(total_rmsd/total_N)
        return total_rmsd,best_assignments
    def get_output(self):
        """Returns output for IMP.pmi.output.Output object"""
        total_rmsd,best_assignments=self.get_rmsd_and_assigments()
        assignments_out=[]
        for rmf_mol,ref_mol in best_assignments:
            ref_name=ref_mol.get_name()
            ref_copy=IMP.atom.Copy(ref_mol).get_copy_index()
            rmf_name=rmf_mol.get_name()
            rmf_copy=IMP.atom.Copy(rmf_mol).get_copy_index()
            assignments_out.append(rmf_name+"."+str(rmf_copy)+"->"+ref_name+"."+str(ref_copy))
        return {"RMSD_"+self.label:str(total_rmsd),"RMSD_assignments_"+self.label:str(assignments_out)}
class Precision(object):
"""A class to evaluate the precision of an ensemble.
Also can evaluate the cross-precision of multiple ensembles.
Supports MPI for coordinate reading.
Recommended procedure:
-# initialize object and pass the selection for evaluating precision
-# call add_structures() to read in the data (specify group name)
-# call get_precision() to evaluate inter/intra precision
-# call get_rmsf() to evaluate within-group fluctuations
"""
def __init__(self,model,
resolution=1,
selection_dictionary={}):
"""Constructor.
@param model The IMP Model
@param resolution Use 1 or 10 (kluge: requires that "_Res:X" is
part of the hier name)
@param selection_dictionary Dictionary where keys are names for
selections and values are selection tuples for scoring
precision. "All" is automatically made as well
"""
try:
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.number_of_processes = self.comm.size
except ImportError:
self.number_of_processes = 1
self.rank = 0
self.styles = ['pairwise_rmsd','pairwise_drmsd_k','pairwise_drmsd_Q',
'pairwise_drms_k','pairwise_rmsd','drmsd_from_center']
self.style = 'pairwise_drmsd_k'
self.structures_dictionary = {}
self.reference_structures_dictionary = {}
self.prots = []
self.protein_names = None
self.len_particles_resolution_one = None
self.model = model
self.rmf_names_frames = {}
self.reference_rmf_names_frames = None
self.reference_structure = None
self.reference_prot = None
self.selection_dictionary = selection_dictionary
self.threshold = 40.0
self.residue_particle_index_map = None
self.prots = None
if resolution in [1,10]:
self.resolution = resolution
else:
raise KeyError("Currently only allow resolution 1 or 10")
def _get_structure(self,rmf_frame_index,rmf_name):
"""Read an RMF file and return the particles"""
rh = RMF.open_rmf_file_read_only(rmf_name)
if not self.prots:
print("getting coordinates for frame %i rmf file %s" % (rmf_frame_index, rmf_name))
self.prots = IMP.rmf.create_hierarchies(rh, self.model)
IMP.rmf.load_frame(rh, RMF.FrameID(rmf_frame_index))
else:
print("linking coordinates for frame %i rmf file %s" % (rmf_frame_index, rmf_name))
IMP.rmf.link_hierarchies(rh, self.prots)
IMP.rmf.load_frame(rh, RMF.FrameID(rmf_frame_index))
del rh
if self.resolution==1:
particle_dict = get_particles_at_resolution_one(self.prots[0])
elif self.resolution==10:
particle_dict = get_particles_at_resolution_ten(self.prots[0])
protein_names = list(particle_dict.keys())
particles_resolution_one = []
for k in particle_dict:
particles_resolution_one += (particle_dict[k])
if self.protein_names==None:
self.protein_names = protein_names
else:
if self.protein_names!=protein_names:
print("Error: the protein names of the new coordinate set is not compatible with the previous one")
if self.len_particles_resolution_one==None:
self.len_particles_resolution_one = len(particles_resolution_one)
else:
if self.len_particles_resolution_one!=len(particles_resolution_one):
raise ValueError("the new coordinate set is not compatible with the previous one")
return particles_resolution_one
def add_structure(self,
rmf_name,
rmf_frame_index,
structure_set_name,
setup_index_map=False):
""" Read a structure into the ensemble and store (as coordinates).
@param rmf_name The name of the RMF file
@param rmf_frame_index The frame to read
@param structure_set_name Name for the set that includes this structure
(e.g. "cluster 1")
@param setup_index_map if requested, set up a dictionary to help
find residue indexes
"""
# decide where to put this structure
if structure_set_name in self.structures_dictionary:
cdict = self.structures_dictionary[structure_set_name]
rmflist = self.rmf_names_frames[structure_set_name]
else:
self.structures_dictionary[structure_set_name]={}
self.rmf_names_frames[structure_set_name]=[]
cdict = self.structures_dictionary[structure_set_name]
rmflist = self.rmf_names_frames[structure_set_name]
# read the particles
try:
particles_resolution_one = self._get_structure(rmf_frame_index,rmf_name)
except ValueError:
print("something wrong with the rmf")
return 0
self.selection_dictionary.update({"All":self.protein_names})
for selection_name in self.selection_dictionary:
selection_tuple = self.selection_dictionary[selection_name]
coords = self._select_coordinates(selection_tuple,particles_resolution_one,self.prots[0])
if selection_name not in cdict:
cdict[selection_name] = [coords]
else:
cdict[selection_name].append(coords)
rmflist.append((rmf_name,rmf_frame_index))
# if requested, set up a dictionary to help find residue indexes
if setup_index_map:
self.residue_particle_index_map = {}
for prot_name in self.protein_names:
self.residue_particle_index_map[prot_name] = \
self._get_residue_particle_index_map(
prot_name,
particles_resolution_one,self.prots[0])
def add_structures(self,
rmf_name_frame_tuples,
structure_set_name):
"""Read a list of RMFs, supports parallel
@param rmf_name_frame_tuples list of (rmf_file_name,frame_number)
@param structure_set_name Name this set of structures (e.g. "cluster.1")
"""
# split up the requested list to read in parallel
my_rmf_name_frame_tuples=IMP.pmi.tools.chunk_list_into_segments(
rmf_name_frame_tuples,self.number_of_processes)[self.rank]
for nfr,tup in enumerate(my_rmf_name_frame_tuples):
rmf_name=tup[0]
rmf_frame_index=tup[1]
# the first frame stores the map between residues and particles
if self.residue_particle_index_map is None:
setup_index_map = True
else:
setup_index_map = False
self.add_structure(rmf_name,
rmf_frame_index,
structure_set_name,
setup_index_map)
# synchronize the structures
if self.number_of_processes > 1:
self.rmf_names_frames=IMP.pmi.tools.scatter_and_gather(self.rmf_names_frames)
if self.rank != 0:
self.comm.send(self.structures_dictionary, dest=0, tag=11)
elif self.rank == 0:
for i in range(1, self.number_of_processes):
data_tmp = self.comm.recv(source=i, tag=11)
for key in self.structures_dictionary:
self.structures_dictionary[key].update(data_tmp[key])
for i in range(1, self.number_of_processes):
self.comm.send(self.structures_dictionary, dest=i, tag=11)
if self.rank != 0:
self.structures_dictionary = self.comm.recv(source=0, tag=11)
def _get_residue_particle_index_map(self,prot_name,structure,hier):
# Creates map from all particles to residue numbers
residue_particle_index_map = []
if IMP.pmi.get_is_canonical(hier):
s = IMP.atom.Selection(hier,molecules=[prot_name],
resolution=1)
else:
s = IMP.atom.Selection(hier,molecules=[prot_name])
all_selected_particles = s.get_selected_particles()
intersection = list(set(all_selected_particles) & set(structure))
sorted_intersection = IMP.pmi.tools.sort_by_residues(intersection)
for p in sorted_intersection:
residue_particle_index_map.append(IMP.pmi.tools.get_residue_indexes(p))
return residue_particle_index_map
def _select_coordinates(self,tuple_selections,structure,prot):
selected_coordinates=[]
for t in tuple_selections:
if type(t)==tuple and len(t)==3:
if IMP.pmi.get_is_canonical(prot):
s = IMP.atom.Selection(prot,molecules=[t[2]],residue_indexes=range(t[0],t[1]+1),
resolution=1)
else:
s = IMP.atom.Selection(prot,molecules=[t[2]],residue_indexes=range(t[0],t[1]+1))
all_selected_particles = s.get_selected_particles()
intersection = list(set(all_selected_particles) & set(structure))
sorted_intersection = IMP.pmi.tools.sort_by_residues(intersection)
cc = [tuple(IMP.core.XYZ(p).get_coordinates()) for p in sorted_intersection]
selected_coordinates += cc
elif type(t)==str:
if IMP.pmi.get_is_canonical(prot):
s = IMP.atom.Selection(prot,molecules=[t],resolution=1)
else:
s = IMP.atom.Selection(prot,molecules=[t])
all_selected_particles = s.get_selected_particles()
intersection = list(set(all_selected_particles) & set(structure))
sorted_intersection = IMP.pmi.tools.sort_by_residues(intersection)
cc = [tuple(IMP.core.XYZ(p).get_coordinates()) for p in sorted_intersection]
selected_coordinates += cc
else:
raise ValueError("Selection error")
return selected_coordinates
def set_threshold(self,threshold):
self.threshold = threshold
def _get_distance(self,
structure_set_name1,
structure_set_name2,
selection_name,
index1,
index2):
""" Compute distance between structures with various metrics """
c1 = self.structures_dictionary[structure_set_name1][selection_name][index1]
c2 = self.structures_dictionary[structure_set_name2][selection_name][index2]
coordinates1 = [IMP.algebra.Vector3D(c) for c in c1]
coordinates2 = [IMP.algebra.Vector3D(c) for c in c2]
if self.style=='pairwise_drmsd_k':
distance=IMP.atom.get_drmsd(coordinates1,coordinates2)
if self.style=='pairwise_drms_k':
distance=IMP.atom.get_drms(coordinates1,coordinates2)
if self.style=='pairwise_drmsd_Q':
distance=IMP.atom.get_drmsd_Q(coordinates1,coordinates2,self.threshold)
if self.style=='pairwise_rmsd':
distance=IMP.algebra.get_rmsd(coordinates1,coordinates2)
return distance
def _get_particle_distances(self,structure_set_name1,structure_set_name2,
selection_name,index1,index2):
c1 = self.structures_dictionary[structure_set_name1][selection_name][index1]
c2 = self.structures_dictionary[structure_set_name2][selection_name][index2]
coordinates1 = [IMP.algebra.Vector3D(c) for c in c1]
coordinates2 = [IMP.algebra.Vector3D(c) for c in c2]
distances=[np.linalg.norm(a-b) for (a,b) in zip(coordinates1,coordinates2)]
return distances
def get_precision(self,
structure_set_name1,
structure_set_name2,
outfile=None,
skip=1,
selection_keywords=None):
""" Evaluate the precision of two named structure groups. Supports MPI.
When the structure_set_name1 is different from the structure_set_name2,
this evaluates the cross-precision (average pairwise distances).
@param outfile Name of the precision output file
@param structure_set_name1 string name of the first structure set
@param structure_set_name2 string name of the second structure set
@param skip analyze every (skip) structure for the distance matrix calculation
@param selection_keywords Specify the selection name you want to calculate on.
By default this is computed for everything you provided in the constructor,
plus all the subunits together.
"""
if selection_keywords is None:
sel_keys = list(self.selection_dictionary.keys())
else:
for k in selection_keywords:
if k not in self.selection_dictionary:
raise KeyError("you are trying to find named selection " \
+ k + " which was not requested in the constructor")
sel_keys = selection_keywords
if outfile is not None:
of = open(outfile,"w")
centroid_index = 0
for selection_name in sel_keys:
number_of_structures_1 = len(self.structures_dictionary[structure_set_name1][selection_name])
number_of_structures_2 = len(self.structures_dictionary[structure_set_name2][selection_name])
distances={}
structure_pointers_1 = list(range(0,number_of_structures_1,skip))
structure_pointers_2 = list(range(0,number_of_structures_2,skip))
pair_combination_list = list(itertools.product(structure_pointers_1,structure_pointers_2))
if len(pair_combination_list)==0:
raise ValueError("no structure selected. Check the skip parameter.")
# compute pairwise distances in parallel
my_pair_combination_list = IMP.pmi.tools.chunk_list_into_segments(
pair_combination_list,self.number_of_processes)[self.rank]
my_length = len(my_pair_combination_list)
for n,pair in enumerate(my_pair_combination_list):
progression = int(float(n)/my_length*100.0)
distances[pair] = self._get_distance(structure_set_name1,structure_set_name2,
selection_name,pair[0],pair[1])
if self.number_of_processes > 1:
distances = IMP.pmi.tools.scatter_and_gather(distances)
# Finally compute distance to centroid
if self.rank == 0:
if structure_set_name1==structure_set_name2:
structure_pointers = structure_pointers_1
number_of_structures = number_of_structures_1
# calculate the distance from the first centroid
# and determine the centroid
distance = 0.0
distances_to_structure = {}
distances_to_structure_normalization = {}
for n in structure_pointers:
distances_to_structure[n] = 0.0
distances_to_structure_normalization[n]=0
for k in distances:
distance += distances[k]
distances_to_structure[k[0]] += distances[k]
distances_to_structure[k[1]] += distances[k]
distances_to_structure_normalization[k[0]] += 1
distances_to_structure_normalization[k[1]] += 1
for n in structure_pointers:
distances_to_structure[n] = distances_to_structure[n]/distances_to_structure_normalization[n]
min_distance = min([distances_to_structure[n] for n in distances_to_structure])
centroid_index = [k for k, v in distances_to_structure.items() if v == min_distance][0]
centroid_rmf_name = self.rmf_names_frames[structure_set_name1][centroid_index]
centroid_distance = 0.0
distance_list = []
for n in range(number_of_structures):
dist = self._get_distance(structure_set_name1,structure_set_name1,
selection_name,centroid_index,n)
centroid_distance += dist
distance_list.append(dist)
#pairwise_distance=distance/len(distances.keys())
centroid_distance /= number_of_structures
#average_centroid_distance=sum(distances_to_structure)/len(distances_to_structure)
if outfile is not None:
of.write(str(selection_name)+" "+structure_set_name1+
" average centroid distance "+str(centroid_distance)+"\n")
of.write(str(selection_name)+" "+structure_set_name1+
" centroid index "+str(centroid_index)+"\n")
of.write(str(selection_name)+" "+structure_set_name1+
" centroid rmf name "+str(centroid_rmf_name)+"\n")
of.write(str(selection_name)+" "+structure_set_name1+
" median centroid distance "+str(np.median(distance_list))+"\n")
average_pairwise_distances=sum(distances.values())/len(list(distances.values()))
if outfile is not None:
of.write(str(selection_name)+" "+structure_set_name1+" "+structure_set_name2+
" average pairwise distance "+str(average_pairwise_distances)+"\n")
if outfile is not None:
of.close()
return centroid_index
    def get_rmsf(self,
                 structure_set_name,
                 outdir="./",
                 skip=1,
                 set_plot_yaxis_range=None):
        """ Calculate the residue mean square fluctuations (RMSF).
        Automatically outputs as data file and pdf
        @param structure_set_name Which structure set to calculate RMSF for
        @param outdir Where to write the files
        @param skip Skip this number of structures
        @param set_plot_yaxis_range In case you need to change the plot
        """
        # get the centroid structure for the whole complex
        centroid_index = self.get_precision(structure_set_name,
                                            structure_set_name,
                                            outfile=None,
                                            skip=skip)
        # Only the rank-0 process writes output files and plots.
        if self.rank == 0:
            for sel_name in self.protein_names:
                # Register a per-protein selection so the distance helpers
                # below can be queried by protein name.
                self.selection_dictionary.update({sel_name: [sel_name]})
                try:
                    number_of_structures = len(self.structures_dictionary[structure_set_name][sel_name])
                except KeyError:
                    # that protein was not included in the selection
                    continue
                # rpim: per-selection list of residue-index blocks; each block
                # maps the residues covered by one coarse-grained particle.
                rpim = self.residue_particle_index_map[sel_name]
                outfile = outdir + "/rmsf." + sel_name + ".dat"
                of = open(outfile, "w")
                residue_distances = {}   # residue number -> distances to centroid over frames
                residue_nblock = {}      # residue number -> index of its particle block
                for index in range(number_of_structures):
                    # Per-particle distances between this structure and the centroid.
                    distances = self._get_particle_distances(structure_set_name,
                                                             structure_set_name,
                                                             sel_name,
                                                             centroid_index, index)
                    for nblock, block in enumerate(rpim):
                        # Every residue in a block shares its particle's distance.
                        for residue_number in block:
                            residue_nblock[residue_number] = nblock
                            if residue_number not in residue_distances:
                                residue_distances[residue_number] = [distances[nblock]]
                            else:
                                residue_distances[residue_number].append(distances[nblock])

                residues = []
                rmsfs = []
                for rn in residue_distances:
                    residues.append(rn)
                    # RMSF of a residue = std-dev of its distance to the centroid.
                    rmsf = np.std(residue_distances[rn])
                    rmsfs.append(rmsf)
                    of.write(str(rn) + " " + str(residue_nblock[rn]) + " " + str(rmsf) + "\n")

                IMP.pmi.output.plot_xy_data(residues, rmsfs, title=sel_name,
                                            out_fn=outdir + "/rmsf." + sel_name, display=False,
                                            set_plot_yaxis_range=set_plot_yaxis_range,
                                            xlabel='Residue Number', ylabel='Standard error')
                of.close()
def set_reference_structure(self,rmf_name,rmf_frame_index):
"""Read in a structure used for reference computation.
Needed before calling get_average_distance_wrt_reference_structure()
@param rmf_name The RMF file to read the reference
@param rmf_frame_index The index in that file
"""
particles_resolution_one = self._get_structure(rmf_frame_index,rmf_name)
self.reference_rmf_names_frames = (rmf_name,rmf_frame_index)
for selection_name in self.selection_dictionary:
selection_tuple = self.selection_dictionary[selection_name]
coords = self._select_coordinates(selection_tuple,
particles_resolution_one,self.prots[0])
self.reference_structures_dictionary[selection_name] = coords
def get_rmsd_wrt_reference_structure_with_alignment(self,structure_set_name,alignment_selection_key):
"""First align then calculate RMSD
@param structure_set_name: the name of the structure set
@param alignment_selection: the key containing the selection tuples needed to make the alignment stored in self.selection_dictionary
@return: for each structure in the structure set, returns the rmsd
"""
if self.reference_structures_dictionary=={}:
print("Cannot compute until you set a reference structure")
return
align_reference_coordinates = self.reference_structures_dictionary[alignment_selection_key]
align_coordinates = self.structures_dictionary[structure_set_name][alignment_selection_key]
transformations = []
for c in align_coordinates:
Ali = IMP.pmi.analysis.Alignment({"All":align_reference_coordinates}, {"All":c})
transformation = Ali.align()[1]
transformations.append(transformation)
for selection_name in self.selection_dictionary:
reference_coordinates = self.reference_structures_dictionary[selection_name]
coordinates2 = [IMP.algebra.Vector3D(c) for c in reference_coordinates]
distances = []
for n,sc in enumerate(self.structures_dictionary[structure_set_name][selection_name]):
coordinates1 = [transformations[n].get_transformed(IMP.algebra.Vector3D(c)) for c in sc]
distance = IMP.algebra.get_rmsd(coordinates1,coordinates2)
distances.append(distance)
print(selection_name,"average rmsd",sum(distances)/len(distances),"median",self._get_median(distances),"minimum distance",min(distances))
def _get_median(self,list_of_values):
return np.median(np.array(list_of_values))
def get_average_distance_wrt_reference_structure(self,structure_set_name):
"""Compare the structure set to the reference structure.
@param structure_set_name The structure set to compute this on
@note First call set_reference_structure()
"""
ret = {}
if self.reference_structures_dictionary=={}:
print("Cannot compute until you set a reference structure")
return
for selection_name in self.selection_dictionary:
reference_coordinates = self.reference_structures_dictionary[selection_name]
coordinates2 = [IMP.algebra.Vector3D(c) for c in reference_coordinates]
distances = []
for sc in self.structures_dictionary[structure_set_name][selection_name]:
coordinates1 = [IMP.algebra.Vector3D(c) for c in sc]
if self.style=='pairwise_drmsd_k':
distance = IMP.atom.get_drmsd(coordinates1,coordinates2)
if self.style=='pairwise_drms_k':
distance = IMP.atom.get_drms(coordinates1,coordinates2)
if self.style=='pairwise_drmsd_Q':
distance = IMP.atom.get_drmsd_Q(coordinates1,coordinates2,self.threshold)
if self.style=='pairwise_rmsd':
distance = IMP.algebra.get_rmsd(coordinates1,coordinates2)
distances.append(distance)
print(selection_name,"average distance",sum(distances)/len(distances),"minimum distance",min(distances),'nframes',len(distances))
ret[selection_name] = {'average_distance':sum(distances)/len(distances),'minimum_distance':min(distances)}
return ret
    def get_coordinates(self):
        """Stub: intentionally does nothing."""
        pass
def set_precision_style(self, style):
if style in self.styles:
self.style=style
else:
raise ValueError("No such style")
class GetModelDensity(object):
    """Compute mean density maps from structures.

    Keeps a dictionary of density maps,
    keys are in the custom ranges. When you call add_subunits_density, it adds
    particle coordinates to the existing density maps.
    """

    def __init__(self, custom_ranges, representation=None, resolution=20.0, voxel=5.0):
        """Constructor.
        @param custom_ranges Required. It's a dictionary, keys are the
               density component names, values are selection tuples
               e.g. {'kin28':[['kin28',1,-1]],
               'density_name_1' :[('ccl1')],
               'density_name_2' :[(1,142,'tfb3d1'),
               (143,700,'tfb3d2')],
        @param representation PMI representation, for doing selections.
               Not needed if you only pass hierarchies
        @param resolution The MRC resolution of the output map (in Angstrom unit)
        @param voxel The voxel size for the output map (lower is slower)
        """
        self.representation = representation
        self.MRCresolution = resolution
        self.voxel = voxel
        self.densities = {}          # density component name -> accumulated IMP.em density map
        self.count_models = 0.0      # number of frames added; used for averaging in write_mrc()
        self.custom_ranges = custom_ranges

    def add_subunits_density(self, hierarchy=None):
        """Add a frame to the densities.
        @param hierarchy Optionally read the hierarchy from somewhere.
               If not passed, will just read the representation.
        """
        self.count_models += 1.0

        # With an explicit hierarchy, collect its resolution-one particles
        # so segment selections can be intersected against them below.
        if hierarchy:
            part_dict = get_particles_at_resolution_one(hierarchy)
            all_particles_by_resolution = []
            for name in part_dict:
                all_particles_by_resolution += part_dict[name]

        for density_name in self.custom_ranges:
            parts = []
            if hierarchy:
                all_particles_by_segments = []

            for seg in self.custom_ranges[density_name]:
                if not hierarchy:
                    # when you have a IMP.pmi.representation.Representation class
                    # NOTE(review): IMP.tools.select_by_tuple — select_by_tuple
                    # usually lives in IMP.pmi.tools; confirm this attribute exists.
                    parts += IMP.tools.select_by_tuple(self.representation,
                                                       seg, resolution=1, name_is_ambiguous=False)
                else:
                    # else, when you have a hierarchy, but not a representation
                    if not IMP.pmi.get_is_canonical(hierarchy):
                        # Ensure every top-level child is decorated as a Molecule
                        # so molecule-based Selection works.
                        for h in hierarchy.get_children():
                            if not IMP.atom.Molecule.get_is_setup(h):
                                IMP.atom.Molecule.setup_particle(h.get_particle())

                    # Accepted selection tuple forms:
                    #   name | (name, copy) | (first, last, name)
                    #   | (first, last, name, copy)
                    if type(seg) == str:
                        s = IMP.atom.Selection(hierarchy, molecule=seg)
                    elif type(seg) == tuple and len(seg) == 2:
                        s = IMP.atom.Selection(
                            hierarchy, molecule=seg[0], copy_index=seg[1])
                    elif type(seg) == tuple and len(seg) == 3:
                        s = IMP.atom.Selection(
                            hierarchy, molecule=seg[2], residue_indexes=range(seg[0], seg[1] + 1))
                    elif type(seg) == tuple and len(seg) == 4:
                        s = IMP.atom.Selection(
                            hierarchy, molecule=seg[2], residue_indexes=range(seg[0], seg[1] + 1), copy_index=seg[3])
                    else:
                        raise Exception('could not understand selection tuple '+str(seg))

                    all_particles_by_segments += s.get_selected_particles()

            if hierarchy:
                if IMP.pmi.get_is_canonical(hierarchy):
                    parts = all_particles_by_segments
                else:
                    # Keep only segment particles that are also resolution-one.
                    parts = list(
                        set(all_particles_by_segments) & set(all_particles_by_resolution))
            self._create_density_from_particles(parts, density_name)

    def normalize_density(self):
        # Stub: intentionally does nothing.
        pass

    def _create_density_from_particles(self, ps, name,
                                       kernel_type='GAUSSIAN'):
        '''Internal function for adding to densities.
        pass XYZR particles with mass and create a density from them.
        kernel type options are GAUSSIAN, BINARIZED_SPHERE, and SPHERE.'''
        # NOTE(review): kd is built but never passed to SampledDensityMap, so
        # the kernel_type argument currently has no effect — confirm intent.
        kd = {
            'GAUSSIAN': IMP.em.GAUSSIAN,
            'BINARIZED_SPHERE': IMP.em.BINARIZED_SPHERE,
            'SPHERE': IMP.em.SPHERE}

        dmap = IMP.em.SampledDensityMap(ps, self.MRCresolution, self.voxel)
        dmap.calcRMS()
        dmap.set_was_used(True)
        if name not in self.densities:
            self.densities[name] = dmap
        else:
            # Accumulate: build a map whose bounding box covers both the
            # stored density and this frame's density, then add both into it.
            bbox1 = IMP.em.get_bounding_box(self.densities[name])
            bbox2 = IMP.em.get_bounding_box(dmap)
            bbox1 += bbox2
            dmap3 = IMP.em.create_density_map(bbox1, self.voxel)
            dmap3.set_was_used(True)
            dmap3.add(dmap)
            dmap3.add(self.densities[name])
            self.densities[name] = dmap3

    def get_density_keys(self):
        # Names of all density components accumulated so far.
        return list(self.densities.keys())

    def get_density(self, name):
        """Get the current density for some component name"""
        if name not in self.densities:
            return None
        else:
            return self.densities[name]

    def write_mrc(self, path="./", suffix=None):
        # Averages in place: each stored density is divided by the model
        # count every time this runs, so call it only once per accumulation.
        for density_name in self.densities:
            self.densities[density_name].multiply(1. / self.count_models)
            if suffix is None:
                name = path + "/" + density_name + ".mrc"
            else:
                name = path + "/" + density_name + "." + suffix + ".mrc"
            IMP.em.write_map(
                self.densities[density_name], name,
                IMP.em.MRCReaderWriter())
class GetContactMap(object):
    """Accumulate residue contact maps over frames and optionally overlay
    cross-links, then plot the per-protein-pair matrices."""

    def __init__(self, distance=15.):
        # Surface-to-surface distance cutoff (Angstrom) for a "contact".
        self.distance = distance
        self.contactmap = ''    # becomes a numpy matrix on first frame
        self.namelist = []      # one entry per coarse particle, frozen on first frame
        self.xlinks = 0         # flag: cross-links were loaded
        self.XL = {}            # (prot1, prot2) -> list of (res1, res2) pairs
        self.expanded = {}      # particle name -> number of residues it covers
        self.resmap = {}        # protein name -> {residue number -> particle name}

    def set_prot(self, prot):
        from scipy.spatial.distance import cdist
        self.prot = prot
        self.protnames = []
        # NOTE(review): coords/radii/namelist are assigned but never used here.
        coords = []
        radii = []
        namelist = []

        particles_dictionary = get_particles_at_resolution_one(self.prot)

        # Record the names of components that contain at least one residue.
        for name in particles_dictionary:
            residue_indexes = []
            for p in particles_dictionary[name]:
                print(p.get_name())
                residue_indexes += IMP.pmi.tools.get_residue_indexes(p)

            if len(residue_indexes) != 0:
                self.protnames.append(name)

    def get_subunit_coords(self, frame, align=0):
        # NOTE(review): the frame and align parameters are not used in this body.
        from scipy.spatial.distance import cdist
        coords = []
        radii = []
        namelist = []

        test, testr = [], []
        for part in self.prot.get_children():
            SortedSegments = []
            print(part)
            # Order this component's segments by their first residue index.
            for chl in part.get_children():
                start = IMP.atom.get_leaves(chl)[0]
                end = IMP.atom.get_leaves(chl)[-1]
                startres = IMP.atom.Fragment(start).get_residue_indexes()[0]
                endres = IMP.atom.Fragment(end).get_residue_indexes()[-1]
                SortedSegments.append((chl, startres))
            SortedSegments = sorted(SortedSegments, key=itemgetter(1))

            for sgmnt in SortedSegments:
                for leaf in IMP.atom.get_leaves(sgmnt[0]):
                    p = IMP.core.XYZR(leaf)
                    crd = np.array([p.get_x(), p.get_y(), p.get_z()])

                    coords.append(crd)
                    radii.append(p.get_radius())

                    # Unique particle name: component_segment_firstResidue.
                    new_name = part.get_name() + '_' + sgmnt[0].get_name() + \
                        '_' + \
                        str(IMP.atom.Fragment(leaf)
                            .get_residue_indexes()[0])
                    namelist.append(new_name)
                    self.expanded[new_name] = len(
                        IMP.atom.Fragment(leaf).get_residue_indexes())
                    if part.get_name() not in self.resmap:
                        self.resmap[part.get_name()] = {}
                    for res in IMP.atom.Fragment(leaf).get_residue_indexes():
                        self.resmap[part.get_name()][res] = new_name

        coords = np.array(coords)
        radii = np.array(radii)
        # Freeze the particle ordering and allocate the map on the first frame.
        if len(self.namelist) == 0:
            self.namelist = namelist
            self.contactmap = np.zeros((len(coords), len(coords)))
        # Surface-to-surface distances: subtract both radii, then threshold;
        # the boolean matrix is accumulated as per-frame contact counts.
        distances = cdist(coords, coords)
        distances = (distances - radii).T - radii
        distances = distances <= self.distance
        self.contactmap += distances

    def add_xlinks(
        self,
        filname,
        identification_string='ISDCrossLinkMS_Distance_'):
        """Parse matching lines of a stat file and store cross-linked residue
        pairs in both orientations."""
        # 'ISDCrossLinkMS_Distance_interrb_6629-State:0-20:RPS30_218:eIF3j-1-1-0.1_None'
        self.xlinks = 1
        data = open(filname)
        D = data.readlines()
        data.close()

        for d in D:
            if identification_string in d:
                # Normalize separators, then split into tokens.
                d = d.replace(
                    "_",
                    " ").replace("-",
                                 " ").replace(":",
                                              " ").split()

                t1, t2 = (d[0], d[1]), (d[1], d[0])
                if t1 not in self.XL:
                    self.XL[t1] = [(int(d[2]) + 1, int(d[3]) + 1)]
                    self.XL[t2] = [(int(d[3]) + 1, int(d[2]) + 1)]
                else:
                    self.XL[t1].append((int(d[2]) + 1, int(d[3]) + 1))
                    self.XL[t2].append((int(d[3]) + 1, int(d[2]) + 1))

    def dist_matrix(self, skip_cmap=0, skip_xl=1, outname=None):
        """Build per-protein-pair residue matrices from the accumulated
        contact map (and optionally the cross-links), then plot them.

        @param skip_cmap 0 to include the contact map
        @param skip_xl 0 to include cross-links (requires add_xlinks first)
        @param outname if given, save to <outname>.pdf instead of showing
        """
        K = self.namelist
        M = self.contactmap
        C, R = [], []
        # NOTE(review): L is computed but not used below.
        L = sum(self.expanded.values())
        proteins = self.protnames

        # exp new
        if skip_cmap == 0:
            Matrices = {}
            proteins = [p.get_name() for p in self.prot.get_children()]
            missing = []
            for p1 in range(len(proteins)):
                for p2 in range(p1, len(proteins)):
                    pl1, pl2 = max(
                        self.resmap[proteins[p1]].keys()), max(self.resmap[proteins[p2]].keys())
                    pn1, pn2 = proteins[p1], proteins[p2]
                    mtr = np.zeros((pl1 + 1, pl2 + 1))
                    print('Creating matrix for: ', p1, p2, pn1, pn2, mtr.shape, pl1, pl2)
                    # Expand particle-level contacts to residue-level cells.
                    for i1 in range(1, pl1 + 1):
                        for i2 in range(1, pl2 + 1):
                            try:
                                r1 = K.index(self.resmap[pn1][i1])
                                r2 = K.index(self.resmap[pn2][i2])
                                r = M[r1, r2]
                                mtr[i1 - 1, i2 - 1] = r
                            except KeyError:
                                # Residue not covered by any particle.
                                missing.append((pn1, pn2, i1, i2))
                                pass
                    Matrices[(pn1, pn2)] = mtr

        # add cross-links
        if skip_xl == 0:
            if self.XL == {}:
                raise ValueError("cross-links were not provided, use add_xlinks function!")
            Matrices_xl = {}
            missing_xl = []
            for p1 in range(len(proteins)):
                for p2 in range(p1, len(proteins)):
                    pl1, pl2 = max(
                        self.resmap[proteins[p1]].keys()), max(self.resmap[proteins[p2]].keys())
                    pn1, pn2 = proteins[p1], proteins[p2]
                    mtr = np.zeros((pl1 + 1, pl2 + 1))
                    # flg: 0 = pair found as stored, 1 = found reversed, 2 = absent.
                    flg = 0
                    try:
                        xls = self.XL[(pn1, pn2)]
                    except KeyError:
                        try:
                            xls = self.XL[(pn2, pn1)]
                            flg = 1
                        except KeyError:
                            flg = 2

                    if flg == 0:
                        print('Creating matrix for: ', p1, p2, pn1, pn2, mtr.shape, pl1, pl2)
                        for xl1, xl2 in xls:
                            # Clamp out-of-range residue numbers to the protein length.
                            if xl1 > pl1:
                                print('X' * 10, xl1, xl2)
                                xl1 = pl1
                            if xl2 > pl2:
                                print('X' * 10, xl1, xl2)
                                xl2 = pl2
                            mtr[xl1 - 1, xl2 - 1] = 100
                    elif flg == 1:
                        print('Creating matrix for: ', p1, p2, pn1, pn2, mtr.shape, pl1, pl2)
                        for xl1, xl2 in xls:
                            if xl1 > pl1:
                                print('X' * 10, xl1, xl2)
                                xl1 = pl1
                            if xl2 > pl2:
                                print('X' * 10, xl1, xl2)
                                xl2 = pl2
                            # Reversed orientation: transpose the cell indices.
                            mtr[xl2 - 1, xl1 - 1] = 100
                    else:
                        print('No cross links between: ', pn1, pn2)
                    Matrices_xl[(pn1, pn2)] = mtr

        # expand the matrix to individual residues
        #NewM = []
        # for x1 in xrange(len(K)):
        #     lst = []
        #     for x2 in xrange(len(K)):
        #         lst += [M[x1,x2]]*self.expanded[K[x2]]
        #     for i in xrange(self.expanded[K[x1]]): NewM.append(np.array(lst))
        #NewM = np.array(NewM)

        # make list of protein names and create coordinate lists
        C = proteins
        # W is the component length list,
        # R is the contiguous coordinates list
        W, R = [], []
        for i, c in enumerate(C):
            cl = max(self.resmap[c].keys())
            W.append(cl)
            if i == 0:
                R.append(cl)
            else:
                R.append(R[-1] + cl)

        # start plotting
        if outname:
            # Don't require a display
            import matplotlib as mpl
            mpl.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec
        import scipy.sparse as sparse

        f = plt.figure()
        # One subplot per protein pair, sized proportionally to protein lengths.
        gs = gridspec.GridSpec(len(W), len(W),
                               width_ratios=W,
                               height_ratios=W)

        cnt = 0
        for x1, r1 in enumerate(R):
            if x1 == 0:
                s1 = 0
            else:
                s1 = R[x1 - 1]
            for x2, r2 in enumerate(R):
                if x2 == 0:
                    s2 = 0
                else:
                    s2 = R[x2 - 1]

                ax = plt.subplot(gs[cnt])
                if skip_cmap == 0:
                    try:
                        mtr = Matrices[(C[x1], C[x2])]
                    except KeyError:
                        # Pair stored in the other order: transpose.
                        mtr = Matrices[(C[x2], C[x1])].T
                    #cax = ax.imshow(log(NewM[s1:r1,s2:r2] / 1.), interpolation='nearest', vmin=0., vmax=log(NewM.max()))
                    # NOTE(review): bare log here (vs np.log two lines up)
                    # relies on a module-level import not visible in this
                    # chunk — confirm `from math import log` (or similar).
                    cax = ax.imshow(
                        np.log(mtr),
                        interpolation='nearest',
                        vmin=0.,
                        vmax=log(mtr.max()))
                    ax.set_xticks([])
                    ax.set_yticks([])
                if skip_xl == 0:
                    try:
                        mtr = Matrices_xl[(C[x1], C[x2])]
                    except KeyError:
                        mtr = Matrices_xl[(C[x2], C[x1])].T
                    cax = ax.spy(
                        sparse.csr_matrix(mtr),
                        markersize=10,
                        color='white',
                        linewidth=100,
                        alpha=0.5)
                    ax.set_xticks([])
                    ax.set_yticks([])

                cnt += 1
                if x2 == 0:
                    ax.set_ylabel(C[x1], rotation=90)
        if outname:
            plt.savefig(outname + ".pdf", dpi=300, transparent="False")
        else:
            plt.show()
# ------------------------------------------------------------------
# a few random tools
def get_hiers_from_rmf(model, frame_number, rmf_file):
    """Create hierarchies from an RMF file and load the requested frame.

    @return the created hierarchies, or None if the file/frame can't be read
    """
    # I have to deprecate this function
    # NOTE(review): shadowed by a later definition of the same name in this module.
    print("getting coordinates for frame %i rmf file %s" % (frame_number, rmf_file))

    # load the frame
    handle = RMF.open_rmf_file_read_only(rmf_file)

    try:
        hierarchies = IMP.rmf.create_hierarchies(handle, model)
    except IOError:
        print("Unable to open rmf file %s" % (rmf_file))
        return None
    #IMP.rmf.link_hierarchies(rh, prots)
    try:
        IMP.rmf.load_frame(handle, RMF.FrameID(frame_number))
    except IOError:
        print("Unable to open frame %i of file %s" % (frame_number, rmf_file))
        return None
    model.update()
    del handle
    return hierarchies
def link_hiers_to_rmf(model, hiers, frame_number, rmf_file):
    """Link existing hierarchies to an RMF file and load one frame.

    @param model the IMP Model to update after loading
    @param hiers hierarchies previously created from a compatible RMF
    @param frame_number the frame index to load
    @param rmf_file path to the RMF file
    @return True on success, False if the frame could not be loaded
    """
    print("linking hierarchies for frame %i rmf file %s" % (frame_number, rmf_file))
    rh = RMF.open_rmf_file_read_only(rmf_file)
    IMP.rmf.link_hierarchies(rh, hiers)
    try:
        IMP.rmf.load_frame(rh, RMF.FrameID(frame_number))
    # Was a bare except: don't swallow KeyboardInterrupt/SystemExit.
    except Exception:
        print("Unable to open frame %i of file %s" % (frame_number, rmf_file))
        return False
    model.update()
    del rh
    return True
def get_hiers_and_restraints_from_rmf(model, frame_number, rmf_file):
    """Create hierarchies and restraints from an RMF file and load one frame.

    @return (hierarchies, restraints), or (None, None) on failure
    """
    # I have to deprecate this function
    print("getting coordinates for frame %i rmf file %s" % (frame_number, rmf_file))

    # load the frame
    rh = RMF.open_rmf_file_read_only(rmf_file)

    try:
        prots = IMP.rmf.create_hierarchies(rh, model)
        rs = IMP.rmf.create_restraints(rh, model)
    # Was a bare except: don't swallow KeyboardInterrupt/SystemExit.
    except Exception:
        print("Unable to open rmf file %s" % (rmf_file))
        return None, None
    try:
        IMP.rmf.load_frame(rh, RMF.FrameID(frame_number))
    except Exception:
        print("Unable to open frame %i of file %s" % (frame_number, rmf_file))
        return None, None
    model.update()
    del rh
    return prots, rs
def link_hiers_and_restraints_to_rmf(model, hiers, rs, frame_number, rmf_file):
    """Link existing hierarchies and restraints to an RMF file and load one frame.

    @return True on success, False if the frame could not be loaded
    """
    print("linking hierarchies for frame %i rmf file %s" % (frame_number, rmf_file))
    rh = RMF.open_rmf_file_read_only(rmf_file)
    IMP.rmf.link_hierarchies(rh, hiers)
    IMP.rmf.link_restraints(rh, rs)
    try:
        IMP.rmf.load_frame(rh, RMF.FrameID(frame_number))
    # Was a bare except: don't swallow KeyboardInterrupt/SystemExit.
    except Exception:
        print("Unable to open frame %i of file %s" % (frame_number, rmf_file))
        return False
    model.update()
    del rh
    return True
def get_hiers_from_rmf(model, frame_number, rmf_file):
    """Create hierarchies from an RMF file and load the requested frame.

    NOTE(review): duplicate definition — this shadows an earlier function of
    the same name in this module; consider removing one of the two.

    @return the created hierarchies, or None on failure
    """
    print("getting coordinates for frame %i rmf file %s" % (frame_number, rmf_file))

    # load the frame
    rh = RMF.open_rmf_file_read_only(rmf_file)

    try:
        prots = IMP.rmf.create_hierarchies(rh, model)
    # Was a bare except: don't swallow KeyboardInterrupt/SystemExit.
    except Exception:
        print("Unable to open rmf file %s" % (rmf_file))
        return None
    #IMP.rmf.link_hierarchies(rh, prots)
    try:
        IMP.rmf.load_frame(rh, RMF.FrameID(frame_number))
    except Exception:
        print("Unable to open frame %i of file %s" % (frame_number, rmf_file))
        # Keep the original behavior: still update the model and return None.
        prots = None
    model.update()
    del rh
    return prots
def get_particles_at_resolution_one(prot):
    """Get particles at res 1, or any beads, based on the name.
    No Representation is needed. This is mainly used when the hierarchy
    is read from an RMF file.
    @return a dictionary of component names and their particles
    \note If the root node is named "System" or is a "State", do proper selection.
    """
    particle_dict = {}

    # attempt to give good results for PMI2: canonical hierarchies support
    # direct resolution-based selection.
    if IMP.pmi.get_is_canonical(prot):
        for mol in IMP.atom.get_by_type(prot, IMP.atom.MOLECULE_TYPE):
            sel = IMP.atom.Selection(mol, resolution=1)
            particle_dict[mol.get_name()] = sel.get_selected_particles()
    else:
        # PMI1: gather leaves from "_Res:1" subtrees (excluding "_Res:10")
        # plus any bead nodes, then keep only those leaves per component.
        allparticles = []
        for c in prot.get_children():
            name = c.get_name()
            particle_dict[name] = IMP.atom.get_leaves(c)
            for s in c.get_children():
                if "_Res:1" in s.get_name() and "_Res:10" not in s.get_name():
                    allparticles += IMP.atom.get_leaves(s)
                if "Beads" in s.get_name():
                    allparticles += IMP.atom.get_leaves(s)

        # (removed unused local `particle_align`)
        for name in particle_dict:
            particle_dict[name] = IMP.pmi.tools.sort_by_residues(
                list(set(particle_dict[name]) & set(allparticles)))
    return particle_dict
def get_particles_at_resolution_ten(prot):
    """Get particles at res 10, or any beads, based on the name.
    No Representation is needed.
    This is mainly used when the hierarchy is read from an RMF file.
    @return a dictionary of component names and their particles
    \note If the root node is named "System" or is a "State", do proper selection.
    """
    particle_dict = {}

    # attempt to give good results for PMI2: canonical hierarchies support
    # direct resolution-based selection.
    if IMP.pmi.get_is_canonical(prot):
        for mol in IMP.atom.get_by_type(prot, IMP.atom.MOLECULE_TYPE):
            sel = IMP.atom.Selection(mol, resolution=10)
            particle_dict[mol.get_name()] = sel.get_selected_particles()
    else:
        # PMI1: gather leaves from "_Res:10" subtrees plus any bead nodes,
        # then keep only those leaves per component.
        allparticles = []
        for c in prot.get_children():
            name = c.get_name()
            particle_dict[name] = IMP.atom.get_leaves(c)
            for s in c.get_children():
                if "_Res:10" in s.get_name():
                    allparticles += IMP.atom.get_leaves(s)
                if "Beads" in s.get_name():
                    allparticles += IMP.atom.get_leaves(s)

        # (removed unused local `particle_align`)
        for name in particle_dict:
            particle_dict[name] = IMP.pmi.tools.sort_by_residues(
                list(set(particle_dict[name]) & set(allparticles)))
    return particle_dict
def select_by_tuple(first_res_last_res_name_tuple):
    """Unpack a (first_residue, last_residue, name) selection tuple.

    Fixes a NameError in the original: the body referred to
    ``first_res_last_res_hier_tuple``, which does not exist; the parameter
    is ``first_res_last_res_name_tuple``.

    @param first_res_last_res_name_tuple tuple/sequence of
           (first_res, last_res, name)
    @return the (first_res, last_res, name) triple
    """
    first_res = first_res_last_res_name_tuple[0]
    last_res = first_res_last_res_name_tuple[1]
    name = first_res_last_res_name_tuple[2]
    return first_res, last_res, name
class CrossLinkTable(object):
"""Visualization of crosslinks"""
def __init__(self):
self.crosslinks = []
self.external_csv_data = None
self.crosslinkedprots = set()
self.mindist = +10000000.0
self.maxdist = -10000000.0
self.contactmap = None
def set_hierarchy(self, prot):
self.prot_length_dict = {}
self.model=prot.get_model()
for i in prot.get_children():
name = i.get_name()
residue_indexes = []
for p in IMP.atom.get_leaves(i):
residue_indexes += IMP.pmi.tools.get_residue_indexes(p)
if len(residue_indexes) != 0:
self.prot_length_dict[name] = max(residue_indexes)
    def set_coordinates_for_contact_map(self, rmf_name, rmf_frame_index):
        """Read one RMF frame and accumulate a residue-level contact map.

        @param rmf_name path of the RMF file to read
        @param rmf_frame_index frame number to load
        """
        from scipy.spatial.distance import cdist

        rh = RMF.open_rmf_file_read_only(rmf_name)
        prots = IMP.rmf.create_hierarchies(rh, self.model)
        IMP.rmf.load_frame(rh, RMF.FrameID(rmf_frame_index))
        print("getting coordinates for frame %i rmf file %s" % (rmf_frame_index, rmf_name))
        del rh

        coords = []
        radii = []
        namelist = []

        particles_dictionary = get_particles_at_resolution_one(prots[0])

        resindex = 0
        self.index_dictionary = {}   # component name -> row/col indexes in contactmap

        for name in particles_dictionary:
            residue_indexes = []
            for p in particles_dictionary[name]:
                print(p.get_name())
                # NOTE(review): `=` (not `+=`) — unlike similar loops elsewhere
                # in this module, this keeps only the current particle's
                # residues; confirm this is intended.
                residue_indexes = IMP.pmi.tools.get_residue_indexes(p)
                #residue_indexes.add( )

                if len(residue_indexes) != 0:
                    # One contact-map row per residue the particle covers,
                    # all sharing the particle's coordinates and radius.
                    for res in range(min(residue_indexes), max(residue_indexes) + 1):
                        d = IMP.core.XYZR(p)
                        crd = np.array([d.get_x(), d.get_y(), d.get_z()])
                        coords.append(crd)
                        radii.append(d.get_radius())
                        if name not in self.index_dictionary:
                            self.index_dictionary[name] = [resindex]
                        else:
                            self.index_dictionary[name].append(resindex)
                        resindex += 1

        coords = np.array(coords)
        radii = np.array(radii)

        # Surface-to-surface distances, then binarize.
        # NOTE(review): cutoff is hard-coded to 20.0 here (self.distance is
        # not used) — confirm this is intended.
        distances = cdist(coords, coords)
        distances = (distances - radii).T - radii
        distances = np.where(distances <= 20.0, 1.0, 0)
        if self.contactmap is None:
            self.contactmap = np.zeros((len(coords), len(coords)))
        self.contactmap += distances

        # Free the temporary hierarchies created from this frame.
        for prot in prots: IMP.atom.destroy(prot)
    def set_crosslinks(
        self, data_file, search_label='ISDCrossLinkMS_Distance_',
        mapping=None,
        filter_label=None,
        filter_rmf_file_names=None, #provide a list of rmf base names to filter the stat file
        external_csv_data_file=None,
        external_csv_data_file_unique_id_key="Unique ID"):
        """Parse cross-link distance fields out of a stat file and populate
        the per-crosslink frequency/distance bookkeeping dictionaries.

        @param data_file stat file to process
        @param search_label substring identifying cross-link keys in the stat file
        @param mapping optional dict mapping standard keywords
               ("Residue1", "Protein1", ...) to token positions in the key
        @param filter_label if given, only keys containing this token are kept
        @param filter_rmf_file_names list of rmf base names used to filter frames
        @param external_csv_data_file optional CSV with extra per-crosslink data
        @param external_csv_data_file_unique_id_key CSV column holding the unique ID
        """

        # example key ISDCrossLinkMS_Distance_intrarb_937-State:0-108:RPS3_55:RPS30-1-1-0.1_None

        # mapping is a dictionary that maps standard keywords to entry positions in the key string
        # confidence class is a filter that
        # external datafile is a datafile that contains further information on the crosslinks
        # it will use the unique id to create the dictionary keys

        po = IMP.pmi.output.ProcessOutput(data_file)
        keys = po.get_keys()

        xl_keys = [k for k in keys if search_label in k]

        # When filtering by rmf file, also fetch the per-frame rmf file name.
        if filter_rmf_file_names is not None:
            rmf_file_key = "local_rmf_file_name"
            fs = po.get_fields(xl_keys+[rmf_file_key])
        else:
            fs = po.get_fields(xl_keys)

        # this dictionary stores the occurency of given crosslinks
        self.cross_link_frequency = {}

        # this dictionary stores the series of distances for given crosslinked
        # residues
        self.cross_link_distances = {}

        # this dictionary stores the series of distances for given crosslinked
        # residues
        self.cross_link_distances_unique = {}

        if not external_csv_data_file is None:
            # this dictionary stores the further information on crosslinks
            # labeled by unique ID
            self.external_csv_data = {}
            xldb = IMP.pmi.tools.get_db_from_csv(external_csv_data_file)

            for xl in xldb:
                self.external_csv_data[
                    xl[external_csv_data_file_unique_id_key]] = xl

        # this list keeps track the tuple of cross-links and sample
        # so that we don't count twice the same crosslinked residues in the
        # same sample
        cross_link_frequency_list = []

        self.unique_cross_link_list = []

        for key in xl_keys:
            print(key)
            # Normalize all separators to spaces, then tokenize.
            keysplit = key.replace(
                "_",
                " ").replace(
                "-",
                " ").replace(
                ":",
                " ").split(
            )

            if filter_label != None:
                if filter_label not in keysplit: continue

            # Extract residues/proteins/confidence/uid either from fixed
            # token positions (default) or via the user-supplied mapping.
            if mapping is None:
                r1 = int(keysplit[5])
                c1 = keysplit[6]
                r2 = int(keysplit[7])
                c2 = keysplit[8]
                try:
                    confidence = keysplit[12]
                except:
                    confidence = '0.0'
                try:
                    unique_identifier = keysplit[3]
                except:
                    unique_identifier = '0'
            else:
                r1 = int(keysplit[mapping["Residue1"]])
                c1 = keysplit[mapping["Protein1"]]
                r2 = int(keysplit[mapping["Residue2"]])
                c2 = keysplit[mapping["Protein2"]]
                try:
                    confidence = keysplit[mapping["Confidence"]]
                except:
                    confidence = '0.0'
                try:
                    unique_identifier = keysplit[mapping["Unique Identifier"]]
                except:
                    unique_identifier = '0'

            self.crosslinkedprots.add(c1)
            self.crosslinkedprots.add(c2)

            # construct the list of distances corresponding to the input rmf
            # files

            dists = []
            if filter_rmf_file_names is not None:
                # Keep only frames whose rmf file is in the allowed list.
                for n, d in enumerate(fs[key]):
                    if fs[rmf_file_key][n] in filter_rmf_file_names:
                        dists.append(float(d))
            else:
                dists = [float(f) for f in fs[key]]

            # check if the input confidence class corresponds to the
            # one of the cross-link

            mdist = self.median(dists)

            stdv = np.std(np.array(dists))
            if self.mindist > mdist:
                self.mindist = mdist
            if self.maxdist < mdist:
                self.maxdist = mdist

            # calculate the frequency of unique crosslinks within the same
            # sample
            # NOTE(review): `sample` is computed here but not used in the
            # frequency key below — confirm whether it should be.
            if not self.external_csv_data is None:
                sample = self.external_csv_data[unique_identifier]["Sample"]
            else:
                sample = "None"

            if (r1, c1, r2, c2, mdist) not in cross_link_frequency_list:
                # Count the pair symmetrically (both orientations).
                if (r1, c1, r2, c2) not in self.cross_link_frequency:
                    self.cross_link_frequency[(r1, c1, r2, c2)] = 1
                    self.cross_link_frequency[(r2, c2, r1, c1)] = 1
                else:
                    self.cross_link_frequency[(r2, c2, r1, c1)] += 1
                    self.cross_link_frequency[(r1, c1, r2, c2)] += 1
                cross_link_frequency_list.append((r1, c1, r2, c2))
                cross_link_frequency_list.append((r2, c2, r1, c1))
                self.unique_cross_link_list.append(
                    (r1, c1, r2, c2, mdist))

            # Store the distance series under both orientations.
            if (r1, c1, r2, c2) not in self.cross_link_distances:
                self.cross_link_distances[(
                    r1,
                    c1,
                    r2,
                    c2,
                    mdist,
                    confidence)] = dists
                self.cross_link_distances[(
                    r2,
                    c2,
                    r1,
                    c1,
                    mdist,
                    confidence)] = dists
                self.cross_link_distances_unique[(r1, c1, r2, c2)] = dists
            else:
                self.cross_link_distances[(
                    r2,
                    c2,
                    r1,
                    c1,
                    mdist,
                    confidence)] += dists
                self.cross_link_distances[(
                    r1,
                    c1,
                    r2,
                    c2,
                    mdist,
                    confidence)] += dists

            self.crosslinks.append(
                (r1,
                 c1,
                 r2,
                 c2,
                 mdist,
                 stdv,
                 confidence,
                 unique_identifier,
                 'original'))
            self.crosslinks.append(
                (r2,
                 c2,
                 r1,
                 c1,
                 mdist,
                 stdv,
                 confidence,
                 unique_identifier,
                 'reversed'))

        # Invert the frequency map: frequency -> list of crosslink tuples.
        self.cross_link_frequency_inverted = {}
        for xl in self.unique_cross_link_list:
            (r1, c1, r2, c2, mdist) = xl
            frequency = self.cross_link_frequency[(r1, c1, r2, c2)]
            if frequency not in self.cross_link_frequency_inverted:
                self.cross_link_frequency_inverted[
                    frequency] = [(r1, c1, r2, c2)]
            else:
                self.cross_link_frequency_inverted[
                    frequency].append((r1, c1, r2, c2))
# -------------
def median(self, mylist):
sorts = sorted(mylist)
length = len(sorts)
print(length)
if length == 1:
return mylist[0]
if not length % 2:
return (sorts[length / 2] + sorts[length / 2 - 1]) / 2.0
return sorts[length / 2]
    def set_threshold(self, threshold):
        """Set the distance threshold used by colormap()."""
        self.threshold = threshold
    def set_tolerance(self, tolerance):
        """Set the tolerance band around the threshold used by colormap()."""
        self.tolerance = tolerance
def colormap(self, dist):
if dist < self.threshold - self.tolerance:
return "Green"
elif dist >= self.threshold + self.tolerance:
return "Orange"
else:
return "Red"
def write_cross_link_database(self, filename, format='csv'):
import csv
fieldnames = [
"Unique ID", "Protein1", "Residue1", "Protein2", "Residue2",
"Median Distance", "Standard Deviation", "Confidence", "Frequency", "Arrangement"]
if not self.external_csv_data is None:
keys = list(self.external_csv_data.keys())
innerkeys = list(self.external_csv_data[keys[0]].keys())
innerkeys.sort()
fieldnames += innerkeys
dw = csv.DictWriter(
open(filename,
"w"),
delimiter=',',
fieldnames=fieldnames)
dw.writeheader()
for xl in self.crosslinks:
(r1, c1, r2, c2, mdist, stdv, confidence,
unique_identifier, descriptor) = xl
if descriptor == 'original':
outdict = {}
outdict["Unique ID"] = unique_identifier
outdict["Protein1"] = c1
outdict["Protein2"] = c2
outdict["Residue1"] = r1
outdict["Residue2"] = r2
outdict["Median Distance"] = mdist
outdict["Standard Deviation"] = stdv
outdict["Confidence"] = confidence
outdict["Frequency"] = self.cross_link_frequency[
(r1, c1, r2, c2)]
if c1 == c2:
arrangement = "Intra"
else:
arrangement = "Inter"
outdict["Arrangement"] = arrangement
if not self.external_csv_data is None:
outdict.update(self.external_csv_data[unique_identifier])
dw.writerow(outdict)
def plot(self, prot_listx=None, prot_listy=None, no_dist_info=False,
no_confidence_info=False, filter=None, layout="whole", crosslinkedonly=False,
filename=None, confidence_classes=None, alphablend=0.1, scale_symbol_size=1.0,
gap_between_components=0,
rename_protein_map=None):
# layout can be:
# "lowerdiagonal" print only the lower diagonal plot
# "upperdiagonal" print only the upper diagonal plot
# "whole" print all
# crosslinkedonly: plot only components that have crosslinks
# no_dist_info: if True will plot only the cross-links as grey spots
# filter = tuple the tuple contains a keyword to be search in the database
# a relationship ">","==","<"
# and a value
# example ("ID_Score",">",40)
# scale_symbol_size rescale the symbol for the crosslink
# rename_protein_map is a dictionary to rename proteins
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
# set the list of proteins on the x axis
if prot_listx is None:
if crosslinkedonly:
prot_listx = list(self.crosslinkedprots)
else:
prot_listx = list(self.prot_length_dict.keys())
prot_listx.sort()
nresx = gap_between_components + \
sum([self.prot_length_dict[name]
+ gap_between_components for name in prot_listx])
# set the list of proteins on the y axis
if prot_listy is None:
if crosslinkedonly:
prot_listy = list(self.crosslinkedprots)
else:
prot_listy = list(self.prot_length_dict.keys())
prot_listy.sort()
nresy = gap_between_components + \
sum([self.prot_length_dict[name]
+ gap_between_components for name in prot_listy])
# this is the residue offset for each protein
resoffsetx = {}
resendx = {}
res = gap_between_components
for prot in prot_listx:
resoffsetx[prot] = res
res += self.prot_length_dict[prot]
resendx[prot] = res
res += gap_between_components
resoffsety = {}
resendy = {}
res = gap_between_components
for prot in prot_listy:
resoffsety[prot] = res
res += self.prot_length_dict[prot]
resendy[prot] = res
res += gap_between_components
resoffsetdiagonal = {}
res = gap_between_components
for prot in IMP.pmi.tools.OrderedSet(prot_listx + prot_listy):
resoffsetdiagonal[prot] = res
res += self.prot_length_dict[prot]
res += gap_between_components
# plot protein boundaries
xticks = []
xlabels = []
for n, prot in enumerate(prot_listx):
res = resoffsetx[prot]
end = resendx[prot]
for proty in prot_listy:
resy = resoffsety[proty]
endy = resendy[proty]
ax.plot([res, res], [resy, endy], 'k-', lw=0.4)
ax.plot([end, end], [resy, endy], 'k-', lw=0.4)
xticks.append((float(res) + float(end)) / 2)
if rename_protein_map is not None:
if prot in rename_protein_map:
prot=rename_protein_map[prot]
xlabels.append(prot)
yticks = []
ylabels = []
for n, prot in enumerate(prot_listy):
res = resoffsety[prot]
end = resendy[prot]
for protx in prot_listx:
resx = resoffsetx[protx]
endx = resendx[protx]
ax.plot([resx, endx], [res, res], 'k-', lw=0.4)
ax.plot([resx, endx], [end, end], 'k-', lw=0.4)
yticks.append((float(res) + float(end)) / 2)
if rename_protein_map is not None:
if prot in rename_protein_map:
prot=rename_protein_map[prot]
ylabels.append(prot)
# plot the contact map
print(prot_listx, prot_listy)
if not self.contactmap is None:
import matplotlib.cm as cm
tmp_array = np.zeros((nresx, nresy))
for px in prot_listx:
print(px)
for py in prot_listy:
print(py)
resx = resoffsety[px]
lengx = resendx[px] - 1
resy = resoffsety[py]
lengy = resendy[py] - 1
indexes_x = self.index_dictionary[px]
minx = min(indexes_x)
maxx = max(indexes_x)
indexes_y = self.index_dictionary[py]
miny = min(indexes_y)
maxy = max(indexes_y)
print(px, py, minx, maxx, miny, maxy)
try:
tmp_array[
resx:lengx,
resy:lengy] = self.contactmap[
minx:maxx,
miny:maxy]
except:
continue
ax.imshow(tmp_array,
cmap=cm.binary,
origin='lower',
interpolation='nearest')
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, rotation=90)
ax.set_yticks(yticks)
ax.set_yticklabels(ylabels)
ax.set_xlim(0,nresx)
ax.set_ylim(0,nresy)
# set the crosslinks
already_added_xls = []
for xl in self.crosslinks:
(r1, c1, r2, c2, mdist, stdv, confidence,
unique_identifier, descriptor) = xl
if confidence_classes is not None:
if confidence not in confidence_classes:
continue
try:
pos1 = r1 + resoffsetx[c1]
except:
continue
try:
pos2 = r2 + resoffsety[c2]
except:
continue
if not filter is None:
xldb = self.external_csv_data[unique_identifier]
xldb.update({"Distance": mdist})
xldb.update({"Distance_stdv": stdv})
if filter[1] == ">":
if float(xldb[filter[0]]) <= float(filter[2]):
continue
if filter[1] == "<":
if float(xldb[filter[0]]) >= float(filter[2]):
continue
if filter[1] == "==":
if float(xldb[filter[0]]) != float(filter[2]):
continue
# all that below is used for plotting the diagonal
# when you have a rectangolar plots
pos_for_diagonal1 = r1 + resoffsetdiagonal[c1]
pos_for_diagonal2 = r2 + resoffsetdiagonal[c2]
if layout == 'lowerdiagonal':
if pos_for_diagonal1 <= pos_for_diagonal2:
continue
if layout == 'upperdiagonal':
if pos_for_diagonal1 >= pos_for_diagonal2:
continue
already_added_xls.append((r1, c1, r2, c2))
if not no_confidence_info:
if confidence == '0.01':
markersize = 14 * scale_symbol_size
elif confidence == '0.05':
markersize = 9 * scale_symbol_size
elif confidence == '0.1':
markersize = 6 * scale_symbol_size
else:
markersize = 15 * scale_symbol_size
else:
markersize = 5 * scale_symbol_size
if not no_dist_info:
color = self.colormap(mdist)
else:
color = "gray"
ax.plot(
[pos1],
[pos2],
'o',
c=color,
alpha=alphablend,
markersize=markersize)
fig.set_size_inches(0.004 * nresx, 0.004 * nresy)
[i.set_linewidth(2.0) for i in ax.spines.values()]
#plt.tight_layout()
if filename:
plt.savefig(filename + ".pdf", dpi=300, transparent="False")
else:
plt.show()
def get_frequency_statistics(self, prot_list,
                             prot_list2=None):
    """Print histograms of cross-link frequency, split into satisfied vs violated.

    A unique cross-link counts as violated when its median distance exceeds
    35 A, otherwise as satisfied.  Links are kept only when both termini lie
    in prot_list (or bridge prot_list and prot_list2 when the latter is
    given).  Results are printed to stdout; nothing is returned.
    """
    violated_histogram = {}
    satisfied_histogram = {}
    unique_cross_links = []
    for xl in self.unique_cross_link_list:
        (r1, c1, r2, c2, mdist) = xl
        # here we filter by the protein
        if prot_list2 is None:
            if not c1 in prot_list:
                continue
            if not c2 in prot_list:
                continue
        else:
            if c1 in prot_list and c2 in prot_list2:
                pass
            elif c1 in prot_list2 and c2 in prot_list:
                pass
            else:
                continue
        frequency = self.cross_link_frequency[(r1, c1, r2, c2)]
        if (r1, c1, r2, c2) not in unique_cross_links:
            # 35 A is the distance cutoff used throughout this class
            if mdist > 35.0:
                if frequency not in violated_histogram:
                    violated_histogram[frequency] = 1
                else:
                    violated_histogram[frequency] += 1
            else:
                if frequency not in satisfied_histogram:
                    satisfied_histogram[frequency] = 1
                else:
                    satisfied_histogram[frequency] += 1
            # store both orientations so the symmetric duplicate is skipped
            unique_cross_links.append((r1, c1, r2, c2))
            unique_cross_links.append((r2, c2, r1, c1))
    print("# satisfied")
    total_number_of_crosslinks=0
    for i in satisfied_histogram:
        # frequencies seen in both histograms are printed combined,
        # but only the satisfied count contributes to the running total here
        if i in violated_histogram:
            print(i, violated_histogram[i]+satisfied_histogram[i])
        else:
            print(i, satisfied_histogram[i])
        total_number_of_crosslinks+=i*satisfied_histogram[i]
    print("# violated")
    for i in violated_histogram:
        print(i, violated_histogram[i])
        total_number_of_crosslinks+=i*violated_histogram[i]
    print(total_number_of_crosslinks)
# ------------
def print_cross_link_binary_symbols(self, prot_list,
                                    prot_list2=None):
    """Print per-model satisfaction counts of "original" cross-links.

    For every filtered cross-link a binary vector over models is built
    (1 when that model's distance is < 35 A).  The matrix is then
    transposed so each row is a model, and satisfaction counts are printed
    per model, broken down by confidence class ("0.01" high, "0.05" mid,
    "0.1" low), followed by the per-class averages over all models.
    """
    tmp_matrix = []
    confidence_list = []
    for xl in self.crosslinks:
        (r1, c1, r2, c2, mdist, stdv, confidence,
         unique_identifier, descriptor) = xl
        # protein filter: intra prot_list, or bridging prot_list/prot_list2
        if prot_list2 is None:
            if not c1 in prot_list:
                continue
            if not c2 in prot_list:
                continue
        else:
            if c1 in prot_list and c2 in prot_list2:
                pass
            elif c1 in prot_list2 and c2 in prot_list:
                pass
            else:
                continue
        # only the primary record of each cross-link, not its duplicates
        if descriptor != "original":
            continue
        confidence_list.append(confidence)
        dists = self.cross_link_distances_unique[(r1, c1, r2, c2)]
        tmp_dist_binary = []
        for d in dists:
            if d < 35:
                tmp_dist_binary.append(1)
            else:
                tmp_dist_binary.append(0)
        tmp_matrix.append(tmp_dist_binary)
    # transpose: rows become models, columns become cross-links
    matrix = list(zip(*tmp_matrix))
    satisfied_high_sum = 0
    satisfied_mid_sum = 0
    satisfied_low_sum = 0
    total_satisfied_sum = 0
    for k, m in enumerate(matrix):
        satisfied_high = 0
        total_high = 0
        satisfied_mid = 0
        total_mid = 0
        satisfied_low = 0
        total_low = 0
        total_satisfied = 0
        total = 0
        for n, b in enumerate(m):
            if confidence_list[n] == "0.01":
                total_high += 1
                if b == 1:
                    satisfied_high += 1
                    satisfied_high_sum += 1
            elif confidence_list[n] == "0.05":
                total_mid += 1
                if b == 1:
                    satisfied_mid += 1
                    satisfied_mid_sum += 1
            elif confidence_list[n] == "0.1":
                total_low += 1
                if b == 1:
                    satisfied_low += 1
                    satisfied_low_sum += 1
            if b == 1:
                total_satisfied += 1
                total_satisfied_sum += 1
            total += 1
        # four lines per model: high, mid, low, and overall counts
        print(k, satisfied_high, total_high)
        print(k, satisfied_mid, total_mid)
        print(k, satisfied_low, total_low)
        print(k, total_satisfied, total)
    # average satisfied count per model, by confidence class
    print(float(satisfied_high_sum) / len(matrix))
    print(float(satisfied_mid_sum) / len(matrix))
    print(float(satisfied_low_sum) / len(matrix))
# ------------
def get_unique_crosslinks_statistics(self, prot_list,
                                     prot_list2=None):
    """Print satisfaction statistics of unique ("original") cross-links.

    First prints, per confidence class, how many unique cross-links have a
    median distance <= 35 A.  Then, for every model, prints the model's own
    satisfaction pattern as a 0/1 string next to the median pattern
    (satstr), and counts models that satisfy at least every cross-link the
    median structure satisfies.
    """
    print(prot_list)
    print(prot_list2)
    satisfied_high = 0
    total_high = 0
    satisfied_mid = 0
    total_mid = 0
    satisfied_low = 0
    total_low = 0
    total = 0
    tmp_matrix = []
    satisfied_string = []
    for xl in self.crosslinks:
        (r1, c1, r2, c2, mdist, stdv, confidence,
         unique_identifier, descriptor) = xl
        # protein filter: intra prot_list, or bridging prot_list/prot_list2
        if prot_list2 is None:
            if not c1 in prot_list:
                continue
            if not c2 in prot_list:
                continue
        else:
            if c1 in prot_list and c2 in prot_list2:
                pass
            elif c1 in prot_list2 and c2 in prot_list:
                pass
            else:
                continue
        # only the primary record of each cross-link
        if descriptor != "original":
            continue
        total += 1
        if confidence == "0.01":
            total_high += 1
            if mdist <= 35:
                satisfied_high += 1
        if confidence == "0.05":
            total_mid += 1
            if mdist <= 35:
                satisfied_mid += 1
        if confidence == "0.1":
            total_low += 1
            if mdist <= 35:
                satisfied_low += 1
        # satisfaction pattern of the median distances
        if mdist <= 35:
            satisfied_string.append(1)
        else:
            satisfied_string.append(0)
        dists = self.cross_link_distances_unique[(r1, c1, r2, c2)]
        tmp_dist_binary = []
        for d in dists:
            if d < 35:
                tmp_dist_binary.append(1)
            else:
                tmp_dist_binary.append(0)
        tmp_matrix.append(tmp_dist_binary)
    print("unique satisfied_high/total_high", satisfied_high, "/", total_high)
    print("unique satisfied_mid/total_mid", satisfied_mid, "/", total_mid)
    print("unique satisfied_low/total_low", satisfied_low, "/", total_low)
    print("total", total)
    # transpose: rows become models, columns become cross-links
    matrix = list(zip(*tmp_matrix))
    satisfied_models = 0
    satstr = ""
    for b in satisfied_string:
        if b == 0:
            satstr += "-"
        if b == 1:
            satstr += "*"
    for m in matrix:
        all_satisfied = True
        string = ""
        for n, b in enumerate(m):
            if b == 0:
                string += "0"
            if b == 1:
                string += "1"
            # a model fails only where it violates a link that the
            # median structure satisfies
            if b == 1 and satisfied_string[n] == 1:
                pass
            elif b == 1 and satisfied_string[n] == 0:
                pass
            elif b == 0 and satisfied_string[n] == 0:
                pass
            elif b == 0 and satisfied_string[n] == 1:
                all_satisfied = False
        if all_satisfied:
            satisfied_models += 1
        print(string)
        print(satstr, all_satisfied)
    print("models that satisfies the median satisfied crosslinks/total models", satisfied_models, len(matrix))
def plot_matrix_cross_link_distances_unique(self, figurename, prot_list,
                                            prot_list2=None):
    """Save a heat map of per-model distances for each unique cross-link.

    Rows are cross-links (sorted by total distance over all models),
    columns are models.  The figure is written to `figurename`.

    NOTE(review): this appends sum(dists) to the stored lists in
    self.cross_link_distances_unique, mutating the instance state as a
    side effect — confirm no caller relies on the original list length.
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pylab as pl
    tmp_matrix = []
    for kw in self.cross_link_distances_unique:
        (r1, c1, r2, c2) = kw
        dists = self.cross_link_distances_unique[kw]
        # protein filter: intra prot_list, or bridging prot_list/prot_list2
        if prot_list2 is None:
            if not c1 in prot_list:
                continue
            if not c2 in prot_list:
                continue
        else:
            if c1 in prot_list and c2 in prot_list2:
                pass
            elif c1 in prot_list2 and c2 in prot_list:
                pass
            else:
                continue
        # append the sum of dists to order by that in the matrix plot
        dists.append(sum(dists))
        tmp_matrix.append(dists)
    # sort rows by the appended total (last column)
    tmp_matrix.sort(key=itemgetter(len(tmp_matrix[0]) - 1))
    # copy everything except the sorting column into the plotted matrix
    matrix = np.zeros((len(tmp_matrix), len(tmp_matrix[0]) - 1))
    for i in range(len(tmp_matrix)):
        for k in range(len(tmp_matrix[i]) - 1):
            matrix[i][k] = tmp_matrix[i][k]
    print(matrix)
    fig = pl.figure()
    ax = fig.add_subplot(211)
    cax = ax.imshow(matrix, interpolation='nearest')
    fig.colorbar(cax)
    pl.savefig(figurename, dpi=300)
    pl.show()
def plot_bars(
        self,
        filename,
        prots1,
        prots2,
        nxl_per_row=20,
        arrangement="inter",
        confidence_input="None"):
    """Box-plot cross-link distance distributions, split into row chunks.

    Cross-links between prots1 and prots2 are selected, filtered by
    arrangement ("inter" drops same-protein links, "intra" drops
    cross-protein links) and by confidence, sorted by median distance,
    and drawn nxl_per_row per figure via
    IMP.pmi.output.plot_fields_box_plots (one output file per chunk).

    NOTE(review): confidence_input defaults to the *string* "None", which
    only matches records whose confidence field is literally "None" —
    confirm this is the intended sentinel.
    """
    data = []
    for xl in self.cross_link_distances:
        (r1, c1, r2, c2, mdist, confidence) = xl
        if c1 in prots1 and c2 in prots2:
            if arrangement == "inter" and c1 == c2:
                continue
            if arrangement == "intra" and c1 != c2:
                continue
            if confidence_input == confidence:
                label = str(c1) + ":" + str(r1) + \
                    "-" + str(c2) + ":" + str(r2)
                values = self.cross_link_distances[xl]
                frequency = self.cross_link_frequency[(r1, c1, r2, c2)]
                data.append((label, values, mdist, frequency))
    # sort by median distance, then unzip into parallel sequences
    sort_by_dist = sorted(data, key=lambda tup: tup[2])
    sort_by_dist = list(zip(*sort_by_dist))
    values = sort_by_dist[1]
    positions = list(range(len(values)))
    labels = sort_by_dist[0]
    frequencies = list(map(float, sort_by_dist[3]))
    frequencies = [f * 10.0 for f in frequencies]  # scale for marker size
    nchunks = int(float(len(values)) / nxl_per_row)
    values_chunks = IMP.pmi.tools.chunk_list_into_segments(values, nchunks)
    positions_chunks = IMP.pmi.tools.chunk_list_into_segments(
        positions,
        nchunks)
    frequencies_chunks = IMP.pmi.tools.chunk_list_into_segments(
        frequencies,
        nchunks)
    labels_chunks = IMP.pmi.tools.chunk_list_into_segments(labels, nchunks)
    # one output figure per chunk: filename.0, filename.1, ...
    for n, v in enumerate(values_chunks):
        p = positions_chunks[n]
        f = frequencies_chunks[n]
        l = labels_chunks[n]
        IMP.pmi.output.plot_fields_box_plots(
            filename + "." + str(n), v, p, f,
            valuename="Distance (Ang)", positionname="Unique " + arrangement + " Crosslinks", xlabels=l)
def crosslink_distance_histogram(self, filename,
                                 prot_list=None,
                                 prot_list2=None,
                                 confidence_classes=None,
                                 bins=40,
                                 color='#66CCCC',
                                 yplotrange=None,
                                 format="png",
                                 normalized=False):
    """Plot a histogram of median C-alpha/C-alpha cross-link distances.

    Parameters:
        filename: output file name passed to the plotting helper.
        prot_list: only cross-links whose proteins are in this list are
            counted; defaults to all proteins with a known length.
        prot_list2: if given, only links bridging prot_list and prot_list2
            are counted.
        confidence_classes: optional whitelist of confidence labels.
        bins, color, format, normalized: histogram styling options.
        yplotrange: y-axis range, defaults to [0, 1].  (Was a mutable
            list default argument; now created fresh on every call.)
    """
    if yplotrange is None:
        yplotrange = [0, 1]
    if prot_list is None:
        prot_list = list(self.prot_length_dict.keys())
    distances = []
    for xl in self.crosslinks:
        (r1, c1, r2, c2, mdist, stdv, confidence,
         unique_identifier, descriptor) = xl
        if confidence_classes is not None:
            if confidence not in confidence_classes:
                continue
        # protein filter: intra prot_list, or bridging prot_list/prot_list2
        if prot_list2 is None:
            if c1 not in prot_list:
                continue
            if c2 not in prot_list:
                continue
        else:
            if c1 in prot_list and c2 in prot_list2:
                pass
            elif c1 in prot_list2 and c2 in prot_list:
                pass
            else:
                continue
        distances.append(mdist)
    IMP.pmi.output.plot_field_histogram(
        filename, distances, valuename="C-alpha C-alpha distance [Ang]",
        bins=bins, color=color,
        format=format,
        reference_xline=35.0,  # 35 A cutoff used throughout this class
        yplotrange=yplotrange, normalized=normalized)
def scatter_plot_xl_features(self, filename,
                             feature1=None,
                             feature2=None,
                             prot_list=None,
                             prot_list2=None,
                             yplotrange=None,
                             reference_ylines=None,
                             distance_color=True,
                             format="png"):
    """Scatter-plot two columns of the external CSV data per cross-link.

    feature1/feature2 are column names looked up in
    self.external_csv_data; "Distance" and "Distance_stdv" are injected
    from the in-memory median distance and its standard deviation.  Points
    are optionally colored by distance via self.colormap; the figure is
    saved as filename.<format> when filename is truthy.
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    for xl in self.crosslinks:
        (r1, c1, r2, c2, mdist, stdv, confidence,
         unique_identifier, arrangement) = xl
        # protein filter: intra prot_list, or bridging prot_list/prot_list2
        if prot_list2 is None:
            if not c1 in prot_list:
                continue
            if not c2 in prot_list:
                continue
        else:
            if c1 in prot_list and c2 in prot_list2:
                pass
            elif c1 in prot_list2 and c2 in prot_list:
                pass
            else:
                continue
        # enrich the CSV record with computed distance statistics
        xldb = self.external_csv_data[unique_identifier]
        xldb.update({"Distance": mdist})
        xldb.update({"Distance_stdv": stdv})
        xvalue = float(xldb[feature1])
        yvalue = float(xldb[feature2])
        if distance_color:
            color = self.colormap(mdist)
        else:
            color = "gray"
        ax.plot([xvalue], [yvalue], 'o', c=color, alpha=0.1, markersize=7)
    if not yplotrange is None:
        ax.set_ylim(yplotrange)
    if not reference_ylines is None:
        for rl in reference_ylines:
            ax.axhline(rl, color='red', linestyle='dashed', linewidth=1)
    if filename:
        plt.savefig(filename + "." + format, dpi=150, transparent="False")
    plt.show()
| gpl-3.0 |
dereknewman/cancer_detection | extract_cubes_by_label.py | 1 | 7790 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 12:51:14 2017
@author: derek
"""
import pandas as pd
import SimpleITK
import numpy as np
import tensorflow as tf
import cv2
TARGET_VOXEL_MM = 0.682
BASE_DIR = "/media/derek/disk1/kaggle_ndsb2017/"
def normalize(image):
    """Map Hounsfield-unit image data onto the range [-0.5, 0.5].

    Values are linearly rescaled so that -1000 HU maps to -0.5 and
    400 HU maps to 0.5; anything outside that window is clipped to the
    nearest bound.
    """
    lower, upper = -1000.0, 400.0
    scaled = np.clip((image - lower) / (upper - lower), 0.0, 1.0)
    return scaled - 0.5
def extract_cube(image_array, z_perc, y_perc, x_perc):
    """Extract a 32x32x32 sub-volume from image_array.

    The cube is centred on the voxel given by the fractional coordinates
    (z_perc, y_perc, x_perc) of each axis.  When the centre lies within
    16 voxels of a border, the window is shifted back inside the array
    (rather than truncated) so the result keeps its full size.
    """
    def clamped_window(frac, size):
        # 16 voxels either side of the rounded centre, shifted in-bounds;
        # the low clamp is applied before the high clamp, as before.
        centre = int(round(frac * size))
        lo, hi = centre - 16, centre + 16
        if lo < 0:
            hi += abs(lo)
            lo = 0
        if hi > size:
            lo -= abs(hi - size)
            hi = size
        return lo, hi

    dim_z, dim_y, dim_x = image_array.shape
    z_lo, z_hi = clamped_window(z_perc, dim_z)
    y_lo, y_hi = clamped_window(y_perc, dim_y)
    x_lo, x_hi = clamped_window(x_perc, dim_x)
    return image_array[z_lo:z_hi, y_lo:y_hi, x_lo:x_hi]
def add_to_tfrecord(writer, image_cube, label):
    """Serialize one training example and append it to a TFRecord file.

    Args:
        writer: an open tf.python_io.TFRecordWriter.
        image_cube: 3-D array of image data (usually 32x32x32); cast to
            int16 before serialization.
        label: truth label for the cube, e.g. a
            (malignancy, spiculation, lobulation) tuple.

    Returns:
        Nothing.
    """
    image_cube = np.asarray(image_cube, np.int16)  # ensure data is in int16
    image_shape = image_cube.shape
    binary_cube = image_cube.tobytes()
    # BUG FIX: this previously serialized the module-level variable
    # 'image_label' instead of the 'label' parameter, silently ignoring
    # whatever label the caller passed in.
    binary_label = np.array(label, np.int16).tobytes()
    binary_shape = np.array(image_shape, np.int16).tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        'shape': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_shape])),
        'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_label])),
        'cube': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_cube]))
    }))
    writer.write(example.SerializeToString())
def rescale_patient_images(images_zyx, org_spacing_xyz, target_voxel_mm, verbose=False):
    """Resample a 3-D patient volume to isotropic target_voxel_mm voxels.

    Args:
        images_zyx: image volume indexed (z, y, x).
        org_spacing_xyz: original voxel spacing, ordered (x, y, z).
        target_voxel_mm: desired edge length of a voxel in mm.
        verbose: print shape/spacing diagnostics.

    Returns:
        The resampled (z, y, x) volume.  cv2.resize only works on 2-D
        images (with the third axis treated as channels), so the volume is
        resized along z first, then swapped so x/y can be resized.
    """
    if verbose:
        print("Spacing: ", org_spacing_xyz)
        print("Shape: ", images_zyx.shape)
    # resize along z: cv2 sees (z, y) as the image plane, x as channels
    resize_x = 1.0
    resize_y = float(org_spacing_xyz[2]) / float(target_voxel_mm)
    interpolation = cv2.INTER_LINEAR
    res = cv2.resize(images_zyx, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)  # opencv assumes y, x, channels umpy array, so y = z pfff
    # reorder axes so the (y, x) plane is the image and z the channels
    res = res.swapaxes(0, 2)
    res = res.swapaxes(0, 1)
    resize_x = float(org_spacing_xyz[0]) / float(target_voxel_mm)
    resize_y = float(org_spacing_xyz[1]) / float(target_voxel_mm)
    # cv2 can handle max 512 channels..
    if res.shape[2] > 512:
        # split the z axis in two halves, resize each, then re-stack
        res = res.swapaxes(0, 2)
        res1 = res[:256]
        res2 = res[256:]
        res1 = res1.swapaxes(0, 2)
        res2 = res2.swapaxes(0, 2)
        res1 = cv2.resize(res1, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)
        res2 = cv2.resize(res2, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)
        res1 = res1.swapaxes(0, 2)
        res2 = res2.swapaxes(0, 2)
        res = np.vstack([res1, res2])
        res = res.swapaxes(0, 2)
    else:
        res = cv2.resize(res, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)
    # restore (z, y, x) ordering
    res = res.swapaxes(0, 2)
    res = res.swapaxes(2, 1)
    if verbose:
        print("Shape after: ", res.shape)
    return res
###############################################################
# Script body: write one TFRecord file per patient/malignancy #
###############################################################
save_path = "/media/derek/disk1/kaggle_ndsb2017/resources/_tfrecords/"
full_dataframe = pd.read_csv(BASE_DIR + "patID_x_y_z_mal.csv")
patients = full_dataframe.patient_id.unique()
# example id: "1.3.6.1.4.1.14519.5.2.1.6279.6001.131939324905446238286154504249"
for patient in patients:
    patient_df = full_dataframe.loc[full_dataframe['patient_id'] == patient]  # dataframe associated to a single patient
    patient_df = patient_df.sort_values('z_center')
    patient_path = patient_df.file_path.unique()[0]  # locate the path to the '.mhd' file
    print(patient)
    #####################################
    #### Load and process image  ########
    #####################################
    itk_img = SimpleITK.ReadImage(patient_path)
    # NOTE(review): .all() warns only when EVERY entry of the direction
    # matrix differs from identity; .any() was probably intended — confirm.
    if (np.array(itk_img.GetDirection()) != np.array([ 1.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  1.])).all():
        print("WARNING!!!!! Image in different direction")
    image_array = SimpleITK.GetArrayFromImage(itk_img)
    spacing = np.array(itk_img.GetSpacing())  # spacing of voxels in world coor. (mm)
    image_array = rescale_patient_images(image_array, spacing, TARGET_VOXEL_MM)
    # one output file per malignancy score (0-5)
    tfrecord_file0 = save_path + patient + "_0.tfrecord"
    tfrecord_file1 = save_path + patient + "_1.tfrecord"
    tfrecord_file2 = save_path + patient + "_2.tfrecord"
    tfrecord_file3 = save_path + patient + "_3.tfrecord"
    tfrecord_file4 = save_path + patient + "_4.tfrecord"
    tfrecord_file5 = save_path + patient + "_5.tfrecord"
    writer0 = tf.python_io.TFRecordWriter(tfrecord_file0)
    writer1 = tf.python_io.TFRecordWriter(tfrecord_file1)
    writer2 = tf.python_io.TFRecordWriter(tfrecord_file2)
    writer3 = tf.python_io.TFRecordWriter(tfrecord_file3)
    writer4 = tf.python_io.TFRecordWriter(tfrecord_file4)
    writer5 = tf.python_io.TFRecordWriter(tfrecord_file5)
    # extract one 32^3 cube per annotated nodule and route it to the
    # writer that matches its malignancy score
    for index, row in patient_df.iterrows():
        z_perc = row["z_center_perc"]
        y_perc = row["y_center_perc"]
        x_perc = row["x_center_perc"]
        image_cube = extract_cube(image_array,z_perc,y_perc,x_perc)
        image_label = (row["malscore"], row["spiculation"], row["lobulation"])
        if row["malscore"] == 0:
            add_to_tfrecord(writer0,image_cube, image_label)
        if row["malscore"] == 1:
            add_to_tfrecord(writer1,image_cube, image_label)
        if row["malscore"] == 2:
            add_to_tfrecord(writer2,image_cube, image_label)
        if row["malscore"] == 3:
            add_to_tfrecord(writer3,image_cube, image_label)
        if row["malscore"] == 4:
            add_to_tfrecord(writer4,image_cube, image_label)
        if row["malscore"] == 5:
            add_to_tfrecord(writer5,image_cube, image_label)
    writer0.close()
    writer1.close()
    writer2.close()
    writer3.close()
    writer4.close()
    writer5.close()
| mit |
nhejazi/scikit-learn | examples/gaussian_process/plot_gpc_isoprobability.py | 64 | 3049 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
    """Ground-truth decision function.

    Classification then consists in predicting whether g(x) <= 0 or not,
    for rows of the (n_samples, 2) array *x*.
    """
    first, second = x[:, 0], x[:, 1]
    return 5.0 - second - 0.5 * first ** 2.0
# Design of experiments: 8 hand-picked 2-D training points
X = np.array([[-4.61611719, -6.00099547],
              [4.10469096, 5.32782448],
              [0.00000000, -0.50000000],
              [-6.17289014, -4.6984743],
              [1.3109306, -6.93271427],
              [-5.03823144, 3.10584743],
              [-2.87600388, 6.74310541],
              [5.21301203, 4.26386883]])

# Observations: binary class labels from the sign of g
y = np.array(g(X) > 0, dtype=int)

# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)

# Evaluate real function and the predicted probability on a res x res grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
                     np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T

y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))

# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')

cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
                 extent=(-lim, lim, -lim, lim))
norm = plt.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = plt.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
# BUG FIX: the LaTeX label is now a raw string; the old literal relied on
# invalid escape sequences (\m, \l, \w), which emit DeprecationWarnings
# and will become a SyntaxError in future Python versions.
cb.set_label(r'${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$')
plt.clim(0, 1)

plt.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)

plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)

# iso-probability contours: true boundary plus P = 0.666 / 0.5 / 0.334
cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')

cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
                 linestyles='solid')
plt.clabel(cs, fontsize=11)

cs = plt.contour(x1, x2, y_prob, [0.5], colors='k',
                 linestyles='dashed')
plt.clabel(cs, fontsize=11)

cs = plt.contour(x1, x2, y_prob, [0.334], colors='r',
                 linestyles='solid')
plt.clabel(cs, fontsize=11)

plt.show()
| bsd-3-clause |
maxalbert/bokeh | examples/plotting/file/elements.py | 2 | 1491 | import pandas as pd
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata import periodic_table
# Periodic-table scatter plot: density vs atomic mass, colored by melting
# point.  Restrict to elements up to lead (Z <= 82) with a known melting
# point, and parse the atomic-mass strings (some are bracketed) to floats.
elements = periodic_table.elements
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass

# 11-color diverging palette, reversed so low melting points map to blue
palette = list(reversed([
    "#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))

melting_points = elements["melting point"]
low = min(melting_points)
high= max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10
meltingpointcolors = [palette[i] for i in melting_point_inds]

output_file("elements.html", title="elements.py example")

TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"

p = figure(tools=TOOLS, toolbar_location="left", logo="grey", plot_width=1200)
p.title = "Density vs Atomic Weight of Elements (colored by melting point)"
p.background_fill_color= "#cccccc"

# one circle per element, plus its symbol drawn just above the marker
p.circle(elements["atomic mass"], elements["density"], size=12,
         color=meltingpointcolors, line_color="black", fill_alpha=0.8)

p.text(elements["atomic mass"], elements["density"]+0.3,
       text=elements["symbol"],text_color="#333333",
       text_align="center", text_font_size="10pt")

p.xaxis.axis_label="atomic weight (amu)"
p.yaxis.axis_label="density (g/cm^3)"
p.grid.grid_line_color="white"

show(p)
| bsd-3-clause |
dpinney/omf | omf/scratch/transients/04_Classical_Nine_Bus/test_nine_bus.py | 1 | 6094 | #!python3
#
# Copyright (C) 2014-2015 Julius Susanto. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Classical Stability Test
"""
# Dynamic model classes
from pydyn.ext_grid import ext_grid
from pydyn.sym_order6a import sym_order6a
from pydyn.sym_order6b import sym_order6b
from pydyn.sym_order4 import sym_order4
from pydyn.asym_1cage import asym_1cage
# Simulation modules
from pydyn.events import events
from pydyn.recorder import recorder
from pydyn.run_sim import run_sim
# External modules
from pypower.loadcase import loadcase
import matplotlib.pyplot as plt
import numpy as np
from omf.scratch.transients import montefaults as mf
if __name__ == '__main__':
    #########
    # SETUP #
    #########

    print('----------------------------------------')
    print('PYPOWER-Dynamics - Classical 9 Bus Test')
    print('----------------------------------------')

    # Load PYPOWER case
    ppc = loadcase('case9.py')

    # Program options
    dynopt = {}
    dynopt['h'] = 0.001              # step length (s)
    dynopt['t_sim'] = 200.0            # simulation time (s)
    dynopt['max_err'] = 1e-6        # Maximum error in network iteration (voltage mismatches)
    dynopt['max_iter'] = 25         # Maximum number of network iterations
    dynopt['verbose'] = False       # option for verbose messages
    dynopt['fn'] = 60               # Nominal system frequency (Hz)

    # Integrator option ('mod_euler' or 'runge_kutta')
    dynopt['iopt'] = 'mod_euler'

    # Create dynamic model objects: three classical machines modelled as
    # external grids plus one single-cage asynchronous machine
    G1 = ext_grid('GEN1', 0, 0.0608, 23.64, dynopt)
    G2 = ext_grid('GEN2', 1, 0.1198, 6.01, dynopt)
    G3 = ext_grid('GEN3', 2, 0.1813, 3.01, dynopt)
    B1 = asym_1cage('B1.mach', dynopt)

    # Create dictionary of elements
    elements = {}
    elements[G1.id] = G1
    elements[G2.id] = G2
    elements[G3.id] = G3
    elements[B1.id] = B1

    # Create event stack; one load-step event at t = 10.3 s on bus 2
    oEvents = events('events.evnt')
    event1 = [10.3, 'LOAD', 2, -10, -10]
    oEvents.event_stack.append(event1)

    # Create recorder object
    oRecord = recorder('recorder.rcd')

    # Run simulation
    oRecord = run_sim(ppc,elements,dynopt,oEvents,oRecord)

    # 3x3 dashboard: rotor angles (row 0), powers (row 1), speeds (row 2)
    fig, axs = plt.subplots(3, 3)
    # NOTE(review): values below are multiplied by 180/pi (radian->degree
    # conversion) yet the axis labels say 'radians' — confirm intended units.
    axs[0, 0].plot(oRecord.t_axis, np.array(oRecord.results['GEN1:delta']) * 180 / np.pi)
    axs[0, 0].set_title('Rotor Angle (GEN1)')
    axs[0, 0].set(ylabel='radians')

    axs[0, 1].plot(oRecord.t_axis, np.array(oRecord.results['GEN2:delta']) * 180 / np.pi, 'tab:orange')
    axs[0, 1].set_title('Rotor Angle (GEN2)')
    axs[0, 1].set(ylabel='radians')

    axs[0, 2].plot(oRecord.t_axis, np.array(oRecord.results['GEN3:delta']) * 180 / np.pi, 'tab:green')
    axs[0, 2].set_title('Rotor Angle (GEN3)')
    axs[0, 2].set(ylabel='radians')

    # NOTE(review): this panel plots 'BUS7:P' but is titled 'Power (GEN1)',
    # unlike its siblings which plot 'GEN2:P'/'GEN3:P' — confirm intended.
    axs[1, 0].plot(oRecord.t_axis, np.array(oRecord.results['BUS7:P']) * 100)
    axs[1, 0].set_title('Power (GEN1)')
    axs[1, 0].set(ylabel='MW')

    axs[1, 1].plot(oRecord.t_axis, np.array(oRecord.results['GEN2:P']) * 100, 'tab:orange')
    axs[1, 1].set_title('Power (GEN2)')
    axs[1, 1].set(ylabel='MW')

    axs[1, 2].plot(oRecord.t_axis, np.array(oRecord.results['GEN3:P']) * 100, 'tab:green')
    axs[1, 2].set_title('Power (GEN3)')
    axs[1, 2].set(ylabel='MW')

    # NOTE(review): omega is scaled by 180/pi (a rad->deg factor) but the
    # panels are titled 'Frequency' with ylabel 'Hz' — confirm conversion.
    axs[2, 0].plot(oRecord.t_axis, np.array(oRecord.results['GEN1:omega']) * 180 / np.pi)
    axs[2, 0].set_title('Frequency (GEN1)')
    axs[2, 0].set(ylabel='Hz')

    axs[2, 1].plot(oRecord.t_axis, np.array(oRecord.results['GEN2:omega']) * 180 / np.pi, 'tab:orange')
    axs[2, 1].set_title('Frequency (GEN2)')
    axs[2, 1].set(ylabel='Hz')

    axs[2, 2].plot(oRecord.t_axis, np.array(oRecord.results['GEN3:omega']) * 180 / np.pi, 'tab:green')
    axs[2, 2].set_title('Frequency (GEN3)')
    axs[2, 2].set(ylabel='Hz')

    for ax in axs.flat:
        ax.set(xlabel='time (s)')

    plt.show()
# Write recorded variables to output file
oRecord.write_output('output.csv') | gpl-2.0 |
YinongLong/scikit-learn | setup.py | 4 | 11924 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
# Package metadata consumed by setup() further down
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
    LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'

# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn

VERSION = sklearn.__version__

# minimum supported dependency versions
SCIPY_MIN_VERSION = '0.9'
NUMPY_MIN_VERSION = '1.6.1'


# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
    'develop', 'release', 'bdist_egg', 'bdist_rpm',
    'bdist_wininst', 'install_egg_info', 'build_sphinx',
    'egg_info', 'easy_install', 'upload', 'bdist_wheel',
    '--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
    import setuptools

    extra_setuptools_args = dict(
        zip_safe=False,  # the package can run out of an .egg file
        include_package_data=True,
        extras_require={
            'alldeps': (
                'numpy >= {0}'.format(NUMPY_MIN_VERSION),
                'scipy >= {0}'.format(SCIPY_MIN_VERSION),
            ),
        },
    )
else:
    # plain distutils build: no setuptools-specific keyword arguments
    extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
    """`python setup.py clean` extension that also removes build artifacts.

    On top of the standard distutils clean, this deletes the `build/`
    tree, compiled extension files (.so/.pyd/.dll/.pyc), `__pycache__`
    directories, and — when run from a source checkout rather than an
    sdist — the Cython-generated .c/.cpp files and the cythonize hash
    file.
    """
    description = "Remove build artifacts from the source tree"

    def run(self):
        Clean.run(self)
        # Remove c files if we are not within a sdist package
        # (an sdist ships PKG-INFO and pre-generated .c files that the
        # user may not be able to regenerate without Cython)
        cwd = os.path.abspath(os.path.dirname(__file__))
        remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
        if remove_c_files:
            cython_hash_file = os.path.join(cwd, 'cythonize.dat')
            if os.path.exists(cython_hash_file):
                os.unlink(cython_hash_file)
            print('Will remove generated .c files')
        if os.path.exists('build'):
            shutil.rmtree('build')
        for dirpath, dirnames, filenames in os.walk('sklearn'):
            for filename in filenames:
                if any(filename.endswith(suffix) for suffix in
                       (".so", ".pyd", ".dll", ".pyc")):
                    os.unlink(os.path.join(dirpath, filename))
                    continue
                extension = os.path.splitext(filename)[1]
                # only delete generated .c/.cpp files that have a matching
                # .pyx source they can be regenerated from
                if remove_c_files and extension in ['.c', '.cpp']:
                    pyx_file = str.replace(filename, extension, '.pyx')
                    if os.path.exists(os.path.join(dirpath, pyx_file)):
                        os.unlink(os.path.join(dirpath, filename))
            for dirname in dirnames:
                if dirname == '__pycache__':
                    shutil.rmtree(os.path.join(dirpath, dirname))
# Register the custom clean command with distutils/setuptools.
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
    # Imported lazily: wheelhouse_uploader is only needed when cutting a
    # release, not for ordinary builds/installs.
    import wheelhouse_uploader.cmd
    cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the sklearn tree.

    Returns a ``Configuration`` rooted at this directory with the
    ``sklearn`` subpackage registered for recursive building.
    """
    # A stale MANIFEST can shadow MANIFEST.in; drop it before configuring.
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    # Silence the non-useful "Ignoring attempt to set 'name'" chatter.
    option_flags = dict(
        ignore_setup_xxx_py=True,
        assume_default_configuration=True,
        delegate_options_to_subpackages=True,
        quiet=True,
    )
    config.set_options(**option_flags)
    config.add_subpackage('sklearn')
    return config
def get_scipy_status():
    """Report whether an acceptable SciPy is importable.

    Returns a dict with keys ``up_to_date`` (bool: installed version is
    at least ``SCIPY_MIN_VERSION``) and ``version`` (the installed
    version string, or "" when SciPy is missing).
    """
    status = {'up_to_date': False, 'version': ""}
    try:
        import scipy
    except ImportError:
        return status
    status['version'] = scipy.__version__
    status['up_to_date'] = (parse_version(status['version'])
                            >= parse_version(SCIPY_MIN_VERSION))
    return status
def get_numpy_status():
    """Report whether an acceptable NumPy is importable.

    Returns a dict with keys ``up_to_date`` (bool: installed version is
    at least ``NUMPY_MIN_VERSION``) and ``version`` (the installed
    version string, or "" when NumPy is missing).
    """
    status = {'up_to_date': False, 'version': ""}
    try:
        import numpy
    except ImportError:
        return status
    status['version'] = numpy.__version__
    status['up_to_date'] = (parse_version(status['version'])
                            >= parse_version(NUMPY_MIN_VERSION))
    return status
def generate_cython():
    """Run build_tools/cythonize.py over the sklearn package.

    Raises RuntimeError when the cythonize subprocess exits non-zero.
    """
    root = os.path.abspath(os.path.dirname(__file__))
    print("Cythonizing sources")
    cythonize_script = os.path.join(root, 'build_tools', 'cythonize.py')
    exit_code = subprocess.call([sys.executable, cythonize_script, 'sklearn'],
                                cwd=root)
    if exit_code != 0:
        raise RuntimeError("Running cythonize failed!")
def setup_package():
    """Assemble the metadata dict and invoke ``setup()``.

    For purely informational commands (``--help``, ``--version``,
    ``egg_info``, ``clean``) a plain setuptools/distutils ``setup`` is
    used so that NumPy is not required; for real builds NumPy and SciPy
    are version-checked, ``numpy.distutils`` drives the build, and the
    Cython sources are regenerated when building from a checkout rather
    than from a source release.
    """
    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    version=VERSION,
                    download_url=DOWNLOAD_URL,
                    long_description=LONG_DESCRIPTION,
                    classifiers=['Intended Audience :: Science/Research',
                                 'Intended Audience :: Developers',
                                 'License :: OSI Approved',
                                 'Programming Language :: C',
                                 'Programming Language :: Python',
                                 'Topic :: Software Development',
                                 'Topic :: Scientific/Engineering',
                                 'Operating System :: Microsoft :: Windows',
                                 'Operating System :: POSIX',
                                 'Operating System :: Unix',
                                 'Operating System :: MacOS',
                                 'Programming Language :: Python :: 2',
                                 'Programming Language :: Python :: 2.6',
                                 'Programming Language :: Python :: 2.7',
                                 'Programming Language :: Python :: 3',
                                 'Programming Language :: Python :: 3.4',
                                 'Programming Language :: Python :: 3.5',
                                 ],
                    cmdclass=cmdclass,
                    **extra_setuptools_args)
    if len(sys.argv) == 1 or (
            len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
                                    sys.argv[1] in ('--help-commands',
                                                    'egg_info',
                                                    '--version',
                                                    'clean'))):
        # For these actions, NumPy is not required, nor Cythonization
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Scikit-learn when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup
        metadata['version'] = VERSION
    else:
        # Full build path: enforce the minimum NumPy/SciPy versions with
        # actionable error messages before handing over to numpy.distutils.
        numpy_status = get_numpy_status()
        numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
            NUMPY_MIN_VERSION)
        scipy_status = get_scipy_status()
        scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
            SCIPY_MIN_VERSION)
        instructions = ("Installation instructions are available on the "
                        "scikit-learn website: "
                        "http://scikit-learn.org/stable/install.html\n")
        if numpy_status['up_to_date'] is False:
            if numpy_status['version']:
                raise ImportError("Your installation of Numerical Python "
                                  "(NumPy) {0} is out-of-date.\n{1}{2}"
                                  .format(numpy_status['version'],
                                          numpy_req_str, instructions))
            else:
                raise ImportError("Numerical Python (NumPy) is not "
                                  "installed.\n{0}{1}"
                                  .format(numpy_req_str, instructions))
        if scipy_status['up_to_date'] is False:
            if scipy_status['version']:
                raise ImportError("Your installation of Scientific Python "
                                  "(SciPy) {0} is out-of-date.\n{1}{2}"
                                  .format(scipy_status['version'],
                                          scipy_req_str, instructions))
            else:
                raise ImportError("Scientific Python (SciPy) is not "
                                  "installed.\n{0}{1}"
                                  .format(scipy_req_str, instructions))
        from numpy.distutils.core import setup
        metadata['configuration'] = configuration
        # BUGFIX: the original test was ``sys.argv[1] not in 'config'``,
        # a *substring* test -- it wrongly skipped cythonization for any
        # argv[1] that happens to be a substring of "config" ('', 'c',
        # 'con', ...).  Compare for equality instead.
        if len(sys.argv) >= 2 and sys.argv[1] != 'config':
            # Cythonize if needed
            print('Generating cython files')
            cwd = os.path.abspath(os.path.dirname(__file__))
            if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
                # Generate Cython sources, unless building from source release
                generate_cython()
            # Clean left-over .so files whose .pyx source has disappeared,
            # so renamed/removed extensions do not linger in the tree.
            for dirpath, dirnames, filenames in os.walk(
                    os.path.join(cwd, 'sklearn')):
                for filename in filenames:
                    extension = os.path.splitext(filename)[1]
                    if extension in (".so", ".pyd", ".dll"):
                        pyx_file = str.replace(filename, extension, '.pyx')
                        print(pyx_file)
                        if not os.path.exists(os.path.join(dirpath, pyx_file)):
                            os.unlink(os.path.join(dirpath, filename))
    setup(**metadata)
if __name__ == "__main__":
    setup_package()
| bsd-3-clause |
xyguo/scikit-learn | sklearn/ensemble/__init__.py | 153 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
DLR-SC/tigl | misc/math-scripts/ComponentSegmentNew.py | 2 | 6822 | #
# Copyright (C) 2007-2011 German Aerospace Center (DLR/SC)
#
# Created: 2013-04-19 Martin Siggel <Martin.Siggel@dlr.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @file ComponentSegmentNew.py
# @brief Implementation of the component segment geometry
#
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import Polygonzug as PZ
from ms_segmentGeometry import SegmentGeometry, SegmentMathError
from numpy import size, array, dot, arange, zeros, linspace, linalg
class ComponentSegment(object):
    """Chained wing component-segment geometry (Python 2 code).
    Built from matching leading-edge (``le``) and trailing-edge (``te``)
    point arrays of shape (3, nseg+1); provides the mapping between the
    segment coordinates (eta, xsi) and cartesian points across all
    chained segments, plus a 3D debug plot.
    """
    def __init__(self, le, te):
        # le, te: 3 x (nseg+1) arrays of leading/trailing edge points.
        assert(size(le,1) == size(te,1))
        assert(size(le,0) == size(te,0) == 3)
        assert(size(le,1) >= 2)
        self.lepoints = le.copy()
        self.tepoints = te.copy()
        self.segments = {}
        nseg = size(le,1)-1
        # One SegmentGeometry per quadrilateral between consecutive ribs.
        for iseg in range(0,nseg):
            self.segments[iseg] = SegmentGeometry(self.lepoints[:,iseg], self.lepoints[:,iseg+1], self.tepoints[:,iseg], self.tepoints[:,iseg+1])
        # extend leading edge at wing tip
        # NOTE(review): the caller's `le` array is mutated in place below,
        # *after* self.lepoints captured the original values -- so self.le
        # (built from the extended edge) and self.lepoints intentionally
        # differ; confirm this asymmetry is intended.
        n = le[:,nseg]-le[:,nseg-1]
        n[0] = 0
        tep = te[:,nseg]
        alpha = dot(tep-le[:,nseg-1],n)/dot(le[:,nseg]-le[:,nseg-1],n)
        if alpha > 1:
            P = le[:,nseg-1] + (le[:,nseg]-le[:,nseg-1])*alpha
            le[:,nseg] = P
        # extend leading edge at inner wing
        n = le[:,1]-le[:,0]
        n[0] = 0
        tep = te[:,0]
        alpha = dot(tep-le[:,0],n)/dot(le[:,1]-le[:,0],n)
        if alpha < 0:
            P = le[:,0] + (le[:,1]-le[:,0])*alpha
            le[:,0] = P
        # project onto y-z plane
        self.le = PZ.PolygonWRoundedEdges(le[1:3,:])
        self.le.setRadius(0.01)
        self.te = PZ.PolygonNormal(te[1:3,:])
    def calcPoint(self, eta, xsi):
        """Return the 3D point for component-segment coordinates (eta, xsi).
        Raises NameError when no owning segment can be determined.
        """
        nseg = size(self.lepoints,1)-1
        pyz, nyz, iSegBegin = self.le.calcPoint(eta)
        #print 'proj:', self.le.project(array([pyz[0], pyz[1]])), eta
        # Lift the y-z leading-edge point/normal back into 3D (x = 0).
        P = array([0, pyz[0], pyz[1]])
        N = array([0, nyz[0], nyz[1]])
        # calculate intersection with leading edge of segment
        PV = self.segments[iSegBegin].calcIntersectLE(P,N)
        _ , _ , iSegEnd = self.te.calcPoint(eta)
        PH = self.segments[iSegEnd].calcIntersectTE(P,N)
        # calculate point on line between leading and trailing edge
        PL = PV + (PH-PV)*xsi
        if iSegEnd < iSegBegin:
            iSegBegin, iSegEnd = iSegEnd, iSegBegin
        # project point onto segment
        for iseg in range(iSegBegin, iSegEnd+1):
            try:
                # @todo: , if the point does not lie on the current segment
                # the projection might not converge (there may be not solution).
                # this is not bad, but takes some time. Find a way to determine
                # in advance on which segment the points lies. one way to do so
                # is to project the segment edge on the intersection line and
                # determine the intersection parameter
                (alpha, beta) = self.segments[iseg].projectPointOnCut(PL, P,N)
            except SegmentMathError:
                continue
            # in the last and first segment, alpha and beta dont have to be valid, due to the extension of the leading edge
            if SegmentGeometry.isValid(alpha, beta):
                return self.segments[iseg].getPoint(alpha, beta)[:,0]
            elif iseg == 0 and alpha < 0.:
                return self.segments[iseg].getPoint(alpha, beta)[:,0]
            elif iseg == nseg-1 and alpha > 1.:
                return self.segments[iseg].getPoint(alpha, beta)[:,0]
        raise NameError('Error determining segment index in ComponentSegment.calcPoint')
    def project(self, point):
        """Inverse mapping: return (eta, xsi) for a 3D point on the segment."""
        # get the eta coordinate of the point
        eta = self.le.project(point[1:3])
        # get point on the leading edge and normal vector
        pyz, nyz, iSegBegin = self.le.calcPoint(eta)
        _ , _ , iSegEnd = self.te.calcPoint(eta)
        P = array([0, pyz[0], pyz[1]])
        N = array([0, nyz[0], nyz[1]])
        # calculate intersection with leading edge of segment
        PV = self.segments[iSegBegin].calcIntersectLE(P,N)[:,0];
        PH = self.segments[iSegEnd].calcIntersectTE(P,N)[:,0];
        # now project point back on the line pv-ph
        xsi = dot(point-PV, PH-PV)/(linalg.norm(PH-PV)**2)
        return eta, xsi
    def plot(self, axis=None):
        """Draw the segment outline plus iso-eta/iso-xsi lines on a 3D axis."""
        if not axis:
            axis = plt.gca(projection='3d')
        nseg = size(self.lepoints, 1) - 1
        style= 'b-'
        # Outline: leading edge, trailing edge and the connecting ribs.
        axis.plot(self.lepoints[0,:], self.lepoints[1,:], self.lepoints[2,:], style)
        axis.plot(self.tepoints[0,:], self.tepoints[1,:], self.tepoints[2,:], style)
        for iseg in xrange(0,nseg+1):
            axis.plot([self.lepoints[0,iseg], self.tepoints[0,iseg]], [self.lepoints[1,iseg], self.tepoints[1,iseg]], [self.lepoints[2,iseg], self.tepoints[2,iseg]], style)
        #calc iso-eta lines
        for eta in arange(0.,1.01, 0.1):
            xsis = linspace(0.,1.0, 20)
            P = zeros((3,size(xsis)))
            for i in xrange(0,size(xsis)):
                P[:,i] = self.calcPoint(eta, xsis[i])
            axis.plot(P[0,:], P[1,:], P[2,:],'r')
        #calc iso-xsi lines
        for xsi in arange(0.,1.01, 0.1):
            etas = linspace(0.,1.0, 20)
            P = zeros((3,size(etas)))
            for i in xrange(0,size(etas)):
                P[:,i] = self.calcPoint(etas[i], xsi)
            axis.plot(P[0,:], P[1,:], P[2,:],'r')
        axis.set_xlabel('x [m]')
        axis.set_ylabel('y [m]')
        axis.set_zlabel('z [m]')
# Demo (Python 2): build a two-segment component segment from sample
# leading-edge (vk) and trailing-edge (hk) points, plot it, and verify
# that project() inverts calcPoint().
vk = array([[ 0.0, 2.0, 3.4],
            [ 0.5, 4.4, 9.0],
            [-0.3, 0.0, 0.4]])
hk = array([[3.5, 3.5, 4.0],
            [0.0, 6.4, 9.5],
            [0.0, 0.0, 0.5]])
cs = ComponentSegment(vk, hk)
#print P
fig = plt.figure()
cs.plot()
# Round-trip check: (0.3, 0.7) -> 3D point -> (eta, xsi).
P = cs.calcPoint(0.3, 0.7)
(eta, xsi) = cs.project(P)
print 'eta,xsi:' , eta, xsi
fig.gca().plot([P[0]], [P[1]], [P[2]], 'gx')
plt.show()
ischwabacher/seaborn | seaborn/miscplot.py | 34 | 1498 | from __future__ import division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def palplot(pal, size=1):
    """Draw a color palette as a horizontal strip of swatches.

    Parameters
    ----------
    pal : sequence of matplotlib colors
        Palette to display, e.g. the output of ``color_palette()``.
    size :
        Height of the figure; the width scales with the palette length.
    """
    n_colors = len(pal)
    fig, ax = plt.subplots(1, 1, figsize=(n_colors * size, size))
    # One row of cells, colored 0..n-1 through a ListedColormap.
    swatch_row = np.arange(n_colors).reshape(1, n_colors)
    cmap = mpl.colors.ListedColormap(list(pal))
    ax.imshow(swatch_row, cmap=cmap,
              interpolation="nearest", aspect="auto")
    # Put ticks on the cell boundaries and hide all labels.
    ax.set_xticks(np.arange(n_colors) - .5)
    ax.set_yticks([-.5, .5])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
def puppyplot(grown_up=False):
    """Plot today's daily puppy. Only works in the IPython notebook."""
    # Network fetch + HTML scraping; requires IPython for display.
    from .external.six.moves.urllib.request import urlopen
    from IPython.display import HTML
    try:
        from bs4 import BeautifulSoup
        url = "http://www.dailypuppy.com"
        if grown_up:
            url += "/dogs"
        html_doc = urlopen(url)
        soup = BeautifulSoup(html_doc)
        puppy = soup.find("div", {"class": "daily_puppy"})
        return HTML(str(puppy.img))
    except ImportError:
        # BeautifulSoup is unavailable: fall back to a fixed hosted image.
        html = ('<img src="http://cdn-www.dailypuppy.com/dog-images/'
                'decker-the-nova-scotia-duck-tolling-retriever_'
                '72926_2013-11-04_w450.jpg" style="width:450px;"/>')
        return HTML(html)
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/statistics/boxplot_demo.py | 3 | 2674 | """
Demo of the new boxplot functionality
"""
import numpy as np
import matplotlib.pyplot as plt
# fake data: 37 lognormal samples in each of 4 columns (one box per column)
np.random.seed(937)
data = np.random.lognormal(size=(37, 4), mean=1.5, sigma=1.75)
labels = list('ABCD')
fs = 10 # fontsize
# demonstrate how to toggle the display of different elements:
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6,6))
axes[0, 0].boxplot(data, labels=labels)
axes[0, 0].set_title('Default', fontsize=fs)
axes[0, 1].boxplot(data, labels=labels, showmeans=True)
axes[0, 1].set_title('showmeans=True', fontsize=fs)
axes[0, 2].boxplot(data, labels=labels, showmeans=True, meanline=True)
axes[0, 2].set_title('showmeans=True,\nmeanline=True', fontsize=fs)
axes[1, 0].boxplot(data, labels=labels, showbox=False, showcaps=False)
axes[1, 0].set_title('Tufte Style \n(showbox=False,\nshowcaps=False)', fontsize=fs)
axes[1, 1].boxplot(data, labels=labels, notch=True, bootstrap=10000)
axes[1, 1].set_title('notch=True,\nbootstrap=10000', fontsize=fs)
axes[1, 2].boxplot(data, labels=labels, showfliers=False)
axes[1, 2].set_title('showfliers=False', fontsize=fs)
# log scale suits the heavy-tailed lognormal data; labels are hidden
for ax in axes.flatten():
    ax.set_yscale('log')
    ax.set_yticklabels([])
fig.subplots_adjust(hspace=0.4)
plt.show()
# demonstrate how to customize the display different elements:
boxprops = dict(linestyle='--', linewidth=3, color='darkgoldenrod')
flierprops = dict(marker='o', markerfacecolor='green', markersize=12,
                  linestyle='none')
medianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick')
meanpointprops = dict(marker='D', markeredgecolor='black',
                      markerfacecolor='firebrick')
meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6,6))
axes[0, 0].boxplot(data, boxprops=boxprops)
axes[0, 0].set_title('Custom boxprops', fontsize=fs)
axes[0, 1].boxplot(data, flierprops=flierprops, medianprops=medianprops)
axes[0, 1].set_title('Custom medianprops\nand flierprops', fontsize=fs)
axes[0, 2].boxplot(data, whis='range')
axes[0, 2].set_title('whis="range"', fontsize=fs)
axes[1, 0].boxplot(data, meanprops=meanpointprops, meanline=False,
                   showmeans=True)
axes[1, 0].set_title('Custom mean\nas point', fontsize=fs)
axes[1, 1].boxplot(data, meanprops=meanlineprops, meanline=True, showmeans=True)
axes[1, 1].set_title('Custom mean\nas line', fontsize=fs)
axes[1, 2].boxplot(data, whis=[15, 85])
axes[1, 2].set_title('whis=[15, 85]\n#percentiles', fontsize=fs)
for ax in axes.flatten():
    ax.set_yscale('log')
    ax.set_yticklabels([])
fig.suptitle("I never said they'd be pretty")
fig.subplots_adjust(hspace=0.4)
plt.show()
| mit |
abhisg/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data: 3 blobs, split into train (600) / validation (200) /
# test (200)
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
                  cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
# (one arrow per test point: uncalibrated -> calibrated, colored by
# true class)
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
    plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
              sig_clf_probs[i, 0] - clf_probs[i, 0],
              sig_clf_probs[i, 1] - clf_probs[i, 1],
              color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
             xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
             xy=(.5, .0), xytext=(.5, .1), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
             xy=(.0, .5), xytext=(.1, .5), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
             xy=(.5, .5), xytext=(.6, .6), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
             xy=(0, 0), xytext=(.1, .1), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
             xy=(1, 0), xytext=(1, .1), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
    plt.plot([0, x], [x, 0], 'k', alpha=0.2)
    plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
    plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
      % score)
print(" * classifier trained on 600 datapoints and calibrated on "
      "200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
# Apply each per-class sigmoid calibrator, then renormalize rows to
# sum to one
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
                        for calibrator, this_p in
                        zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
    plt.arrow(p[i, 0], p[i, 1],
              prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
              head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
    plt.plot([0, x], [x, 0], 'k', alpha=0.2)
    plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
    plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
0asa/scikit-learn | sklearn/cross_validation.py | 2 | 63023 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import _is_arraylike, _num_samples, check_array
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
    """Base class for CV iterators where train_mask = ~test_mask
    Implementations must define `_iter_test_masks` or `_iter_test_indices`.
    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    """
    def __init__(self, n, indices=None):
        # `indices` is deprecated; any explicit value only triggers a
        # warning and integer-index output (True) stays the behaviour.
        if indices is None:
            indices = True
        else:
            warnings.warn("The indices parameter is deprecated and will be "
                          "removed (assumed True) in 0.17", DeprecationWarning,
                          stacklevel=1)
        # Reject non-integral n (floats such as 4.0 are accepted).
        if abs(n - int(n)) >= np.finfo('f').eps:
            raise ValueError("n must be an integer")
        self.n = int(n)
        self._indices = indices
    @property
    def indices(self):
        # Deprecated read-only accessor for the `indices` flag.
        warnings.warn("The indices attribute is deprecated and will be "
                      "removed (assumed True) in 0.17", DeprecationWarning,
                      stacklevel=1)
        return self._indices
    def __iter__(self):
        # Yield (train, test) pairs: integer index arrays when
        # self._indices is true, otherwise boolean masks.
        indices = self._indices
        if indices:
            ind = np.arange(self.n)
        for test_index in self._iter_test_masks():
            train_index = np.logical_not(test_index)
            if indices:
                train_index = ind[train_index]
                test_index = ind[test_index]
            yield train_index, test_index
    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self):
        """Generates boolean masks corresponding to test sets.
        By default, delegates to _iter_test_indices()
        """
        for test_index in self._iter_test_indices():
            test_mask = self._empty_mask()
            test_mask[test_index] = True
            yield test_mask
    def _iter_test_indices(self):
        """Generates integer indices corresponding to test sets."""
        raise NotImplementedError
    def _empty_mask(self):
        # All-False boolean mask of length n.  (np.bool is the legacy
        # alias of the builtin bool; it was removed in NumPy >= 1.24.)
        return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
    """Leave-One-Out cross validation iterator.

    Each of the ``n`` samples is used exactly once as a singleton test
    set while the remaining ``n - 1`` samples form the training set.
    ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and to
    ``LeavePOut(n, p=1)``.

    Because the number of splits equals the number of samples, this
    scheme is expensive on large datasets; prefer KFold,
    StratifiedKFold or ShuffleSplit there.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.

    See also
    --------
    LeaveOneLabelOut for splitting the data according to explicit,
    domain-specific stratification of the dataset.
    """

    def _iter_test_indices(self):
        # Each sample index in turn is the (single-element) test set.
        return range(self.n)

    def __repr__(self):
        return '{0}.{1}(n={2})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
        )

    def __len__(self):
        # One split per sample.
        return self.n
class LeavePOut(_PartitionIterator):
    """Leave-P-Out cross validation iterator.

    Tests on every distinct subset of ``p`` samples while training on
    the remaining ``n - p``.  Unlike ``KFold(n, n_folds=n // p)`` the
    test sets overlap, and their number grows combinatorially with
    ``n``, so this scheme is very costly on large datasets; prefer
    KFold, StratifiedKFold or ShuffleSplit there.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.
    p : int
        Size of each test set.
    """

    def __init__(self, n, p, indices=None):
        super(LeavePOut, self).__init__(n, indices)
        self.p = p

    def _iter_test_indices(self):
        # Every size-p combination of sample indices is one test set.
        for test_fold in combinations(range(self.n), self.p):
            yield np.array(test_fold)

    def __repr__(self):
        return '{0}.{1}(n={2}, p={3})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.p,
        )

    def __len__(self):
        # Number of splits is "n choose p", written with factorials.
        return int(factorial(self.n) / factorial(self.n - self.p)
                   / factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
    """Shared argument validation for the K-fold style iterators."""

    @abstractmethod
    def __init__(self, n, n_folds, indices, shuffle, random_state):
        super(_BaseKFold, self).__init__(n, indices)
        # n_folds must be integral (floats such as 3.0 are accepted).
        if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
            raise ValueError("n_folds must be an integer")
        n_folds = int(n_folds)
        self.n_folds = n_folds
        if n_folds <= 1:
            msg = ("k-fold cross validation requires at least one"
                   " train / test split by setting n_folds=2 or more,"
                   " got n_folds={0}.")
            raise ValueError(msg.format(n_folds))
        if n_folds > self.n:
            msg = ("Cannot have number of folds n_folds={0} greater"
                   " than the number of samples: {1}.")
            raise ValueError(msg.format(n_folds, n))
        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.shuffle = shuffle
        self.random_state = random_state
class KFold(_BaseKFold):
    """K-Folds cross validation iterator.

    Splits ``n`` samples into ``n_folds`` consecutive folds (optionally
    shuffled once up front).  Each fold serves as the test set exactly
    once while the remaining folds make up the training set.

    Parameters
    ----------
    n : int
        Total number of elements.
    n_folds : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : None, int or RandomState
        Pseudo-random number generator state used for shuffling.
        If None, use default numpy RNG.

    Notes
    -----
    The first ``n % n_folds`` folds have size ``n // n_folds + 1``; the
    remaining folds have size ``n // n_folds``.

    See also
    --------
    StratifiedKFold: take label information into account to avoid
    building folds with imbalanced class distributions (for binary or
    multiclass classification tasks).
    """

    def __init__(self, n, n_folds=3, indices=None, shuffle=False,
                 random_state=None):
        super(KFold, self).__init__(n, n_folds, indices, shuffle,
                                    random_state)
        self.idxs = np.arange(n)
        if shuffle:
            rng = check_random_state(self.random_state)
            rng.shuffle(self.idxs)

    def _iter_test_indices(self):
        # Spread the remainder over the leading folds so sizes differ by
        # at most one element.
        base_size, n_larger = divmod(self.n, self.n_folds)
        start = 0
        for fold_index in range(self.n_folds):
            stop = start + base_size + (1 if fold_index < n_larger else 0)
            yield self.idxs[start:stop]
            start = stop

    def __repr__(self):
        return '{0}.{1}(n={2}, n_folds={3}, shuffle={4}, random_state={5})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a variation of KFold that
    returns stratified folds. The folds are made by preserving
    the percentage of samples for each class.

    Parameters
    ----------
    y : array-like, [n_samples]
        Samples to split in K folds.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.

    random_state : None, int or RandomState
        Pseudo-random number generator state used for random
        sampling. If None, use default numpy RNG for shuffling

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
    >>> len(skf)
    2
    >>> print(skf)  # doctest: +NORMALIZE_WHITESPACE
    sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
                                             shuffle=False, random_state=None)
    >>> for train_index, test_index in skf:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]

    Notes
    -----
    All the folds have size trunc(n_samples / n_folds), the last one has the
    complementary.
    """

    def __init__(self, y, n_folds=3, indices=None, shuffle=False,
                 random_state=None):
        super(StratifiedKFold, self).__init__(
            len(y), n_folds, indices, shuffle, random_state)
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_labels, y_inversed = np.unique(y, return_inverse=True)
        label_counts = np.bincount(y_inversed)
        min_labels = np.min(label_counts)
        if self.n_folds > min_labels:
            # Stratification cannot be exact when some class has fewer
            # members than folds; warn but proceed on a best-effort basis.
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of labels for any class cannot"
                           " be less than n_folds=%d."
                           % (min_labels, self.n_folds)), Warning)

        # don't want to use the same seed in each label's shuffle
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state

        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each label so as to respect the
        # balance of labels
        per_label_cvs = [
            KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
                  random_state=rng) for c in label_counts]
        # Builtin ``int`` dtype: ``np.int`` is a deprecated alias removed
        # in NumPy >= 1.24 and raised AttributeError here.
        test_folds = np.zeros(n_samples, dtype=int)
        for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
            for label, (_, test_split) in zip(unique_labels, per_label_splits):
                label_test_folds = test_folds[y == label]
                # the test split can be too big because we used
                # KFold(max(c, self.n_folds), self.n_folds) instead of
                # KFold(c, self.n_folds) to make it possible to not crash even
                # if the data is not 100% stratifiable for all the labels
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(label_test_folds)]
                label_test_folds[test_split] = test_fold_idx
                test_folds[y == label] = label_test_folds

        self.test_folds = test_folds
        self.y = y

    def _iter_test_masks(self):
        # Fold i's test set is exactly the samples pre-assigned to fold i.
        for i in range(self.n_folds):
            yield self.test_folds == i

    def __repr__(self):
        return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.y,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
    """Leave-One-Label_Out cross-validation iterator.

    Each distinct value in ``labels`` defines exactly one split: the test
    set is every sample carrying that label and the training set is all
    remaining samples.  The labels encode an arbitrary domain-specific
    grouping of the samples (e.g. collection year, subject, site), which
    makes this iterator suitable for, say, time-based validation.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.
    """

    def __init__(self, labels, indices=None):
        super(LeaveOneLabelOut, self).__init__(len(labels), indices)
        # Keep a private copy: mutating the caller's array between
        # iterations must not change the generated splits.
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)

    def _iter_test_masks(self):
        # One boolean test mask per distinct label value.
        for label in self.unique_labels:
            yield self.labels == label

    def __repr__(self):
        return '%s.%s(labels=%s)' % (self.__class__.__module__,
                                     self.__class__.__name__,
                                     self.labels)

    def __len__(self):
        return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
    """Leave-P-Label_Out cross-validation iterator.

    Like :class:`LeaveOneLabelOut`, the splits are driven by an arbitrary
    domain-specific grouping of the samples (``labels``), but here each
    test set pools the samples of ``p`` distinct label values; one split
    is generated for every combination of ``p`` labels.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.

    p : int
        Number of samples to leave out in the test split.
    """

    def __init__(self, labels, p, indices=None):
        super(LeavePLabelOut, self).__init__(len(labels), indices)
        # Private copy so caller-side mutation cannot affect iteration.
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)
        self.p = p

    def _iter_test_masks(self):
        # Every size-p combination of distinct labels yields one test mask
        # covering all samples whose label belongs to that combination.
        for label_subset in combinations(range(self.n_unique_labels), self.p):
            test_mask = self._empty_mask()
            for label in self.unique_labels[np.array(label_subset)]:
                test_mask[self.labels == label] = True
            yield test_mask

    def __repr__(self):
        return '%s.%s(labels=%s, p=%s)' % (self.__class__.__module__,
                                           self.__class__.__name__,
                                           self.labels,
                                           self.p)

    def __len__(self):
        # C(n_unique_labels, p): number of ways to pick the left-out labels.
        return int(factorial(self.n_unique_labels) /
                   factorial(self.n_unique_labels - self.p) /
                   factorial(self.p))
class Bootstrap(object):
    """Random sampling with replacement cross-validation iterator

    Provides train/test indices to split data in train test sets
    while resampling the input n_iter times: each time a new
    random split of the data is performed and then samples are drawn
    (with replacement) on each side of the split to build the training
    and test sets.

    Note: contrary to other cross-validation strategies, bootstrapping
    will allow some samples to occur several times in each splits. However
    a sample that occurs in the train split will never occur in the test
    split and vice-versa.

    If you want each sample to occur at most once you should probably
    use ShuffleSplit cross validation instead.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.

    n_iter : int (default is 3)
        Number of bootstrapping iterations

    train_size : int or float (default is 0.5)
        If int, number of samples to include in the training split
        (should be smaller than the total number of samples passed
        in the dataset).

        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split.

    test_size : int or float or None (default is None)
        If int, number of samples to include in the test set
        (should be smaller than the total number of samples passed
        in the dataset).

        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split.

        If None, n_test is set as the complement of n_train.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> bs = cross_validation.Bootstrap(9, random_state=0)
    >>> len(bs)
    3
    >>> print(bs)
    Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
    >>> for train_index, test_index in bs:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...
    TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
    TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
    TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]

    See also
    --------
    ShuffleSplit: cross validation using random permutations.
    """

    # Static marker to be able to introspect the CV type
    indices = True

    def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
                 random_state=None):
        # See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation
        # behind this deprecation
        warnings.warn("Bootstrap will no longer be supported as a " +
                      "cross-validation method as of version 0.15 and " +
                      "will be removed in 0.17", DeprecationWarning)
        self.n = n
        self.n_iter = n_iter
        # Resolve train_size: a float in [0, 1] is a proportion of n
        # (rounded up); an integer is an absolute sample count.
        if (isinstance(train_size, numbers.Real) and train_size >= 0.0
                and train_size <= 1.0):
            self.train_size = int(ceil(train_size * n))
        elif isinstance(train_size, numbers.Integral):
            self.train_size = train_size
        else:
            raise ValueError("Invalid value for train_size: %r" %
                             train_size)
        if self.train_size > n:
            raise ValueError("train_size=%d should not be larger than n=%d" %
                             (self.train_size, n))

        # Resolve test_size the same way; None means "whatever remains".
        if isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
            self.test_size = int(ceil(test_size * n))
        elif isinstance(test_size, numbers.Integral):
            self.test_size = test_size
        elif test_size is None:
            self.test_size = self.n - self.train_size
        else:
            raise ValueError("Invalid value for test_size: %r" % test_size)
        if self.test_size > n - self.train_size:
            raise ValueError(("test_size + train_size=%d, should not be " +
                              "larger than n=%d") %
                             (self.test_size + self.train_size, n))

        self.random_state = random_state

    def __iter__(self):
        """Yield n_iter (train_indices, test_indices) bootstrap pairs."""
        rng = check_random_state(self.random_state)
        for i in range(self.n_iter):
            # random partition
            permutation = rng.permutation(self.n)
            ind_train = permutation[:self.train_size]
            ind_test = permutation[self.train_size:self.train_size
                                   + self.test_size]

            # bootstrap in each split individually: sample positions with
            # replacement, so indices may repeat within a side but never
            # cross between the two disjoint sides of the partition
            train = rng.randint(0, self.train_size,
                                size=(self.train_size,))
            test = rng.randint(0, self.test_size,
                               size=(self.test_size,))
            yield ind_train[train], ind_test[test]

    def __repr__(self):
        return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.n,
                    self.n_iter,
                    self.train_size,
                    self.test_size,
                    self.random_state,
                ))

    def __len__(self):
        return self.n_iter
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
                 indices=None, random_state=None, n_iterations=None):
        # ``indices`` is deprecated: boolean-mask iteration is being phased
        # out in favour of integer-index iteration (assumed True in 0.17).
        if indices is None:
            indices = True
        else:
            warnings.warn("The indices parameter is deprecated and will be "
                          "removed (assumed True) in 0.17", DeprecationWarning)
        self.n = n
        self.n_iter = n_iter
        # Backward-compat alias: ``n_iterations`` overrides ``n_iter``.
        if n_iterations is not None:  # pragma: no cover
            warnings.warn("n_iterations was renamed to n_iter for consistency "
                          " and will be removed in 0.16.")
            self.n_iter = n_iterations
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state
        self._indices = indices
        # Resolve float/int/None size specifications into absolute sample
        # counts once, so subclasses can slice permutations directly.
        self.n_train, self.n_test = _validate_shuffle_split(n,
                                                            test_size,
                                                            train_size)

    @property
    def indices(self):
        # Deprecated read-only accessor kept for backward compatibility.
        warnings.warn("The indices attribute is deprecated and will be "
                      "removed (assumed True) in 0.17", DeprecationWarning,
                      stacklevel=1)
        return self._indices

    def __iter__(self):
        if self._indices:
            for train, test in self._iter_indices():
                yield train, test
            return
        # Legacy mode: convert the index arrays into boolean masks of
        # length n before yielding.
        for train, test in self._iter_indices():
            train_m = np.zeros(self.n, dtype=bool)
            test_m = np.zeros(self.n, dtype=bool)
            train_m[train] = True
            test_m[test] = True
            yield train_m, test_m

    @abstractmethod
    def _iter_indices(self):
        """Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validation iterator.

    Yields indices to split data into training and test sets: on every
    iteration the samples are re-shuffled and the first ``n_test`` of the
    permutation become the test set, the next ``n_train`` the training set.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        Proportion (float in [0, 1]) or absolute number (int) of test
        samples; if None, the complement of the train size.

    train_size : float, int, or None (default is None)
        Proportion (float in [0, 1]) or absolute number (int) of train
        samples; if None, the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    See also
    --------
    Bootstrap: cross-validation using re-sampling with replacement.
    """

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            # Fresh random partition on every iteration.
            shuffled = rng.permutation(self.n)
            ind_test = shuffled[:self.n_test]
            ind_train = shuffled[self.n_test:self.n_test + self.n_train]
            yield ind_train, ind_test

    def __repr__(self):
        return ('%s(%d, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (self.__class__.__name__,
                                      self.n,
                                      self.n_iter,
                                      str(self.test_size),
                                      self.random_state))

    def __len__(self):
        return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Parameters
    ----------
    y : array, [n_samples]
        Labels of samples.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.cross_validation import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
    >>> len(sss)
    3
    >>> print(sss)       # doctest: +ELLIPSIS
    StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
    >>> for train_index, test_index in sss:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2] TEST: [3 0]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 2] TEST: [3 1]
    """

    def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
                 indices=None, random_state=None, n_iterations=None):
        super(StratifiedShuffleSplit, self).__init__(
            len(y), n_iter, test_size, train_size, indices, random_state,
            n_iterations)
        self.y = np.array(y)
        # ``classes`` holds the distinct labels; ``y_indices`` maps every
        # sample to its position in ``classes``.
        self.classes, self.y_indices = np.unique(y, return_inverse=True)
        n_cls = self.classes.shape[0]

        # Every class needs at least 2 members so that it can appear on
        # both sides of a split.
        if np.min(np.bincount(self.y_indices)) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of labels for any class cannot"
                             " be less than 2.")

        if self.n_train < n_cls:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_train, n_cls))
        if self.n_test < n_cls:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_test, n_cls))

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        cls_count = np.bincount(self.y_indices)
        # Per-class train (n_i) and test (t_i) quotas, proportional to the
        # empirical class frequencies p_i; t_i is capped so a class is
        # never over-drawn.
        p_i = cls_count / float(self.n)
        n_i = np.round(self.n_train * p_i).astype(int)
        t_i = np.minimum(cls_count - n_i,
                         np.round(self.n_test * p_i).astype(int))

        for n in range(self.n_iter):
            train = []
            test = []

            # Fill each class quota from a fresh within-class permutation.
            for i, cls in enumerate(self.classes):
                permutation = rng.permutation(cls_count[i])
                cls_i = np.where((self.y == cls))[0][permutation]

                train.extend(cls_i[:n_i[i]])
                test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])

            # Because of rounding issues (as n_train and n_test are not
            # dividers of the number of elements per class), we may end
            # up here with less samples in train and test than asked for.
            if len(train) < self.n_train or len(test) < self.n_test:
                # We complete by affecting randomly the missing indexes
                missing_idx = np.where(np.bincount(train + test,
                                                   minlength=len(self.y)) == 0,
                                       )[0]
                missing_idx = rng.permutation(missing_idx)
                train.extend(missing_idx[:(self.n_train - len(train))])
                test.extend(missing_idx[-(self.n_test - len(test)):])

            train = rng.permutation(train)
            test = rng.permutation(test)

            yield train, test

    def __repr__(self):
        return ('%s(labels=%s, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.y,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))

    def __len__(self):
        return self.n_iter
##############################################################################
def _index_param_value(X, v, indices):
    """Slice a fit parameter to ``indices`` when it is sample-aligned.

    Values that are not array-like, or whose length does not match the
    number of samples in X, are passed through untouched.
    """
    sample_aligned = _is_arraylike(v) and _num_samples(v) == _num_samples(X)
    if not sample_aligned:
        return v
    if sp.issparse(v):
        # CSR supports efficient row indexing.
        v = v.tocsr()
    return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Generate cross-validated estimates for each input data point.

    Every sample's prediction comes from an estimator fitted on the folds
    that do not contain that sample, so the CV generator must cover each
    sample exactly once.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines the number
        of folds in StratifiedKFold if y is binary or multiclass and
        estimator is a classifier, or the number of folds in KFold
        otherwise. If None, it is equivalent to cv=3. This generator must
        include all elements in the test set exactly once. Otherwise, a
        ValueError is raised.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution: None (dispatch everything immediately), an int (exact
        number of total jobs), or an expression in n_jobs such as
        '2*n_jobs'.

    Returns
    -------
    preds : ndarray
        This is the result of calling 'predict'
    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))

    # Clone per fold so the fits are independent and picklable.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    prediction_blocks = parallel(
        delayed(_fit_and_predict)(clone(estimator), X, y, train, test,
                                  verbose, fit_params)
        for train, test in cv)

    block_preds = np.concatenate([pred for pred, _ in prediction_blocks])
    test_locs = np.concatenate([loc for _, loc in prediction_blocks])
    if not _check_is_partition(test_locs, X.shape[0]):
        raise ValueError('cross_val_predict only works for partitions')

    # Scatter the fold-ordered predictions back into original sample order;
    # the copy just allocates an array of the right shape and dtype.
    preds = block_preds.copy()
    preds[test_locs] = block_preds
    return preds
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
    """Fit the estimator on the train split and predict the test split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    Returns
    -------
    preds : sequence
        Result of calling 'estimator.predict'

    test : array-like
        This is the value of the test parameter
    """
    # Slice sample-aligned fit parameters (e.g. sample_weight) to the
    # train split so their length matches X_train.
    if fit_params is None:
        fit_params = {}
    fit_params = {key: _index_param_value(X, value, train)
                  for key, value in fit_params.items()}

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)

    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)

    return estimator.predict(X_test), test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                    verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines the number
        of folds in StratifiedKFold if y is binary or multiclass and
        estimator is a classifier, or the number of folds in KFold
        otherwise. If None, it is equivalent to cv=3.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution: None (dispatch everything immediately), an int (exact
        number of total jobs), or an expression in n_jobs such as
        '2*n_jobs'.

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)

    # Clone per fold so the fits are independent and picklable.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    fold_results = parallel(
        delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test,
                                verbose, None, fit_params)
        for train, test in cv)
    # _fit_and_score returns [test_score, n_test_samples, scoring_time];
    # keep only the score column.
    return np.array(fold_results)[:, 0]
class FitFailedWarning(RuntimeWarning):
    """Warning issued by ``_fit_and_score`` when ``estimator.fit`` raises
    and ``error_score`` is numeric: the fold's score is replaced by
    ``error_score`` instead of propagating the exception."""
    pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    parameters : dict or None
        Parameters to be set on the estimator.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    return_train_score : boolean, optional, default: False
        Compute and return score on training set.

    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.

    Returns
    -------
    train_score : float, optional
        Score on training set, returned only if `return_train_score` is `True`.

    test_score : float
        Score on test set.

    n_test_samples : int
        Number of test samples.

    scoring_time : float
        Time spent for fitting and scoring in seconds.

    parameters : dict or None, optional
        The parameters that have been evaluated.
    """
    # ``msg`` is only bound when verbose > 1; the later uses at
    # verbose > 2 are safe because verbose > 2 implies verbose > 1.
    if verbose > 1:
        if parameters is None:
            msg = "no parameters to be set"
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))

    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])

    if parameters is not None:
        estimator.set_params(**parameters)

    start_time = time.time()

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)

    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)

    except Exception as e:
        # A failed fit either propagates (error_score='raise') or is
        # replaced by the numeric error_score with a FitFailedWarning.
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)"
                             )

    else:
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)

    scoring_time = time.time() - start_time

    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))

    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
    """Input checker utility for building a CV in a user friendly way.

    Parameters
    ----------
    cv : int, a cv generator instance, or None
        The input specifying which cv generator to use. It can be an
        integer, in which case it is the number of folds in a KFold,
        None, in which case 3 fold is used, or another object, that
        will then be used as a cv generator.

    X : array-like
        The data the cross-val object will be applied on.

    y : array-like
        The target variable for a supervised learning problem.

    classifier : boolean optional
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv: a cross-validation generator instance.
        The return value is guaranteed to be a cv generator instance, whatever
        the input type.
    """
    # Thin public wrapper; warn_mask=True makes the private helper emit the
    # deprecation warning about boolean-mask splitters (see _check_cv).
    return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
    # This exists for internal use while indices is being deprecated.
    is_sparse = sp.issparse(X)
    # Lists and sparse matrices cannot be indexed with boolean masks, so an
    # index-based CV generator is mandatory for them.
    needs_indices = is_sparse or not hasattr(X, "shape")
    if cv is None:
        cv = 3
    if isinstance(cv, numbers.Integral):
        if warn_mask and not needs_indices:
            # Only the public entry point (check_cv) warns; masks still work
            # here but are scheduled for removal.
            warnings.warn('check_cv will return indices instead of boolean '
                          'masks from 0.17', DeprecationWarning)
        else:
            # None lets KFold/StratifiedKFold choose their own default.
            needs_indices = None
        if classifier:
            if type_of_target(y) in ['binary', 'multiclass']:
                cv = StratifiedKFold(y, cv, indices=needs_indices)
            else:
                cv = KFold(_num_samples(y), cv, indices=needs_indices)
        else:
            if not is_sparse:
                n_samples = len(X)
            else:
                n_samples = X.shape[0]
            cv = KFold(n_samples, cv, indices=needs_indices)
    # A user-supplied generator must also be index-based when X requires it.
    if needs_indices and not getattr(cv, "_indices", True):
        raise ValueError("Sparse data and lists require indices-based cross"
                         " validation generator, got: %r", cv)
    return cv
def permutation_test_score(estimator, X, y, cv=None,
                           n_permutations=100, n_jobs=1, labels=None,
                           random_state=0, verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like
        The target variable to try to predict in the case of
        supervised learning.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.

    n_permutations : integer, optional
        Number of times to permute ``y``.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation among groups of samples with
        a same label.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.

    verbose : integer, optional
        The verbosity level.

    Returns
    -------
    score : float
        The true score without permuting targets.

    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutations.

    pvalue : float
        The returned value equals p-value if `scoring` returns bigger
        numbers for better scores (e.g., accuracy_score). If `scoring` is
        rather a loss function (i.e. when lower is better such as with
        `mean_squared_error`) then this is actually the complement of the
        p-value:  1 - p-value.

    Notes
    -----
    This function implements Test 1 in:
        Ojala and Garriga. Permutation Tests for Studying Classifier
        Performance.  The Journal of Machine Learning Research (2010)
        vol. 11

    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)

    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
    # Each permutation shuffles y (respecting `labels` groups) and re-runs
    # the full cross-validation; _shuffle is evaluated sequentially here so
    # the permutations are reproducible for a given random_state.
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, labels, random_state), cv,
            scorer)
        for _ in range(n_permutations))
    permutation_scores = np.array(permutation_scores)
    # +1 in numerator and denominator: the unpermuted score counts as one
    # permutation, which keeps the p-value strictly positive.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue


permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets

    Quick utility that wraps input validation and
    ``next(iter(ShuffleSplit(n_samples)))`` and application to input
    data into a single call for splitting (and optionally subsampling)
    data in a oneliner.

    Parameters
    ----------
    *arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
        Python lists or tuples occurring in arrays are converted to 1D numpy
        arrays.

    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Returns
    -------
    splitting : list of arrays, length=2 * len(arrays)
        List containing train-test split of input array.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_validation import train_test_split
    >>> a, b = np.arange(10).reshape((5, 2)), range(5)
    >>> a
    array([[0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
           [8, 9]])
    >>> list(b)
    [0, 1, 2, 3, 4]

    >>> a_train, a_test, b_train, b_test = train_test_split(
    ...     a, b, test_size=0.33, random_state=42)
    ...
    >>> a_train
    array([[4, 5],
           [0, 1],
           [6, 7]])
    >>> b_train
    [2, 0, 3]
    >>> a_test
    array([[2, 3],
           [8, 9]])
    >>> b_test
    [1, 4]

    """
    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")
    # Keyword options are popped one by one so that anything left over can
    # be reported as invalid below.
    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    dtype = options.pop('dtype', None)
    if dtype is not None:
        warnings.warn("dtype option is ignored and will be removed in 0.18.",
                      DeprecationWarning)

    allow_nd = options.pop('allow_nd', None)
    allow_lists = options.pop('allow_lists', None)

    if allow_lists is not None:
        warnings.warn("The allow_lists option is deprecated and will be "
                      "assumed True in 0.18 and removed.", DeprecationWarning)

    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))
    if allow_nd is not None:
        warnings.warn("The allow_nd option is deprecated and will be "
                      "assumed True in 0.18 and removed.", DeprecationWarning)
    # Only when one of the deprecated flags is explicitly False do we coerce
    # the inputs through check_array (legacy strict behavior).
    if allow_lists is False or allow_nd is False:
        arrays = [check_array(x, 'csr', allow_nd=allow_nd,
                              force_all_finite=False, ensure_2d=False)
                  if x is not None else x
                  for x in arrays]

    if test_size is None and train_size is None:
        test_size = 0.25
    arrays = indexable(*arrays)
    n_samples = _num_samples(arrays[0])
    # A single ShuffleSplit iteration produces the train/test index pair
    # applied to every input array.
    cv = ShuffleSplit(n_samples, test_size=test_size,
                      train_size=train_size,
                      random_state=random_state)
    train, test = next(iter(cv))
    return list(chain.from_iterable((safe_indexing(a, train),
                                     safe_indexing(a, test)) for a in arrays))


train_test_split.__test__ = False  # to avoid a pb with nosetests
| bsd-3-clause |
Myasuka/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
    """Return an (N, D) benchmark data matrix ('dense' random or 'digits')."""
    if dataset == 'dense':
        # Reseed on every call so repeated invocations return identical data.
        np.random.seed(0)
        return np.random.random((N, D))
    if dataset == 'digits':
        X = datasets.load_digits().data
        # Reorder features by decreasing value of the first sample.
        order = np.argsort(X[0])[::-1]
        return X[:, order][:N, :D]
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
                      Drange=2 ** np.arange(7),
                      krange=2 ** np.arange(10),
                      N=1000,
                      D=64,
                      k=5,
                      leaf_size=30,
                      dataset='digits'):
    """Benchmark and plot build/query times of the neighbors algorithms.

    Produces three stacked-bar subplots (construction time in red, query
    time in blue) while varying, in turn, the number of samples
    (``Nrange``), the number of features (``Drange``) and the number of
    neighbors (``krange``).  The two quantities not being varied are held
    at the fiducial values ``N``, ``D`` and ``k``.
    """
    algorithms = ('kd_tree', 'brute', 'ball_tree')
    fiducial_values = {'N': N,
                       'D': D,
                       'k': k}

    #------------------------------------------------------------
    # varying N
    N_results_build = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])
    N_results_query = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])

    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            # n_neighbors cannot exceed the number of samples.
            nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            N_results_build[algorithm][i] = (t1 - t0)
            N_results_query[algorithm][i] = (t2 - t1)

    #------------------------------------------------------------
    # varying D
    D_results_build = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])
    D_results_query = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])

    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=k,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            D_results_build[algorithm][i] = (t1 - t0)
            D_results_query[algorithm][i] = (t2 - t1)

    #------------------------------------------------------------
    # varying k
    k_results_build = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])
    k_results_query = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])

    # BUG FIX: this previously called get_data(N, DD, dataset), silently
    # reusing the last value of the D-loop variable instead of the fiducial
    # dimension D announced in the plot legend.
    X = get_data(N, D, dataset)

    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            k_results_build[algorithm][i] = (t1 - t0)
            k_results_query[algorithm][i] = (t2 - t1)

    pl.figure(figsize=(8, 11))

    for (sbplt, vals, quantity,
         build_time, query_time) in [(311, Nrange, 'N',
                                      N_results_build,
                                      N_results_query),
                                     (312, Drange, 'D',
                                      D_results_build,
                                      D_results_query),
                                     (313, krange, 'k',
                                      k_results_build,
                                      k_results_query)]:
        ax = pl.subplot(sbplt, yscale='log')
        pl.grid(True)

        tick_vals = []
        tick_labels = []

        # Anchor the log-scale bars at the smallest decade of build times.
        bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
                               for alg in algorithms])

        for i, alg in enumerate(algorithms):
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8

            # Stack query time on top of construction time.
            c_bar = pl.bar(xvals, build_time[alg] - bottom,
                           width, bottom, color='r')
            q_bar = pl.bar(xvals, query_time[alg],
                           width, build_time[alg], color='b')

            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ['%i' % val for val in vals]

            pl.text((i + 0.02) / len(algorithms), 0.98, alg,
                    transform=ax.transAxes,
                    ha='left',
                    va='top',
                    bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))

        pl.ylabel('Time (s)')

        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))

        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)

        title_string = 'Varying %s' % quantity
        descr_string = ''

        # Describe the two fixed quantities in the margin of each subplot.
        for s in 'NDk':
            if s == quantity:
                pass
            else:
                descr_string += '%s = %i, ' % (s, fiducial_values[s])
        descr_string = descr_string[:-2]

        pl.text(1.01, 0.5, title_string,
                transform=ax.transAxes, rotation=-90,
                ha='left', va='center', fontsize=20)
        pl.text(0.99, 0.5, descr_string,
                transform=ax.transAxes, rotation=-90,
                ha='right', va='center')

    pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)

    pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
                 'upper right')
if __name__ == '__main__':
    # Run the benchmark on both the real (digits) and synthetic (dense) sets.
    barplot_neighbors(dataset='digits')
    barplot_neighbors(dataset='dense')
    pl.show()
| bsd-3-clause |
jjx02230808/project0223 | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 157 | 2409 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV

    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))

    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent

    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters

    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search

    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    # (This file is an exercise skeleton: until the TASK above is completed,
    # ``y_predicted`` is intentionally undefined and the report below raises
    # a NameError.)

    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))

    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)

    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
jonathandunn/c2xg | c2xg/modules/rdrpos_tagger/pSCRDRtagger/RDRPOSTagger.py | 1 | 5379 | # -*- coding: utf-8 -*-
import os
import sys
import cytoolz as ct
from sklearn.utils import murmurhash3_32
from multiprocessing import Pool
from ..SCRDRlearner.SCRDRTree import SCRDRTree
from ..InitialTagger.InitialTagger import initializeCorpus, initializeSentence
from ..SCRDRlearner.Object import FWObject
from ..Utility.Utils import getWordTag, getRawText, readDictionary
def unwrap_self_RDRPOSTagger(arg, **kwarg):
    # Module-level trampoline so tagRawSentence can be dispatched through
    # multiprocessing.Pool; ``arg`` packs (instance, *method_args).
    # NOTE(review): presumably needed because bound methods are not picklable
    # on Python 2 — confirm against the Pool usage elsewhere in the package.
    return RDRPOSTagger.tagRawSentence(*arg, **kwarg)
class RDRPOSTagger(SCRDRTree):
    """
    RDRPOSTagger for a particular language.

    Wraps a learned SCRDR rule tree plus a lexicon (``DICT``) used for the
    initial tagging pass, and an optional ``word_dict`` used to attach a
    semantic category / index to each word.
    """

    def __init__(self, DICT, word_dict=None):
        self.root = None
        self.word_dict = word_dict
        self.DICT = DICT

    def tagRawSentenceHash(self, rawLine):
        """Tag a raw sentence; return (word_hash, tag_hash, word_cat) triples."""
        line = initializeSentence(self.DICT, rawLine)
        sen = []
        wordTags = line.split()
        for i in range(len(wordTags)):
            fwObject = FWObject.getFWObject(wordTags, i)
            word, tag = getWordTag(wordTags[i])
            node = self.findFiredNode(fwObject)
            # A node fired below the root overrides the initial tag.
            if node.depth > 0:
                tag = node.conclusion
            # Special placeholder units override the rule-based tag.
            # BUG FIX: a missing comma between "<email>" and "<phone>"
            # previously concatenated them into "<email><phone>", so
            # neither token was ever tagged NOUN.
            if "<" in word:
                if word in ["<url>", "<email>", "<phone>", "<cur>"]:
                    tag = "NOUN"
                elif word == "<number>":
                    tag = "NUM"
            # Hash word / tag
            tag_hash = murmurhash3_32(tag, seed=0)
            word_hash = murmurhash3_32(word, seed=0)
            # Only open-class words carry a semantic category.
            if tag in ["ADJ", "ADV", "INTJ", "NOUN", "PROPN", "VERB"]:
                word_cat = self.word_dict.get(word_hash, -1)
            else:
                # Closed class words don't have a semantic category.
                word_cat = -1
            sen.append((word_hash, tag_hash, word_cat))
        return sen

    def tagRawSentence(self, rawLine, pos_dict):
        """Tag a raw sentence; return (word_index, pos_index, domain) triples."""
        line = initializeSentence(self.DICT, rawLine)
        sen = []
        wordTags = line.split()
        for i in range(len(wordTags)):
            fwObject = FWObject.getFWObject(wordTags, i)
            word, tag = getWordTag(wordTags[i])
            node = self.findFiredNode(fwObject)
            if node.depth > 0:
                current_dict = ct.get(word.lower(), self.word_dict, default=0)
                if current_dict == 0:
                    # Unknown word: no index / domain available.
                    sen.append((0, ct.get(node.conclusion.lower(), pos_dict, default=0), 0))
                else:
                    sen.append((ct.get("index", current_dict), ct.get(node.conclusion.lower(), pos_dict, default=0), ct.get("domain", current_dict)))
            else:  # Fired at root, return initialized tag
                current_dict = ct.get(word.lower(), self.word_dict, default=0)
                if current_dict == 0:
                    # CONSISTENCY FIX: this lookup previously omitted
                    # default=0, unlike every sibling call, and so raised on
                    # tags missing from pos_dict instead of yielding 0.
                    sen.append((0, ct.get(tag.lower(), pos_dict, default=0), 0))
                else:
                    sen.append((ct.get("index", current_dict), ct.get(tag.lower(), pos_dict, default=0), ct.get("domain", current_dict)))
        return sen

    def tagRawSentenceList(self, rawLine):
        """Tag a raw sentence; return ("word/TAG", TAG) pairs."""
        line = initializeSentence(self.DICT, rawLine)
        sen = []
        wordTags = line.split()
        for i in range(len(wordTags)):
            fwObject = FWObject.getFWObject(wordTags, i)
            word, tag = getWordTag(wordTags[i])
            node = self.findFiredNode(fwObject)
            if node.depth > 0:
                sen.append((word + "/" + node.conclusion, node.conclusion))
            else:  # Fired at root, return initialized tag
                sen.append((word + "/" + tag, tag))
        return sen
def printHelp():
    """Print command-line usage information for RDRPOSTagger."""
    usage_lines = [
        "\n===== Usage =====",
        '\n#1: To train RDRPOSTagger on a gold standard training corpus:',
        '\npython RDRPOSTagger.py train PATH-TO-GOLD-STANDARD-TRAINING-CORPUS',
        '\nExample: python RDRPOSTagger.py train ../data/goldTrain',
        '\n#2: To use the trained model for POS tagging on a raw text corpus:',
        '\npython RDRPOSTagger.py tag PATH-TO-TRAINED-MODEL PATH-TO-LEXICON PATH-TO-RAW-TEXT-CORPUS',
        '\nExample: python RDRPOSTagger.py tag ../data/goldTrain.RDR ../data/goldTrain.DICT ../data/rawTest',
        '\n#3: Find the full usage at http://rdrpostagger.sourceforge.net !',
    ]
    for line in usage_lines:
        print(line)
def run(args = sys.argv[1:]):
    # Command-line entry point: ``train`` learns a model from a gold corpus,
    # ``tag`` applies a trained model to a raw corpus.
    if (len(args) == 0):
        printHelp()
    elif args[0].lower() == "train":
        try:
            print("\n====== Start ======")
            print("\nGenerate from the gold standard training corpus a lexicon " + args[1] + ".DICT")
            # NOTE(review): ``createLexicon``, ``SCRDRTreeLearner`` and
            # ``THRESHOLD`` are not imported in this module, so this branch
            # appears to raise NameError and fall into the except handler —
            # verify the missing imports against the upstream RDRPOSTagger
            # sources.
            createLexicon(args[1], 'full')
            createLexicon(args[1], 'short')
            print("\nExtract from the gold standard training corpus a raw text corpus " + args[1] + ".RAW")
            getRawText(args[1], args[1] + ".RAW")
            print("\nPerform initially POS tagging on the raw text corpus, to generate " + args[1] + ".INIT")
            DICT = readDictionary(args[1] + ".sDict")
            initializeCorpus(DICT, args[1] + ".RAW", args[1] + ".INIT")
            print('\nLearn a tree model of rules for POS tagging from %s and %s' % (args[1], args[1] + ".INIT"))
            rdrTree = SCRDRTreeLearner(THRESHOLD[0], THRESHOLD[1])
            rdrTree.learnRDRTree(args[1] + ".INIT", args[1])
            print("\nWrite the learned tree model to file " + args[1] + ".RDR")
            rdrTree.writeToFile(args[1] + ".RDR")
            print('\nDone!')
            # Clean up the intermediate training artifacts.
            os.remove(args[1] + ".INIT")
            os.remove(args[1] + ".RAW")
            os.remove(args[1] + ".sDict")
        except Exception as e:
            print("\nERROR ==> ", e)
            printHelp()
    elif args[0].lower() == "tag":
        try:
            # NOTE(review): RDRPOSTagger.__init__ requires a DICT argument,
            # so this zero-argument call looks like it raises TypeError —
            # confirm the intended constructor signature.
            r = RDRPOSTagger()
            print("\n=> Read a POS tagging model from " + args[1])
            r.constructSCRDRtreeFromRDRfile(args[1])
            print("\n=> Read a lexicon from " + args[2])
            DICT = readDictionary(args[2])
            print("\n=> Perform POS tagging on " + args[3])
            r.tagRawCorpus(DICT, args[3])
        except Exception as e:
            print("\nERROR ==> ", e)
            printHelp()
    else:
        printHelp()
if __name__ == "__main__":
    run()
    pass  # no-op; retained from the original script
| gpl-3.0 |
mikebenfield/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
# Module-level handle to the scratch directory shared by the tests below;
# populated by setup_tmpdata and removed by teardown_tmpdata.
tmpdir = None


def setup_tmpdata():
    # create temporary dir with an empty 'mldata' cache subfolder
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
    # remove temporary dir (no-op if setup_tmpdata never ran)
    if tmpdir is not None:
        shutil.rmtree(tmpdir)
def test_mldata_filename():
    """Dataset names must be slugified the way mldata.org expects."""
    expected_pairs = [('datasets-UCI iris', 'datasets-uci-iris'),
                      ('news20.binary', 'news20binary'),
                      ('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
                      ('Nile Water Level', 'nile-water-level'),
                      ('MNIST (original)', 'mnist-original')]
    for raw_name, expected in expected_pairs:
        assert_equal(mldata_filename(raw_name), expected)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    _urlopen_ref = datasets.mldata.urlopen
    # Swap the real urlopen for a mock serving a fake 'mock' data set.
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        # Names absent from the mock must surface the server's HTTPError.
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        # Always restore the real urlopen, even if an assertion failed.
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """Single-column sets should expose only 'data' (no 'target')."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "data"]:
            assert_in(n, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        # Restore the real urlopen regardless of outcome.
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    """Check target/data column selection by default, order, number and name."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default: columns named 'label' and 'data' are picked up
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })
        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        # extra columns come back transposed
        assert_array_equal(dset.z, z.T)

        # by order: first column is target, second is data
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })
        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number: explicit column indices
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })
        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name: explicit column names
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        # Restore the real urlopen regardless of outcome.
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
yangautumn/turing_pattern | amorphous_pattern/test.py | 1 | 1800 |
"""
# demo of plotting squared subfigures
"""
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# x = [0, 0.2, 0.4, 0.6, 0.8]
# y = [0, 0.5, 1, 1.5, 2.0]
# colors = ['k']*len(x)
# ax.scatter(x, y, c=colors, alpha=0.5)
# ax.set_xlim((0,2))
# ax.set_ylim((0,2))
# x0,x1 = ax.get_xlim()
# y0,y1 = ax.get_ylim()
# ax.set_aspect(abs(x1-x0)/abs(y1-y0))
# ax.grid(b=True, which='major', color='k', linestyle='--')
# fig.savefig('test.png', dpi=600)
# plt.close(fig)
"""
# demo of plotting ellipse
"""
# import matplotlib.pyplot as plt
# import numpy.random as rnd
# from matplotlib.patches import Ellipse
# NUM = 250
# ells = [Ellipse(xy=rnd.rand(2)*10, width=rnd.rand(), height=rnd.rand(), angle=rnd.rand()*360)
# for i in range(NUM)]
# fig = plt.figure(0)
# ax = fig.add_subplot(111, aspect='equal')
# for e in ells:
# ax.add_artist(e)
# e.set_clip_box(ax.bbox)
# e.set_alpha(rnd.rand())
# e.set_facecolor(rnd.rand(3))
# ax.set_xlim(0, 10)
# ax.set_ylim(0, 10)
# plt.show()
import os, sys, time
# interval = 10
# while True:
# for i in range(interval, 0, -1):
# sys.stdout.write("\033[K") # Clear to the end of line
# print("{} model(s) are being recorded. Next check in {} seconds".format(i%3+1, i))
# sys.stdout.write("\033[K")
# time.sleep(1)
# print("the following models are being recorded: {}".format(i), end="\r")
# time.sleep(1)
# sys.stdout.write("\033[F") # Cursor up one line
# Demo of ANSI cursor-control escape sequences: alternately print, clear and
# overwrite terminal lines.  NOTE: this loop runs forever (Ctrl-C to stop) —
# it is a throwaway experiment, like the commented-out demos above.
while True:
    print("Yang Li", end='\r')
    time.sleep(1)
    sys.stdout.write("\033[K")  # Clear to the end of line
    time.sleep(1)
    print("Zhang")
    time.sleep(1)
    print('Wenxin', end='\r')
    time.sleep(1)
    sys.stdout.write("\033[K")  # Clear to the end of line
    sys.stdout.write("\033[F")  # Cursor up one line
| gpl-3.0 |
GaryKriebel/osf.io | scripts/annotate_rsvps.py | 60 | 2256 | """Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
    """Look up an OSF user by (case-insensitive) username; None if absent."""
    query = Q('username', 'iexact', email)
    try:
        return User.find_one(query)
    except ModularOdmException:
        return None
def find_by_name(name):
    """Look up an OSF user whose fullname contains every token of ``name``.

    Returns the most recently created match, or None when ``name`` is not a
    usable string or no user matches.
    """
    try:
        parts = re.split(r'\s+', name.strip())
    except (AttributeError, TypeError):
        # BUG FIX: a bare ``except:`` used to swallow everything (including
        # KeyboardInterrupt).  Only non-string values from the spreadsheet
        # (None / NaN) are expected here, so catch just those failures.
        return None
    if len(parts) < 2:
        # A single token is too ambiguous to match on.
        return None
    users = User.find(
        reduce(
            lambda acc, value: acc & value,
            [
                Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
                for part in parts
            ]
        )
    ).sort('-date_created')
    if not users:
        return None
    if len(users) > 1:
        logger.warn('Multiple users found for name {}'.format(name))
    # Newest account wins (query is sorted by -date_created above).
    return users[0]
def logs_since(user, date):
    """Return NodeLog records created by ``user`` strictly after ``date``."""
    query = Q('user', 'eq', user._id) & Q('date', 'gt', date)
    return NodeLog.find(query)
def nodes_since(user, date):
    """Return Node records created by ``user`` strictly after ``date``."""
    query = Q('creator', 'eq', user._id) & Q('date_created', 'gt', date)
    return Node.find(query)
def process(frame):
    """Annotate a workshop RSVP DataFrame with OSF user-activity columns.

    For each row, resolves the attendee to an OSF ``User`` (first by e-mail,
    then by name) and records the user id plus counts of logs and nodes
    created after the workshop date, and the timestamp of the last log.
    """
    frame = frame.copy()  # do not mutate the caller's DataFrame
    frame['user_id'] = ''
    frame['user_logs'] = ''
    frame['user_nodes'] = ''
    frame['last_log'] = ''
    for idx, row in frame.iterrows():
        # E-mail match takes precedence; name match is the fallback.
        user = (
            find_by_email(row['Email address'].strip()) or
            find_by_name(row['Name'])
        )
        if user:
            date = parse_date(row['Workshop_date'])
            frame.loc[idx, 'user_id'] = user._id
            logs = logs_since(user, date)
            frame.loc[idx, 'user_logs'] = logs.count()
            frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
            if logs:
                frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
    return frame
| apache-2.0 |
bikong2/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
    """Convert a raw data set name into its mldata.org filename slug."""
    # Lowercase, turn spaces into dashes, then strip parentheses and dots.
    return re.sub(r'[().]', '', dataname.lower().replace(' ', '-'))
def fetch_mldata(dataname, target_name='label', data_name='data',
                 transpose_data=True, data_home=None):
    """Fetch an mldata.org data set

    If the file does not exist yet, it is downloaded from mldata.org .

    mldata.org does not have an enforced convention for storing data or
    naming the columns in a data set. The default behavior of this function
    works well with the most common cases:

      1) data values are stored in the column 'data', and target values in the
         column 'label'
      2) alternatively, the first column stores target values, and the second
         data values
      3) the data array is stored as `n_features x n_samples` , and thus needs
         to be transposed to match the `sklearn` standard

    Keyword arguments allow to adapt these defaults to specific data sets
    (see parameters `target_name`, `data_name`, `transpose_data`, and
    the examples below).

    mldata.org data sets may have multiple columns, which are stored in the
    Bunch object with their original name.

    Parameters
    ----------

    dataname:
        Name of the data set on mldata.org,
        e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to a mldata.org URL .

    target_name: optional, default: 'label'
        Name or index of the column containing the target values.

    data_name: optional, default: 'data'
        Name or index of the column containing the data.

    transpose_data: optional, default: True
        If True, transpose the downloaded data array.

    data_home: optional, default: None
        Specify another download and cache folder for the data sets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    Returns
    -------

    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'DESCR', the full description of the dataset, and
        'COL_NAMES', the original names of the dataset columns.

    Examples
    --------
    Load the 'iris' dataset from mldata.org:

    >>> from sklearn.datasets.mldata import fetch_mldata
    >>> import tempfile
    >>> test_data_home = tempfile.mkdtemp()

    >>> iris = fetch_mldata('iris', data_home=test_data_home)
    >>> iris.target.shape
    (150,)
    >>> iris.data.shape
    (150, 4)

    Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respects the sklearn axes convention:

    >>> leuk = fetch_mldata('leukemia', transpose_data=True,
    ...                     data_home=test_data_home)
    >>> leuk.data.shape
    (72, 7129)

    Load an alternative 'iris' dataset, which has different names for the
    columns:

    >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
    ...                      data_name=0, data_home=test_data_home)
    >>> iris3 = fetch_mldata('datasets-UCI iris',
    ...                      target_name='class', data_name='double0',
    ...                      data_home=test_data_home)

    >>> import shutil
    >>> shutil.rmtree(test_data_home)
    """

    # normalize dataset name
    dataname = mldata_filename(dataname)

    # check if this data set has been already downloaded
    data_home = get_data_home(data_home=data_home)
    data_home = join(data_home, 'mldata')
    if not exists(data_home):
        os.makedirs(data_home)

    matlab_name = dataname + '.mat'
    filename = join(data_home, matlab_name)
    # if the file does not exist, download it
    if not exists(filename):
        urlname = MLDATA_BASE_URL % quote(dataname)
        try:
            mldata_url = urlopen(urlname)
        except HTTPError as e:
            # Translate a 404 into a clearer, dataset-specific message
            # before re-raising.
            if e.code == 404:
                e.msg = "Dataset '%s' not found on mldata.org." % dataname
            raise
        # store Matlab file
        try:
            with open(filename, 'w+b') as matlab_file:
                copyfileobj(mldata_url, matlab_file)
        except:
            # Don't leave a partially-written cache file behind; the bare
            # except is deliberate here since the exception is re-raised.
            os.remove(filename)
            raise
        mldata_url.close()

    # load dataset matlab file
    with open(filename, 'rb') as matlab_file:
        matlab_dict = io.loadmat(matlab_file, struct_as_record=True)

    # -- extract data from matlab_dict

    # flatten column names
    col_names = [str(descr[0])
                 for descr in matlab_dict['mldata_descr_ordering'][0]]

    # if target or data names are indices, transform then into names
    if isinstance(target_name, numbers.Integral):
        target_name = col_names[target_name]
    if isinstance(data_name, numbers.Integral):
        data_name = col_names[data_name]

    # rules for making sense of the mldata.org data format
    # (earlier ones have priority):
    # 1) there is only one array => it is "data"
    # 2) there are multiple arrays
    #    a) copy all columns in the bunch, using their column name
    #    b) if there is a column called `target_name`, set "target" to it,
    #       otherwise set "target" to first column
    #    c) if there is a column called `data_name`, set "data" to it,
    #       otherwise set "data" to second column
    dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
               'COL_NAMES': col_names}

    # 1) there is only one array => it is considered data
    if len(col_names) == 1:
        data_name = col_names[0]
        dataset['data'] = matlab_dict[data_name]
    # 2) there are multiple arrays
    else:
        for name in col_names:
            dataset[name] = matlab_dict[name]

        if target_name in col_names:
            del dataset[target_name]
            dataset['target'] = matlab_dict[target_name]
        else:
            del dataset[col_names[0]]
            dataset['target'] = matlab_dict[col_names[0]]

        if data_name in col_names:
            del dataset[data_name]
            dataset['data'] = matlab_dict[data_name]
        else:
            del dataset[col_names[1]]
            dataset['data'] = matlab_dict[col_names[1]]

    # set axes to sklearn conventions
    if transpose_data:
        dataset['data'] = dataset['data'].T
    if 'target' in dataset:
        if not sp.sparse.issparse(dataset['target']):
            dataset['target'] = dataset['target'].squeeze()

    return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
    """Install the mldata.org mock used by the doctest fixtures.

    Avoids hitting the network when the module's doctests run.
    """
    # setup mock urllib2 module to avoid downloading from mldata.org
    from sklearn.utils.testing import install_mldata_mock

    mock_datasets = {
        'iris': {
            'data': np.empty((150, 4)),
            'label': np.empty(150),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
        'leukemia': {
            'data': np.empty((72, 7129)),
        },
    }
    install_mldata_mock(mock_datasets)
def teardown_module(module):
    """Remove the mldata.org mock installed by :func:`setup_module`."""
    from sklearn.utils.testing import uninstall_mldata_mock
    uninstall_mldata_mock()
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/semi_supervised/_self_training.py | 4 | 12700 | import warnings
import numpy as np
from ..base import MetaEstimatorMixin, clone, BaseEstimator
from ..utils.validation import check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..utils import safe_mask
__all__ = ["SelfTrainingClassifier"]
# Authors: Oliver Rausch <rauscho@ethz.ch>
# Patrice Becker <beckerp@ethz.ch>
# License: BSD 3 clause
def _validate_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if not hasattr(estimator, "predict_proba"):
msg = "base_estimator ({}) should implement predict_proba!"
raise ValueError(msg.format(type(estimator).__name__))
class SelfTrainingClassifier(MetaEstimatorMixin, BaseEstimator):
    """Self-training classifier.

    This class allows a given supervised classifier to function as a
    semi-supervised classifier, allowing it to learn from unlabeled data. It
    does this by iteratively predicting pseudo-labels for the unlabeled data
    and adding them to the training set.

    The classifier will continue iterating until either max_iter is reached, or
    no pseudo-labels were added to the training set in the previous iteration.

    Read more in the :ref:`User Guide <self_training>`.

    Parameters
    ----------
    base_estimator : estimator object
        An estimator object implementing ``fit`` and ``predict_proba``.
        Invoking the ``fit`` method will fit a clone of the passed estimator,
        which will be stored in the ``base_estimator_`` attribute.

    criterion : {'threshold', 'k_best'}, default='threshold'
        The selection criterion used to select which labels to add to the
        training set. If 'threshold', pseudo-labels with prediction
        probabilities above `threshold` are added to the dataset. If 'k_best',
        the `k_best` pseudo-labels with highest prediction probabilities are
        added to the dataset. When using the 'threshold' criterion, a
        :ref:`well calibrated classifier <calibration>` should be used.

    threshold : float, default=0.75
        The decision threshold for use with `criterion='threshold'`.
        Should be in [0, 1). When using the 'threshold' criterion, a
        :ref:`well calibrated classifier <calibration>` should be used.

    k_best : int, default=10
        The amount of samples to add in each iteration. Only used when
        `criterion` is k_best'.

    max_iter : int or None, default=10
        Maximum number of iterations allowed. Should be greater than or equal
        to 0. If it is ``None``, the classifier will continue to predict labels
        until no new pseudo-labels are added, or all unlabeled samples have
        been labeled.

    verbose: bool, default=False
        Enable verbose output.

    Attributes
    ----------
    base_estimator_ : estimator object
        The fitted estimator.

    classes_ : ndarray or list of ndarray of shape (n_classes,)
        Class labels for each output. (Taken from the trained
        ``base_estimator_``).

    transduction_ : ndarray of shape (n_samples,)
        The labels used for the final fit of the classifier, including
        pseudo-labels added during fit.

    labeled_iter_ : ndarray of shape (n_samples,)
        The iteration in which each sample was labeled. When a sample has
        iteration 0, the sample was already labeled in the original dataset.
        When a sample has iteration -1, the sample was not labeled in any
        iteration.

    n_iter_ : int
        The number of rounds of self-training, that is the number of times the
        base estimator is fitted on relabeled variants of the training set.

    termination_condition_ : {'max_iter', 'no_change', 'all_labeled'}
        The reason that fitting was stopped.

        - 'max_iter': `n_iter_` reached `max_iter`.
        - 'no_change': no new labels were predicted.
        - 'all_labeled': all unlabeled samples were labeled before `max_iter`
          was reached.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import SelfTrainingClassifier
    >>> from sklearn.svm import SVC
    >>> rng = np.random.RandomState(42)
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = rng.rand(iris.target.shape[0]) < 0.3
    >>> iris.target[random_unlabeled_points] = -1
    >>> svc = SVC(probability=True, gamma="auto")
    >>> self_training_model = SelfTrainingClassifier(svc)
    >>> self_training_model.fit(iris.data, iris.target)
    SelfTrainingClassifier(...)

    References
    ----------
    David Yarowsky. 1995. Unsupervised word sense disambiguation rivaling
    supervised methods. In Proceedings of the 33rd annual meeting on
    Association for Computational Linguistics (ACL '95). Association for
    Computational Linguistics, Stroudsburg, PA, USA, 189-196. DOI:
    https://doi.org/10.3115/981658.981684
    """
    _estimator_type = "classifier"

    def __init__(self,
                 base_estimator,
                 threshold=0.75,
                 criterion='threshold',
                 k_best=10,
                 max_iter=10,
                 verbose=False):
        self.base_estimator = base_estimator
        self.threshold = threshold
        self.criterion = criterion
        self.k_best = k_best
        self.max_iter = max_iter
        self.verbose = verbose

    def fit(self, X, y):
        """
        Fits this ``SelfTrainingClassifier`` to a dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        y : {array-like, sparse matrix} of shape (n_samples,)
            Array representing the labels. Unlabeled samples should have the
            label -1.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # we need row slicing support for sparse matrices
        X, y = self._validate_data(X, y, accept_sparse=[
            'csr', 'csc', 'lil', 'dok'])

        if self.base_estimator is None:
            raise ValueError("base_estimator cannot be None!")

        self.base_estimator_ = clone(self.base_estimator)

        # -- parameter validation ------------------------------------------
        if self.max_iter is not None and self.max_iter < 0:
            raise ValueError("max_iter must be >= 0 or None,"
                             f" got {self.max_iter}")

        if not (0 <= self.threshold < 1):
            raise ValueError("threshold must be in [0,1),"
                             f" got {self.threshold}")

        if self.criterion not in ['threshold', 'k_best']:
            raise ValueError(f"criterion must be either 'threshold' "
                             f"or 'k_best', got {self.criterion}.")

        if y.dtype.kind in ['U', 'S']:
            raise ValueError("y has dtype string. If you wish to predict on "
                             "string targets, use dtype object, and use -1"
                             " as the label for unlabeled samples.")

        # -1 marks unlabeled samples by convention.
        has_label = y != -1

        if np.all(has_label):
            warnings.warn("y contains no unlabeled samples", UserWarning)

        if self.criterion == 'k_best' and (self.k_best > X.shape[0] -
                                           np.sum(has_label)):
            warnings.warn("k_best is larger than the amount of unlabeled "
                          "samples. All unlabeled samples will be labeled in "
                          "the first iteration", UserWarning)

        self.transduction_ = np.copy(y)
        # -1 means "never labeled"; 0 means labeled in the original data.
        self.labeled_iter_ = np.full_like(y, -1)
        self.labeled_iter_[has_label] = 0

        self.n_iter_ = 0

        # Main self-training loop: refit on the currently-labeled subset,
        # predict on the rest, and promote confident predictions.
        while not np.all(has_label) and (self.max_iter is None or
                                         self.n_iter_ < self.max_iter):
            self.n_iter_ += 1
            self.base_estimator_.fit(
                X[safe_mask(X, has_label)],
                self.transduction_[has_label])

            if self.n_iter_ == 1:
                # Only validate in the first iteration so that n_iter=0 is
                # equivalent to the base_estimator itself.
                _validate_estimator(self.base_estimator)

            # Predict on the unlabeled samples
            prob = self.base_estimator_.predict_proba(
                X[safe_mask(X, ~has_label)])
            pred = self.base_estimator_.classes_[np.argmax(prob, axis=1)]
            max_proba = np.max(prob, axis=1)

            # Select new labeled samples
            if self.criterion == 'threshold':
                selected = max_proba > self.threshold
            else:
                n_to_select = min(self.k_best, max_proba.shape[0])
                if n_to_select == max_proba.shape[0]:
                    selected = np.ones_like(max_proba, dtype=bool)
                else:
                    # NB these are indicies, not a mask
                    selected = \
                        np.argpartition(-max_proba, n_to_select)[:n_to_select]

            # Map selected indices into original array
            selected_full = np.nonzero(~has_label)[0][selected]

            # Add newly labeled confident predictions to the dataset
            self.transduction_[selected_full] = pred[selected]
            has_label[selected_full] = True
            self.labeled_iter_[selected_full] = self.n_iter_

            if selected_full.shape[0] == 0:
                # no changed labels
                self.termination_condition_ = "no_change"
                break

            if self.verbose:
                print(f"End of iteration {self.n_iter_},"
                      f" added {selected_full.shape[0]} new labels.")

        if self.n_iter_ == self.max_iter:
            self.termination_condition_ = "max_iter"
        if np.all(has_label):
            self.termination_condition_ = "all_labeled"

        # Final fit on everything that ended up labeled.
        self.base_estimator_.fit(
            X[safe_mask(X, has_label)],
            self.transduction_[has_label])
        self.classes_ = self.base_estimator_.classes_
        return self

    @if_delegate_has_method(delegate='base_estimator')
    def predict(self, X):
        """Predict the classes of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            Array with predicted labels.
        """
        check_is_fitted(self)
        return self.base_estimator_.predict(X)

    # NOTE(review): unlike the other delegating methods, this one is not
    # decorated with @if_delegate_has_method — presumably because
    # _validate_estimator already guarantees predict_proba exists; confirm.
    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Array with prediction probabilities.
        """
        check_is_fitted(self)
        return self.base_estimator_.predict_proba(X)

    @if_delegate_has_method(delegate='base_estimator')
    def decision_function(self, X):
        """Calls decision function of the `base_estimator`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Result of the decision function of the `base_estimator`.
        """
        check_is_fitted(self)
        return self.base_estimator_.decision_function(X)

    @if_delegate_has_method(delegate='base_estimator')
    def predict_log_proba(self, X):
        """Predict log probability for each possible outcome.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Array with log prediction probabilities.
        """
        check_is_fitted(self)
        return self.base_estimator_.predict_log_proba(X)

    @if_delegate_has_method(delegate='base_estimator')
    def score(self, X, y):
        """Calls score on the `base_estimator`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.

        y : array-like of shape (n_samples,)
            Array representing the labels.

        Returns
        -------
        score : float
            Result of calling score on the `base_estimator`.
        """
        check_is_fitted(self)
        return self.base_estimator_.score(X, y)
| bsd-3-clause |
cshallue/models | research/tcn/generate_videos.py | 5 | 15889 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generates imitation videos.
Generate single pairwise imitation videos:
blaze build -c opt --config=cuda --copt=-mavx \
learning/brain/research/tcn/generate_videos && \
blaze-bin/learning/brain/research/tcn/generate_videos \
--logtostderr \
--config_paths $config_paths \
--checkpointdir $checkpointdir \
--checkpoint_iter $checkpoint_iter \
--query_records_dir $query_records_dir \
--target_records_dir $target_records_dir \
--outdir $outdir \
--mode single \
--num_query_sequences 1 \
--num_target_sequences -1
# Generate imitation videos with multiple sequences in the target set:
query_records_path
blaze build -c opt --config=cuda --copt=-mavx \
learning/brain/research/tcn/generate_videos && \
blaze-bin/learning/brain/research/tcn/generate_videos \
--logtostderr \
--config_paths $config_paths \
--checkpointdir $checkpointdir \
--checkpoint_iter $checkpoint_iter \
--query_records_dir $query_records_dir \
--target_records_dir $target_records_dir \
--outdir $outdir \
--num_multi_targets 1 \
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import tensorflow as tf
import os
import matplotlib
matplotlib.use("pdf")
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from estimators.get_estimator import get_estimator
from utils import util
tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_string(
'config_paths', '',
"""
Path to a YAML configuration files defining FLAG values. Multiple files
can be separated by the `#` symbol. Files are merged recursively. Setting
a key in these files is equivalent to setting the FLAG value with
the same name.
""")
tf.flags.DEFINE_string(
'model_params', '{}', 'YAML configuration string for the model parameters.')
tf.app.flags.DEFINE_string(
'checkpointdir', '/tmp/tcn', 'Path to model checkpoints.')
tf.app.flags.DEFINE_string(
'checkpoint_iter', '', 'Checkpoint iter to use.')
tf.app.flags.DEFINE_integer(
'num_multi_targets', -1,
'Number of imitation vids in the target set per imitation video.')
tf.app.flags.DEFINE_string(
'outdir', '/tmp/tcn', 'Path to write embeddings to.')
tf.app.flags.DEFINE_string(
'mode', 'single', 'single | multi. Single means generate imitation vids'
'where query is being imitated by single sequence. Multi'
'means generate imitation vids where query is being'
'imitated by multiple.')
tf.app.flags.DEFINE_string('query_records_dir', '',
'Directory of image tfrecords.')
tf.app.flags.DEFINE_string('target_records_dir', '',
'Directory of image tfrecords.')
tf.app.flags.DEFINE_integer('query_view', 1,
'Viewpoint of the query video.')
tf.app.flags.DEFINE_integer('target_view', 0,
'Viewpoint of the imitation video.')
tf.app.flags.DEFINE_integer('smoothing_window', 5,
'Number of frames to smooth over.')
tf.app.flags.DEFINE_integer('num_query_sequences', -1,
'Number of query sequences to embed.')
tf.app.flags.DEFINE_integer('num_target_sequences', -1,
'Number of target sequences to embed.')
FLAGS = tf.app.flags.FLAGS
def SmoothEmbeddings(embs):
  """Temporally smoothes a sequence of embeddings."""
  window = int(FLAGS.smoothing_window)
  num_frames = len(embs)
  # Mean over a symmetric window, clipped at the sequence boundaries.
  smoothed = [
      np.mean(embs[max(i - window, 0):min(i + window, num_frames), :], axis=0)
      for i in range(num_frames)
  ]
  return np.array(smoothed)
def MakeImitationVideo(
    outdir, vidname, query_im_strs, knn_im_strs, height=640, width=360):
  """Creates a KNN imitation video.

  For each frame in vid0, pair with the frame at index in knn_indices in
  vids1. Write video to disk.

  Args:
    outdir: String, directory to write videos.
    vidname: String, name of video.
    query_im_strs: Numpy array holding query image strings.
    knn_im_strs: Numpy array holding knn image strings.
    height: Int, height of raw images.
    width: Int, width of raw images.
  """
  if not tf.gfile.Exists(outdir):
    tf.gfile.MakeDirs(outdir)
  vid_path = os.path.join(outdir, vidname)
  # Pair each query frame with its nearest-neighbor frame.
  combined = zip(query_im_strs, knn_im_strs)
  # Create and write the video.
  fig = plt.figure()
  ax = fig.add_subplot(111)
  ax.set_aspect('equal')
  ax.get_xaxis().set_visible(False)
  ax.get_yaxis().set_visible(False)
  # Placeholder image twice as wide as a single frame (side-by-side layout).
  im = ax.imshow(
      np.zeros((height, width*2, 3)), cmap='gray', interpolation='nearest')
  im.set_clim([0, 1])
  plt.tight_layout(pad=0, w_pad=0, h_pad=0)
  # pylint: disable=invalid-name
  def update_img(pair):
    """Decode pairs of image strings, update a video."""
    im_i, im_j = pair
    # NOTE(review): np.fromstring is deprecated in favor of np.frombuffer,
    # and the str() wrapper looks Python-2 specific — confirm before porting.
    nparr_i = np.fromstring(str(im_i), np.uint8)
    img_np_i = cv2.imdecode(nparr_i, 1)
    # cv2 decodes as BGR; reorder channels to RGB for matplotlib.
    img_np_i = img_np_i[..., [2, 1, 0]]
    nparr_j = np.fromstring(str(im_j), np.uint8)
    img_np_j = cv2.imdecode(nparr_j, 1)
    img_np_j = img_np_j[..., [2, 1, 0]]
    # Optionally reshape the images to be same size.
    frame = np.concatenate([img_np_i, img_np_j], axis=1)
    im.set_data(frame)
    return im
  ani = animation.FuncAnimation(fig, update_img, combined, interval=15)
  writer = animation.writers['ffmpeg'](fps=15)
  dpi = 100
  tf.logging.info('Writing video to:\n %s \n' % vid_path)
  ani.save('%s.mp4' % vid_path, writer=writer, dpi=dpi)
def GenerateImitationVideo(
    vid_name, query_ims, query_embs, target_ims, target_embs, height, width):
  """Generates a single cross-sequence imitation video.

  For each frame in some query sequence, find the nearest neighbor from
  some target sequence in embedding space.

  Args:
    vid_name: String, the name of the video.
    query_ims: Numpy array of shape [query sequence length, height, width, 3].
    query_embs: Numpy array of shape [query sequence length, embedding size].
    target_ims: Numpy array of shape [target sequence length, height, width,
      3].
    target_embs: Numpy array of shape [target sequence length, embedding
      size].
    height: Int, height of the raw image.
    width: Int, width of the raw image.
  """
  # For each query frame, find the index of the nearest neighbor in the
  # target video.
  knn_indices = [util.KNNIds(q, target_embs, k=1)[0] for q in query_embs]
  # Create and write out the video.
  assert knn_indices
  knn_ims = np.array([target_ims[k] for k in knn_indices])
  MakeImitationVideo(FLAGS.outdir, vid_name, query_ims, knn_ims, height, width)
def SingleImitationVideos(
    query_records, target_records, config, height, width):
  """Generates pairwise imitation videos.

  This creates all pairs of target imitating query videos, where each frame
  on the left is matched to a nearest neighbor coming a single
  embedded target video.

  Args:
    query_records: List of Strings, paths to tfrecord datasets to use as
      queries.
    target_records: List of Strings, paths to tfrecord datasets to use as
      targets.
    config: A T object describing training config.
    height: Int, height of the raw image.
    width: Int, width of the raw image.
  """
  # Embed query and target data.
  (query_sequences_to_data,
   target_sequences_to_data) = EmbedQueryTargetData(
       query_records, target_records, config)
  qview = FLAGS.query_view
  tview = FLAGS.target_view
  # Loop over query videos.
  # Fix: dict.iteritems() is Python-2 only; .items() works on both 2 and 3.
  for task_i, data_i in query_sequences_to_data.items():
    for task_j, data_j in target_sequences_to_data.items():
      i_ims = data_i['images']
      i_embs = data_i['embeddings']
      query_embs = SmoothEmbeddings(i_embs[qview])
      query_ims = i_ims[qview]

      j_ims = data_j['images']
      j_embs = data_j['embeddings']
      target_embs = SmoothEmbeddings(j_embs[tview])
      target_ims = j_ims[tview]

      tf.logging.info('Generating %s imitating %s video.' % (task_j, task_i))
      vid_name = 'q%sv%s_im%sv%s' % (task_i, qview, task_j, tview)
      vid_name = vid_name.replace('/', '_')
      GenerateImitationVideo(vid_name, query_ims, query_embs,
                             target_ims, target_embs, height, width)
def MultiImitationVideos(
    query_records, target_records, config, height, width):
  """Creates multi-imitation videos.

  This creates videos where every frame on the left is matched to a nearest
  neighbor coming from a set of multiple embedded target videos.

  Args:
    query_records: List of Strings, paths to tfrecord datasets to use as
      queries.
    target_records: List of Strings, paths to tfrecord datasets to use as
      targets.
    config: A T object describing training config.
    height: Int, height of the raw image.
    width: Int, width of the raw image.
  """
  # Embed query and target data.
  (query_sequences_to_data,
   target_sequences_to_data) = EmbedQueryTargetData(
       query_records, target_records, config)
  qview = FLAGS.query_view
  tview = FLAGS.target_view
  # Loop over query videos.
  # Fix: dict.iteritems() is Python-2 only; .items() works on both 2 and 3.
  for task_i, data_i in query_sequences_to_data.items():
    i_ims = data_i['images']
    i_embs = data_i['embeddings']
    query_embs = SmoothEmbeddings(i_embs[qview])
    query_ims = i_ims[qview]

    all_target_embs = []
    all_target_ims = []
    # If num_imitation_vids is -1, add all seq embeddings to the target set.
    if FLAGS.num_multi_targets == -1:
      num_multi_targets = len(target_sequences_to_data)
    else:
      # Else, add some specified number of seq embeddings to the target set.
      num_multi_targets = FLAGS.num_multi_targets
    # Fix: dict.keys() is not subscriptable on Python 3; materialize the key
    # order once (hoisted out of the loop — the dict doesn't change).
    target_names = list(target_sequences_to_data)
    for j in range(num_multi_targets):
      task_j = target_names[j]
      data_j = target_sequences_to_data[task_j]
      print('Adding %s to target set' % task_j)
      j_ims = data_j['images']
      j_embs = data_j['embeddings']
      target_embs = SmoothEmbeddings(j_embs[tview])
      target_ims = j_ims[tview]
      all_target_embs.extend(target_embs)
      all_target_ims.extend(target_ims)

    # Generate a "j imitating i" video.
    tf.logging.info('Generating all imitating %s video.' % task_i)
    vid_name = 'q%sv%s_multiv%s' % (task_i, qview, tview)
    vid_name = vid_name.replace('/', '_')
    GenerateImitationVideo(vid_name, query_ims, query_embs,
                           all_target_ims, all_target_embs, height, width)
def SameSequenceVideos(query_records, config, height, width):
  """Generate same sequence, cross-view imitation videos.

  Args:
    query_records: List of Strings, paths to tfrecord datasets to embed.
    config: A T object describing training config.
    height: Int, height of the raw image.
    width: Int, width of the raw image.
  """
  batch_size = config.data.embed_batch_size
  # Choose an estimator based on training strategy.
  estimator = get_estimator(config, FLAGS.checkpointdir)
  # Choose a checkpoint path to restore.
  checkpointdir = FLAGS.checkpointdir
  checkpoint_path = os.path.join(checkpointdir,
                                 'model.ckpt-%s' % FLAGS.checkpoint_iter)
  # Embed num_sequences query sequences, store embeddings and image strings in
  # query_sequences_to_data.
  sequences_to_data = {}
  for (view_embeddings, view_raw_image_strings, seqname) in estimator.inference(
      query_records, checkpoint_path, batch_size,
      num_sequences=FLAGS.num_query_sequences):
    sequences_to_data[seqname] = {
        'embeddings': view_embeddings,
        'images': view_raw_image_strings,
    }
  # Loop over query videos.
  qview = FLAGS.query_view
  tview = FLAGS.target_view
  # Fix: dict.iteritems() is Python-2 only; .items() works on both 2 and 3.
  for task_i, data_i in sequences_to_data.items():
    ims = data_i['images']
    embs = data_i['embeddings']
    query_embs = SmoothEmbeddings(embs[qview])
    query_ims = ims[qview]
    target_embs = SmoothEmbeddings(embs[tview])
    target_ims = ims[tview]
    tf.logging.info('Generating %s imitating %s video.' % (task_i, task_i))
    vid_name = 'q%sv%s_im%sv%s' % (task_i, qview, task_i, tview)
    vid_name = vid_name.replace('/', '_')
    GenerateImitationVideo(vid_name, query_ims, query_embs,
                           target_ims, target_embs, height, width)
def EmbedQueryTargetData(query_records, target_records, config):
  """Embeds the full set of query_records and target_records.

  Args:
    query_records: List of Strings, paths to tfrecord datasets to use as
      queries.
    target_records: List of Strings, paths to tfrecord datasets to use as
      targets.
    config: A T object describing training config.
  Returns:
    query_sequences_to_data: A dict holding 'embeddings' and 'images'
    target_sequences_to_data: A dict holding 'embeddings' and 'images'
  """
  batch_size = config.data.embed_batch_size
  # Choose an estimator based on training strategy.
  estimator = get_estimator(config, FLAGS.checkpointdir)
  # Choose a checkpoint path to restore.
  checkpointdir = FLAGS.checkpointdir
  checkpoint_path = os.path.join(checkpointdir,
                                 'model.ckpt-%s' % FLAGS.checkpoint_iter)
  # Embed num_sequences query sequences, store embeddings and image strings in
  # query_sequences_to_data.
  num_query_sequences = FLAGS.num_query_sequences
  num_target_sequences = FLAGS.num_target_sequences
  query_sequences_to_data = {}
  for (view_embeddings, view_raw_image_strings, seqname) in estimator.inference(
      query_records, checkpoint_path, batch_size,
      num_sequences=num_query_sequences):
    query_sequences_to_data[seqname] = {
        'embeddings': view_embeddings,
        'images': view_raw_image_strings,
    }
  # Reuse the query embeddings when the target set is identical, to avoid
  # running inference twice over the same records.
  if (query_records == target_records) and (
      num_query_sequences == num_target_sequences):
    target_sequences_to_data = query_sequences_to_data
  else:
    # Embed num_sequences target sequences, store embeddings and image strings
    # in sequences_to_data.
    target_sequences_to_data = {}
    for (view_embeddings, view_raw_image_strings,
         seqname) in estimator.inference(
             target_records, checkpoint_path, batch_size,
             num_sequences=num_target_sequences):
      target_sequences_to_data[seqname] = {
          'embeddings': view_embeddings,
          'images': view_raw_image_strings,
      }
  return query_sequences_to_data, target_sequences_to_data
def main(_):
  """Entry point: parse config, embed sequences, write imitation videos."""
  # Parse config dict from yaml config files / command line flags.
  config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params)
  # Collect the tfrecord files to embed.
  query_records = util.GetFilesRecursively(FLAGS.query_records_dir)
  target_records = util.GetFilesRecursively(FLAGS.target_records_dir)
  height = config.data.raw_height
  width = config.data.raw_width
  mode = FLAGS.mode
  if mode == 'multi':
    # Target set is composed of multiple videos.
    MultiImitationVideos(query_records, target_records, config, height, width)
  elif mode == 'single':
    # Target set is a single video.
    SingleImitationVideos(query_records, target_records, config, height, width)
  elif mode == 'same':
    # Target set is the same sequence as the query, from a different view.
    SameSequenceVideos(query_records, config, height, width)
  else:
    raise ValueError('Unknown mode %s' % mode)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
asm666/sympy | sympy/interactive/tests/test_ipythonprinting.py | 27 | 5891 | """Tests that the IPython printing module is properly loaded. """
from sympy.core.compatibility import u
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
    """Check plain-text formatting of sympy objects before and after
    loading the printing extension in an IPython session."""
    # Initialize and setup IPython session
    app = init_ipython_session()
    app.run_cell("ip = get_ipython()")
    app.run_cell("inst = ip.instance()")
    app.run_cell("format = inst.display_formatter.format")
    app.run_cell("from sympy import Symbol")

    # Printing without printing extension: plain str() output is expected.
    app.run_cell("a = format(Symbol('pi'))")
    app.run_cell("a2 = format(Symbol('pi')**2)")
    # Deal with API change starting at IPython 1.0: format() began returning
    # a (data, metadata) tuple instead of a bare dict.
    if int(ipython.__version__.split(".")[0]) < 1:
        assert app.user_ns['a']['text/plain'] == "pi"
        assert app.user_ns['a2']['text/plain'] == "pi**2"
    else:
        assert app.user_ns['a'][0]['text/plain'] == "pi"
        assert app.user_ns['a2'][0]['text/plain'] == "pi**2"

    # Load printing extension
    app.run_cell("from sympy import init_printing")
    app.run_cell("init_printing()")
    # Printing with printing extension: output may be pretty-printed unicode
    # (Greek pi) or ASCII, depending on the terminal's capabilities.
    app.run_cell("a = format(Symbol('pi'))")
    app.run_cell("a2 = format(Symbol('pi')**2)")
    # Deal with API change starting at IPython 1.0
    if int(ipython.__version__.split(".")[0]) < 1:
        assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
        assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), '  2\npi ')
    else:
        assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
        assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), '  2\npi ')
def test_print_builtin_option():
    """Check the ``print_builtin`` option of ``init_printing``.

    A dict of sympy Symbols should pretty-print (and LaTeX-render when
    ``use_latex=True``) by default, but fall back to plain ``repr``-style
    text with no 'text/latex' entry when ``print_builtin=False``.
    """
    # Initialize and setup IPython session
    app = init_ipython_session()
    app.run_cell("ip = get_ipython()")
    app.run_cell("inst = ip.instance()")
    app.run_cell("format = inst.display_formatter.format")
    app.run_cell("from sympy import Symbol")
    app.run_cell("from sympy import init_printing")
    app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
    # Deal with API change starting at IPython 1.0: format() returned a dict
    # before 1.0 and a (format_dict, metadata_dict) tuple from 1.0 on.
    if int(ipython.__version__.split(".")[0]) < 1:
        text = app.user_ns['a']['text/plain']
        raises(KeyError, lambda: app.user_ns['a']['text/latex'])
    else:
        text = app.user_ns['a'][0]['text/plain']
        raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
    # Note : In Python 3 the text is unicode, but in 2 it is a string.
    # XXX: How can we make this ignore the terminal width? This test fails if
    # the terminal is too narrow.
    assert text in ("{pi: 3.14, n_i: 3}",
                    u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
                    "{n_i: 3, pi: 3.14}",
                    u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
    # If we enable the default printing, then the dictionary's should render
    # as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
    app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
    app.run_cell("init_printing(use_latex=True)")
    app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
    # Deal with API change starting at IPython 1.0
    if int(ipython.__version__.split(".")[0]) < 1:
        text = app.user_ns['a']['text/plain']
        latex = app.user_ns['a']['text/latex']
    else:
        text = app.user_ns['a'][0]['text/plain']
        latex = app.user_ns['a'][0]['text/latex']
    assert text in ("{pi: 3.14, n_i: 3}",
                    u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
                    "{n_i: 3, pi: 3.14}",
                    u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
    assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
    app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
    app.run_cell("init_printing(use_latex=True, print_builtin=False)")
    app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
    # Deal with API change starting at IPython 1.0
    if int(ipython.__version__.split(".")[0]) < 1:
        text = app.user_ns['a']['text/plain']
        raises(KeyError, lambda: app.user_ns['a']['text/latex'])
    else:
        text = app.user_ns['a'][0]['text/plain']
        raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
    # Note : In Python 3 the text is unicode, but in 2 it is a string.
    # Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
    # Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
    # Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
    assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
    """Regression test: rendering a Matrix via matplotlib's mathtext.

    With ``use_latex='matplotlib'`` the PNG formatter must produce output
    without triggering an IPython ``FormatterWarning`` (warnings are
    promoted to errors so any warning fails the cell).
    """
    # Initialize and setup IPython session
    app = init_ipython_session()
    app.run_cell("import IPython")
    app.run_cell("ip = get_ipython()")
    app.run_cell("inst = ip.instance()")
    app.run_cell("format = inst.display_formatter.format")
    app.run_cell("from sympy import init_printing, Matrix")
    app.run_cell("init_printing(use_latex='matplotlib')")
    # The png formatter is not enabled by default in this context
    app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
    # Make sure no warnings are raised by IPython
    app.run_cell("import warnings")
    app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
    # This should not raise an exception
    app.run_cell("a = format(Matrix([1, 2, 3]))")
| bsd-3-clause |
aiguofer/bokeh | examples/charts/file/timeseries.py | 4 | 2104 | import pandas as pd
from bokeh.charts import TimeSeries, show, output_file
from bokeh.layouts import column
# read in some stock data from the Yahoo Finance API
# NOTE(review): the ichart.yahoo.com CSV endpoint was retired in 2017, so
# these downloads will fail unless pointed at another data source -- confirm
# before running this example.
AAPL = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])
MSFT = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])
IBM = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])
# Combine the first 1000 adjusted closes of each ticker, indexed by date.
data = pd.DataFrame(data=dict(AAPL=AAPL['Adj Close'][:1000],
                              MSFT=MSFT['Adj Close'][:1000],
                              IBM=IBM['Adj Close'][:1000],
                              Date=AAPL['Date'][:1000])).set_index('Date')
# Toolbar tools shared by all charts below.
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
# Hover tooltip spec: (label, column reference) pairs.
tooltips=[
    ("Open", "@Open"),
    ("Close", "@Close"),
    ("High", "@High"),
    ("Low", "@Low"),
    ("Volume", "@Volume")
]
# line simple
tsline = TimeSeries(
    data, y=['IBM', 'MSFT', 'AAPL'], legend=True,
    title="Timeseries (Line)", tools=TOOLS, ylabel='Stock Prices',
    xlabel='Date')
# line explicit
tsline2 = TimeSeries(
    data, y=['IBM', 'MSFT', 'AAPL'], legend=True,
    color=['IBM', 'MSFT', 'AAPL'], dash=['IBM', 'MSFT', 'AAPL'],
    title="Timeseries (Line Explicit)", tools=TOOLS, ylabel='Stock Prices',
    xlabel='Date')
# line w/ tooltips
tsline3 = TimeSeries(
    AAPL, y='Adj Close', x='Date', title="Timeseries (Tooltips)",
    tooltips=tooltips)
# step
tsstep = TimeSeries(
    data, y=['IBM', 'MSFT', 'AAPL'], legend=True, builder_type='step',
    title="Timeseries (Step)", tools=TOOLS, ylabel='Stock Prices',
    xlabel='Date')
# point
tspoint = TimeSeries(
    data, y=['IBM', 'MSFT', 'AAPL'], legend=True, builder_type='point',
    marker=['IBM', 'MSFT', 'AAPL'], color=['IBM', 'MSFT', 'AAPL'],
    title="Timeseries (Point)", tools=TOOLS, ylabel='Stock Prices',
    xlabel='Date')
# Write all five charts to a single HTML file, stacked vertically.
output_file("timeseries.html", title="timeseries.py example")
show(column(tsline, tsline2, tsline3, tsstep, tspoint))
| bsd-3-clause |
amenonsen/ansible | hacking/cgroup_perf_recap_graph.py | 54 | 4384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2018, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import argparse
import csv
from collections import namedtuple
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
raise SystemExit('matplotlib is required for this script to work')
Data = namedtuple('Data', ['axis_name', 'dates', 'names', 'values'])
def task_start_ticks(dates, names):
    """Return (date, name) pairs marking where each new task begins.

    Consecutive duplicate names are collapsed: only the first occurrence of
    each run contributes a tick.
    """
    ticks = []
    last_name = None
    for index, name in enumerate(names):
        if name != last_name:
            last_name = name
            ticks.append((dates[index], name))
    return ticks
def create_axis_data(filename, relative=False):
    """Load one cgroup_perf_recap CSV into a ``Data`` tuple.

    Expects rows of at least 4 columns: epoch timestamp, task name,
    (unused), value. The axis label is the file's base name without
    extension.

    :param filename: path to the CSV file
    :param relative: if True, timestamps are shifted so the first row is t=0;
        otherwise absolute epoch values are used
    :return: Data(axis_name, dates, names, values) with dates in
        matplotlib date numbers
    """
    x_base = None if relative else 0
    axis_name, dummy = os.path.splitext(os.path.basename(filename))
    dates = []
    names = []
    values = []
    with open(filename) as f:
        reader = csv.reader(f)
        for row in reader:
            # First row establishes the time origin in relative mode.
            if x_base is None:
                x_base = float(row[0])
            dates.append(mdates.epoch2num(float(row[0]) - x_base))
            names.append(row[1])
            values.append(float(row[3]))
    return Data(axis_name, dates, names, values)
def create_graph(data1, data2, width=11.0, height=8.0, filename='out.png', title=None):
    """Plot two metric series on a shared time axis and save to PNG.

    ``data1`` is drawn in blue against the left y-axis, ``data2`` in green
    against a twinned right y-axis. A second x-axis below the first labels
    the start of each task.

    :param data1: Data tuple for the left axis (drives the task tick marks)
    :param data2: Data tuple for the right axis
    :param width: figure width in inches
    :param height: figure height in inches
    :param filename: output PNG path
    :param title: optional figure title
    """
    fig, ax1 = plt.subplots(figsize=(width, height), dpi=300)
    task_ticks = task_start_ticks(data1.dates, data1.names)
    ax1.grid(linestyle='dashed', color='lightgray')
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%X'))
    ax1.plot(data1.dates, data1.values, 'b-')
    if title:
        ax1.set_title(title)
    ax1.set_xlabel('Time')
    ax1.set_ylabel(data1.axis_name, color='b')
    for item in ax1.get_xticklabels():
        item.set_rotation(60)
    # Secondary x-axis: one tick per task, pushed below the time axis.
    ax2 = ax1.twiny()
    ax2.set_xticks([x[0] for x in task_ticks])
    ax2.set_xticklabels([x[1] for x in task_ticks])
    ax2.grid(axis='x', linestyle='dashed', color='lightgray')
    ax2.xaxis.set_ticks_position('bottom')
    ax2.xaxis.set_label_position('bottom')
    ax2.spines['bottom'].set_position(('outward', 86))
    ax2.set_xlabel('Task')
    ax2.set_xlim(ax1.get_xlim())
    for item in ax2.get_xticklabels():
        item.set_rotation(60)
    # Right-hand y-axis for the second series.
    ax3 = ax1.twinx()
    ax3.plot(data2.dates, data2.values, 'g-')
    ax3.set_ylabel(data2.axis_name, color='g')
    fig.tight_layout()
    fig.savefig(filename, format='png')
def parse_args(argv=None):
    """Parse command-line arguments for the graphing tool.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]``. Exposing this keeps the parser testable without
        patching ``sys.argv`` and is backward-compatible with ``parse_args()``.
    :return: argparse.Namespace with files, relative, output, width, height,
        title
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('files', nargs=2, help='2 CSV files produced by cgroup_perf_recap to graph together')
    parser.add_argument('--relative', default=False, action='store_true',
                        help='Use relative dates instead of absolute')
    # BUG FIX: the help string used '%s(default)s', which made argparse
    # interpolate the entire parameter dict into the help text; the intended
    # token is '%(default)s'.
    parser.add_argument('--output', default='out.png', help='output path of PNG file: Default %(default)s')
    parser.add_argument('--width', type=float, default=11.0,
                        help='Width of output image in inches. Default %(default)s')
    parser.add_argument('--height', type=float, default=8.0,
                        help='Height of output image in inches. Default %(default)s')
    parser.add_argument('--title', help='Title for graph')
    return parser.parse_args(argv)
def main():
    """Entry point: load both CSV files and render the combined graph."""
    args = parse_args()
    data1 = create_axis_data(args.files[0], relative=args.relative)
    data2 = create_axis_data(args.files[1], relative=args.relative)
    create_graph(data1, data2, width=args.width, height=args.height, filename=args.output, title=args.title)
    print('Graph written to %s' % os.path.abspath(args.output))
if __name__ == '__main__':
    main()
| gpl-3.0 |
guthemberg/yanoama | yanoama/monitoring/rtt_matrix/reload.py | 1 | 3023 | import pickle,sys,socket,subprocess,os,random
from datetime import datetime
from time import time
#from planetlab import Monitor
from configobj import ConfigObj
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans
def save_object_to_file(myobject, output_file):
    """Pickle *myobject* into *output_file*.

    BUG FIX: the file is now opened in binary mode ('wb' -- pickle is a
    binary format, so text mode corrupts data on Windows and fails outright
    on Python 3) and is closed via a context manager even if dump() raises.

    :param myobject: any picklable object
    :param output_file: destination path (overwritten if it exists)
    """
    with open(output_file, 'wb') as f:
        pickle.dump(myobject, f)
def load_object_from_file(input_file):
    """Unpickle and return the object stored in *input_file*.

    BUG FIX: the original passed a fresh ``open(...)`` handle straight to
    ``pickle.load`` and never closed it (a file-handle leak); it also relied
    on text mode. The handle is now binary and closed deterministically.

    :param input_file: path to a pickle file written by save_object_to_file
    :return: the deserialised object
    """
    with open(input_file, 'rb') as f:
        return pickle.load(f)
def cleanup_measurements(host_table, list_of_nodes):
    """Extract RTT measurements for the nodes of interest.

    Iterates *host_table* in its own order, keeping only entries whose key
    appears in *list_of_nodes*; non-positive readings are clamped to 0.0.

    :param host_table: mapping node name -> measured RTT
    :param list_of_nodes: node names to keep
    :return: list of RTT values (0.0 substituted for readings <= 0)
    """
    return [host_table[node] if host_table[node] > 0.0 else 0.0
            for node in host_table if node in list_of_nodes]
if __name__ == '__main__':
    # Python 2 script (uses print statements below).
    # argv: <measurements dir> <space-separated node list> <max cluster count>
    measuremants_dir=sys.argv[1]
    list_of_nodes=(sys.argv[2]).split(" ")
    max_n_clusters=int(sys.argv[3])
    # host_table=load_object_from_file(sys.argv[3])
    file_table_suffix="_rtt_matrix.pck"
    host_measurements=[]
    # Build the feature matrix: one row of cleaned RTTs per node.
    for node in list_of_nodes:
        if len(node)>0:
            node_table=load_object_from_file(measuremants_dir+"/"+node+file_table_suffix)
            host_measurements.append(cleanup_measurements(node_table, list_of_nodes))
    # Elbow search: fit k-means for k = 1..max_n_clusters-1 and keep the
    # model with the lowest inertia, tracking the marginal gain per step.
    inertia=-1.0
    best_n=3
    best_mbk=None
    gain=0
    last_inertia=0.0
    initian_inertia=0.0
    acc_gain=0.0
    last_acc_gain=0.0
    diff_acc_gain=0.0
    for n_clusters in range(1,max_n_clusters):
        mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_clusters, n_init=10, verbose=0)
        X=np.array(host_measurements,dtype=float)
        mbk.fit(X)
        if inertia == -1.0:
            # First iteration: initialise the baselines.
            inertia=mbk.inertia_
            best_n=n_clusters
            best_mbk=mbk
            last_inertia=mbk.inertia_
            initian_inertia=last_inertia
        elif mbk.inertia_<inertia:
            inertia=mbk.inertia_
            best_n=n_clusters
            best_mbk=mbk
        gain=abs((inertia-last_inertia)/(last_inertia))
        acc_gain=abs((inertia-initian_inertia)/(initian_inertia))
        # WE NEED TO FIX THIS diff_
        diff_acc_gain=(acc_gain-last_acc_gain)*100
        last_acc_gain=acc_gain
        last_inertia=mbk.inertia_
        print "number of clusters: %d, inertia: %.4f, gain: %.4f, acc. gain: %.4f, diff acc gain: %.4f"%(n_clusters,mbk.inertia_,gain,acc_gain,diff_acc_gain)
    #save objects
    save_object_to_file(list_of_nodes, "/tmp/nodes_list.pck")
    save_object_to_file(best_mbk, "/tmp/mbk.pck")
    sys.exit(0)
#
# # Compute clustering with MiniBatchKMeans
#
# mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
# n_init=10, max_no_improvement=10, verbose=0)
# t0 = time.time()
# mbk.fit(X)
# t_mini_batch = time.time() - t0
# mbk_means_labels = mbk.labels_
# mbk_means_cluster_centers = mbk.cluster_centers_
# mbk_means_labels_unique = np.unique(mbk_means_labels) | bsd-3-clause |
aidiary/keras_examples | cnn/cifar10/plot_results.py | 1 | 1468 | import os
import matplotlib.pyplot as plt
result_file1 = os.path.join('result_without_datagen', 'history.txt')
result_file2 = os.path.join('result_with_datagen', 'history.txt')
def load_results(filename):
    """Parse a tab-separated Keras history file.

    The file is expected to have a header row followed by rows of five
    columns: epoch, loss, acc, val_loss, val_acc. The training-set columns
    (loss, acc) are intentionally ignored -- only validation metrics are
    plotted by this script.

    Improvement over the original: the unused ``loss``/``acc`` locals
    (parsed and immediately discarded) are gone, and the list appends are
    inlined.

    :param filename: path to the history file
    :return: (epoch_list, val_loss_list, val_acc_list)
    """
    epoch_list = []
    val_loss_list = []
    val_acc_list = []
    with open(filename) as fp:
        fp.readline()  # skip the header/title row
        for line in fp:
            cols = line.rstrip().split('\t')
            assert len(cols) == 5
            epoch_list.append(int(cols[0]))
            val_loss_list.append(float(cols[3]))
            val_acc_list.append(float(cols[4]))
    return epoch_list, val_loss_list, val_acc_list
# Load both runs (with and without data augmentation) for comparison.
epoch1, val_loss1, val_acc1 = load_results(result_file1)
epoch2, val_loss2, val_acc2 = load_results(result_file2)
# Validation-loss curves for both runs, saved to val_loss.png.
plt.figure()
plt.plot(epoch1, val_loss1, 'b-', marker='.', label='without datagen')
plt.plot(epoch2, val_loss2, 'r-', marker='.', label='with datagen')
plt.grid()
plt.legend()
plt.xlabel('epoch')
plt.ylabel('val_loss')
# plt.show()
plt.savefig('val_loss.png')
# Validation-accuracy curves for both runs, saved to val_acc.png.
plt.figure()
plt.plot(epoch1, val_acc1, 'b-', marker='.', label='without datagen')
plt.plot(epoch2, val_acc2, 'r-', marker='.', label='with datagen')
plt.grid()
plt.legend(loc='lower right')
plt.xlabel('epoch')
plt.ylabel('val_acc')
# plt.show()
plt.savefig('val_acc.png')
| mit |
nanophotonics/nplab | nplab/instrument/electronics/SLM/pattern_generators.py | 1 | 15649 | # -*- coding: utf-8 -*-
from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
import numpy as np
# import pyfftw
from scipy import misc
import matplotlib.pyplot as plt
from matplotlib import gridspec
# TODO: performance quantifiers for IFT algorithms (smoothness, efficiency)
# TODO: compare initial phase methods in IFT algorithms: quadratic phase; starting in the real plane with a flat phase
# TODO: compare CPU and GPU
def _get_coordinate_arrays(image, center=None):
"""Creates coordinate arrays in pixel units
:param image: 2D array
:param center: two-tuple of floats. If <1, assumes it's a relative center (with the edges of the SLM being at
[-1, 1]. Otherwise, it should be in pixel units
:return: two 2D arrays of coordinates
"""
shape = np.shape(image)
if center is None:
center = [int(old_div(s, 2)) for s in shape]
elif any(np.array(center) < 1):
center = [int(old_div(s, 2) + c*s) for s, c in zip(shape, center)]
yx = [np.arange(shape[idx]) - center[idx] for idx in range(2)[::-1]]
x, y = np.meshgrid(*yx)
return x, y
def constant(input_phase, offset):
    """Add a uniform (piston) phase *offset* to the whole pattern."""
    return offset + input_phase
def calibration_responsiveness(input_phase, grey_level, axis=0):
    """Half-screen pattern for calibrating phase retardation vs grey level.

    Image the reflected beam directly onto a camera to create fringes; the
    fringe shift as a function of *grey_level* gives the responsiveness.
    This assumes the retardation is uniform across the SLM (see
    https://doi.org/10.1364/AO.43.006400 for the non-uniform case).

    :param input_phase: array whose shape defines the SLM panel (values unused)
    :param grey_level: value written to the addressed half of the panel
    :param axis: 0 fills the bottom half (rows), 1 fills the right half (columns)
    :return: array of zeros with one half set to *grey_level*
    :raises ValueError: for any other axis
    """
    shape = np.shape(input_phase)
    split_at = [s // 2 for s in shape]
    pattern = np.zeros(shape)
    if axis == 0:
        pattern[split_at[0]:] = grey_level
    elif axis == 1:
        pattern[:, split_at[1]:] = grey_level
    else:
        raise ValueError('Unrecognised axis: %d' % axis)
    return pattern
def gratings(input_phase, grating_const_x=0, grating_const_y=0):
    """Add a linear phase ramp (blazed grating / tilted mirror).

    :param input_phase: base phase pattern the ramp is added to
    :param grating_const_x: grating period in pixels along x; any value with
        |period| <= 1 disables the x ramp (default: no grating)
    :param grating_const_y: grating period in pixels along y; any value with
        |period| <= 1 disables the y ramp (default: no grating)
    :return: input_phase plus the linear ramp
    """
    x, y = _get_coordinate_arrays(input_phase)
    ramp = np.zeros(x.shape)
    if np.abs(grating_const_x) > 1:
        ramp = ramp + (2 * np.pi / grating_const_x) * x
    if np.abs(grating_const_y) > 1:
        ramp = ramp + (2 * np.pi / grating_const_y) * y
    return input_phase + ramp
def multispot_grating(input_phase, grating_const, n_spot, center=None):
    """Divide the SLM into angular sectors, each carrying its own grating.

    Each of the *n_spot* pie-slice sectors gets a linear ramp whose
    direction bisects the sector, steering light into n_spot separate spots.

    :param input_phase: base phase pattern
    :param grating_const: inverse grating period (in pixels)
    :param n_spot: number of sectors; values <= 1 leave the phase unchanged
    :param center: optional centre passed to _get_coordinate_arrays
    :return: input_phase plus the sectored gratings
    """
    x, y = _get_coordinate_arrays(input_phase, center)
    # Azimuthal angle remapped from [-pi, pi] to [0, 2*pi].
    theta = np.arctan2(y, x) + np.pi
    phase = np.zeros(x.shape)
    if n_spot > 1:
        sector = 2 * np.pi / n_spot
        for i in range(n_spot):
            bisector = (i + 0.5) * sector
            gx = np.pi * grating_const * np.cos(bisector)
            gy = np.pi * grating_const * np.sin(bisector)
            # Pixels with i*sector < theta <= (i+1)*sector belong to slice i.
            in_sector = (theta > i * sector) & (theta <= (i + 1) * sector)
            phase += (gx * x + gy * y) * in_sector
    return input_phase + phase
def focus(input_phase, curvature=0, center=None):
    """Add the quadratic phase of an ideal thin lens.

    :param input_phase: base phase pattern
    :param curvature: inverse focal length in arbitrary units (0 = no lens)
    :param center: optional lens centre passed to _get_coordinate_arrays
    :return: input_phase plus the lens phase
    """
    x, y = _get_coordinate_arrays(input_phase, center)
    lens_phase = curvature * (x ** 2 + y ** 2)
    return input_phase + lens_phase
def astigmatism(input_phase, amplitude=0, angle=0, center=None):
    """Add a cylindrical (astigmatic) phase term.

    :param input_phase: base phase pattern
    :param amplitude: cylindrical curvature strength
    :param angle: orientation of the cylinder axis, in degrees
    :param center: optional centre passed to _get_coordinate_arrays
    :return: input_phase plus the astigmatic phase
    """
    x, y = _get_coordinate_arrays(input_phase, center)
    rho = np.sqrt(x ** 2 + y ** 2)
    phi = np.arctan2(x, y)
    # Split the amplitude between the 0/90-degree and 45-degree components.
    cos_part = amplitude * np.cos(angle * np.pi / 180)
    sin_part = amplitude * np.sin(angle * np.pi / 180)
    astig_phase = (cos_part * np.cos(2 * phi) + sin_part * np.sin(2 * phi)) * rho ** 2
    return input_phase + astig_phase
def vortexbeam(input_phase, order, angle, center=None):
    """Add the azimuthal phase of an optical vortex.

    :param input_phase: base phase pattern
    :param order: integer topological charge of the vortex
    :param angle: orientation offset of the vortex, in degrees
    :param center: optional vortex centre passed to _get_coordinate_arrays
    :return: input_phase plus the vortex phase
    """
    x, y = _get_coordinate_arrays(input_phase, center)
    azimuth = np.angle(x + y * 1j)
    return input_phase + order * (azimuth + angle * np.pi / 180.)
def linear_lut(input_phase, contrast, offset):
    """Apply a linear lookup-table mapping to a phase pattern.

    The phase is wrapped into [0, 2*pi) (with a tiny epsilon keeping the
    top value strictly below 2*pi), scaled by *contrast*, and shifted by
    *offset* half-turns.

    :param input_phase: phase array (left unmodified; a copy is remapped)
    :param contrast: multiplicative scale applied after wrapping
    :param offset: additive shift in units of pi
    :return: the remapped phase array
    """
    remapped = np.copy(input_phase)
    # remapped -= remapped.min()
    remapped %= 2 * np.pi - 0.000001
    remapped *= contrast
    remapped += offset * np.pi
    return remapped
"""Iterative Fourier Transform algorithms"""
def direct_superposition(input_phase, k_vectors, phases=None):
    """Multi-spot hologram by direct superposition of plane waves.

    Sums one complex plane wave per k-vector, Fourier-transforms the sum,
    and adds the resulting phase to *input_phase*.

    BUG FIX: the accumulator was created with ``np.zeros(shape)`` (float64),
    so the in-place addition of a complex exponential raised a casting
    TypeError; it must be allocated as a complex array.

    :param input_phase: 2D base phase pattern (defines the panel shape)
    :param k_vectors: iterable of (kx, ky) spatial frequencies, in cycles
        per pixel
    :param phases: optional per-spot phase offsets (cycles); random if None,
        which makes the output non-deterministic
    :return: input_phase plus the superposition phase
    """
    if phases is None:
        phases = np.random.random(len(k_vectors))
    shape = np.shape(input_phase)
    x = np.arange(shape[1]) - shape[1] // 2
    y = np.arange(shape[0]) - shape[0] // 2
    x, y = np.meshgrid(x, y)
    # Complex accumulator (see BUG FIX note above).
    real_plane = np.zeros(shape, dtype=complex)
    for k_vec, phase in zip(k_vectors, phases):
        real_plane += np.exp(1j * 2 * np.pi * (k_vec[0] * x + k_vec[1] * y + phase))
    return input_phase + np.angle(np.fft.fftshift(np.fft.fft2(real_plane)))
def mraf(original_phase, target_intensity, input_field=None, mixing_ratio=0.4, signal_region_size=0.5, iterations=30):
    """Mixed-Region Amplitude Freedom algorithm for continuous patterns https://doi.org/10.1364/OE.16.002176

    Iteratively shapes the SLM phase so that a fraction ``mixing_ratio`` of
    the power reproduces ``target_intensity`` inside a circular signal
    region, while the amplitude outside (the noise region) is left free.

    :param original_phase: base phase the computed hologram phase is added to
    :param target_intensity: 2D target intensity in the Fourier plane
    :param input_field: optional complex field at the SLM; defaults to a
        quadratic phase that focuses uniform illumination onto the signal region
    :param mixing_ratio: fraction of power constrained to the signal region
    :param signal_region_size: signal-region radius as a fraction of min(shape)
    :param iterations: number of forward/backward FFT cycles
    :return: original_phase plus the retrieved SLM phase
    """
    shp = target_intensity.shape
    x, y = np.ogrid[old_div(-shp[1], 2):old_div(shp[1], 2), old_div(-shp[0], 2):old_div(shp[0], 2)]
    x, y = np.meshgrid(x, y)
    target_intensity = np.asarray(target_intensity, np.float)
    if input_field is None:
        # By default, the initial phase focuses a uniform SLM illumination onto the signal region
        input_phase = ((old_div(x ** 2, (old_div(shp[1], (signal_region_size * 2 * np.sqrt(2)))))) +
                       (old_div(y ** 2, (old_div(shp[0], (signal_region_size * 2 * np.sqrt(2)))))))
        input_field = np.exp(1j * input_phase)
    # Normalising the input field and target intensity to 1 (doesn't have to be 1, but they have to be equal)
    input_field /= np.sqrt(np.sum(np.abs(input_field)**2))
    target_intensity /= np.sum(target_intensity)
    # This can leave the center of the SLM one or two pixels
    mask = (x**2 + y**2) < (signal_region_size * np.min(shp))**2
    # Complementary weight maps: signal region carries mixing_ratio, the
    # noise region carries the remainder.
    signal_region = np.ones(shp) * mixing_ratio
    signal_region[~mask] = 0
    noise_region = np.ones(shp) * (1 - mixing_ratio)
    noise_region[mask] = 0
    input_intensity = np.abs(input_field)**2
    for _ in range(iterations):
        output_field = np.fft.fft2(input_field)
        # makes sure power out = power in, so that the distribution of power in signal and noise regions makes sense
        output_field = old_div(output_field, np.sqrt(np.prod(shp)))
        output_field = np.fft.fftshift(output_field)
        output_phase = np.angle(output_field)
        # Impose the target amplitude only inside the signal region; leave
        # the noise region's amplitude free (the MRAF mixing step).
        mixed_field = signal_region * np.sqrt(target_intensity) * np.exp(1j * output_phase) + noise_region * output_field
        mixed_field = np.fft.ifftshift(mixed_field)
        input_field = np.fft.ifft2(mixed_field)
        input_phase = np.angle(input_field)
        # Re-impose the known SLM-plane amplitude, keeping only the phase.
        input_field = np.sqrt(input_intensity) * np.exp(1j*input_phase)
        # print(np.sum(np.abs(input_field)**2), np.sum(target_intensity), np.sum(np.abs(output_field)**2))
    return original_phase + input_phase
def gerchberg_saxton(original_phase, target_intensity, input_field=None, iterations=30):
    """Gerchberg-Saxton phase retrieval for continuous patterns.

    The simplest formulation: FFT normalisation factors and shifts can be
    ignored because only the phase survives each constraint step.

    :param original_phase: base phase the retrieved phase is added to
    :param target_intensity: 2D target intensity in the Fourier plane
    :param input_field: optional complex field at the SLM plane; defaults
        to a uniform unit-amplitude, zero-phase field
    :param iterations: number of forward/backward cycles (must be > 0)
    :return: original_phase plus the retrieved SLM-plane phase
    """
    assert iterations > 0
    shp = target_intensity.shape
    # Target amplitude in the (fftshifted) Fourier plane, fixed up front.
    target_amplitude = np.sqrt(np.fft.fftshift(target_intensity))
    if input_field is None:
        input_field = np.ones(shp, dtype=complex)
    # Amplitude constraint at the SLM plane is derived from the input field.
    source_intensity = np.abs(input_field) ** 2
    field = input_field
    for _ in range(iterations):
        # Forward: keep only the far-field phase, impose the target amplitude.
        far_phase = np.angle(np.fft.fft2(field))
        near_field = np.fft.ifft2(target_amplitude * np.exp(1j * far_phase))
        # Backward: keep only the near-field phase, impose the source amplitude.
        near_phase = np.angle(near_field)
        field = np.sqrt(source_intensity) * np.exp(1j * near_phase)
    return original_phase + near_phase
def test_ifft_smoothness(alg_func, *args, **kwargs):
    """Evaluates smoothness of calculated vs target pattern as a function of iteration in an IFFT algorithm

    Smoothness is defined as the sum of absolute difference over the area of
    interest. For most algorithms the area of interest is the whole plane,
    while for MRAF the area of interest is only the signal region.

    :param alg_func: one of the IFT algorithms in this module
        (gerchberg_saxton or mraf)
    :param args: positional arguments forwarded to *alg_func*
    :param kwargs: keyword arguments forwarded to *alg_func*; 'iterations'
        controls how many single-step evaluations are made
    :return: 1D array of per-iteration smoothness values
    """
    # NOTE(review): scipy.misc.face was removed in SciPy 1.12 -- this helper
    # needs scipy.datasets.face (or an equivalent image) on modern SciPy.
    target = np.asarray(misc.face()[:, :, 0], np.float)
    x, y = _get_coordinate_arrays(target)
    shp = target.shape
    # x, y = np.ogrid[old_div(-shp[1], 2):old_div(shp[1], 2), old_div(-shp[0], 2):old_div(shp[0], 2)]
    # x, y = np.meshgrid(x, y)
    mask = (x**2 + y**2) > (0.2 * np.min(shp))**2
    target[mask] = 0
    target /= np.sum(target)
    iterations = 60
    if 'iterations' in kwargs:
        iterations = kwargs['iterations']
    # The algorithms only return the final phase, so to evaluate the smoothness at each iteration, need to set the
    # algorithm to only run one step at a time
    kwargs['iterations'] = 1
    # Defining a mask and a mixing_ratio for calculating the smoothness later
    if alg_func == gerchberg_saxton:
        mask = np.ones(shp, dtype=np.bool)
        mixing_ratio = 1
    elif alg_func == mraf:
        x, y = np.ogrid[old_div(-shp[1], 2):old_div(shp[1], 2), old_div(-shp[0], 2):old_div(shp[0], 2)]
        x, y = np.meshgrid(x, y)
        signal_region_size = 0.5
        if 'signal_region_size' in kwargs:
            signal_region_size = kwargs['signal_region_size']
        mask = (x**2 + y**2) < (signal_region_size * np.min(shp))**2
        mixing_ratio = 0.4
        if 'mixing_ratio' in kwargs:
            mixing_ratio = kwargs['mixing_ratio']
    else:
        raise ValueError('Unrecognised algorithm')
    smth = []
    outputs = []
    # Run one algorithm step per loop, feeding each result back in as the
    # next input field so the iteration history can be inspected.
    for indx in range(iterations):
        init_phase = alg_func(0, target, *args, **kwargs)
        input_field = np.exp(1j * init_phase)
        kwargs['input_field'] = input_field
        output = old_div(np.fft.fftshift(np.fft.fft2(np.exp(1j * init_phase))), (np.prod(shp)))
        output_int = np.abs(output) ** 2
        # print(np.sum(np.abs(output_int)), np.sum(np.abs(output_int)[mask]))
        smth += [old_div(np.sum(np.abs(output_int - mixing_ratio*target)[mask]), np.sum(mask))]
        outputs += [output]
    # Thumbnail grid of 30 evenly-spaced iterations plus a log smoothness plot.
    fig = plt.figure(figsize=(old_div(8*shp[1],shp[0])*2, 8))
    gs = gridspec.GridSpec(1, 2)
    gs2 = gridspec.GridSpecFromSubplotSpec(5, 6, gs[0], 0.001, 0.001)
    reindex = np.linspace(0, iterations-1, 30)
    ax = None
    for indx, _gs in zip(reindex, gs2):
        indx = int(indx)
        ax = plt.subplot(_gs, sharex=ax, sharey=ax)
        ax.imshow(np.abs(outputs[indx]))
        ax.text(shp[1]/2., 0, '%d=%.3g' % (indx, smth[indx]), ha='center', va='top', color='w')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
    ax2 = plt.subplot(gs[1])
    ax2.semilogy(smth)
    return np.array(smth)
def test_ifft_basic(alg_func, *args, **kwargs):
    """Basic testing for IFFT algorithms to see if the final phase truly reproduces an initial target

    Creates an image target (the center of the scipy.misc.face() image),
    runs the alg_func on it, and plots the results for comparison by eye.

    :param alg_func: one of the IFT algorithms in this module
    :param args: positional arguments forwarded to *alg_func*
    :param kwargs: keyword arguments forwarded to *alg_func*
    :return: (output complex field, target intensity)
    """
    # For MRAF only mixing_ratio of the power lands in the signal region, so
    # the displayed colour scale is corrected by that factor.
    if 'mixing_ratio' in kwargs:
        intensity_correction = kwargs['mixing_ratio']
    else:
        intensity_correction = 1
    # NOTE(review): scipy.misc.face was removed in SciPy 1.12 -- needs
    # scipy.datasets.face (or an equivalent image) on modern SciPy.
    target = np.asarray(misc.face()[:, :, 0], np.float)
    x, y = _get_coordinate_arrays(target)
    shp = target.shape
    # x, y = np.ogrid[old_div(-shp[1], 2):old_div(shp[1], 2), old_div(-shp[0], 2):old_div(shp[0], 2)]
    # x, y = np.meshgrid(x, y)
    mask_size = 0.2
    mask = (x**2 + y**2) > (mask_size * np.min(shp))**2
    target[mask] = 0
    target /= np.sum(target)  # the target intensity is normalised to 1
    init_phase = np.zeros(target.shape)
    # Making an input field that focuses light on the target pattern reduces vortex creation and improves pattern
    input_field = np.ones(shp) * np.exp(1j * 2*mask_size*np.min(shp) * ((x/np.max(x))**2+(y/np.max(y))**2))
    kwargs['input_field'] = input_field
    phase = alg_func(init_phase, target, *args, **kwargs)
    output = old_div(np.fft.fftshift(np.fft.fft2(np.exp(1j * phase))), (np.prod(shp)))
    print(np.sum(np.abs(output)**2), np.sum(np.abs(output[~mask])**2), np.sum(np.abs(output[mask])**2))
    # RMS of the relative intensity error, ignoring divisions by zero target.
    _errors = (target - np.abs(output)**2) / target
    errors = _errors[np.abs(_errors) != np.inf]
    avg = np.sqrt(np.mean(errors**2))
    # 2x2 panel: target vs output intensity (top), phases (bottom).
    fig, axs = plt.subplots(2, 2, sharey=True, sharex=True, gridspec_kw=dict(wspace=0.01))
    vmin, vmax = (np.min(target), np.max(target))
    axs[0, 0].imshow(target, vmin=vmin, vmax=vmax)
    axs[0, 0].set_title('Target')
    axs[1, 0].imshow(phase)
    axs[1, 0].set_title('Input Phase')
    vmin *= intensity_correction
    vmax *= intensity_correction
    axs[0, 1].imshow(np.abs(output)**2, vmin=vmin, vmax=vmax)
    axs[0, 1].set_title('Output')
    axs[1, 1].imshow(np.angle(output))
    axs[1, 1].set_title('Output Phase')
    fig.suptitle(r'$\sqrt{\sum\left(\frac{target-output}{target}\right)^2}=$%g' % avg)
    plt.show()
    return output, target
if __name__ == "__main__":
test_ifft_basic(gerchberg_saxton)
| gpl-3.0 |
kevin-intel/scikit-learn | examples/linear_model/plot_logistic_path.py | 19 | 2352 | #!/usr/bin/env python
"""
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True which means that the coefficients of the models are
reused to initialize the next model fit to speed-up the computation of the
full-path.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
# Reduce Iris to a binary problem by dropping class 2.
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max()  # Normalize X to speed-up convergence
# #############################################################################
# Demo path functions
# Sweep C from the smallest value that yields a non-trivial l1 model upward.
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 7, 16)
print("Computing regularization path ...")
start = time()
clf = linear_model.LogisticRegression(penalty='l1', solver='liblinear',
                                      tol=1e-6, max_iter=int(1e6),
                                      warm_start=True,
                                      intercept_scaling=10000.)
coefs_ = []
# warm_start=True reuses each fit as the starting point of the next one.
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    coefs_.append(clf.coef_.ravel().copy())
print("This took %0.3fs" % (time() - start))
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_, marker='o')
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rjferrier/fluidity | tests/wetting_and_drying_balzano3_cg_parallel/plotfs_detec.py | 5 | 5095 | #!/usr/bin/env python
import vtktools
import sys
import math
import re
import commands
import matplotlib.pyplot as plt
import getopt
from scipy.special import erf
from numpy import poly1d
from matplotlib.pyplot import figure, show
from numpy import pi, sin, linspace
from matplotlib.mlab import stineman_interp
from numpy import exp, cos
from fluidity_tools import stat_parser
def mirror(x):
    """Reflect a coordinate about the 13800 m extent of the domain."""
    return 13800 - x
def usage():
    # Print command-line usage. NOTE: Python 2 print statements -- the whole
    # script targets Python 2.
    print 'Usage:'
    print 'plotfs_detec.py --file=detector_filename --save=filename'
    print '--save=... saves the plots as images instead of plotting them on the screen.'
# should be copied from the diamond extrude function. X is 2 dimensional
def bathymetry_function(X):
    """Piecewise-linear bed elevation of the Balzano test basin at distance X.

    Three linear segments with breaks at X = 3600, 4800 and 6000; the
    outermost slope also covers X > 6000.
    """
    if 3600 < X <= 4800:
        return X/2760 - 60.0/23
    if 4800 < X <= 6000:
        return -X/920 + 100.0/23
    # X <= 3600 or X > 6000: the long uniform slope.
    return -X/2760
################# Main ###########################
def main(argv=None):
    """Plot detector free-surface profiles against the basin bathymetry.

    Python 2 script. Reads a fluidity detectors/stat file (``--file``),
    plots the free surface of each detector over the mirrored domain for
    the timestep window [plot_start, plot_end], and either shows the figure
    interactively or saves it to ``<save>.pdf`` when ``--save`` is given.
    """
    filename=''
    timestep_ana=0.0
    dzero=0.01
    save='' # If nonempty, we save the plots as images instead if showing them
    wetting=False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ['file=','save='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '--file':
            filename=arg
        elif opt == '--save':
            save=arg
    if filename=='':
        print 'No filename specified. You have to give the detectors filename.'
        usage()
        sys.exit(2)
    ####################### Print time plot ###########################
    print 'Generating time plot'
    s = stat_parser(filename)
    timesteps=s["ElapsedTime"]["value"]
    timestep=timesteps[1]-timesteps[0]
    print "Found ", len(timesteps), " timesteps with dt=", timestep
    if timestep_ana==0.0:
        timestep_ana=timestep
    fs=s["water"]["FreeSurface"]
    print "Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ")."
    # Get and plot results
    plt.ion() # swith on interactive mode
    plt.rcParams['font.size'] = 22
    fig2 = figure(figsize=(8, 6.2))
    fig2.subplots_adjust(left=0.15, bottom=0.15)
    ax2 = fig2.add_subplot(111)
    plot_start=580 # in timesteps
    plot_end=581 # in timesteps
    plot_name=''
    for t in range(0,len(timesteps)):
        # ignore the first waveperiod
        if t<plot_start:
            continue
        if t>plot_end:
            continue
        fsvalues=[]
        xcoords=[]
        # Collect the mirrored x position and free-surface value of every
        # detector at this timestep.
        for name, item in fs.iteritems():
            #print name
            xcoords.append(mirror(s[name]['position'][0][0]))
            #print xcoord
            fsvalues.append(fs[name][t])
        # Plot result of one timestep
        ax2.plot(xcoords,fsvalues,'b,', label='Numerical solution')
        # Plot Analytical solution
        fsvalues_ana=[]
        # Offset so the bed elevation is referenced to the initial dry depth.
        offset=-bathymetry_function(0.0)+dzero
        xcoords.sort()
        for x in xcoords:
            fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
        # Plot vertical line in bathmetry on right boundary
        xcoords.append(xcoords[len(xcoords)-1]+0.000000001)
        fsvalues_ana.append(2.1)
        ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry', linewidth=2.5)
        #plt.legend()
        if t==plot_end:
            # change from meters in kilometers in the x-axis
            # return locs, labels where locs is an array of tick locations and
            # labels is an array of tick labels.
            locs, labels = plt.xticks()
            for i in range(0,len(locs)):
                labels[i]=str(locs[i]/1000)
            plt.xticks(locs, labels)
        plt.ylim(-2.2,1.4)
        # plt.title(plot_name)
        plt.xlabel('Position [km]')
        plt.ylabel('Free surface [m]')
        if save=='':
            plt.draw()
            raw_input("Please press Enter")
        else:
            plt.savefig(save+'.pdf', facecolor='white', edgecolor='black', dpi=100)
        plt.cla()
        t=t+1
# Make video from the images:
# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
if __name__ == "__main__":
main()
| lgpl-2.1 |
xmnlab/minilab | gui/plotter/async_chart.py | 1 | 2771 | from __future__ import print_function, division
from collections import defaultdict
from matplotlib import pyplot as plt
from matplotlib.ticker import EngFormatter
from random import random, randint
from copy import deepcopy
import time
import numpy as np
import traceback
class DaqMultiPlotter(object):
    """Process-wide live plotter for DAQ channel buffers.

    All state is kept on the class itself and every method is a classmethod,
    so the plotter behaves as a shared singleton: configure() once, then
    push buffers with send_data() and render them with show().
    """
    # Number of samples held per channel; set by configure().
    data_size = None
    # Normalised x-axis (0..1) shared by every channel; set by configure().
    time = None
    formatter = EngFormatter(unit='s', places=1)
    # Latest buffers: {device/buffer name: {channel name: sample array}}.
    data = {}

    # interval: [xmin, xmax, ymin, ymax] applied to every subplot.
    frame_limits = None

    @classmethod
    def configure(cls, samples_per_channel, devices=[]):
        """Set axis data and register one empty buffer per device.

        NOTE(review): `devices=[]` is a mutable default argument; it is
        only iterated here (never mutated), so it is currently harmless.
        """
        cls.data_size = samples_per_channel
        cls.time = np.linspace(0, 1, samples_per_channel)
        cls.formatter = EngFormatter(unit='s', places=1)
        # interval
        cls.frame_limits = [0, 1, -10, 10]

        for dev in devices:
            cls.data[dev] = {}

    @classmethod
    def start(cls):
        # Switch matplotlib to interactive mode so draw calls do not block.
        plt.ion()
        plt.show()

    @classmethod
    def send_data(cls, data):
        """Replace the stored buffer for every device present in `data`."""
        for buffer_name, buffer_data in data.items():
            cls.data[buffer_name] = buffer_data

    @classmethod
    def show(cls):
        """Redraw one stacked subplot per device from the stored buffers."""
        plt.clf()

        num_charts = len(cls.data)
        i = 0

        # Snapshot the buffers before drawing (presumably so a concurrent
        # send_data() cannot change them mid-render — confirm).
        buffer_data = deepcopy(cls.data)

        for buffer_name in buffer_data:
            i += 1
            # Three-digit subplot code, e.g. 211, 212 for two charts.
            chart_id = num_charts*100 + 10 + i

            ax = plt.subplot(chart_id)
            ax.xaxis.set_major_formatter(cls.formatter)
            ax.set_xlabel(buffer_name)
            ax.axis(cls.frame_limits)
            ax.set_autoscale_on(False)

            plt.grid()

            for ch, ch_data in buffer_data[buffer_name].items():
                # Length mismatch would break the plot; log it for debugging.
                if len(cls.time) != len(ch_data):
                    print(len(cls.time), len(ch_data), ch)
                ax.plot(cls.time, ch_data)

        plt.draw()
        plt.pause(0.00000001)

    @classmethod
    def stop(cls):
        # Leave interactive mode; the final show() blocks until closed.
        plt.ioff()
        plt.show()
if __name__ == '__main__':
    def test1():
        """Smoke test: stream random buffers for two devices until interrupted.

        NOTE(review): the loop only calls send_data(), never show(), so
        nothing is actually rendered; `num_frames` is also unused — confirm
        whether a show()/frame-count loop was intended.
        """
        cols = 2
        rows = 1000
        num_frames = 100
        interval_a = -8
        interval_b = +8

        data = defaultdict(dict)

        DaqMultiPlotter.configure(rows, ['ceramic', 'polymer'])
        DaqMultiPlotter.start()

        try:
            while True:
                for group in ['ceramic', 'polymer']:
                    for x in range(cols):
                        # Uniform samples rescaled to [interval_a, interval_b).
                        data[group]['Dev1/ia%s' % x] = (
                            (interval_b - interval_a) *
                            np.random.random_sample((rows,)) + interval_a
                        )

                DaqMultiPlotter.send_data(data)
        except Exception as e:
            print(traceback.format_exc())
            DaqMultiPlotter.stop()

    test1()
# math_algorithms | gpl-3.0 |
pompiduskus/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    # Two planted blocks (nodes 0-2 and 3-6) weakly coupled through node 3.
    affinity = np.zeros((7, 7))
    affinity[:3, :3] = 1.0
    affinity[3:, 3:] = 1.0
    affinity[:3, 3] = 0.2
    affinity[3, :3] = 0.2

    expected = [1, 1, 1, 0, 0, 0, 0]

    for solver in ('arpack', 'lobpcg'):
        for strategy in ('kmeans', 'discretize'):
            for graph in (affinity, sparse.csr_matrix(affinity)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=solver,
                                           assign_labels=strategy).fit(graph)
                labels = model.labels_
                # Label ids are arbitrary; canonicalise before comparing.
                if labels[0] == 0:
                    labels = 1 - labels

                assert_array_equal(labels, expected)

                # A pickle round-trip must preserve parameters and labels.
                restored = loads(dumps(model))
                assert_equal(restored.n_clusters, model.n_clusters)
                assert_equal(restored.eigen_solver, model.eigen_solver)
                assert_array_equal(restored.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering: use it when pyamg is
    # installed, otherwise check that requesting it raises ValueError.
    centers = np.repeat(np.array([[0.], [10.], [20.]]), 3, axis=1)
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)

    distances = pairwise_distances(X)          # Distance matrix
    similarity = sparse.coo_matrix(np.max(distances) - distances)  # Similarity

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        amg_available = False
    else:
        amg_available = True

    if amg_available:
        labels = spectral_clustering(similarity, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care that the clustering is *good*, only that it worked;
        # still require a loose lower bound on agreement.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, similarity,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    # An eigen solver name that does not exist must raise ValueError.
    centers = np.repeat(np.array([[0.], [10.], [20.]]), 3, axis=1)
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)

    distances = pairwise_distances(X)          # Distance matrix
    similarity = sparse.coo_matrix(np.max(distances) - distances)  # Similarity

    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    # An assign_labels strategy that does not exist must raise ValueError.
    centers = np.repeat(np.array([[0.], [10.], [20.]]), 3, axis=1)
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)

    distances = pairwise_distances(X)          # Distance matrix
    similarity = sparse.coo_matrix(np.max(distances) - distances)  # Similarity

    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # A precomputed affinity passed as a sparse matrix must yield a
    # perfect split on two well-separated blobs.
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)

    affinity = rbf_kernel(X, gamma=1)
    # Sparsify by zeroing near-zero similarities.
    affinity = sparse.coo_matrix(np.maximum(affinity - 1e-4, 0))

    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed').fit(affinity).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
    # random_state selected so the eigen decomposition is stable on both
    # OSX and Linux builds.
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )

    # Nearest-neighbours affinity: the kNN graph is not fully connected,
    # which should emit a warning but still cluster perfectly.
    knn_model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                                   random_state=0)
    assert_warns_message(UserWarning, 'not fully connected', knn_model.fit, X)
    assert_equal(adjusted_rand_score(y, knn_model.labels_), 1)

    # Default RBF affinity with an explicit gamma.
    rbf_model = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    assert_equal(adjusted_rand_score(y, rbf_model.fit(X).labels_), 1)

    X = check_random_state(10).rand(10, 5) * 10

    for kernel_name in kernel_metrics():
        # additive_chi2 produces a negative similarity matrix, which is
        # meaningless for spectral clustering, so skip it.
        if kernel_name == 'additive_chi2':
            continue
        model = SpectralClustering(n_clusters=2, affinity=kernel_name,
                                   random_state=0)
        assert_equal((X.shape[0],), model.fit(X).labels_.shape)

    # A constant callable affinity must still produce one label per sample.
    constant_model = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                                        random_state=0)
    assert_equal((X.shape[0],), constant_model.fit(X).labels_.shape)

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    hist_model = SpectralClustering(n_clusters=2, affinity=histogram,
                                    random_state=0)
    assert_equal((X.shape[0],), hist_model.fit(X).labels_.shape)

    # Unknown affinity strings are rejected at fit time.
    bad_model = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, bad_model.fit, X)
def test_discretize(seed=8):
    """Check that discretize recovers labels from a noisy indicator matrix.

    For a range of sample and class counts, draw random integer labels,
    build the corresponding one-hot assignment matrix, perturb it with
    Gaussian noise, and require discretize() to agree with the clean
    labels (adjusted Rand index > 0.8).
    """
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # Random class labels in the inclusive range [0, n_class].
            # `random_integers` was removed from NumPy; randint with an
            # exclusive upper bound of n_class + 1 draws the same range.
            y_true = random_state.randint(0, n_class + 1, size=n_samples)
            # `np.float` alias was removed in NumPy 1.20; use builtin float.
            y_true = np.array(y_true, float)
            # Noisy class-assignment (indicator) matrix.
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
james-pack/ml | pack/ml/nodes/training_visualization.py | 1 | 4326 | import numpy as np
from matplotlib import pyplot as plt
class TrainingVisualization(object):
    """Live matplotlib dashboard for a training run.

    Renders four stacked, x-shared subplots (accuracy, dW/db gradients,
    step size, loss) and refreshes them in place from a training-result
    object via update().
    """

    def __init__(self):
        # Result object currently being visualised; set via set_result().
        self.training_result = None
        # Figure and per-subplot/line handles, created lazily in set_result().
        self.figure = None
        self.loss_subplot = None
        self.aggregate_loss_line = None
        self.step_size_subplot = None
        self.step_size_line = None
        self.accuracy_subplot = None
        self.training_accuracy_line = None
        self.validation_accuracy_line = None
        self.dw_subplot = None
        self.dw_line = None
        self.db_line = None

    def set_result(self, training_result):
        """Attach a training result, rebuilding the figure if it changed.

        NOTE(review): Axes.set_color_cycle was removed in matplotlib 3.0
        (set_prop_cycle is the replacement) — this code targets an older
        matplotlib; confirm before upgrading.
        """
        if self.training_result != training_result:
            self.training_result = training_result
            self.figure = plt.figure()

            # Bottom panel (414): aggregate loss on a log y-scale.
            self.loss_subplot = self.figure.add_subplot(414)
            self.loss_subplot.set_xlabel('epoch')
            self.loss_subplot.set_ylabel('loss')
            self.loss_subplot.set_color_cycle(['brown', 'purple'])
            self.loss_subplot.set_xlim(0, training_result.num_epochs)
            # Returns a tuple of line objects, thus the comma.
            self.aggregate_loss_line, = self.loss_subplot.semilogy([], [], linewidth=1.0, linestyle="-")

            # Panel 413: optimiser step size per epoch.
            self.step_size_subplot = self.figure.add_subplot(413)
            self.step_size_subplot.set_ylabel('step_size')
            self.step_size_subplot.set_color_cycle(['brown', 'purple'])
            self.step_size_subplot.set_xlim(0, training_result.num_epochs)
            # Returns a tuple of line objects, thus the comma.
            self.step_size_line, = self.step_size_subplot.plot([], [], linewidth=1.0, linestyle="-")

            # Top panel (411): training/validation accuracy, sharing the
            # epoch axis with the loss panel.
            self.accuracy_subplot = self.figure.add_subplot(411, sharex=self.loss_subplot)
            self.accuracy_subplot.set_ylabel('accuracy')
            self.accuracy_subplot.set_color_cycle(['blue', 'red', 'green'])
            self.accuracy_subplot.set_xlim(0, training_result.num_epochs)
            self.accuracy_subplot.set_ylim(0, 1.0)
            # Returns a tuple of line objects, thus the comma.
            self.training_accuracy_line, = self.accuracy_subplot.plot([], [], label='$training$', linewidth=1.0, linestyle="-")
            self.validation_accuracy_line, = self.accuracy_subplot.plot([], [], label='$validation$', linewidth=1.0, linestyle="-")
            self.accuracy_subplot.legend(loc='best')

            # Panel 412: gradient magnitudes dW and db on a log y-scale.
            self.dw_subplot = self.figure.add_subplot(412, sharex=self.loss_subplot)
            self.dw_subplot.set_ylabel('dW')
            self.dw_subplot.set_color_cycle(['brown', 'purple'])
            self.dw_subplot.set_xlim(0, training_result.num_epochs)
            # Returns a tuple of line objects, thus the comma.
            self.dw_line, = self.dw_subplot.semilogy([], [], label='$dW$', linewidth=1.0, linestyle="-")
            self.db_line, = self.dw_subplot.semilogy([], [], label='$db$', linewidth=1.0, linestyle="-")
            self.dw_subplot.legend(loc='best')

            # Non-blocking so training can keep running while the figure
            # stays responsive.
            plt.show(block=False)

    def update(self):
        """Refresh every line from the attached training result and redraw."""
        # One x-value per epoch: [0, num_epochs), half-open.
        epochs = np.linspace(0, self.training_result.num_epochs, self.training_result.num_epochs, endpoint=False)

        self.aggregate_loss_line.set_xdata(epochs)
        self.aggregate_loss_line.set_ydata(self.training_result.aggregate_loss())
        self.loss_subplot.relim()
        self.loss_subplot.autoscale_view()

        self.step_size_line.set_xdata(epochs)
        self.step_size_line.set_ydata(self.training_result.step_sizes())
        self.step_size_subplot.relim()
        self.step_size_subplot.autoscale_view()

        #self.training_accuracy_line.set_xdata(epochs)
        #self.training_accuracy_line.set_ydata(self.training_result.accuracy('training'))
        #self.validation_accuracy_line.set_xdata(epochs)
        #self.validation_accuracy_line.set_ydata(self.training_result.accuracy('validation'))
        #self.accuracy_subplot.relim()
        #self.accuracy_subplot.autoscale_view()

        self.dw_line.set_xdata(epochs)
        self.dw_line.set_ydata(self.training_result.stage_metric('linear_1', 'dW'))
        self.db_line.set_xdata(epochs)
        self.db_line.set_ydata(self.training_result.stage_metric('linear_1', 'db'))
        self.dw_subplot.relim()
        self.dw_subplot.autoscale_view()

        # draw + flush so the GUI event loop processes the update now.
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
| mit |
stuart-knock/tvb-library | tvb/simulator/plot/power_spectra_interactive.py | 3 | 18840 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
An interactive power spectra plot generated from a TVB TimeSeries datatype.
Usage
::
#Load the demo data
import numpy
data = numpy.load("demos/demo_data_region_16s_2048Hz.npy")
period = 0.00048828125 #NOTE: Providing period in seconds
#Create a tvb TimeSeries object
import tvb.datatypes.time_series
tsr = tvb.datatypes.time_series.TimeSeriesRegion()
tsr.data = data
tsr.sample_period = period
#Create and launch the interactive visualiser
import tvb.simulator.power_spectra_interactive as ps_int
psi = ps_int.PowerSpectraInteractive(time_series=tsr)
psi.show()
.. moduleauthor:: Stuart A. Knock <Stuart@tvb.invalid>
"""
#TODO: There are fence-posts...
#TODO: add a save button, for current powerspectra view (data more than fig)
#TODO: channel/region selection, with surface time-series vertex selection
# grouped by region
import numpy
import pylab
import matplotlib.widgets as widgets
#The Virtual Brain
from tvb.simulator.common import get_logger
LOG = get_logger(__name__)
import tvb.datatypes.time_series as time_series_datatypes
import tvb.basic.traits.core as core
import tvb.basic.traits.types_basic as basic
# Define a colour theme... see: matplotlib.colors.cnames.keys()
BACKGROUNDCOLOUR = "slategrey"
EDGECOLOUR = "darkslateblue"
AXCOLOUR = "steelblue"
BUTTONCOLOUR = "steelblue"
HOVERCOLOUR = "blue"
class PowerSpectraInteractive(core.Type):
    """
    The graphical interface for visualising the power-spectra (FFT) of a
    timeseries provides controls for setting:

    - which state-variable and mode to display [sets]
    - log or linear scaling for the power or frequency axis [binary]
    - segmentation length [set]
    - windowing function [set]
    - power normalisation [binary] (emphasise relative frequency contribution)
    - show std or sem [binary]

    """

    time_series = time_series_datatypes.TimeSeries(
        label = "Timeseries",
        default = None,
        required = True,
        doc = """ The timeseries to which the FFT is to be applied.""")

    first_n = basic.Integer(
        label = "Display the first 'n'",
        default = -1,
        required = True,
        doc = """Primarily intended for displaying the first N components of a
            surface PCA timeseries. Defaults to -1, meaning it'll display all
            of 'space' (ie, regions or vertices or channels). In other words,
            for Region or M/EEG timeseries you can ignore this, but, for a
            surface timeseries it really must be set.""")

    def __init__(self, **kwargs):
        """
        Initialise based on provided keywords or their traited defaults. Also,
        initialise the place-holder attributes that aren't filled until the
        show() method is called.

        NOTE(review): **kwargs is not used here and super().__init__ is not
        called; presumably the traited core.Type machinery consumes the
        keywords — confirm.
        """
        #figure
        self.ifft_fig = None

        #time-series
        self.fft_ax = None

        #Current state
        self.xscale = "linear"
        self.yscale = "log"
        self.mode = 0
        self.variable = 0
        self.show_sem = False
        self.show_std = False
        self.normalise_power = "no"
        self.window_length = 0.25
        self.window_function = "None"

        #Selectors
        self.xscale_selector = None
        self.yscale_selector = None
        self.mode_selector = None
        self.variable_selector = None
        self.show_sem_selector = None
        self.show_std_selector = None
        self.normalise_power_selector = None
        self.window_length_selector = None
        self.window_function_selector = None

        # Candidate segment lengths derived from power-of-two frequency steps.
        possible_freq_steps = [2**x for x in range(-2, 7)] #Hz
        #possible_freq_steps.append(1.0 / self.time_series_length) #Hz
        self.possible_window_lengths = 1.0 / numpy.array(possible_freq_steps) #s
        self.freq_step = 1.0 / self.window_length
        self.frequency = None
        self.spectra = None
        self.spectra_norm = None

        #Sliders
        #self.window_length_slider = None

    def configure(self):
        """ Separate configuration step (traits initialisation workaround):
        cache the data, sampling period and quantities derived from them. """
        LOG.debug("time_series shape: %s" % str(self.time_series.data.shape))
        #TODO: if isinstance(self.time_series, TimeSeriesSurface) and self.first_n == -1: #LOG.error, return.
        self.data = self.time_series.data[:, :, :self.first_n, :]
        self.period = self.time_series.sample_period
        # Nyquist frequency for the given sampling period.
        self.max_freq = 0.5 / self.period
        self.units = "Hz"
        self.tpts = self.data.shape[0]
        self.nsrs = self.data.shape[2]
        self.time_series_length = self.tpts * self.period
        self.time = numpy.arange(self.tpts) * self.period
        self.labels = ["channel_%0.3d" % k for k in range(self.nsrs)]

    def show(self):
        """ Generate the interactive power-spectra figure. """
        #Make sure everything is configured
        self.configure()

        #Make the figure:
        self.create_figure()

        #Selectors
        self.add_xscale_selector()
        self.add_yscale_selector()
        self.add_mode_selector()
        self.add_variable_selector()
        self.add_normalise_power_selector()
        self.add_window_length_selector()
        self.add_window_function_selector()

        #Sliders
        #self.add_window_length_slider() #Want discrete values
        #self.add_scaling_slider()

        #...
        self.calc_fft()

        #Plot timeseries
        self.plot_spectra()

        pylab.show()

    ##------------------------------------------------------------------------##
    ##------------------ Functions for building the figure -------------------##
    ##------------------------------------------------------------------------##
    def create_figure(self):
        """ Create the figure and time-series axes. """
        time_series_type = self.time_series.__class__.__name__
        try:
            figure_window_title = "Interactive power spectra: " + time_series_type
            pylab.close(figure_window_title)
            self.ifft_fig = pylab.figure(num = figure_window_title,
                                         figsize = (16, 8),
                                         facecolor = BACKGROUNDCOLOUR,
                                         edgecolor = EDGECOLOUR)
        except ValueError:
            # Older PyLab versions only accept numeric figure ids.
            LOG.info("My life would be easier if you'd update your PyLab...")
            figure_number = 42
            pylab.close(figure_number)
            self.ifft_fig = pylab.figure(num = figure_number,
                                         figsize = (16, 8),
                                         facecolor = BACKGROUNDCOLOUR,
                                         edgecolor = EDGECOLOUR)

        self.fft_ax = self.ifft_fig.add_axes([0.15, 0.2, 0.7, 0.75])

    def add_xscale_selector(self):
        """
        Add a radio button to the figure for selecting which scaling the x-axes
        should use.
        """
        pos_shp = [0.45, 0.02, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="xscale")
        xscale_tuple = ("log", "linear")
        self.xscale_selector = widgets.RadioButtons(rax, xscale_tuple, active=1)
        self.xscale_selector.on_clicked(self.update_xscale)

    def add_yscale_selector(self):
        """
        Add a radio button to the figure for selecting which scaling the y-axes
        should use.
        """
        pos_shp = [0.02, 0.5, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="yscale")
        yscale_tuple = ("log", "linear")
        self.yscale_selector = widgets.RadioButtons(rax, yscale_tuple, active=0)
        self.yscale_selector.on_clicked(self.update_yscale)

    def add_mode_selector(self):
        """
        Add a radio button to the figure for selecting which mode of the model
        should be displayed.
        """
        # Height scales with the number of modes so all labels fit.
        pos_shp = [0.02, 0.07, 0.05, 0.1+0.002*self.data.shape[3]]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="Mode")
        mode_tuple = tuple(range(self.data.shape[3]))
        self.mode_selector = widgets.RadioButtons(rax, mode_tuple, active=0)
        self.mode_selector.on_clicked(self.update_mode)

    def add_variable_selector(self):
        """
        Generate radio selector buttons to set which state variable is
        displayed.
        """
        noc = self.data.shape[1] # number of choices
        #State variable for the x axis
        pos_shp = [0.02, 0.22, 0.05, 0.12+0.008*noc]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR,
                                     title="state variable")
        self.variable_selector = widgets.RadioButtons(rax, tuple(range(noc)),
                                                      active=0)
        self.variable_selector.on_clicked(self.update_variable)

    def add_window_length_selector(self):
        """
        Generate radio selector buttons to set the window length is seconds.
        """
        noc = self.possible_window_lengths.shape[0] # number of choices
        # Radio buttons on the right-hand side of the figure.
        pos_shp = [0.88, 0.07, 0.1, 0.12+0.02*noc]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR,
                                     title="Segment length")
        wl_tup = tuple(self.possible_window_lengths)
        self.window_length_selector = widgets.RadioButtons(rax, wl_tup, active=4)
        self.window_length_selector.on_clicked(self.update_window_length)

    def add_window_function_selector(self):
        """
        Generate radio selector buttons to set the windowing function.
        """
        #TODO: add support for kaiser, requires specification of beta.
        wf_tup = ("None", "hamming", "bartlett", "blackman", "hanning")
        noc = len(wf_tup) # number of choices
        # Radio buttons at the top-right of the figure.
        pos_shp = [0.88, 0.77, 0.085, 0.12+0.01*noc]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR,
                                     title="Windowing function")
        self.window_function_selector = widgets.RadioButtons(rax, wf_tup, active=0)
        self.window_function_selector.on_clicked(self.update_window_function)

    def add_normalise_power_selector(self):
        """
        Add a radio button to choose whether or not the power of all spectra
        should be normalised to 1.
        """
        pos_shp = [0.02, 0.8, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="normalise")
        np_tuple = ("yes", "no")
        self.normalise_power_selector = widgets.RadioButtons(rax, np_tuple, active=1)
        self.normalise_power_selector.on_clicked(self.update_normalise_power)

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the state --------------------##
    ##------------------------------------------------------------------------##
    def calc_fft(self):
        """
        Calculate FFT using current state of the window_length and
        window_function, averaging power over (possibly overlapping) segments.
        """
        #Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(self.time_series_length / self.window_length))
        if nseg != 1:
            # NOTE(review): seg_tpts and the slice starts below are floats
            # here (period is a float); float slicing only worked on old
            # NumPy/Python 2 — under Python 3 this needs int()/// — confirm.
            seg_tpts = self.window_length / self.period
            overlap = ((seg_tpts * nseg) - self.tpts) / (nseg-1)
            starts = [max(seg*(seg_tpts - overlap), 0) for seg in range(nseg)]
            segments = [self.data[start:start+seg_tpts] for start in starts]
            segments = [segment[:, :, :, numpy.newaxis] for segment in segments]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            time_series = self.data[:, :, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]

        #Base-line correct segmented time-series
        time_series = time_series - time_series.mean(axis=0)[numpy.newaxis, :]

        #Apply windowing function
        # eval() is restricted to the fixed whitelist offered by
        # add_window_function_selector, so no untrusted input reaches it.
        if self.window_function != "None":
            window_function = eval("".join(("numpy.", self.window_function)))
            window_mask = numpy.reshape(window_function(seg_tpts),
                                        (seg_tpts, 1, 1, 1, 1))
            time_series = time_series * window_mask

        #Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        # NOTE(review): on Python 3 this division yields a float, which
        # breaks the slice below; Python-2-era code — needs // there.
        nfreq = len(result)/2

        self.frequency = numpy.arange(0, self.max_freq, self.freq_step)
        LOG.debug("frequency shape: %s" % str(self.frequency.shape))

        # Power averaged over segments (last axis).
        self.spectra = numpy.mean(numpy.abs(result[1:nfreq+1])**2, axis=-1)
        LOG.debug("spectra shape: %s" % str(self.spectra.shape))

        # Per-frequency power normalised so each spectrum sums to 1.
        self.spectra_norm = (self.spectra / numpy.sum(self.spectra, axis=0))
        LOG.debug("spectra_norm shape: %s" % str(self.spectra_norm.shape))

        #import pdb; pdb.set_trace()
#        self.spectra_std = numpy.std(numpy.abs(result[:nfreq]), axis=4)
#        self.spectra_sem = self.spectra_std / time_series.shape[4]

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the figure -------------------##
    ##------------------------------------------------------------------------##
    def update_xscale(self, xscale):
        """
        Update the FFT axes' xscale to either log or linear based on radio
        button selection.
        """
        self.xscale = xscale
        self.fft_ax.set_xscale(self.xscale)
        pylab.draw()

    def update_yscale(self, yscale):
        """
        Update the FFT axes' yscale to either log or linear based on radio
        button selection.
        """
        self.yscale = yscale
        self.fft_ax.set_yscale(self.yscale)
        pylab.draw()

    def update_mode(self, mode):
        """ Update the visualised mode based on radio button selection. """
        self.mode = mode
        self.plot_spectra()

    def update_variable(self, variable):
        """
        Update state variable being plotted based on radio button selection.
        """
        self.variable = variable
        self.plot_spectra()

    def update_normalise_power(self, normalise_power):
        """ Update whether to normalise based on radio button selection. """
        self.normalise_power = normalise_power
        self.plot_spectra()

    def update_window_length(self, length):
        """
        Update timeseries window length based on the selected value.
        """
        #TODO: need this casting but not sure why, don't need int() with mode...
        self.window_length = numpy.float64(length)
        #import pdb; pdb.set_trace()
        self.freq_step = 1.0 / self.window_length
        self.update_spectra()

    def update_window_function(self, window_function):
        """
        Update windowing function based on the radio button selection.
        """
        self.window_function = window_function
        self.update_spectra()

    def update_spectra(self):
        """ Recompute the FFT and redraw the power-spectra. """
        self.calc_fft()
        self.plot_spectra()

#    def plot_std(self):
#        """ Plot """
#        std = (self.spectra[:, self.variable, :, self.mode] +
#               self.spectra_std[:, self.variable, :, self.mode])
#        self.fft_ax.plot(self.frequency, std, "--")
#
#
#    def plot_sem(self):
#        """ """
#        sem = (self.spectra[:, self.variable, :, self.mode] +
#               self.spectra_sem[:, self.variable, :, self.mode])
#        self.fft_ax.plot(self.frequency, sem, ":")

    def plot_spectra(self):
        """ Plot the power spectra. """
        self.fft_ax.clear()
        # Set title and axis labels
        time_series_type = self.time_series.__class__.__name__
        self.fft_ax.set(title = time_series_type)
        self.fft_ax.set(xlabel = "Frequency (%s)" % self.units)
        self.fft_ax.set(ylabel = "Power")

        # Set x and y scale based on current radio button selection.
        self.fft_ax.set_xscale(self.xscale)
        self.fft_ax.set_yscale(self.yscale)

        if hasattr(self.fft_ax, 'autoscale'):
            self.fft_ax.autoscale(enable=True, axis='both', tight=True)

        #import pdb; pdb.set_trace()
        #Plot the power spectra
        if self.normalise_power == "yes":
            self.fft_ax.plot(self.frequency,
                             self.spectra_norm[:, self.variable, :, self.mode])
        else:
            self.fft_ax.plot(self.frequency,
                             self.spectra[:, self.variable, :, self.mode])

#        #TODO: Need to ensure colour matching... and allow region selection.
#        #If requested, add standard deviation
#        if self.show_std:
#            self.plot_std(self)
#
#        #If requested, add standard error in mean
#        if self.show_sem:
#            self.plot_sem(self)

        pylab.draw()
if __name__ == "__main__":
# Do some stuff that tests or makes use of this module...
LOG.info("Testing %s module..." % __file__)
try:
data = numpy.load("../demos/demo_data_region_16s_2048Hz.npy")
except IOError:
LOG.error("Can't load demo data. Run demos/generate_region_demo_data.py")
raise
period = 0.00048828125 #NOTE: Providing period in seconds
tsr = time_series_datatypes.TimeSeriesRegion()
tsr.data = data
tsr.sample_period = period
psi = PowerSpectraInteractive(time_series=tsr)
psi.show()
| gpl-2.0 |
manipopopo/tensorflow | tensorflow/python/estimator/inputs/pandas_io_test.py | 7 | 11057 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def makeTestDataFrameWithYAsDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
a_label = np.arange(10, 14)
b_label = np.arange(50, 54)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.DataFrame({'a_target': a_label, 'b_target': b_label}, index=index)
return x, y
  def callInputFnOnce(self, input_fn, session):
    """Evaluate one batch from `input_fn` and shut the queue runners down."""
    results = input_fn()
    coord = coordinator.Coordinator()
    # Queue runners must be running before the input tensors are evaluated,
    # otherwise session.run would block on an empty queue.
    threads = queue_runner_impl.start_queue_runners(session, coord=coord)
    result_values = session.run(results)
    # Orderly shutdown: signal stop, then wait for the runner threads.
    coord.request_stop()
    coord.join(threads)
    return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_RaisesWhenTargetColumnIsAList(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.assertRaisesRegexp(TypeError,
'target_column must be a string type'):
pandas_io.pandas_input_fn(x, y, batch_size=2,
shuffle=False,
num_epochs=1,
target_column=['one', 'two'])
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(ValueError,
'shuffle must be provided and explicitly '
'set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFnWhenYIsDataFrame_ProducesExpectedOutput(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, targets = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(targets['a_target'], [10, 11])
self.assertAllEqual(targets['b_target'], [50, 51])
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumns(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'b'})
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, targets = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(targets['a'], [10, 11])
self.assertAllEqual(targets['b'], [50, 51])
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumnsInTargets(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'a_n'})
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, targets = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(targets['a'], [10, 11])
self.assertAllEqual(targets['a_n'], [50, 51])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
# Run the TensorFlow test runner when this module is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
rs2/pandas | pandas/tests/reshape/test_util.py | 3 | 2846 | import numpy as np
import pytest
from pandas import Index, date_range
import pandas._testing as tm
from pandas.core.reshape.util import cartesian_product
class TestCartesianProduct:
    """Tests for ``pandas.core.reshape.util.cartesian_product``."""

    def test_simple(self):
        letters, numbers = list("ABC"), [1, 22]
        out_letters, out_numbers = cartesian_product([letters, numbers])
        tm.assert_numpy_array_equal(
            out_letters, np.array(["A", "A", "B", "B", "C", "C"]))
        tm.assert_numpy_array_equal(
            out_numbers, np.array([1, 22, 1, 22, 1, 22]))

    def test_datetimeindex(self):
        # regression test for GitHub issue #6439
        # the ordering on a DatetimeIndex must stay consistent
        dti = date_range("2000-01-01", periods=2)
        days_left, days_right = (
            Index(level).day for level in cartesian_product([dti, dti]))
        tm.assert_index_equal(days_left, Index([1, 1, 2, 2]))
        tm.assert_index_equal(days_right, Index([1, 2, 1, 2]))

    def test_tzaware_retained(self):
        dti = date_range("2000-01-01", periods=2, tz="US/Pacific")
        repeated, _ = cartesian_product([dti, np.array([3, 4])])
        # timezone information survives the product
        tm.assert_index_equal(repeated, dti.repeat(2))

    def test_tzaware_retained_categorical(self):
        cat = date_range("2000-01-01", periods=2, tz="US/Pacific").astype("category")
        repeated, _ = cartesian_product([cat, np.array([3, 4])])
        tm.assert_index_equal(repeated, cat.repeat(2))

    def test_empty(self):
        # a product involving an empty factor yields empty arrays of
        # the matching dtypes
        cases = [([], []), ([0, 1], []), ([], ["a", "b", "c"])]
        for left, right in cases:
            out_left, out_right = cartesian_product([left, right])
            tm.assert_numpy_array_equal(
                out_left, np.array([], dtype=np.asarray(left).dtype))
            tm.assert_numpy_array_equal(
                out_right, np.array([], dtype=np.asarray(right).dtype))
        # empty product (empty input):
        assert cartesian_product([]) == []

    @pytest.mark.parametrize(
        "X", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]]
    )
    def test_invalid_input(self, X):
        with pytest.raises(TypeError,
                           match="Input must be a list-like of list-likes"):
            cartesian_product(X=X)

    def test_exceed_product_space(self):
        # GH31355: raise useful error when product space is too large
        dims = [np.arange(0, 22, dtype=np.int16) for _ in range(12)]
        dims.append(np.arange(15128, dtype=np.int16))
        with pytest.raises(ValueError,
                           match="Product space too large to allocate arrays!"):
            cartesian_product(X=dims)
| bsd-3-clause |
JustinWingChungHui/MyFamilyRoot | facial_recognition/models.py | 2 | 1355 | from django.db import models
from sklearn import neighbors
from family_tree.models import Family
import math
import pickle
# Create your models here.
class FaceModel(models.Model):
    """One trained face-recognition model per family.

    Stores the pickled training data alongside a pickled, fitted
    scikit-learn KNN classifier so it can be reloaded without retraining.
    """
    family = models.OneToOneField(
        Family,
        on_delete=models.CASCADE,
        primary_key=True,
    )
    # pickled training inputs and their matching person ids
    fit_data_faces = models.BinaryField(null = False, blank = False)
    fit_data_person_ids = models.BinaryField(null = False, blank = False)
    n_neighbors = models.IntegerField(null = False, blank = False)
    # pickled, fitted KNeighborsClassifier
    trained_knn_model = models.BinaryField(null = False, blank = False)

    def __str__(self):  # __unicode__ on Python 2
        return 'Family:{0} n:{1}'.format(self.family_id, self.n_neighbors)

    def update_knn_classifier(self, X, y):
        """Retrain the KNN face classifier on (X, y) and persist it."""
        # k ~= sqrt(sample count) is a reasonable default for KNN
        k = int(round(math.sqrt(len(X))))
        # create and train the classifier
        classifier = neighbors.KNeighborsClassifier(
            n_neighbors=k, algorithm='ball_tree', weights='distance')
        classifier.fit(X, y)
        # pickle the training data and the fitted model into this row
        self.fit_data_faces = pickle.dumps(X)
        self.fit_data_person_ids = pickle.dumps(y)
        self.n_neighbors = k
        self.trained_knn_model = pickle.dumps(classifier)
        self.save()
| gpl-2.0 |
untom/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and, therefore,
so are the corresponding Mahalanobis distances. One would rather have to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purpose, the cubic root of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.covariance import EmpiricalCovariance, MinCovDet

# 125 two-dimensional samples, the last 25 drawn from a contaminating
# distribution with a larger variance
n_samples = 125
n_outliers = 25
n_features = 2

# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)

# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)

# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)

###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)

# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
                              color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
                               color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")

# Show contours of the two distance functions (MLE vs robust MCD)
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
                     np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
                                  cmap=plt.cm.PuBu_r,
                                  linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
                                 cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
                inlier_plot, outlier_plot],
               ['MLE dist', 'robust dist', 'inliers', 'outliers'],
               loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())

# Plot the scores for each point; the cube root (** 0.33) is the
# Wilson-Hilferty transform mentioned in the module docstring
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
             emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
             emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())

robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
                widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
             robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
             robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())

plt.show()
| bsd-3-clause |
UpSea/midProjects | histdataUI/Widgets/pgCrossAddition.py | 1 | 6756 | # -*- coding: utf-8 -*-
import sys,os
# make the bundled pyqtgraph 0.9.10 (under thirdParty/) importable
xpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'thirdParty','pyqtgraph-0.9.10'))
sys.path.append(xpower)
# also put the package root on sys.path so Widgets.* resolves
xpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir))
sys.path.append(xpower)
from PyQt4 import QtCore, QtGui
import pyqtgraph as pg
import numpy as np
import matplotlib.dates as mpd
import datetime as dt
import pytz
from Widgets.pgCandleItem import CandlestickItem
#----------------------------------------------------------------------
########################################################################
class pgCrossAddition(pg.PlotWidget):
    """
    Adds a crosshair (vertical/horizontal lines plus price and date
    labels that follow the mouse) to pg.PlotWidget.
    Every subclass derived from this class inherits the crosshair.
    """
    #----------------------------------------------------------------------
    def __init__(self):
        """Create the crosshair items and hook up mouse tracking."""
        super(pgCrossAddition, self).__init__()
        self.vLine = pg.InfiniteLine(angle=90, movable=False)
        self.hLine = pg.InfiniteLine(angle=0, movable=False)
        self.textPrice = pg.TextItem('price')
        self.textDate = pg.TextItem('date')
        # ignoreBounds keeps the crosshair from affecting auto-ranging
        self.addItem(self.textDate, ignoreBounds=True)
        self.addItem(self.textPrice, ignoreBounds=True)
        self.addItem(self.vLine, ignoreBounds=True)
        self.addItem(self.hLine, ignoreBounds=True)
        # rate-limited mouse-move signal -> mouseMoved
        self.proxy = pg.SignalProxy(self.scene().sigMouseMoved, rateLimit=60, slot=self.mouseMoved)
    def mouseMoved(self,evt):
        # NOTE(review): these local imports shadow the module-level ones;
        # they look redundant but are kept byte-identical here
        import matplotlib.dates as mpd
        import datetime as dt
        pos = evt[0]  ## using signal proxy turns original arguments into a tuple
        if self.sceneBoundingRect().contains(pos):
            mousePoint = self.plotItem.vb.mapSceneToView(pos)
            xAxis = mousePoint.x()
            yAxis = mousePoint.y()
            # 1) move the crosshair and fill in the price and date labels
            self.vLine.setPos(xAxis)
            self.hLine.setPos(yAxis)
            self.textPrice.setHtml(
                '<div style="text-align: center">\
                <span style="color: red; font-size: 10pt;">\
                %0.3f\
                </span>\
                </div>'\
                % (mousePoint.y()))
            # x axis carries matplotlib date numbers; convert back to UTC
            strTime = mpd.num2date(mousePoint.x()).astimezone(pytz.timezone('utc'))
            if(strTime.year >=1900):
                self.textDate.setHtml(
                    '<div style="text-align: center">\
                    <span style="color: red; font-size: 10pt;">\
                    %s\
                    </span>\
                    </div>'\
                    % (dt.datetime.strftime(strTime,'%Y-%m-%d %H:%M:%S%Z')))
            # 2) gather geometry needed to pin the labels to the axes
            # 2.1) client area rect
            rect = self.sceneBoundingRect()
            leftAxis = self.getAxis('left')
            bottomAxis = self.getAxis('bottom')
            rectTextDate = self.textDate.boundingRect()
            # 2.2) left-axis width, bottom-axis height and date-label height
            leftAxisWidth = leftAxis.width()
            bottomAxisHeight = bottomAxis.height()
            rectTextDateHeight = rectTextDate.height()
            #print leftAxisWidth,bottomAxisHeight
            # 3) keep the price label on the left edge and the date label
            #    just above the bottom axis
            topLeft = self.plotItem.vb.mapSceneToView(QtCore.QPointF(rect.left()+leftAxisWidth,rect.top()))
            bottomRight = self.plotItem.vb.mapSceneToView(QtCore.QPointF(rect.width(),rect.bottom()-(bottomAxisHeight+rectTextDateHeight)))
            self.textDate.setPos(xAxis,bottomRight.y())
            self.textPrice.setPos(topLeft.x(),yAxis)
    #----------------------------------------------------------------------
    def scatterAddition(self,x,y):
        """
        Add a clickable scatter overlay for the (x, y) series.
        ``clicked`` is defined as an inner function so that it stays bound
        to the items created here; every plot that gains a scatter through
        scatterAddition keeps its own copy of ``clicked`` operating on its
        own points.
        """
        scatterPrice = pg.ScatterPlotItem(size=5, pen=pg.mkPen(None), pxMode=True, brush=pg.mkBrush(255, 255, 255, 120))
        spots = [{'pos': (x,price)} for x,price in zip(x,y)]
        scatterPrice.addPoints(spots)
        self.addItem(scatterPrice)
        self.scatterInfo = pg.TextItem("test")  ## Make all plots clickable
        self.addItem(self.scatterInfo)
        self.lastClicked = []
        def clicked(plot, points):
            # show date/price of the first clicked point next to it
            if(len(points)>0):
                mousePoint = points[0].pos()
                self.scatterInfo.setHtml(
                    '<div style="text-align: center">\
                    <span style="color: red; font-size: 10pt;">\
                    %s\
                    </span>\
                    <br>\
                    <span style="color: red; font-size: 10pt;">\
                    %.3f\
                    </span>\
                    </div>'\
                    % (dt.datetime.strftime(mpd.num2date(mousePoint.x()).astimezone(pytz.timezone('utc')),'%Y-%m-%d %H:%M:%S%Z'),
                       mousePoint.y()
                       ))
                xAxis = mousePoint.x()
                yAxis = mousePoint.y()
                self.scatterInfo.setPos(xAxis,yAxis)
            # un-highlight the previous selection, highlight the new one
            for p in self.lastClicked:
                p.resetPen()
            for p in points:
                p.setPen('b', width=2)
            self.lastClicked = points
        scatterPrice.sigClicked.connect(clicked)
# Demo: show a candlestick chart inside a dialog with the crosshair widget.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    import os,sys
    # make the histdata package importable for the demo data source
    dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata'))
    sys.path.append(dataRoot)
    import dataCenter as dataCenter
    # 1) create the window
    dialog = QtGui.QDialog()
    layout = QtGui.QHBoxLayout()
    dialog.setLayout(layout)
    dialog.setWindowTitle(('ComboView'))
    # 2) create the crosshair-enabled plot widget
    candle = pgCrossAddition()
    # 3) create the candlestick item and add it to the widget
    candleData = dataCenter.getCandleData()
    candleItem = CandlestickItem(candleData)
    candle.addItem(candleItem)
    # 4) arrange widgets
    layout.addWidget(candle)
    dialog.showMaximized()
sys.exit(app.exec_()) | mit |
jblackburne/scikit-learn | sklearn/utils/tests/test_utils.py | 47 | 9089 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
    """check_random_state: passthrough, seeding, and validation."""
    # None / the np.random module both map to the global RandomState
    assert_true(check_random_state(None) is np.random.mtrand._rand)
    assert_true(check_random_state(np.random) is np.random.mtrand._rand)

    # an int seed produces the same stream as RandomState(seed)
    seeded = np.random.RandomState(42)
    assert_true(check_random_state(42).randint(100) == seeded.randint(100))

    # an existing RandomState instance is returned unchanged
    seeded = np.random.RandomState(42)
    assert_true(check_random_state(seeded) is seeded)

    # a different seed yields a different stream
    seeded = np.random.RandomState(42)
    assert_true(check_random_state(43).randint(100) != seeded.randint(100))

    # anything else is rejected
    assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
    """@deprecated must warn (DeprecationWarning) for both functions and
    classes while leaving them fully usable."""
    # Test whether the deprecated decorator issues appropriate warnings
    # Copied almost verbatim from http://docs.python.org/library/warnings.html

    # First a function...
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")

        @deprecated()
        def ham():
            return "spam"

        spam = ham()

        assert_equal(spam, "spam")  # function must remain usable

        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("deprecated" in str(w[0].message).lower())

    # ... then a class.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")

        @deprecated("don't use this")
        class Ham(object):
            SPAM = 1

        ham = Ham()

        assert_true(hasattr(ham, "SPAM"))  # class must remain usable

        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
    """resample: degenerate input, argument validation, and upsampling."""
    # Border case not worth mentioning in doctests
    assert_true(resample() is None)

    # Check that invalid arguments yield ValueError
    assert_raises(ValueError, resample, [0], [0, 1])
    assert_raises(ValueError, resample, [0, 1], [0, 1],
                  replace=False, n_samples=3)
    assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
    # Issue:6581, n_samples can be more when replace is True (default).
    assert_equal(len(resample([1, 2], n_samples=5)), 5)
def test_safe_mask():
    """safe_mask selects the same rows for dense arrays and CSR matrices."""
    rng = check_random_state(0)
    dense = rng.rand(5, 4)
    sparse = sp.csr_matrix(dense)
    keep = [False, False, True, True, True]

    keep = safe_mask(dense, keep)
    assert_equal(dense[keep].shape[0], 3)

    keep = safe_mask(sparse, keep)
    assert_equal(sparse[keep].shape[0], 3)
def test_pinvh_simple_real():
    """pinvh of a real s.p.d. matrix is its inverse: a @ pinvh(a) == I."""
    base = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
    spd = np.dot(base, base.T)
    assert_almost_equal(np.dot(spd, pinvh(spd)), np.eye(3))
def test_pinvh_nonpositive():
    """pinvh agrees with scipy's pinv2 on a symmetric non-positive,
    singular matrix."""
    base = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
    sym = np.dot(base, base.T)
    u, s, vt = np.linalg.svd(sym)
    # flip the largest singular value's sign to make it non-positive
    s[0] *= -1
    sym = np.dot(u * s, vt)  # sym is now symmetric, non-positive and singular
    assert_almost_equal(pinv2(sym), pinvh(sym))
def test_pinvh_simple_complex():
    """pinvh also inverts a Hermitian complex matrix."""
    real_part = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
    imag_part = np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]])
    herm = real_part + 1j * imag_part
    herm = np.dot(herm, herm.conj().T)
    assert_almost_equal(np.dot(herm, pinvh(herm)), np.eye(3))
def test_arpack_eigsh_initialization():
    """Null-space computation must work with eigsh's v0 drawn from [-1, 1]."""
    # Non-regression test that shows null-space computation is better with
    # initialization of eigsh from [-1,1] instead of [0,1]
    random_state = check_random_state(42)

    A = random_state.rand(50, 50)
    A = np.dot(A.T, A)  # create s.p.d. matrix
    A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
    k = 5

    # Test if eigsh is working correctly
    # New initialization [-1,1] (as in original ARPACK)
    # Was [0,1] before, with which this test could fail
    v0 = random_state.uniform(-1,1, A.shape[0])
    w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)

    # Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
    assert_greater_equal(w[0], 0)
def test_column_or_1d():
    """column_or_1d ravels 1-d-like targets and rejects 2-d output types."""
    EXAMPLES = [
        ("binary", ["spam", "egg", "spam"]),
        ("binary", [0, 1, 0, 1]),
        ("continuous", np.arange(10) / 20.),
        ("multiclass", [1, 2, 3]),
        ("multiclass", [0, 1, 2, 2, 0]),
        ("multiclass", [[1], [2], [3]]),
        ("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
        ("multiclass-multioutput", [[1, 2, 3]]),
        ("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
        ("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
        ("multiclass-multioutput", [[1, 2, 3]]),
        ("continuous-multioutput", np.arange(30).reshape((-1, 3))),
    ]

    for target_type, target in EXAMPLES:
        if target_type in ("binary", "multiclass", "continuous"):
            # 1-d-like targets are flattened
            assert_array_equal(column_or_1d(target), np.ravel(target))
        else:
            # genuinely multi-output targets are rejected
            assert_raises(ValueError, column_or_1d, target)
def test_safe_indexing():
    """safe_indexing selects the same rows from lists and ndarrays."""
    data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    rows = np.array([1, 2])
    from_list = safe_indexing(data, rows)
    from_array = safe_indexing(np.array(data), rows)
    assert_array_equal(np.array(from_list), from_array)
    assert_array_equal(np.array(from_list), np.array(data)[rows])
def test_safe_indexing_pandas():
    """safe_indexing must work on pandas DataFrames, including read-only
    (memmapped) backing arrays."""
    try:
        import pandas as pd
    except ImportError:
        raise SkipTest("Pandas not found")
    X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    X_df = pd.DataFrame(X)
    inds = np.array([1, 2])
    X_df_indexed = safe_indexing(X_df, inds)
    X_indexed = safe_indexing(X_df, inds)
    assert_array_equal(np.array(X_df_indexed), X_indexed)
    # fun with read-only data in dataframes
    # this happens in joblib memmapping
    X.setflags(write=False)
    X_df_readonly = pd.DataFrame(X)
    with warnings.catch_warnings(record=True):
        X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
    assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
    """safe_indexing also accepts a pandas-like mock container."""
    arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    mock_df = MockDataFrame(arr)
    rows = np.array([1, 2])
    first = safe_indexing(mock_df, rows)
    second = safe_indexing(mock_df, rows)
    assert_array_equal(np.array(first), second)
def test_shuffle_on_ndim_equals_three():
    """shuffle must accept 3-d arrays and preserve their elements."""
    def as_nested_tuples(arr):  # make the inner arrays hashable
        return tuple(tuple(tuple(inner) for inner in mid) for mid in arr)

    cube = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])  # shape (2, 2, 2)
    before = set(as_nested_tuples(cube))
    shuffle(cube)  # shouldn't raise a ValueError for dim = 3
    assert_equal(set(as_nested_tuples(cube)), before)
def test_shuffle_dont_convert_to_array():
    """shuffle must pass indexable containers through without coercing
    them to float ndarrays, preserving each input's type."""
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes can let any indexable datastructure pass-through.
    a = ['a', 'b', 'c']
    b = np.array(['a', 'b', 'c'], dtype=object)
    c = [1, 2, 3]
    d = MockDataFrame(np.array([['a', 0],
                                ['b', 1],
                                ['c', 2]],
                               dtype=object))
    e = sp.csc_matrix(np.arange(6).reshape(3, 2))
    a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)

    # with random_state=0 the permutation is (2, 1, 0)
    assert_equal(a_s, ['c', 'b', 'a'])
    assert_equal(type(a_s), list)

    assert_array_equal(b_s, ['c', 'b', 'a'])
    assert_equal(b_s.dtype, object)

    assert_equal(c_s, [3, 2, 1])
    assert_equal(type(c_s), list)

    assert_array_equal(d_s, np.array([['c', 2],
                                      ['b', 1],
                                      ['a', 0]],
                                     dtype=object))
    assert_equal(type(d_s), MockDataFrame)

    assert_array_equal(e_s.toarray(), np.array([[4, 5],
                                                [2, 3],
                                                [0, 1]]))
def test_gen_even_slices():
    """gen_even_slices must cover every sample and validate n_packs."""
    # check that gen_even_slices contains all samples
    some_range = range(10)
    # fix: use a name that does not shadow the builtin ``slice``
    joined_range = list(chain(*[some_range[sl]
                                for sl in gen_even_slices(10, 3)]))
    assert_array_equal(some_range, joined_range)

    # check that passing negative n_chunks raises an error
    slices = gen_even_slices(10, -1)
    assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
                        " >=1", next, slices)
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/frame/test_analytics.py | 1 | 76158 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import timedelta, datetime
from distutils.version import LooseVersion
import sys
import nose
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange
from pandas import (compat, isnull, notnull, DataFrame, Series,
MultiIndex, date_range, Timestamp)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.formats.printing as printing
from pandas.util.testing import (assert_almost_equal,
assert_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(tm.TestCase, TestData):
_multiprocess_can_split_ = True
# ---------------------------------------------------------------------=
# Correlation and covariance
    def test_corr_pearson(self):
        """corr(method='pearson') with NaNs present in two columns."""
        tm._skip_if_no_scipy()
        # introduce missing values so NaN handling is exercised
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan

        self._check_method('pearson')
    def test_corr_kendall(self):
        """corr(method='kendall') with NaNs present in two columns."""
        tm._skip_if_no_scipy()
        # introduce missing values so NaN handling is exercised
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan

        self._check_method('kendall')
    def test_corr_spearman(self):
        """corr(method='spearman') with NaNs present in two columns."""
        tm._skip_if_no_scipy()
        # introduce missing values so NaN handling is exercised
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan

        self._check_method('spearman')
    def _check_method(self, method='pearson', check_minp=False):
        """Shared corr() checker.

        With check_minp=False, the full corr matrix entry (A, C) is compared
        against Series.corr.  With check_minp=True only min_periods masking
        is checked; NOTE(review): ``method`` is ignored in that branch —
        presumably intentional, confirm against upstream.
        """
        if not check_minp:
            correls = self.frame.corr(method=method)
            exp = self.frame['A'].corr(self.frame['C'], method=method)
            assert_almost_equal(correls['A']['C'], exp)
        else:
            result = self.frame.corr(min_periods=len(self.frame) - 8)
            expected = self.frame.corr()
            # too few overlapping observations -> masked to NaN
            expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan
            assert_frame_equal(result, expected)
    def test_corr_non_numeric(self):
        """corr() silently drops non-numeric columns."""
        tm._skip_if_no_scipy()
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan

        # exclude non-numeric types
        result = self.mixed_frame.corr()
        expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()
        assert_frame_equal(result, expected)
    def test_corr_nooverlap(self):
        """Columns with no common valid rows correlate as NaN."""
        tm._skip_if_no_scipy()

        # nothing in common
        for meth in ['pearson', 'kendall', 'spearman']:
            df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
                            'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
                            'C': [np.nan, np.nan, np.nan, np.nan,
                                  np.nan, np.nan]})
            rs = df.corr(meth)
            self.assertTrue(isnull(rs.ix['A', 'B']))
            self.assertTrue(isnull(rs.ix['B', 'A']))
            self.assertEqual(rs.ix['A', 'A'], 1)
            self.assertEqual(rs.ix['B', 'B'], 1)
            # an all-NaN column correlates with nothing, not even itself
            self.assertTrue(isnull(rs.ix['C', 'C']))
    def test_corr_constant(self):
        """corr() of constant (zero-variance) columns is all NaN."""
        tm._skip_if_no_scipy()

        # constant --> all NA
        for meth in ['pearson', 'spearman']:
            df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
                            'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
            rs = df.corr(meth)
            self.assertTrue(isnull(rs.values).all())
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
# it works!
df3.cov()
df3.corr()
def test_corr_int_and_boolean(self):
tm._skip_if_no_scipy()
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
assert_frame_equal(df.corr(meth), expected)
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
self.assertTrue(isnull(result.values).all())
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.ix['A', 'B'] = np.nan
expected.ix['B', 'A'] = np.nan
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()
assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
assert_frame_equal(result, expected)
df.ix[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
assert_frame_equal(result, expected)
    def test_corrwith(self):
        """corrwith matches pairwise Series.corr along both axes and honors drop."""
        a = self.tsframe
        noise = Series(randn(len(a)), index=a.index)
        b = self.tsframe.add(noise, axis=0)
        # make sure order does not matter
        b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
        del b['B']
        colcorr = a.corrwith(b, axis=0)
        assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
        rowcorr = a.corrwith(b, axis=1)
        assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
        # drop=True removes labels missing from the other frame
        dropped = a.corrwith(b, axis=0, drop=True)
        assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
        self.assertNotIn('B', dropped)
        dropped = a.corrwith(b, axis=1, drop=True)
        self.assertNotIn(a.index[-1], dropped.index)
        # non time-series data
        index = ['a', 'b', 'c', 'd', 'e']
        columns = ['one', 'two', 'three', 'four']
        df1 = DataFrame(randn(5, 4), index=index, columns=columns)
        df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
        correls = df1.corrwith(df2, axis=1)
        for row in index[:4]:
            assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))
    def test_corrwith_with_objects(self):
        """corrwith ignores object-dtype columns on either axis."""
        df1 = tm.makeTimeDataFrame()
        df2 = tm.makeTimeDataFrame()
        cols = ['A', 'B', 'C', 'D']
        df1['obj'] = 'foo'
        df2['obj'] = 'bar'
        result = df1.corrwith(df2)
        expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])
        assert_series_equal(result, expected)
        result = df1.corrwith(df2, axis=1)
        expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)
        assert_series_equal(result, expected)
    def test_corrwith_series(self):
        """corrwith against a Series equals applying Series.corr per column."""
        result = self.tsframe.corrwith(self.tsframe['A'])
        expected = self.tsframe.apply(self.tsframe['A'].corr)
        assert_series_equal(result, expected)
    def test_corrwith_matches_corrcoef(self):
        """corrwith agrees with numpy.corrcoef on a nonlinear relationship."""
        df1 = DataFrame(np.arange(10000), columns=['a'])
        df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
        c1 = df1.corrwith(df2)['a']
        c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
        assert_almost_equal(c1, c2)
        # x vs x**2 is correlated but not perfectly linear
        self.assertTrue(c1 < 1)
    def test_bool_describe_in_mixed_frame(self):
        """describe() keeps int columns by default; bool needs include=['bool']."""
        df = DataFrame({
            'string_data': ['a', 'b', 'c', 'd', 'e'],
            'bool_data': [True, True, False, False, False],
            'int_data': [10, 20, 30, 40, 50],
        })
        # Integer data are included in .describe() output,
        # Boolean and string data are not.
        result = df.describe()
        expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
                                           10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        assert_frame_equal(result, expected)
        # Top value is a boolean value that is False
        result = df.describe(include=['bool'])
        expected = DataFrame({'bool_data': [5, 2, False, 3]},
                             index=['count', 'unique', 'top', 'freq'])
        assert_frame_equal(result, expected)
    def test_describe_categorical_columns(self):
        """describe() preserves a CategoricalIndex on the columns (GH 11558)."""
        # GH 11558
        columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
                                      ordered=True, name='XXX')
        df = DataFrame({'int1': [10, 20, 30, 40, 50],
                        'int2': [10, 20, 30, 40, 50],
                        'obj': ['A', 0, None, 'X', 1]},
                       columns=columns)
        result = df.describe()
        # the object column is dropped but must stay in the categories
        exp_columns = pd.CategoricalIndex(['int1', 'int2'],
                                          categories=['int1', 'int2', 'obj'],
                                          ordered=True, name='XXX')
        expected = DataFrame({'int1': [5, 30, df.int1.std(),
                                       10, 20, 30, 40, 50],
                              'int2': [5, 30, df.int2.std(),
                                       10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'],
                             columns=exp_columns)
        tm.assert_frame_equal(result, expected)
        tm.assert_categorical_equal(result.columns.values,
                                    expected.columns.values)
    def test_describe_datetime_columns(self):
        """describe() preserves a DatetimeIndex (freq, tz, name) on the columns."""
        columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
                                   freq='MS', tz='US/Eastern', name='XXX')
        df = DataFrame({0: [10, 20, 30, 40, 50],
                        1: [10, 20, 30, 40, 50],
                        2: ['A', 0, None, 'X', 1]})
        df.columns = columns
        result = df.describe()
        # the object column is dropped from the output
        exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
                                       freq='MS', tz='US/Eastern', name='XXX')
        expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
                                  10, 20, 30, 40, 50],
                              1: [5, 30, df.iloc[:, 1].std(),
                                  10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        expected.columns = exp_columns
        tm.assert_frame_equal(result, expected)
        self.assertEqual(result.columns.freq, 'MS')
        self.assertEqual(result.columns.tz, expected.columns.tz)
    def test_reduce_mixed_frame(self):
        """sum over a mixed bool/int/str frame reduces each dtype sensibly (GH 6806)."""
        # GH 6806
        df = DataFrame({
            'bool_data': [True, True, False, False, False],
            'int_data': [10, 20, 30, 40, 50],
            'string_data': ['a', 'b', 'c', 'd', 'e'],
        })
        df.reindex(columns=['bool_data', 'int_data', 'string_data'])
        test = df.sum(axis=0)
        # bools sum to a count, strings concatenate
        assert_almost_equal(test.values, [2, 150, 'abcde'])
        assert_series_equal(test, df.T.sum(axis=1))
    def test_count(self):
        """DataFrame.count against notnull().sum(), plus empty-frame corners."""
        f = lambda s: notnull(s).sum()
        self._check_stat_op('count', f,
                            has_skipna=False,
                            has_numeric_only=True,
                            check_dtype=False,
                            check_dates=True)
        # corner case
        frame = DataFrame()
        ct1 = frame.count(1)
        tm.assertIsInstance(ct1, Series)
        ct2 = frame.count(0)
        tm.assertIsInstance(ct2, Series)
        # GH #423
        df = DataFrame(index=lrange(10))
        result = df.count(1)
        expected = Series(0, index=df.index)
        assert_series_equal(result, expected)
        df = DataFrame(columns=lrange(10))
        result = df.count(0)
        expected = Series(0, index=df.columns)
        assert_series_equal(result, expected)
        df = DataFrame()
        result = df.count()
        expected = Series(0, index=[])
        assert_series_equal(result, expected)
    def test_sum(self):
        """DataFrame.sum against numpy.sum, including upcasting float32 input."""
        self._check_stat_op('sum', np.sum, has_numeric_only=True)
        # mixed types (with upcasting happening)
        self._check_stat_op('sum', np.sum,
                            frame=self.mixed_float.astype('float32'),
                            has_numeric_only=True, check_dtype=False,
                            check_less_precise=True)
    def test_stat_operators_attempt_obj_array(self):
        """Reductions on object-dtype frames match the float64-cast result."""
        data = {
            'a': [-0.00049987540199591344, -0.0016467257772919831,
                  0.00067695870775883013],
            'b': [-0, -0, 0.0],
            'c': [0.00031111847529610595, 0.0014902627951905339,
                  -0.00094099200035979691]
        }
        df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
                        dtype='O')
        methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
        # GH #676
        df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
                         2: [np.nan, 4]}, dtype=object)
        for df in [df1, df2]:
            for meth in methods:
                # sanity: the frame really is object dtype
                self.assertEqual(df.values.dtype, np.object_)
                result = getattr(df, meth)(1)
                expected = getattr(df.astype('f8'), meth)(1)
                if not tm._incompat_bottleneck_version(meth):
                    assert_series_equal(result, expected)
    def test_mean(self):
        """DataFrame.mean against numpy.mean."""
        self._check_stat_op('mean', np.mean, check_dates=True)
    def test_product(self):
        """DataFrame.product against numpy.prod."""
        self._check_stat_op('product', np.prod)
    def test_median(self):
        """DataFrame.median against numpy.median (NaN-propagating wrapper)."""
        def wrapper(x):
            if isnull(x).any():
                return np.nan
            return np.median(x)
        self._check_stat_op('median', wrapper, check_dates=True)
    def test_min(self):
        """DataFrame.min against numpy.min, float and int frames."""
        self._check_stat_op('min', np.min, check_dates=True)
        self._check_stat_op('min', np.min, frame=self.intframe)
    def test_cummin(self):
        """cummin along both axes equals applying Series.cummin, with NaNs."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan
        # axis = 0
        cummin = self.tsframe.cummin()
        expected = self.tsframe.apply(Series.cummin)
        assert_frame_equal(cummin, expected)
        # axis = 1
        cummin = self.tsframe.cummin(axis=1)
        expected = self.tsframe.apply(Series.cummin, axis=1)
        assert_frame_equal(cummin, expected)
        # it works
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummin()  # noqa
        # fix issue
        cummin_xs = self.tsframe.cummin(axis=1)
        self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))
    def test_cummax(self):
        """cummax along both axes equals applying Series.cummax, with NaNs."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan
        # axis = 0
        cummax = self.tsframe.cummax()
        expected = self.tsframe.apply(Series.cummax)
        assert_frame_equal(cummax, expected)
        # axis = 1
        cummax = self.tsframe.cummax(axis=1)
        expected = self.tsframe.apply(Series.cummax, axis=1)
        assert_frame_equal(cummax, expected)
        # it works
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummax()  # noqa
        # fix issue
        cummax_xs = self.tsframe.cummax(axis=1)
        self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))
    def test_max(self):
        """DataFrame.max against numpy.max, float and int frames."""
        self._check_stat_op('max', np.max, check_dates=True)
        self._check_stat_op('max', np.max, frame=self.intframe)
    def test_mad(self):
        """DataFrame.mad against mean absolute deviation computed by hand."""
        f = lambda x: np.abs(x - x.mean()).mean()
        self._check_stat_op('mad', f)
    def test_var_std(self):
        """var/std against numpy (ddof=1), custom ddof, and non-negativity."""
        alt = lambda x: np.var(x, ddof=1)
        self._check_stat_op('var', alt)
        alt = lambda x: np.std(x, ddof=1)
        self._check_stat_op('std', alt)
        result = self.tsframe.std(ddof=4)
        expected = self.tsframe.apply(lambda x: x.std(ddof=4))
        assert_almost_equal(result, expected)
        result = self.tsframe.var(ddof=4)
        expected = self.tsframe.apply(lambda x: x.var(ddof=4))
        assert_almost_equal(result, expected)
        # variance must never be negative, with or without bottleneck
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nanvar(arr, axis=0)
        self.assertFalse((result < 0).any())
        if nanops._USE_BOTTLENECK:
            nanops._USE_BOTTLENECK = False
            result = nanops.nanvar(arr, axis=0)
            self.assertFalse((result < 0).any())
            nanops._USE_BOTTLENECK = True
    def test_numeric_only_flag(self):
        """numeric_only=True drops non-numeric entries; False raises (GH #9201)."""
        # GH #9201
        methods = ['sem', 'var', 'std']
        df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a number in str format
        df1.ix[0, 'foo'] = '100'
        df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a non-number str
        df2.ix[0, 'foo'] = 'a'
        for meth in methods:
            result = getattr(df1, meth)(axis=1, numeric_only=True)
            expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
            assert_series_equal(expected, result)
            result = getattr(df2, meth)(axis=1, numeric_only=True)
            expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
            assert_series_equal(expected, result)
            # df1 has all numbers, df2 has a letter inside
            self.assertRaises(TypeError, lambda: getattr(df1, meth)
                              (axis=1, numeric_only=False))
            self.assertRaises(TypeError, lambda: getattr(df2, meth)
                              (axis=1, numeric_only=False))
    def test_cumsum(self):
        """cumsum along both axes equals applying Series.cumsum, with NaNs."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan
        # axis = 0
        cumsum = self.tsframe.cumsum()
        expected = self.tsframe.apply(Series.cumsum)
        assert_frame_equal(cumsum, expected)
        # axis = 1
        cumsum = self.tsframe.cumsum(axis=1)
        expected = self.tsframe.apply(Series.cumsum, axis=1)
        assert_frame_equal(cumsum, expected)
        # works
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cumsum()  # noqa
        # fix issue
        cumsum_xs = self.tsframe.cumsum(axis=1)
        self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
    def test_cumprod(self):
        """cumprod along both axes equals applying Series.cumprod; int dtypes work."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan
        # axis = 0
        cumprod = self.tsframe.cumprod()
        expected = self.tsframe.apply(Series.cumprod)
        assert_frame_equal(cumprod, expected)
        # axis = 1
        cumprod = self.tsframe.cumprod(axis=1)
        expected = self.tsframe.apply(Series.cumprod, axis=1)
        assert_frame_equal(cumprod, expected)
        # fix issue
        cumprod_xs = self.tsframe.cumprod(axis=1)
        self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))
        # ints
        df = self.tsframe.fillna(0).astype(int)
        df.cumprod(0)
        df.cumprod(1)
        # ints32
        df = self.tsframe.fillna(0).astype(np.int32)
        df.cumprod(0)
        df.cumprod(1)
    def test_rank(self):
        """rank along both axes matches scipy.stats.rankdata (NaNs stay NaN)."""
        tm._skip_if_no_scipy()
        from scipy.stats import rankdata
        self.frame['A'][::2] = np.nan
        self.frame['B'][::3] = np.nan
        self.frame['C'][::4] = np.nan
        self.frame['D'][::5] = np.nan
        ranks0 = self.frame.rank()
        ranks1 = self.frame.rank(1)
        mask = np.isnan(self.frame.values)
        # fill NaNs with inf so rankdata can run, then re-mask them
        fvals = self.frame.fillna(np.inf).values
        exp0 = np.apply_along_axis(rankdata, 0, fvals)
        exp0[mask] = np.nan
        exp1 = np.apply_along_axis(rankdata, 1, fvals)
        exp1[mask] = np.nan
        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)
        # integers
        df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
        result = df.rank()
        exp = df.astype(float).rank()
        assert_frame_equal(result, exp)
        result = df.rank(1)
        exp = df.astype(float).rank(1)
        assert_frame_equal(result, exp)
    def test_rank2(self):
        """rank: pct=True, numeric_only=False (strings/datetimes), tiny floats."""
        df = DataFrame([[1, 3, 2], [1, 2, 3]])
        expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
        result = df.rank(1, pct=True)
        assert_frame_equal(result, expected)
        df = DataFrame([[1, 3, 2], [1, 2, 3]])
        expected = df.rank(0) / 2.0
        result = df.rank(0, pct=True)
        assert_frame_equal(result, expected)
        # strings rank lexicographically when numeric_only=False
        df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
        expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
        result = df.rank(1, numeric_only=False)
        assert_frame_equal(result, expected)
        expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
        result = df.rank(0, numeric_only=False)
        assert_frame_equal(result, expected)
        df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
        expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
        result = df.rank(1, numeric_only=False)
        assert_frame_equal(result, expected)
        expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
        result = df.rank(0, numeric_only=False)
        assert_frame_equal(result, expected)
        # f7u12, this does not work without extensive workaround
        data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
                [datetime(2000, 1, 2), datetime(2000, 1, 3),
                 datetime(2000, 1, 1)]]
        df = DataFrame(data)
        # check the rank
        expected = DataFrame([[2., nan, 1.],
                              [2., 3., 1.]])
        result = df.rank(1, numeric_only=False, ascending=True)
        assert_frame_equal(result, expected)
        expected = DataFrame([[1., nan, 2.],
                              [2., 1., 3.]])
        result = df.rank(1, numeric_only=False, ascending=False)
        assert_frame_equal(result, expected)
        # mixed-type frames
        self.mixed_frame['datetime'] = datetime.now()
        self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
        result = self.mixed_frame.rank(1)
        expected = self.mixed_frame.rank(1, numeric_only=True)
        assert_frame_equal(result, expected)
        # ties among values that differ only beyond float precision
        df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
                              1e60, 1e80, 1e-30]})
        exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
        assert_frame_equal(df.rank(), exp)
    def test_rank_na_option(self):
        """rank na_option='top'/'bottom' x ascending, against scipy rankdata."""
        tm._skip_if_no_scipy()
        from scipy.stats import rankdata
        self.frame['A'][::2] = np.nan
        self.frame['B'][::3] = np.nan
        self.frame['C'][::4] = np.nan
        self.frame['D'][::5] = np.nan
        # bottom: NaNs rank last -> emulate by filling with +inf
        ranks0 = self.frame.rank(na_option='bottom')
        ranks1 = self.frame.rank(1, na_option='bottom')
        fvals = self.frame.fillna(np.inf).values
        exp0 = np.apply_along_axis(rankdata, 0, fvals)
        exp1 = np.apply_along_axis(rankdata, 1, fvals)
        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)
        # top: NaNs rank first -> emulate by filling below the minimum
        ranks0 = self.frame.rank(na_option='top')
        ranks1 = self.frame.rank(1, na_option='top')
        fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
        fval1 = self.frame.T
        fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
        fval1 = fval1.fillna(np.inf).values
        exp0 = np.apply_along_axis(rankdata, 0, fval0)
        exp1 = np.apply_along_axis(rankdata, 1, fval1)
        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)
        # descending
        # bottom
        ranks0 = self.frame.rank(na_option='top', ascending=False)
        ranks1 = self.frame.rank(1, na_option='top', ascending=False)
        fvals = self.frame.fillna(np.inf).values
        exp0 = np.apply_along_axis(rankdata, 0, -fvals)
        exp1 = np.apply_along_axis(rankdata, 1, -fvals)
        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)
        # descending
        # top
        ranks0 = self.frame.rank(na_option='bottom', ascending=False)
        ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
        fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
        fval1 = self.frame.T
        fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
        fval1 = fval1.fillna(np.inf).values
        exp0 = np.apply_along_axis(rankdata, 0, -fval0)
        exp1 = np.apply_along_axis(rankdata, 1, -fval1)
        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = pd.DataFrame([[2, 1], [4, 3]])
assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
    def test_sem(self):
        """sem against std/sqrt(n), custom ddof, and non-negativity."""
        alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
        self._check_stat_op('sem', alt)
        result = self.tsframe.sem(ddof=4)
        expected = self.tsframe.apply(
            lambda x: x.std(ddof=4) / np.sqrt(len(x)))
        assert_almost_equal(result, expected)
        # sem must never be negative, with or without bottleneck
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nansem(arr, axis=0)
        self.assertFalse((result < 0).any())
        if nanops._USE_BOTTLENECK:
            nanops._USE_BOTTLENECK = False
            result = nanops.nansem(arr, axis=0)
            self.assertFalse((result < 0).any())
            nanops._USE_BOTTLENECK = True
def test_sort_invalid_kwargs(self):
df = DataFrame([1, 2, 3], columns=['a'])
msg = "sort\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, df.sort, foo=2)
# Neither of these should raise an error because they
# are explicit keyword arguments in the signature and
# hence should not be swallowed by the kwargs parameter
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.sort(axis=1)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.sort(kind='mergesort')
msg = "the 'order' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, df.sort, order=2)
    def test_skew(self):
        """DataFrame.skew against scipy.stats.skew (NaN below 3 observations)."""
        tm._skip_if_no_scipy()
        from scipy.stats import skew
        def alt(x):
            # skew is undefined for fewer than 3 observations
            if len(x) < 3:
                return np.nan
            return skew(x, bias=False)
        self._check_stat_op('skew', alt)
    def test_kurt(self):
        """DataFrame.kurt against scipy.stats.kurtosis, plus level= on MultiIndex."""
        tm._skip_if_no_scipy()
        from scipy.stats import kurtosis
        def alt(x):
            # kurtosis is undefined for fewer than 4 observations
            if len(x) < 4:
                return np.nan
            return kurtosis(x, bias=False)
        self._check_stat_op('kurt', alt)
        index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 0, 0, 0, 0, 0],
                                   [0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
        df = DataFrame(np.random.randn(6, 3), index=index)
        kurt = df.kurt()
        kurt2 = df.kurt(level=0).xs('bar')
        assert_series_equal(kurt, kurt2, check_names=False)
        self.assertTrue(kurt.name is None)
        self.assertEqual(kurt2.name, 'bar')
    def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
                       has_numeric_only=False, check_dtype=True,
                       check_dates=False, check_less_precise=False):
        """Generic checker for a reduction method against a reference function.

        Parameters
        ----------
        name : str
            Method name to test (e.g. 'sum', 'mean').
        alternative : callable
            Reference implementation applied per column/row.
        frame : DataFrame, optional
            Frame to test on; defaults to self.frame (NAs are injected).
        has_skipna : bool
            Whether the method accepts skipna (tests both paths if so).
        has_numeric_only : bool
            Whether the method accepts numeric_only.
        check_dtype, check_dates, check_less_precise : bool
            Comparison/coverage flags forwarded to the assertions.
        """
        if frame is None:
            frame = self.frame
        # set some NAs
        frame.ix[5:10] = np.nan
        frame.ix[15:20, -2:] = np.nan
        f = getattr(frame, name)
        if check_dates:
            # the method must reduce a datetime column to a Series
            df = DataFrame({'b': date_range('1/1/2001', periods=2)})
            _f = getattr(df, name)
            result = _f()
            self.assertIsInstance(result, Series)
            df['a'] = lrange(len(df))
            result = getattr(df, name)()
            self.assertIsInstance(result, Series)
            self.assertTrue(len(result))
        if has_skipna:
            def skipna_wrapper(x):
                # reference behavior with skipna=True: drop NAs first
                nona = x.dropna()
                if len(nona) == 0:
                    return np.nan
                return alternative(nona)
            def wrapper(x):
                # reference behavior with skipna=False: NAs propagate
                return alternative(x.values)
            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            assert_series_equal(result0, frame.apply(wrapper),
                                check_dtype=check_dtype,
                                check_less_precise=check_less_precise)
            # HACK: win32
            assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                check_dtype=False,
                                check_less_precise=check_less_precise)
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        result0 = f(axis=0)
        result1 = f(axis=1)
        assert_series_equal(result0, frame.apply(skipna_wrapper),
                            check_dtype=check_dtype,
                            check_less_precise=check_less_precise)
        if not tm._incompat_bottleneck_version(name):
            assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                                check_dtype=False,
                                check_less_precise=check_less_precise)
        # check dtypes
        if check_dtype:
            lcd_dtype = frame.values.dtype
            self.assertEqual(lcd_dtype, result0.dtype)
            self.assertEqual(lcd_dtype, result1.dtype)
        # result = f(axis=1)
        # comp = frame.apply(alternative, axis=1).reindex(result.index)
        # assert_series_equal(result, comp)
        # bad axis
        assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)
        # make sure works on mixed-type frame
        getattr(self.mixed_frame, name)(axis=0)
        getattr(self.mixed_frame, name)(axis=1)
        if has_numeric_only:
            getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
            getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
            getattr(self.frame, name)(axis=0, numeric_only=False)
            getattr(self.frame, name)(axis=1, numeric_only=False)
        # all NA case
        if has_skipna:
            all_na = self.frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if not tm._incompat_bottleneck_version(name):
                self.assertTrue(np.isnan(r0).all())
                self.assertTrue(np.isnan(r1).all())
    def test_mode(self):
        """DataFrame.mode: multi-modal columns, empty modes, sorted output,
        and heterogeneous (int/datetime/str) dtypes."""
        df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
                           "B": [10, 10, 10, np.nan, 3, 4],
                           "C": [8, 8, 8, 9, 9, 9],
                           "D": np.arange(6, dtype='int64'),
                           "E": [8, 8, 1, 1, 3, 3]})
        assert_frame_equal(df[["A"]].mode(),
                           pd.DataFrame({"A": [12]}))
        # all values unique -> empty mode
        expected = pd.Series([], dtype='int64', name='D').to_frame()
        assert_frame_equal(df[["D"]].mode(), expected)
        expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
        assert_frame_equal(df[["E"]].mode(), expected)
        assert_frame_equal(df[["A", "B"]].mode(),
                           pd.DataFrame({"A": [12], "B": [10.]}))
        # columns with fewer modes are padded with NaN
        assert_frame_equal(df.mode(),
                           pd.DataFrame({"A": [12, np.nan, np.nan],
                                         "B": [10, np.nan, np.nan],
                                         "C": [8, 9, np.nan],
                                         "D": [np.nan, np.nan, np.nan],
                                         "E": [1, 3, 8]}))
        # outputs in sorted order
        df["C"] = list(reversed(df["C"]))
        printing.pprint_thing(df["C"])
        printing.pprint_thing(df["C"].mode())
        a, b = (df[["A", "B", "C"]].mode(),
                pd.DataFrame({"A": [12, np.nan],
                              "B": [10, np.nan],
                              "C": [8, 9]}))
        printing.pprint_thing(a)
        printing.pprint_thing(b)
        assert_frame_equal(a, b)
        # should work with heterogeneous types
        df = pd.DataFrame({"A": np.arange(6, dtype='int64'),
                           "B": pd.date_range('2011', periods=6),
                           "C": list('abcdef')})
        exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
                            "B": pd.Series([], dtype=df["B"].dtype),
                            "C": pd.Series([], dtype=df["C"].dtype)})
        assert_frame_equal(df.mode(), exp)
        # and also when not empty
        df.loc[1, "A"] = 0
        df.loc[4, "B"] = df.loc[3, "B"]
        df.loc[5, "C"] = 'e'
        exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
                            "B": pd.Series([df.loc[3, "B"]],
                                           dtype=df["B"].dtype),
                            "C": pd.Series(['e'], dtype=df["C"].dtype)})
        assert_frame_equal(df.mode(), exp)
    def test_operators_timedelta64(self):
        """min/max/abs over timedelta64 columns, including mixed-dtype frames."""
        from datetime import timedelta
        df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
                            B=date_range('2012-1-2', periods=3, freq='D'),
                            C=Timestamp('20120101') -
                            timedelta(minutes=5, seconds=5)))
        diffs = DataFrame(dict(A=df['A'] - df['C'],
                               B=df['A'] - df['B']))
        # min
        result = diffs.min()
        self.assertEqual(result[0], diffs.ix[0, 'A'])
        self.assertEqual(result[1], diffs.ix[0, 'B'])
        result = diffs.min(axis=1)
        self.assertTrue((result == diffs.ix[0, 'B']).all())
        # max
        result = diffs.max()
        self.assertEqual(result[0], diffs.ix[2, 'A'])
        self.assertEqual(result[1], diffs.ix[2, 'B'])
        result = diffs.max(axis=1)
        self.assertTrue((result == diffs['A']).all())
        # abs
        result = diffs.abs()
        result2 = abs(diffs)
        expected = DataFrame(dict(A=df['A'] - df['C'],
                                  B=df['B'] - df['A']))
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)
        # mixed frame
        mixed = diffs.copy()
        mixed['C'] = 'foo'
        mixed['D'] = 1
        mixed['E'] = 1.
        mixed['F'] = Timestamp('20130101')
        # results in an object array
        from pandas.tseries.timedeltas import (
            _coerce_scalar_to_timedelta_type as _coerce)
        result = mixed.min()
        expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
                           _coerce(timedelta(days=-1)),
                           'foo', 1, 1.0,
                           Timestamp('20130101')],
                          index=mixed.columns)
        assert_series_equal(result, expected)
        # excludes numeric
        result = mixed.min(axis=1)
        expected = Series([1, 1, 1.], index=[0, 1, 2])
        assert_series_equal(result, expected)
        # works when only those columns are selected
        result = mixed[['A', 'B']].min(1)
        expected = Series([timedelta(days=-1)] * 3)
        assert_series_equal(result, expected)
        result = mixed[['A', 'B']].min()
        expected = Series([timedelta(seconds=5 * 60 + 5),
                           timedelta(days=-1)], index=['A', 'B'])
        assert_series_equal(result, expected)
        # GH 3106
        df = DataFrame({'time': date_range('20130102', periods=5),
                        'time2': date_range('20130105', periods=5)})
        df['off1'] = df['time2'] - df['time']
        self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')
        df['off2'] = df['time'] - df['time2']
        df._consolidate_inplace()
        # consolidation must not corrupt the timedelta dtype
        self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
        self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
tm.assertIsInstance(axis0, Series)
tm.assertIsInstance(axis1, Series)
self.assertEqual(len(axis0), 0)
self.assertEqual(len(axis1), 0)
    def test_sum_object(self):
        """Summing a frame of timedeltas (object math) does not raise."""
        values = self.frame.values.astype(int)
        frame = DataFrame(values, index=self.frame.index,
                          columns=self.frame.columns)
        deltas = frame * timedelta(1)
        # smoke test: must not raise
        deltas.sum()
    def test_sum_bool(self):
        """Summing a boolean frame works along both axes (regression test)."""
        # ensure this works, bug report
        bools = np.isnan(self.frame)
        bools.sum(1)
        bools.sum(0)
    def test_mean_corner(self):
        """mean on mixed frames: numeric_only index agreement, bool columns."""
        # unit test when have object data
        the_mean = self.mixed_frame.mean(axis=0)
        the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
        self.assertTrue(the_sum.index.equals(the_mean.index))
        self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))
        # xs sum mixed type, just want to know it works...
        the_mean = self.mixed_frame.mean(axis=1)
        the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
        self.assertTrue(the_sum.index.equals(the_mean.index))
        # take mean of boolean column
        self.frame['bool'] = self.frame['A'] > 0
        means = self.frame.mean(0)
        self.assertEqual(means['bool'], self.frame['bool'].values.mean())
    def test_stats_mixed_type(self):
        """Row-wise reductions on a mixed-dtype frame must not raise."""
        # don't blow up
        self.mixed_frame.std(1)
        self.mixed_frame.var(1)
        self.mixed_frame.mean(1)
        self.mixed_frame.skew(1)
    def test_median_corner(self):
        """median on an integer frame (delegates to _check_stat_op)."""
        def wrapper(x):
            # reference: NaN-propagating median
            if isnull(x).any():
                return np.nan
            return np.median(x)
        self._check_stat_op('median', wrapper, frame=self.intframe,
                            check_dtype=False, check_dates=True)
# Miscellanea
    def test_count_objects(self):
        """count is consistent across two identically-built mixed frames."""
        dm = DataFrame(self.mixed_frame._series)
        df = DataFrame(self.mixed_frame._series)
        assert_series_equal(dm.count(), df.count())
        assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isnull(df)
self.assertEqual(bools.sum(axis=1)[0], 10)
# Index of max / min
    def test_idxmin(self):
        """idxmin matches applying Series.idxmin for all axis/skipna combos."""
        frame = self.frame
        frame.ix[5:10] = np.nan
        frame.ix[15:20, -2:] = np.nan
        for skipna in [True, False]:
            for axis in [0, 1]:
                for df in [frame, self.intframe]:
                    result = df.idxmin(axis=axis, skipna=skipna)
                    expected = df.apply(
                        Series.idxmin, axis=axis, skipna=skipna)
                    assert_series_equal(result, expected)
        # invalid axis
        self.assertRaises(ValueError, frame.idxmin, axis=2)
    def test_idxmax(self):
        """idxmax matches applying Series.idxmax for all axis/skipna combos."""
        frame = self.frame
        frame.ix[5:10] = np.nan
        frame.ix[15:20, -2:] = np.nan
        for skipna in [True, False]:
            for axis in [0, 1]:
                for df in [frame, self.intframe]:
                    result = df.idxmax(axis=axis, skipna=skipna)
                    expected = df.apply(
                        Series.idxmax, axis=axis, skipna=skipna)
                    assert_series_equal(result, expected)
        # invalid axis
        self.assertRaises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
    def test_any_all(self):
        """any/all against numpy equivalents, plus bool_only smoke tests."""
        self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
        self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
        df = DataFrame(randn(10, 4)) > 0
        df.any(1)
        df.all(1)
        df.any(1, bool_only=True)
        df.all(1, bool_only=True)
        # skip pathological failure cases
        # class CantNonzero(object):
        #     def __nonzero__(self):
        #         raise ValueError
        # df[4] = CantNonzero()
        # it works!
        # df.any(1)
        # df.all(1)
        # df.any(1, bool_only=True)
        # df.all(1, bool_only=True)
        # df[4][4] = np.nan
        # df.any(1)
        # df.all(1)
        # df.any(1, bool_only=True)
        # df.all(1, bool_only=True)
    def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
                       has_bool_only=False):
        """Generic checker for a boolean reduction ('any'/'all') against a
        reference function.

        Parameters
        ----------
        name : str
            Method name to test.
        alternative : callable
            Reference implementation applied per column/row.
        frame : DataFrame, optional
            Frame to test on; defaults to (self.frame > 0) with NAs injected.
        has_skipna : bool
            Whether the method accepts skipna (tests both paths if so).
        has_bool_only : bool
            Whether the method accepts bool_only.
        """
        if frame is None:
            frame = self.frame > 0
            # set some NAs
            frame = DataFrame(frame.values.astype(object), frame.index,
                              frame.columns)
            frame.ix[5:10] = np.nan
            frame.ix[15:20, -2:] = np.nan
        f = getattr(frame, name)
        if has_skipna:
            def skipna_wrapper(x):
                # reference behavior with skipna=True: drop NAs first
                nona = x.dropna().values
                return alternative(nona)
            def wrapper(x):
                # reference behavior with skipna=False: NAs propagate
                return alternative(x.values)
            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            assert_series_equal(result0, frame.apply(wrapper))
            assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                check_dtype=False)  # HACK: win32
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        result0 = f(axis=0)
        result1 = f(axis=1)
        assert_series_equal(result0, frame.apply(skipna_wrapper))
        assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                            check_dtype=False)
        # result = f(axis=1)
        # comp = frame.apply(alternative, axis=1).reindex(result.index)
        # assert_series_equal(result, comp)
        # bad axis
        self.assertRaises(ValueError, f, axis=2)
        # make sure works on mixed-type frame
        mixed = self.mixed_frame
        mixed['_bool_'] = np.random.randn(len(mixed)) > 0
        getattr(mixed, name)(axis=0)
        getattr(mixed, name)(axis=1)
        class NonzeroFail:
            # NOTE(review): __nonzero__ is the Python 2 truth protocol;
            # Python 3 uses __bool__ — confirm this still trips the intended
            # code path on the supported interpreters.
            def __nonzero__(self):
                raise ValueError
        mixed['_nonzero_fail_'] = NonzeroFail()
        if has_bool_only:
            getattr(mixed, name)(axis=0, bool_only=True)
            getattr(mixed, name)(axis=1, bool_only=True)
            getattr(frame, name)(axis=0, bool_only=False)
            getattr(frame, name)(axis=1, bool_only=False)
        # all NA case
        if has_skipna:
            all_na = frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if name == 'any':
                self.assertFalse(r0.any())
                self.assertFalse(r1.any())
            else:
                self.assertTrue(r0.all())
                self.assertTrue(r1.all())
# ----------------------------------------------------------------------
# Top / bottom
def test_nlargest(self):
# GH10393
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nlargest(5, 'a')
expected = df.sort_values('a', ascending=False).head(5)
assert_frame_equal(result, expected)
def test_nlargest_multiple_columns(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nlargest(5, ['a', 'b'])
expected = df.sort_values(['a', 'b'], ascending=False).head(5)
assert_frame_equal(result, expected)
def test_nsmallest(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nsmallest(5, 'a')
expected = df.sort_values('a').head(5)
assert_frame_equal(result, expected)
def test_nsmallest_multiple_columns(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nsmallest(5, ['a', 'c'])
expected = df.sort_values(['a', 'c']).head(5)
assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
assert_frame_equal(result, expected)
def test_isin_empty(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
result = df.isin([])
expected = pd.DataFrame(False, df.index, df.columns)
assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with tm.assertRaises(TypeError):
df.isin('a')
with tm.assertRaises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
assert_frame_equal(result, expected)
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with tm.assertRaises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with tm.assertRaises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with tm.assertRaises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
assert_frame_equal(result, expected)
    def test_isin_multiIndex(self):
        """isin between a MultiIndexed frame and a plain-indexed frame is
        all-False; once the indexes match, membership is checked per cell."""
        idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
                                      (0, 'b', 'bar'), (0, 'b', 'baz'),
                                      (2, 'a', 'foo'), (2, 'a', 'bar'),
                                      (2, 'c', 'bar'), (2, 'c', 'baz'),
                                      (1, 'b', 'foo'), (1, 'b', 'bar'),
                                      (1, 'c', 'bar'), (1, 'c', 'baz')])
        df1 = DataFrame({'A': np.ones(12),
                         'B': np.zeros(12)}, index=idx)
        df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                         'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
        # against regular index
        expected = DataFrame(False, index=df1.index, columns=df1.columns)
        result = df1.isin(df2)
        assert_frame_equal(result, expected)

        # once both share the MultiIndex, A matches where df2.A is truthy and
        # B (all zeros in df1) matches where df2.B is falsy -- hence the flip.
        df2.index = idx
        expected = df2.values.astype(np.bool)
        expected[:, 1] = ~expected[:, 1]
        expected = DataFrame(expected, columns=['A', 'B'], index=idx)
        result = df1.isin(df2)
        assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Row deduplication
    def test_drop_duplicates(self):
        """drop_duplicates over single/multiple columns for every `keep`
        option, the deprecated `take_last` spelling, integer dtypes, and
        hash-collision regressions (GH 11376, GH 11864)."""
        df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                                'foo', 'bar', 'bar', 'foo'],
                        'B': ['one', 'one', 'two', 'two',
                              'two', 'two', 'one', 'two'],
                        'C': [1, 1, 2, 2, 2, 2, 1, 2],
                        'D': lrange(8)})

        # single column
        result = df.drop_duplicates('AAA')
        expected = df[:2]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates('AAA', keep='last')
        expected = df.ix[[6, 7]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates('AAA', keep=False)
        expected = df.ix[[]]
        assert_frame_equal(result, expected)
        self.assertEqual(len(result), 0)

        # deprecate take_last: must raise FutureWarning but behave as keep='last'
        with tm.assert_produces_warning(FutureWarning):
            result = df.drop_duplicates('AAA', take_last=True)
        expected = df.ix[[6, 7]]
        assert_frame_equal(result, expected)

        # multi column: list, ndarray and tuple of labels are all accepted
        expected = df.ix[[0, 1, 2, 3]]
        result = df.drop_duplicates(np.array(['AAA', 'B']))
        assert_frame_equal(result, expected)
        result = df.drop_duplicates(['AAA', 'B'])
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(('AAA', 'B'), keep='last')
        expected = df.ix[[0, 5, 6, 7]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(('AAA', 'B'), keep=False)
        expected = df.ix[[0]]
        assert_frame_equal(result, expected)

        # deprecate take_last
        with tm.assert_produces_warning(FutureWarning):
            result = df.drop_duplicates(('AAA', 'B'), take_last=True)
        expected = df.ix[[0, 5, 6, 7]]
        assert_frame_equal(result, expected)

        # consider everything
        df2 = df.ix[:, ['AAA', 'B', 'C']]

        result = df2.drop_duplicates()
        # in this case only
        expected = df2.drop_duplicates(['AAA', 'B'])
        assert_frame_equal(result, expected)

        result = df2.drop_duplicates(keep='last')
        expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
        assert_frame_equal(result, expected)

        result = df2.drop_duplicates(keep=False)
        expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
        assert_frame_equal(result, expected)

        # deprecate take_last
        with tm.assert_produces_warning(FutureWarning):
            result = df2.drop_duplicates(take_last=True)
        with tm.assert_produces_warning(FutureWarning):
            expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
        assert_frame_equal(result, expected)

        # integers
        result = df.drop_duplicates('C')
        expected = df.iloc[[0, 2]]
        assert_frame_equal(result, expected)
        result = df.drop_duplicates('C', keep='last')
        expected = df.iloc[[-2, -1]]
        assert_frame_equal(result, expected)

        # narrow integer dtype must behave the same as the default one
        df['E'] = df['C'].astype('int8')
        result = df.drop_duplicates('E')
        expected = df.iloc[[0, 2]]
        assert_frame_equal(result, expected)
        result = df.drop_duplicates('E', keep='last')
        expected = df.iloc[[-2, -1]]
        assert_frame_equal(result, expected)

        # GH 11376
        df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
                           'y': [0, 6, 5, 5, 9, 1, 2]})
        expected = df.loc[df.index != 3]
        assert_frame_equal(df.drop_duplicates(), expected)

        df = pd.DataFrame([[1, 0], [0, 2]])
        assert_frame_equal(df.drop_duplicates(), df)

        df = pd.DataFrame([[-2, 0], [0, -4]])
        assert_frame_equal(df.drop_duplicates(), df)

        x = np.iinfo(np.int64).max / 3 * 2
        df = pd.DataFrame([[-x, x], [0, x + 4]])
        assert_frame_equal(df.drop_duplicates(), df)

        df = pd.DataFrame([[-x, x], [x, x + 4]])
        assert_frame_equal(df.drop_duplicates(), df)

        # GH 11864: no row is a duplicate, whatever `keep` says
        df = pd.DataFrame([i] * 9 for i in range(16))
        df = df.append([[1] + [0] * 8], ignore_index=True)
        for keep in ['first', 'last', False]:
            assert_equal(df.duplicated(keep=keep).sum(), 0)
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
    def test_drop_duplicates_tuple(self):
        """drop_duplicates when a column label is itself a tuple, including
        the deprecated `take_last` spelling."""
        df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
                                       'foo', 'bar', 'bar', 'foo'],
                        'B': ['one', 'one', 'two', 'two',
                              'two', 'two', 'one', 'two'],
                        'C': [1, 1, 2, 2, 2, 2, 1, 2],
                        'D': lrange(8)})

        # single column
        result = df.drop_duplicates(('AA', 'AB'))
        expected = df[:2]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(('AA', 'AB'), keep='last')
        expected = df.ix[[6, 7]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(('AA', 'AB'), keep=False)
        expected = df.ix[[]]  # empty df
        self.assertEqual(len(result), 0)
        assert_frame_equal(result, expected)

        # deprecate take_last: FutureWarning, same result as keep='last'
        with tm.assert_produces_warning(FutureWarning):
            result = df.drop_duplicates(('AA', 'AB'), take_last=True)
        expected = df.ix[[6, 7]]
        assert_frame_equal(result, expected)

        # multi column: the tuple label nested inside another tuple of keys
        expected = df.ix[[0, 1, 2, 3]]
        result = df.drop_duplicates((('AA', 'AB'), 'B'))
        assert_frame_equal(result, expected)
    def test_drop_duplicates_NA(self):
        """drop_duplicates treats None and NaN as regular (equal) values in
        both object and float columns, for every `keep` option."""
        # none
        df = DataFrame({'A': [None, None, 'foo', 'bar',
                              'foo', 'bar', 'bar', 'foo'],
                        'B': ['one', 'one', 'two', 'two',
                              'two', 'two', 'one', 'two'],
                        'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                        'D': lrange(8)})

        # single column
        result = df.drop_duplicates('A')
        expected = df.ix[[0, 2, 3]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates('A', keep='last')
        expected = df.ix[[1, 6, 7]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates('A', keep=False)
        expected = df.ix[[]]  # empty df
        assert_frame_equal(result, expected)
        self.assertEqual(len(result), 0)

        # deprecate take_last: FutureWarning, same result as keep='last'
        with tm.assert_produces_warning(FutureWarning):
            result = df.drop_duplicates('A', take_last=True)
        expected = df.ix[[1, 6, 7]]
        assert_frame_equal(result, expected)

        # multi column
        result = df.drop_duplicates(['A', 'B'])
        expected = df.ix[[0, 2, 3, 6]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(['A', 'B'], keep='last')
        expected = df.ix[[1, 5, 6, 7]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(['A', 'B'], keep=False)
        expected = df.ix[[6]]
        assert_frame_equal(result, expected)

        # deprecate take_last
        with tm.assert_produces_warning(FutureWarning):
            result = df.drop_duplicates(['A', 'B'], take_last=True)
        expected = df.ix[[1, 5, 6, 7]]
        assert_frame_equal(result, expected)

        # nan
        df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                              'foo', 'bar', 'bar', 'foo'],
                        'B': ['one', 'one', 'two', 'two',
                              'two', 'two', 'one', 'two'],
                        'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                        'D': lrange(8)})

        # single column
        result = df.drop_duplicates('C')
        expected = df[:2]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates('C', keep='last')
        expected = df.ix[[3, 7]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates('C', keep=False)
        expected = df.ix[[]]  # empty df
        assert_frame_equal(result, expected)
        self.assertEqual(len(result), 0)

        # deprecate take_last
        with tm.assert_produces_warning(FutureWarning):
            result = df.drop_duplicates('C', take_last=True)
        expected = df.ix[[3, 7]]
        assert_frame_equal(result, expected)

        # multi column
        result = df.drop_duplicates(['C', 'B'])
        expected = df.ix[[0, 1, 2, 4]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(['C', 'B'], keep='last')
        expected = df.ix[[1, 3, 6, 7]]
        assert_frame_equal(result, expected)

        result = df.drop_duplicates(['C', 'B'], keep=False)
        expected = df.ix[[1]]
        assert_frame_equal(result, expected)

        # deprecate take_last
        with tm.assert_produces_warning(FutureWarning):
            result = df.drop_duplicates(['C', 'B'], take_last=True)
        expected = df.ix[[1, 3, 6, 7]]
        assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
assert_frame_equal(result, expected)
    def test_drop_duplicates_inplace(self):
        """inplace=True mutates the frame to exactly what the out-of-place
        call would return, for every column selection and `keep` option."""
        orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                                'foo', 'bar', 'bar', 'foo'],
                          'B': ['one', 'one', 'two', 'two',
                                'two', 'two', 'one', 'two'],
                          'C': [1, 1, 2, 2, 2, 2, 1, 2],
                          'D': lrange(8)})

        # single column
        df = orig.copy()
        df.drop_duplicates('A', inplace=True)
        expected = orig[:2]
        result = df
        assert_frame_equal(result, expected)

        df = orig.copy()
        df.drop_duplicates('A', keep='last', inplace=True)
        expected = orig.ix[[6, 7]]
        result = df
        assert_frame_equal(result, expected)

        df = orig.copy()
        df.drop_duplicates('A', keep=False, inplace=True)
        expected = orig.ix[[]]
        result = df
        assert_frame_equal(result, expected)
        self.assertEqual(len(df), 0)

        # deprecate take_last: FutureWarning, same result as keep='last'
        df = orig.copy()
        with tm.assert_produces_warning(FutureWarning):
            df.drop_duplicates('A', take_last=True, inplace=True)
        expected = orig.ix[[6, 7]]
        result = df
        assert_frame_equal(result, expected)

        # multi column
        df = orig.copy()
        df.drop_duplicates(['A', 'B'], inplace=True)
        expected = orig.ix[[0, 1, 2, 3]]
        result = df
        assert_frame_equal(result, expected)

        df = orig.copy()
        df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
        expected = orig.ix[[0, 5, 6, 7]]
        result = df
        assert_frame_equal(result, expected)

        df = orig.copy()
        df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
        expected = orig.ix[[0]]
        result = df
        assert_frame_equal(result, expected)

        # deprecate take_last
        df = orig.copy()
        with tm.assert_produces_warning(FutureWarning):
            df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
        expected = orig.ix[[0, 5, 6, 7]]
        result = df
        assert_frame_equal(result, expected)

        # consider everything
        orig2 = orig.ix[:, ['A', 'B', 'C']].copy()

        df2 = orig2.copy()
        df2.drop_duplicates(inplace=True)
        # in this case only
        expected = orig2.drop_duplicates(['A', 'B'])
        result = df2
        assert_frame_equal(result, expected)

        df2 = orig2.copy()
        df2.drop_duplicates(keep='last', inplace=True)
        expected = orig2.drop_duplicates(['A', 'B'], keep='last')
        result = df2
        assert_frame_equal(result, expected)

        df2 = orig2.copy()
        df2.drop_duplicates(keep=False, inplace=True)
        expected = orig2.drop_duplicates(['A', 'B'], keep=False)
        result = df2
        assert_frame_equal(result, expected)

        # deprecate take_last
        df2 = orig2.copy()
        with tm.assert_produces_warning(FutureWarning):
            df2.drop_duplicates(take_last=True, inplace=True)
        with tm.assert_produces_warning(FutureWarning):
            expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
        result = df2
        assert_frame_equal(result, expected)
# Rounding
    def test_round(self):
        """DataFrame.round: empty frame, int/dict/Series `decimals`, rejection
        of non-int inputs, negative decimals, and named columns (GH 2665,
        GH 11986)."""
        # GH 2665

        # Test that rounding an empty DataFrame does nothing
        df = DataFrame()
        assert_frame_equal(df, df.round())

        # Here's the test frame we'll be working with
        df = DataFrame(
            {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})

        # Default round to integer (i.e. decimals=0)
        expected_rounded = DataFrame(
            {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
        assert_frame_equal(df.round(), expected_rounded)

        # Round with an integer
        decimals = 2
        expected_rounded = DataFrame(
            {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
        assert_frame_equal(df.round(decimals), expected_rounded)

        # This should also work with np.round (since np.round dispatches to
        # df.round)
        assert_frame_equal(np.round(df, decimals), expected_rounded)

        # Round with a list
        round_list = [1, 2]
        with self.assertRaises(TypeError):
            df.round(round_list)

        # Round with a dictionary
        expected_rounded = DataFrame(
            {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
        round_dict = {'col1': 1, 'col2': 2}
        assert_frame_equal(df.round(round_dict), expected_rounded)

        # Incomplete dict: unlisted columns are left untouched
        expected_partially_rounded = DataFrame(
            {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
        partial_round_dict = {'col2': 1}
        assert_frame_equal(
            df.round(partial_round_dict), expected_partially_rounded)

        # Dict with unknown elements: unknown keys are silently ignored
        wrong_round_dict = {'col3': 2, 'col2': 1}
        assert_frame_equal(
            df.round(wrong_round_dict), expected_partially_rounded)

        # float input to `decimals`
        non_int_round_dict = {'col1': 1, 'col2': 0.5}
        with self.assertRaises(TypeError):
            df.round(non_int_round_dict)

        # String input
        non_int_round_dict = {'col1': 1, 'col2': 'foo'}
        with self.assertRaises(TypeError):
            df.round(non_int_round_dict)

        non_int_round_Series = Series(non_int_round_dict)
        with self.assertRaises(TypeError):
            df.round(non_int_round_Series)

        # List input
        non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
        with self.assertRaises(TypeError):
            df.round(non_int_round_dict)

        non_int_round_Series = Series(non_int_round_dict)
        with self.assertRaises(TypeError):
            df.round(non_int_round_Series)

        # Non integer Series inputs
        non_int_round_Series = Series(non_int_round_dict)
        with self.assertRaises(TypeError):
            df.round(non_int_round_Series)

        non_int_round_Series = Series(non_int_round_dict)
        with self.assertRaises(TypeError):
            df.round(non_int_round_Series)

        # Negative numbers
        negative_round_dict = {'col1': -1, 'col2': -2}
        big_df = df * 100
        expected_neg_rounded = DataFrame(
            {'col1': [110., 210, 310], 'col2': [100., 200, 300]})
        assert_frame_equal(
            big_df.round(negative_round_dict), expected_neg_rounded)

        # nan in Series round
        nan_round_Series = Series({'col1': nan, 'col2': 1})

        # TODO(wesm): unused?
        expected_nan_round = DataFrame({  # noqa
            'col1': [1.123, 2.123, 3.123],
            'col2': [1.2, 2.2, 3.2]})

        if sys.version < LooseVersion('2.7'):
            # Rounding with decimal is a ValueError in Python < 2.7
            with self.assertRaises(ValueError):
                df.round(nan_round_Series)
        else:
            with self.assertRaises(TypeError):
                df.round(nan_round_Series)

        # Make sure this doesn't break existing Series.round
        assert_series_equal(df['col1'].round(1), expected_rounded['col1'])

        # named columns
        # GH 11986
        decimals = 2
        expected_rounded = DataFrame(
            {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
        df.columns.name = "cols"
        expected_rounded.columns.name = "cols"
        assert_frame_equal(df.round(decimals), expected_rounded)

        # interaction of named columns & series
        assert_series_equal(df['col1'].round(decimals),
                            expected_rounded['col1'])
        assert_series_equal(df.round(decimals)['col1'],
                            expected_rounded['col1'])
def test_numpy_round(self):
# See gh-12600
df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
out = np.round(df, decimals=0)
expected = DataFrame([[2., 1.], [0., 7.]])
assert_frame_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assertRaisesRegexp(ValueError, msg):
np.round(df, decimals=0, out=df)
def test_round_mixed_type(self):
# GH11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
assert_frame_equal(df.round(), round_0)
assert_frame_equal(df.round(1), df)
assert_frame_equal(df.round({'col1': 1}), df)
assert_frame_equal(df.round({'col1': 0}), round_0)
assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
# GH11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
self.assertTrue(rounded.index.equals(dfs.index))
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
self.assertRaises(ValueError, df.round, decimals)
def test_built_in_round(self):
if not compat.PY3:
raise nose.SkipTest("build in round cannot be overriden "
"prior to Python 3")
# GH11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
assert_frame_equal(round(df), expected_rounded)
# Clip
def test_clip(self):
median = self.frame.median().median()
capped = self.frame.clip_upper(median)
self.assertFalse((capped.values > median).any())
floored = self.frame.clip_lower(median)
self.assertFalse((floored.values < median).any())
double = self.frame.clip(upper=median, lower=median)
self.assertFalse((double.values != median).any())
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
self.assertTrue((clipped_df.values[lb_mask] == lb).all())
self.assertTrue((clipped_df.values[ub_mask] == ub).all())
self.assertTrue((clipped_df.values[mask] ==
df.values[mask]).all())
    def test_clip_against_series(self):
        """Series-valued bounds with axis=0 clip each column row-wise and
        preserve the clipped column's name (GH #6966)."""
        # GH #6966

        df = DataFrame(np.random.randn(1000, 2))
        lb = Series(np.random.randn(1000))
        ub = lb + 1

        clipped_df = df.clip(lb, ub, axis=0)

        for i in range(2):
            lb_mask = df.iloc[:, i] <= lb
            ub_mask = df.iloc[:, i] >= ub
            mask = ~lb_mask & ~ub_mask

            # values below the lower bound become the bound itself ...
            result = clipped_df.loc[lb_mask, i]
            assert_series_equal(result, lb[lb_mask], check_names=False)
            self.assertEqual(result.name, i)

            # ... values above the upper bound likewise ...
            result = clipped_df.loc[ub_mask, i]
            assert_series_equal(result, ub[ub_mask], check_names=False)
            self.assertEqual(result.name, i)

            # ... and in-range values pass through unchanged.
            assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
def test_clip_against_frame(self):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
assert_frame_equal(clipped_df[mask], df[mask])
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b)
assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
assert_series_equal(result, expected['one'], check_names=False)
self.assertTrue(result.name is None)
result = a.dot(b1['one'])
assert_series_equal(result, expected['one'], check_names=False)
self.assertTrue(result.name is None)
# can pass correct-length arrays
row = a.ix[0].values
result = a.dot(row)
exp = a.dot(a.ix[0])
assert_series_equal(result, exp)
with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)
# Run this module's tests under nose when executed directly.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
PiscesDream/Ideas | Cog_neusci/ex1/main.py | 1 | 4431 | from keras.layers import Input, Dense
from keras.models import Model
import keras
from keras import regularizers
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
keras.backend.theano_backend._set_device('dev1')
# this is the size of our encoded representations
encoding_dim = 16  # 16 floats -> compression of factor 49, assuming the input is 784 floats
def plot_encode_decode(encoder, decoder, x_test):
    """Plot the first 10 test digits above their reconstructions.

    Saves the figure to 'sample.pdf'. Assumes each row of x_test is a
    flattened 28x28 image -- TODO confirm against the caller.
    """
    # encode and decode some digits
    # note that we take them from the *test* set
    encoded_imgs = encoder.predict(x_test)
    decoded_imgs = decoder.predict(encoded_imgs)

    n = 10  # how many digits we will display
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # display original
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(28, 28), interpolation='None')
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display reconstruction (second row of the grid)
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(28, 28), interpolation='None')
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.savefig('sample.pdf', dpi=600)
    print 'done'
def plot_max_activation(autoencoder):
    """Dump the first Dense layer's weights (one row per hidden unit) to 'main.npz'.

    The original per-unit plotting code is kept below, commented out.
    NOTE(review): relies on the module-global `np` imported inside the
    __main__ block -- confirm this is only ever called from there.
    """
    W = autoencoder.layers[1].get_weights()[0].swapaxes(0,1)
    np.savez_compressed('main.npz', data=W)
#   plt.clf()
#   N = 5
#   M = (encoding_dim-1)/N+1
#   plt.figure(figsize=(M, N))
#   for i in range(encoding_dim):
#       x = W[i]/np.sqrt((W[i]**2).sum())
#
#       ax = plt.subplot(M, N, i + 1)
#       plt.imshow(x.reshape(28, 28), interpolation='None')
#       plt.gray()
#       ax.get_xaxis().set_visible(False)
#       ax.get_yaxis().set_visible(False)
#   plt.subplots_adjust(hspace=0.0, wspace=-1.0)
#   plt.savefig('maxact.pdf')
#   pass
def sparseAE():
    """Build a sparse autoencoder (L1 activity penalty on the code layer).

    Returns a (autoencoder, encoder, decoder) triple of keras Models that
    share the same trained weights.
    """
    input_img = Input(shape=(784,))
    # add a Dense layer with a L1 activity regularizer
    code = Dense(encoding_dim, activation='relu',
                 activity_regularizer=regularizers.activity_l1(10e-5))(input_img)
    reconstruction = Dense(784, activation='sigmoid')(code)

    autoencoder = Model(input=input_img, output=reconstruction)
    encoder = Model(input=input_img, output=code)

    # stand-alone decoder: feed an encoding-sized input through the
    # autoencoder's output layer
    code_input = Input(shape=(encoding_dim, ))
    output_layer = autoencoder.layers[-1]
    decoder = Model(input=code_input, output=output_layer(code_input))
    return autoencoder, encoder, decoder
def AE():
    """Build a plain (unregularized) autoencoder.

    Returns a (autoencoder, encoder, decoder) triple of keras Models that
    share the same trained weights.
    """
    # placeholder for the flattened 784-pixel input
    input_img = Input(shape=(784, ))
    # "encoded" is the compressed representation of the input
    code = Dense(encoding_dim, activation='relu')(input_img)
    # "decoded" is the lossy reconstruction of the input
    reconstruction = Dense(784, activation='sigmoid')(code)

    autoencoder = Model(input=input_img, output=reconstruction)
    encoder = Model(input=input_img, output=code)

    # stand-alone decoder built from the autoencoder's output layer
    code_input = Input(shape=(encoding_dim, ))
    output_layer = autoencoder.layers[-1]
    decoder = Model(input=code_input, output=output_layer(code_input))
    return autoencoder, encoder, decoder
if __name__ == '__main__':
    # build the sparse variant; swap in AE() for the unregularized model
    autoencoder, encoder, decoder = sparseAE()
    autoencoder.compile(optimizer='adadelta',
                        loss='binary_crossentropy')

    from keras.datasets import mnist
    import numpy as np
    (x_train, _), (x_test, _) = mnist.load_data()

    # scale pixels to [0, 1] and flatten each 28x28 image to a 784-vector
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
    x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
    print x_train.shape
    print x_test.shape

    # autoencoder.load_weights('model.h5')
    autoencoder.fit(x_train, x_train,
                    nb_epoch=100,
                    batch_size=256,
                    shuffle=True,
                    validation_data=(x_test, x_test))
    autoencoder.save_weights('model.h5', overwrite=True)

    # plot_encode_decode(encoder, decoder, x_test)
    plot_max_activation(autoencoder)
| apache-2.0 |
cerebis/i3seqdb | app/objects.py | 1 | 6004 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, Date, Float, ForeignKey, Table, CheckConstraint
import pandas
Base = declarative_base()
class Sample(Base):
    """
    Defined by the minimum required for submission at NCBI

    Subclasses are easily handled and one is included for clarity. These become a single
    table by default. Requirements such as non-null at the database level require individual
    tables -- supported by alchemy
    """
    __tablename__ = 'sample'

    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False, unique=True)
    organism = Column(String, nullable=False)
    collection_date = Column(Date, nullable=False)
    geo_loc_name = Column(String, nullable=False)
    lat_lon = Column(String)
    # polymorphic discriminator column used by the mapper below
    _type = Column(String)

    # simple one-to-many
    libraries = relationship('Library')

    __mapper_args__ = {
        'polymorphic_identity': 'sample',
        'polymorphic_on': _type
    }

    @staticmethod
    def make(sample_class, kwargs):
        """
        Factory method for creating various subclasses of Sample.
        :param sample_class: the subclass to create (case-insensitive name)
        :param kwargs: dict of fields, None/NaN/falsy fields will be removed.
        :return: instance of requested class
        :raises RuntimeError: if sample_class does not name a known subclass
        """
        cl = sample_class.lower()

        # get rid of problematic elements before passing to constructor.
        # BUG FIX: iterate over a snapshot of the keys -- deleting entries
        # while iterating the live keys view raises RuntimeError on Python 3.
        del kwargs['sample_class']
        for k in list(kwargs):
            if not kwargs[k] or pandas.isnull(kwargs[k]):
                del kwargs[k]

        if cl == 'microbe':
            # this could be stored as a string and converted in the application
            # to a datetime instance
            return Microbe(**kwargs)
        elif cl == 'metagenome':
            return Metagenome(**kwargs)
        elif cl == 'pathogen':
            return Pathogen(**kwargs)
        else:
            raise RuntimeError('unknown sample type [{}]'.format(cl))
class Microbe(Sample):
    """
    Inheritance example. Shares the parent's primary key (joined-table
    inheritance), so no additional id column is required.
    """
    __tablename__ = 'microbe'

    id = Column(Integer, ForeignKey('sample.id'), primary_key=True)
    strain = Column(String)
    isolate = Column(String)
    host = Column(String)
    isolation_source = Column(String)
    sample_type = Column(String, nullable=False)

    __mapper_args__ = {'polymorphic_identity': 'microbe'}
    # NCBI-style either/or requirements, enforced at the database level
    __table_args__ = (
        CheckConstraint('strain is not null or isolate is not null', name='check1'),
        CheckConstraint('host is not null or isolation_source is not null', name='check2'),
    )
class Pathogen(Sample):
    """Pathogen sample subtype; requires at least one of strain/isolate."""
    __tablename__ = 'pathogen'

    id = Column(Integer, ForeignKey('sample.id'), primary_key=True)
    strain = Column(String)
    isolate = Column(String)
    collected_by = Column(String)
    isolation_source = Column(String)

    __mapper_args__ = {'polymorphic_identity': 'pathogen'}
    __table_args__ = (
        CheckConstraint('strain is not null or isolate is not null', name='check1'),
    )
class Metagenome(Sample):
    """Metagenome sample subtype; requires a host or an isolation source."""
    __tablename__ = 'metagenome'

    id = Column(Integer, ForeignKey('sample.id'), primary_key=True)
    host = Column(String)
    isolation_source = Column(String)

    __mapper_args__ = {'polymorphic_identity': 'metagenome'}
    __table_args__ = (
        CheckConstraint('host is not null or isolation_source is not null', name='check1'),
    )
# many-to-many join tables

# association table linking pools to their member libraries
pool_library_table = Table('pool_library', Base.metadata,
                           Column('pool_id', Integer, ForeignKey('pool.id')),
                           Column('library_id', Integer, ForeignKey('library.id')))
class Library(Base):
    """
    A library should encompass all that there is to know about the creation of a sequencing library.
    What is required will depend on the library type, such as: amplicon, wgs, hic, meta3c
    """
    __tablename__ = 'library'

    id = Column(Integer, primary_key=True)
    # barcodes could be a separate table acting as an enumeration and tracking oligo batches.
    barcode = Column(String)
    creation_date = Column(Date)
    # status/step could be used to track process of creating library
    status = Column(String)
    # physical location of the library during preparation
    tray = Column(String)
    well = Column(String)
    # bioanalyzer concentration
    ba_conc = Column(Float)
    # nano-run read count
    nano_count = Column(Integer)

    # foreign key of the parent sample this library was made from
    sample_id = Column(Integer, ForeignKey('sample.id'))

    # many-to-many with pools
    pools = relationship('Pool', secondary=pool_library_table, back_populates='libraries')
# association table linking sequencing runs to the pools they contain
run_pool_table = Table('run_pool', Base.metadata,
                       Column('run_id', Integer, ForeignKey('run.id')),
                       Column('pool_id', Integer, ForeignKey('pool.id')))
class Pool(Base):
    """
    A pool represents a combination of one or more libraries, before submitting as a run.
    """
    __tablename__ = 'pool'

    id = Column(Integer, primary_key=True)
    # some measure of concentration
    molarity = Column(Float)

    # many-to-many with library
    libraries = relationship('Library', secondary=pool_library_table, back_populates='pools')
    # many-to-many with run
    runs = relationship('Run', secondary=run_pool_table, back_populates='pools')
class Run(Base):
    """
    An actual sequencing run. This information would be populated at the time a run is handed
    to a sequencing facility, and would require updating once results are returned.
    """
    __tablename__ = 'run'
    id = Column(Integer, primary_key=True)
    # Name of the sequencing facility performing the run.
    facility = Column(String)
    machine_type = Column(String)
    cell_type = Column(String) # redundant perhaps
    run_type = Column(String)
    run_date = Column(Date)
    # Filesystem location of the returned sequence data.
    data_path = Column(String)
    # many-to-many with pool (via run_pool association table)
    pools = relationship('Pool', secondary=run_pool_table, back_populates='runs') | gpl-3.0 |
jadsonjs/DataScience | MachineLearning/print_ensemble_precisions.py | 1 | 2523 | #
# This program is distributed without any warranty and it
# can be freely redistributed for research, classes or private studies,
# since the copyright notices are not removed.
#
# This file just read the data to calculate the statistical test
#
# Jadson Santos - jadsonjs@gmail.com
#
# to run this example install python modules:
#
# pip3 install pandas
#
# Python Data Analysis Library
# https://pandas.pydata.org
import pandas as pd
# This module provides functions for calculating mathematical statistics of numeric (Real-valued) data.
# https://docs.python.org/3/library/statistics.html
import statistics
#
# PUT THE RESULT DIRECTORY AND ENSEMBLE ALGORITHM GENEREATED BY WEKA ON HERE
#
# read the CSV file with your data base and put into a Pandas DataFrame
# https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/
#
directory = '/Users/jadson/tmp/results/' # where are the files generated by weka
#
# prints the data of all homogeneous ensemble
#
def printHomogeneo():
    """Print the IR precision column of every homogeneous-ensemble result file.

    For each base model and each ensemble technique, reads the Weka result CSV
    produced with 10, 15 and 20 classifiers from `directory` and prints every
    precision value with 4 significant digits.
    """
    for model in ['knn', 'ad', 'nb', 'mlp']:
        for ensemble in ['bagging', 'boosting', 'stacking_homogeneo']:
            print(' -------------------- ')
            print(model+' --> '+ensemble)
            print(' -------------------- ')
            for num_classifiers in [10, 15, 20]:
                df = pd.read_csv( directory+ensemble+'_'+model+'_'+str(num_classifiers)+'.csv' )
                # Iterate the precision values directly instead of indexing
                # by position (range(len(...)) anti-pattern removed).
                for p in df['IR_precision'].values:
                    # {0:.4} formats with 4 significant digits.
                    print('{0:.4}'.format(p))
#
# prints the data of all heterogeneous ensemble
#
def printHeterogeneo():
    """Print the IR precision column of every heterogeneous-ensemble result file.

    For each base-model combination, reads the Weka stacking result CSV
    produced with 10, 15 and 20 classifiers from `directory` and prints every
    precision value with 4 significant digits.
    """
    for ensemble in ['stacking_heterogeneo']:
        print(' -------------------- ')
        print(ensemble)
        print(' -------------------- ')
        for model in ['MLP_AD', 'MLP_NB', 'MLP_NB_AD', 'NB_AD']:
            for num_classifiers in [10, 15, 20]:
                df = pd.read_csv( directory+ensemble+'_'+model+'_'+str(num_classifiers)+'.csv' )
                # Iterate the precision values directly instead of indexing
                # by position (range(len(...)) anti-pattern removed).
                for p in df['IR_precision'].values:
                    # {0:.4} formats with 4 significant digits.
                    print('{0:.4}'.format(p))
# Run both reports only when executed as a script (not on import).
if __name__ == '__main__':
    printHomogeneo()
    printHeterogeneo()
| apache-2.0 |
wangqingbaidu/aliMusic | gen_X_features/user_clustering_artist_taset.py | 1 | 1195 | # -*- coding: UTF-8 -*-
'''
Authorized by vlon Jang
Created on May 25, 2016
Email:zhangzhiwei@ict.ac.cn
From Institute of Computing Technology
All Rights Reserved.
'''
from sklearn.cluster import MiniBatchKMeans, KMeans
import pandas as pd
import numpy as np
import pymysql
mysql_cn= pymysql.connect(host='localhost', port=3306,user='root', passwd='111111', db='music')
def gen_cluster(keys = None, cluster_matrix = None):
km = MiniBatchKMeans(n_clusters=50, batch_size=1000)
# km = KMeans(n_jobs=-1, n_clusters=50)
print "Clustering data..."
labels = pd.DataFrame(km.fit_predict(cluster_matrix.values))
res = pd.concat([keys, labels], axis = 1, ignore_index=True)
return res
def get_data():
print "Getting data form db..."
df = pd.read_sql('select * from user_artist_taste', mysql_cn)
df = df.fillna(value=0)
return df
if __name__ == "__main__":
df = get_data()
keys = df.pop('user_id')
df = gen_cluster(keys, df)
df.columns = ['user_id', 'label']
df.to_sql('user_taste_labels', mysql_cn, flavor='mysql', if_exists='replace',
index = False)
print "Wrote to db!"
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.