repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
jemromerol/apasvo | apasvo/picking/apasvotrace.py | 1 | 30191 | # encoding: utf-8
'''
@author: Jose Emilio Romero Lopez
@copyright: Copyright 2013-2014, Jose Emilio Romero Lopez.
@license: GPL
@contact: jemromerol@gmail.com
This file is part of APASVO.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import obspy as op
import multiprocessing as mp
import itertools
from obspy.core.utcdatetime import UTCDateTime
from obspy.core.event import Pick
from obspy.core.event import ResourceIdentifier
from obspy.core.event import CreationInfo
from obspy.core.event import WaveformStreamID
from obspy.core.event import Comment
from obspy.core.event import Catalog
from obspy.core.event import Event
from obspy.signal import filter
import csv
import copy
import os
import uuid
import gc
from copy import deepcopy
from apasvo.picking import takanami
from apasvo.picking import envelope as env
from apasvo.utils.formats import rawfile
from apasvo.utils import collections
from apasvo.utils import clt
# Identifiers of the picking algorithm that produced an event.
method_other = 'other'
method_takanami = 'Takanami'
method_stalta = 'STALTA'
method_stalta_takanami = 'STALTA+Takanami'
method_ampa = 'AMPA'
method_ampa_takanami = 'AMPA+Takanami'
# Every accepted value for an event's 'method' attribute.
ALLOWED_METHODS = (
    method_other,
    method_takanami,
    method_stalta,
    method_stalta_takanami,
    method_ampa,
    method_ampa_takanami
)
# Accepted seismic phase hints; the first entry ('P') is the fallback
# used by ApasvoEvent when an unknown hint is supplied.
PHASE_VALUES = (
    "P",
    "S",
    "Other",
)
# Event evaluation modes.
mode_manual = 'manual'
mode_automatic = 'automatic'
# Event revision statuses.
status_preliminary = 'preliminary'
status_reviewed = 'reviewed'
status_confirmed = 'confirmed'
status_rejected = 'rejected'
status_final = 'final'
DEFAULT_DTYPE = '=f8'  # Default datatype: 8-byte (64-bit) float, native byte order
DEFAULT_DELTA = 0.02  # Default sampling period in seconds (i.e. 50 Hz)
def generate_csv(records, fout, delimiter=',', lineterminator='\n'):
    """Generates a Comma Separated Value (CSV) resume file from a list of
    Record objects.

    The function stores into a file a summary table of the events found
    for a given list of records, one row per (record, event) pair, in
    record order. The table has the following fields:
        file_name: Name of the file (absolute path) that stores the data
            signal where the event was found.
        time: Event arrival time, in seconds from the beginning of the signal.
        cf_value: Characteristic function value at the event arrival time.
        name: An arbitrary string that identifies the event.
        method: A string indicating the algorithm used to find the event.
        mode: Event picking mode, e.g. 'manual' or 'automatic'.
        status: Revision status of the event.
        comments: Additional comments.

    Args:
        records: A list of record objects.
        fout: Output file object.
        delimiter: A delimiter character that separates fields/columns.
            Default character is ','.
        lineterminator: A delimiter character that separates records/rows.
    """
    fieldnames = ['file_name', 'time', 'cf_value', 'name',
                  'method', 'mode', 'status', 'comments']
    writer = csv.DictWriter(fout, fieldnames,
                            delimiter=delimiter,
                            lineterminator=lineterminator)
    writer.writeheader()
    # Emit one summary row per event, grouped by record.
    for record in records:
        for event in record.events:
            writer.writerow({
                'file_name': record.filename,
                'time': record.starttime + event.time,
                'cf_value': event.cf_value,
                'name': event.name,
                'method': event.method,
                'mode': event.evaluation_mode,
                'status': event.evaluation_status,
                'comments': event.comments,
            })
class ApasvoEvent(Pick):
    """A seismic event found in a Record instance.

    This class stores several attributes used to describe a possible event
    found in a seismic signal, as well as data results from the computation
    of Takanami algorithm in order to refine the arrival time of the event.

    Attributes:
        trace: ApasvoTrace instance where the event was found.
        stime: Event arrival time, given in samples from the beginning of
            trace.signal. Kept in sync with 'time' (see __setattr__).
        time: Event arrival time as an absolute date-time (inherited from
            obspy Pick). Kept in sync with 'stime'.
        cf_value: Characteristic function value at the event arrival time.
        name: An arbitrary string that identifies the event.
            Default: ''.
        comments: Additional comments.
            Default: ''.
        method: A string indicating the algorithm used to find the event.
            Possible values are: 'STALTA', 'STALTA+Takanami', 'AMPA',
            'AMPA+Takanami' and 'other'.
            Default: 'other'.
        n0_aic: Start time point of computed AIC values. The value is given in
            samples from the beginning of record.signal.
        aic: List of AIC values from n0_aic.
    """
    # Accepted identifiers for the 'method' attribute.
    methods = (method_other, method_takanami, method_stalta,
               method_stalta_takanami, method_ampa, method_ampa_takanami)

    def __init__(self,
                 trace,
                 time,
                 name='',
                 comments='',
                 method=method_other,
                 phase_hint=None,
                 polarity='undecidable',
                 aic=None,
                 n0_aic=None,
                 *args, **kwargs):
        self.trace = trace
        # 'time' is a sample index here, validated against the signal length.
        if time < 0 or time >= len(self.trace.signal):
            raise ValueError("Event position must be a value between 0 and %d"
                             % len(self.trace.signal))
        # Assigning 'stime' also computes the absolute 'time' via __setattr__.
        self.stime = time
        self.name = name
        self.method = method
        self.aic = aic
        self.n0_aic = n0_aic
        # Unknown phase hints fall back to the first allowed value ('P').
        phase_hint = phase_hint if phase_hint in PHASE_VALUES else PHASE_VALUES[0]
        # NOTE(review): 'author'/'agency' are read out of kwargs here but
        # kwargs is also forwarded to Pick below -- confirm the base class
        # tolerates those extra keys when they are supplied.
        super(ApasvoEvent, self).__init__(time=self.time,
                                          method_id=ResourceIdentifier(method),
                                          phase_hint=phase_hint,
                                          polarity=polarity,
                                          creation_info=CreationInfo(
                                              author=kwargs.get('author', ''),
                                              agency_id=kwargs.get('agency', ''),
                                              creation_time=UTCDateTime.now(),
                                          ),
                                          waveform_id=WaveformStreamID(
                                              network_code=self.trace.stats.get('network', ''),
                                              station_code=self.trace.stats.get('station', ''),
                                              location_code=self.trace.stats.get('location', ''),
                                              channel_code=self.trace.stats.get('channel', ''),
                                          ),
                                          *args,
                                          **kwargs)
        self.comments = comments

    @property
    def cf_value(self):
        # Characteristic-function value at the pick position, or NaN when
        # the pick falls outside the computed CF.
        if 0 <= self.stime < len(self.trace.cf):
            return self.trace.cf[self.stime]
        else:
            return np.nan

    def _samples_to_seconds(self, value):
        # Sample index -> absolute time (trace start + offset in seconds).
        return self.trace.starttime + (self.trace.delta * value)

    def _seconds_to_samples(self, value):
        # Absolute time -> sample index (truncated toward zero).
        return int((value - self.trace.starttime) / self.trace.delta)

    def __setattr__(self, key, value):
        # Keep 'stime' (samples) and 'time' (absolute) mutually consistent,
        # and store 'comments' wrapped in an obspy Comment object.
        if key == 'stime':
            self.__dict__[key] = value
            self.__dict__['time'] = self._samples_to_seconds(value)
        elif key == 'time':
            self.__dict__[key] = value
            self.__dict__['stime'] = self._seconds_to_samples(value)
        elif key == 'comments':
            self.__dict__['comments'] = Comment(text=value)
        else:
            super(ApasvoEvent, self).__setattr__(key, value)

    def __getattribute__(self, item):
        # Unwrap the Comment object so 'comments' reads back as plain text.
        if item == 'comments':
            return self.__dict__['comments'].text
        else:
            return super(ApasvoEvent, self).__getattribute__(item)

    def plot_aic(self, show_envelope=True, num=None, **kwargs):
        """Plots AIC values for a given event object.

        Draws a figure with two axes: the first one plots magnitude and
        envelope of 'self.signal' and the second one plots AIC values computed
        after applying Takanami AR method to 'event'. Plotted data goes from
        'event.n0_aic' to 'event.n0_aic + len(event.aic)'.

        Args:
            show_envelope: Boolean value to specify whether to plot the
                envelope of 'signal' or not. This function will be drawn
                preferably on the first axis together with amplitude of
                'signal'.
                Default: True.
            num: Identifier of the returned MatplotLib figure, integer type.
                Default None, which means an identifier value will be
                automatically generated.

        Returns:
            fig: A MatplotLib Figure instance.

        Raises:
            ValueError: if the event carries no AIC data.
        """
        if self.aic is None or self.n0_aic is None:
            raise ValueError("Event doesn't have AIC data to plot")
        # Lazy matplotlib import
        import matplotlib.pyplot as pl
        from matplotlib import ticker
        # Set limits (clamped to the available signal)
        i_from = int(max(0, self.n0_aic))
        i_to = int(min(len(self.trace.signal), self.n0_aic + len(self.aic)))
        # Create time sequence
        t = np.arange(i_from, i_to) / float(self.trace.fs)
        # Create figure
        fig, _ = pl.subplots(2, 1, sharex='all', num=num)
        fig.canvas.set_window_title(self.trace.label)
        fig.set_tight_layout(True)
        # Configure axes: shared date-formatted x axis on both subplots.
        for ax in fig.axes:
            ax.cla()
            ax.grid(True, which='both')
            formatter = ticker.FuncFormatter(lambda x, pos: clt.float_secs_2_string_date(x, self.trace.starttime))
            ax.xaxis.set_major_formatter(formatter)
            ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5, prune='lower'))
            ax.set_xlabel('Time (seconds)')
            pl.setp(ax.get_xticklabels(), visible=True)
        # Draw signal
        fig.axes[0].set_title('Signal Amplitude')
        fig.axes[0].set_ylabel('Amplitude')
        fig.axes[0].plot(t, self.trace.signal[i_from:i_to], color='black',
                         label='Signal')
        # Draw envelope
        if show_envelope:
            fig.axes[0].plot(t, env.envelope(self.trace.signal[i_from:i_to]),
                             color='r', label='Envelope')
            fig.axes[0].legend(loc=0, fontsize='small')
        # Draw AIC
        fig.axes[1].set_title('AIC')
        fig.axes[1].plot(t, self.aic)
        # Draw event arrival as a vertical marker on both axes.
        for ax in fig.axes:
            vline = ax.axvline(self.stime / self.trace.fs, label="Event")
            vline.set(color='r', ls='--', lw=2)
        # Configure limits and draw legend
        for ax in fig.axes:
            ax.set_xlim(t[0], t[-1])
            ax.legend(loc=0, fontsize='small')
        return fig
class ApasvoTrace(op.Trace):
"""A seismic data trace.
The class contains a seismic data trace.
Attributes:
signal: Seismic data, numpy array type.
fs: Sample rate in Hz.
cf: Characteristic function, numpy array type, from the beginning
of signal.
events: A list of events.
label: A string that identifies the stored seismic data.
Default: ''.
description: Additional comments.
Default: ''.
"""
def __init__(self,
data=None,
header=None,
label='',
description='',
filename='',
normalize=True,
use_filtered=False,
**kwargs):
"""Initializes a Record instance.
Args:
label: A string that identifies the seismic record. Default: ''.
description: Additional comments.
"""
# Cast data to default datatype
if data is None:
data = np.ndarray((0,), dtype=DEFAULT_DTYPE)
super(ApasvoTrace, self).__init__(data, header)
self.cf = np.array([], dtype=DEFAULT_DTYPE)
if normalize:
self.data = self.data - np.mean(self.data)
#self.data = self.data/ np.max(np.abs(self.data))
self.filtered_signal = deepcopy(self.data)
self.events = []
self.label = label
self.description = description
self.filename = filename
self.use_filtered = False
# Get an uuid for each trace
self.uuid = unicode(uuid.uuid4())
@property
def fs(self):
return 1. / self.stats.delta
@property
def delta(self):
return self.stats.delta
@property
def signal(self):
return self.data if not self.use_filtered else self.filtered_signal
@property
def starttime(self):
return self.stats.starttime
@property
def endtime(self):
return self.stats.endtime
@property
def short_name(self):
return "{0} | {1}".format(os.path.basename(self.filename), self.id)
@property
def name(self):
return "{0} | {1}".format(os.path.basename(self.filename), str(self))
def detect(self, alg, threshold=None, peak_window=1.0,
takanami=False, takanami_margin=5.0, action='append', debug=False, **kwargs):
"""Computes a picking algorithm over self.signal.
Args:
alg: A detection/picking algorithm object, e. g. a
picking.ampa.Ampa or picking.stalta.StaLta instance.
threshold: Local maxima found in the characteristic function above
this value will be returned by the function as possible events
(detection mode).
If threshold is None, the function will return only the global
maximum (picking mode).
Default value is None.
peak_window: How many seconds on each side of a point of the
characteristic function to use for the comparison to consider
the point to be a local maximum.
If 'threshold' is None, this parameter has no effect.
Default value is 1 s.
takanami: A boolean parameter to specify whether Takanami AR method
will be applied over results or not.
Default: False, Takanami wont be applied over results.
takanami_margin: How many seconds on each side of an event time to
use for the application of Takanami method.
If 'takanami' is False, this parameter has no effect.
Default: 5.0 seconds.
action: Two valid choices: 'append' and 'clear'. 'append' adds the
events found to the end of the list of events, while 'clear'
removes the existing events of the list.
Default: 'append'.
Returns:
events: A resulting list of Event objects.
"""
et, self.cf = alg.run(self.signal, self.fs, threshold=threshold,
peak_window=peak_window)
# Build event list
events = []
for t in et:
# set method name
method_name = alg.__class__.__name__.upper()
if method_name not in ApasvoEvent.methods:
method_name = method_other
events.append(ApasvoEvent(self, t, method=method_name,
evaluation_mode=mode_automatic,
evaluation_status=status_preliminary))
# Refine arrival times
if takanami:
events = self.refine_events(events, takanami_margin=takanami_margin)
# Update event list
if action == 'append':
self.events.extend(events)
elif action == 'clear':
self.events = events
else:
raise ValueError("%s is not a valid value for 'action'" % action)
if debug:
print "{} event(s) found so far for trace {}:".format(len(self.events), self.getId())
for event in self.events:
print event.time
return self.events
def sort_events(self, key='time', reverse=False):
"""Sort event list.
Args:
key: Name of the attribute of Event class to use as sorting key.
Default: 'time'.
reverse: Determines whether to sort in reverse order or not.
Default: False.
Returns:
events: Sorted event list.
"""
if key == 'aic':
raise ValueError("Sorting not allowed using key 'aic'")
self.events = sorted(self.events,
key=lambda e: e.__dict__.get(key, None),
reverse=reverse)
return self.events
def refine_events(self, events, t_start=None, t_end=None, takanami_margin=5.0):
"""Computes Takanami AR method over self.events.
Args:
takanami_margin: How many seconds on each side of an event time to
use for the application of Takanami method.
If 'takanami' is False, this parameter has no effect.
Default: 5.0 seconds.
Returns:
events: A resulting list of Event objects.
"""
taka = takanami.Takanami()
for event in events:
t_start = (event.stime / self.fs) - takanami_margin
t_end = (event.stime / self.fs) + takanami_margin
et, event.aic, event.n0_aic = taka.run(self.signal, self.fs,
t_start, t_end)
event.stime = et
# set event method
if event.method == method_ampa:
event.method = method_ampa_takanami
elif event.method == method_stalta:
event.method = method_stalta_takanami
else:
event.method = method_takanami
return events
def bandpass_filter(self, freqmin, freqmax, *args, **kwargs):
self.filtered_signal = filter.bandpass(self.data, freqmin, freqmax, self.fs, *args, **kwargs)
return self.filtered_signal
def save_cf(self, fname, fmt=rawfile.format_text,
dtype=rawfile.datatype_float64,
byteorder=rawfile.byteorder_native):
"""Saves characteristic function in a file.
Args:
fname: Output file name.
fmt: A string indicating the format to store the CF.
Possible values are: 'binary' or 'text'.
Default value: 'binary'.
dtype: Data-type to represent characteristic function values.
Default: 'float64'.
byteorder: Byte-order to store characteristic function values.
Valid values are: 'little-endian', 'big-endian' or 'native'.
Default: 'native'.
"""
if fmt == 'binary':
fout_handler = rawfile.BinFile(fname, dtype=dtype,
byteorder=byteorder)
else:
fout_handler = rawfile.TextFile(fname, dtype=dtype,
byteorder=byteorder)
fout_handler.write(self.cf, header="Sample rate: %g Hz." % self.fs)
def plot_signal(self, t_start=0.0, t_end=np.inf, show_events=True,
show_x=True, show_cf=True, show_specgram=True,
show_envelope=True, threshold=None, num=None, **kwargs):
"""Plots record data.
Draws a figure containing several plots for data stored and computed
by a Record object: magnitude, envelope and spectrogram plots for
self.signal, as well as characteristic function if calculated.
Args:
t_start: Start time of the plotted data segment, in seconds.
Default: 0.0, that is the beginning of 'signal'.
t_end: End time of the plotted data segment, in seconds.
Default: numpy.inf, that is the end of 'signal'
show_events: Boolean value to specify whether to plot
event arrival times or not. Arrival times will be
indicated by using a vertical line.
Default: True.
show_x: Boolean value to specify whether to plot the
magnitude value of 'signal' or not. This function
will be drawn preferably on the first axis.
Default: True.
show_cf: Boolean value to specify whether to plot the
characteristic function or not. This function
will be drawn preferably on the second axis.
Default: True.
show_specgram: Boolean value to specify whether to plot the
spectrogram of 'signal' or not. It will be drawn preferably
on the third axis.
Default: True.
show_envelope: Boolean value to specify whether to plot the
envelope of 'signal' or not. This function will be drawn
preferably on the first axis together with amplitude of
'signal'.
Default: True.
threshold: Boolean value to specify whether to plot threshold
or not. Threshold will be drawn as an horizontal dashed line
together with characteristic function.
Default: True.
num: Identifier of the returned MatplotLib figure, integer type.
Default None, which means an identifier value will be
automatically generated.
Returns:
fig: A MatplotLib Figure instance.
"""
# Lazy matplotlib import
import matplotlib.pyplot as pl
from matplotlib import ticker
# Set limits
i_from = int(max(0.0, t_start * self.fs))
if show_cf:
i_to = int(min(len(self.cf), t_end * self.fs))
else:
i_to = int(min(len(self.signal), t_end * self.fs))
# Create time sequence
t = np.arange(i_from, i_to) / float(self.fs)
# Create figure
nplots = show_x + show_cf + show_specgram
fig, _ = pl.subplots(nplots, 1, sharex='all', num=num)
fig.canvas.set_window_title(self.label)
fig.set_tight_layout(True)
# Configure axes
for ax in fig.axes:
ax.cla()
ax.grid(True, which='both')
formatter = ticker.FuncFormatter(lambda x, pos: clt.float_secs_2_string_date(x, self.starttime))
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5, prune='lower'))
ax.set_xlabel('Time (seconds)')
pl.setp(ax.get_xticklabels(), visible=True)
# Draw axes
ax_idx = 0
# Draw signal
if show_x:
fig.axes[ax_idx].set_title("Signal Amplitude (%gHz)" % self.fs)
fig.axes[ax_idx].set_ylabel('Amplitude')
fig.axes[ax_idx].plot(t, self.signal[i_from:i_to], color='black',
label='Signal')
#fig.axes[ax_idx].plot(t, signal_norm, color='black',
#label='Signal')
# Draw signal envelope
if show_envelope:
fig.axes[ax_idx].plot(t, env.envelope(self.signal[i_from:i_to]),
color='r', label='Envelope')
fig.axes[ax_idx].legend(loc=0, fontsize='small')
ax_idx += 1
# Draw Characteristic function
if show_cf:
fig.axes[ax_idx].set_title('Characteristic Function')
fig.axes[ax_idx].plot(t, self.cf[i_from:i_to])
# Draw threshold
if threshold:
hline = fig.axes[ax_idx].axhline(threshold, label="Threshold")
hline.set(color='b', ls='--', lw=2, alpha=0.8)
fig.axes[ax_idx].legend(loc=0, fontsize='small')
ax_idx += 1
# Draw spectrogram
if show_specgram:
fig.axes[ax_idx].set_title('Spectrogram')
fig.axes[ax_idx].set_ylabel('Frequency (Hz)')
fig.axes[ax_idx].specgram(self.signal[i_from:i_to], Fs=self.fs,
xextent=(i_from / self.fs, i_to / self.fs))
ax_idx += 1
# Draw event markers
if show_events:
for event in self.events:
arrival_time = event.stime / self.fs
for ax in fig.axes:
xmin, xmax = ax.get_xlim()
if arrival_time > xmin and arrival_time < xmax:
vline = ax.axvline(arrival_time, label="Event")
vline.set(color='r', ls='--', lw=2)
ax.legend(loc=0, fontsize='small')
# Configure limits and draw legend
for ax in fig.axes:
ax.set_xlim(t[0], t[-1])
return fig
def add_event_from_copy(self, event):
event = copy.copy(event)
event.trace = self
event.aic = None
event.n0_aic = None
self.events.append(event)
def _detect(parameters):
alg = parameters[0]
trace_list = parameters[1]
kwargs = parameters[2]
for trace in trace_list:
trace.detect(alg, **kwargs)
return trace_list
class ApasvoStream(op.Stream):
    """
    A list of multiple ApasvoTrace objects.
    """

    def __init__(self, traces, description='', filename='', **kwargs):
        # Stream-level metadata; individual traces keep their own labels.
        super(ApasvoStream, self).__init__(traces)
        self.description = description
        self.filename = filename

    def detect(self, alg, trace_list=None, allow_multiprocessing=True, **kwargs):
        """
        Run a detection/picking algorithm over a subset of the traces.

        Args:
            alg: detection/picking algorithm object (see ApasvoTrace.detect).
            trace_list: traces to process; defaults to all traces.
            allow_multiprocessing: if True and more than one trace is given,
                the work is spread over a multiprocessing pool.
            kwargs: forwarded to ApasvoTrace.detect.
        """
        trace_list = self.traces if trace_list is None else trace_list[:]
        n_traces = len(trace_list)
        if allow_multiprocessing and n_traces > 1:
            # One chunk of traces per worker process.
            processes = min(mp.cpu_count(), n_traces)
            p = mp.Pool(processes=processes)
            processed_traces = p.map(_detect, itertools.izip(itertools.repeat(alg),
                                     collections.chunkify(trace_list, n_traces / processes),
                                     itertools.repeat(kwargs)))
            processed_traces = collections.flatten_list(processed_traces)
            # Update existing traces w. new events and cf from processed events
            # (workers operate on pickled copies, so results must be merged
            # back into the parent process's trace objects).
            for trace, processed_trace in zip(trace_list, processed_traces):
                new_events = [event for event in processed_trace.events if event not in trace.events]
                for event in new_events:
                    trace.add_event_from_copy(event)
                trace.cf = processed_trace.cf[:]
            # Cleanup
            del processed_traces
            del trace_list
            p.close()
            p.join()
            gc.collect(2)
        else:
            # Serial path: operates on the trace objects in place.
            _detect((alg, trace_list, kwargs))

    def export_picks(self, filename, trace_list=None, format="NLLOC_OBS", debug=False, **kwargs):
        """
        Write the picks of the given traces to event file(s).

        For 'NLLOC_OBS' one file per event is generated (the pick time is
        appended to the base filename); any other format produces a single
        obspy Catalog file written via obspy's catalog writer.
        """
        trace_list = self.traces if trace_list is None else trace_list
        event_list = []
        # Wrap each pick in its own single-pick Event.
        for trace in trace_list:
            event_list.extend([Event(picks=[pick]) for pick in trace.events])
        # Export to desired format
        if format == 'NLLOC_OBS':
            basename, ext = os.path.splitext(filename)
            for event in event_list:
                ts = event.picks[0].time.strftime("%Y%m%d%H%M%S%f")
                event_filename = "%s_%s%s" % (basename, ts, ext)
                if debug:
                    print "Generating event file {}".format(event_filename)
                event.write(event_filename, format=format)
        else:
            event_catalog = Catalog(event_list)
            if debug:
                print "Generating event file {}".format(filename)
            event_catalog.write(filename, format=format, **kwargs)
def read(filename,
         format=None,
         dtype='float64',
         byteorder='native',
         description='',
         normalize=True,
         *args, **kwargs):
    """
    Read signal files into an ApasvoStream object.

    Tries obspy's generic reader first; if that raises, falls back to
    reading the file as raw binary or plain text numeric data.

    :param filename: path of the file to read
    :param format: explicit file format, or None to autodetect
    :param dtype: sample datatype used by the raw-file fallback
    :param byteorder: byte order used by the raw-file fallback
    :param description: description attached to the resulting stream
    :param normalize: whether traces are mean-normalized on load
    :param args: extra positional arguments forwarded to obspy.read
    :param kwargs: extra keyword arguments forwarded to obspy.read;
        'fs' (sample rate in Hz) is also honored by the raw fallback
    :return: ApasvoStream holding one ApasvoTrace per trace read
    """
    # Try to read using obspy core functionality
    try:
        traces = [ApasvoTrace(copy.deepcopy(trace.data), copy.deepcopy(trace.stats), filename=filename, normalize=normalize) \
                  for trace in op.read(filename, format=format, *args, **kwargs).traces]
    # Otherwise try to read as a binary or text file
    except Exception as e:
        fhandler = rawfile.get_file_handler(filename,
                                            format=format,
                                            dtype=dtype,
                                            byteorder=byteorder)
        trace = ApasvoTrace(fhandler.read().astype(DEFAULT_DTYPE, casting='safe'), filename=filename)
        # Raw files carry no metadata: take the sample rate from kwargs,
        # or fall back to the module default sampling period.
        sample_fs = kwargs.get('fs')
        trace.stats.delta = DEFAULT_DELTA if sample_fs is None else 1. / sample_fs
        traces = [trace]
    # Convert Obspy traces to apasvo traces
    return ApasvoStream(traces, description=description, filename=filename)
| gpl-3.0 |
roxyboy/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np

from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline

np.random.seed(42)

# Training data: noiseless sine samples at normally distributed abscissae.
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that it X is 2D
X = X[:, np.newaxis]

# Clean held-out data used to measure each estimator's error.
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]

# Corrupt every third sample, mildly (3) or strongly (10), in y or in X.
y_errors = y.copy()
y_errors[::3] = 3

X_errors = X.copy()
X_errors[::3] = 3

y_errors_large = y.copy()
y_errors_large[::3] = 10

X_errors_large = X.copy()
X_errors_large[::3] = 10

estimators = [('OLS', linear_model.LinearRegression()),
              ('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
              ('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]

x_plot = np.linspace(X.min(), X.max())

for title, this_X, this_y in [
        ('Modeling errors only', X, y),
        ('Corrupt X, small deviants', X_errors, y),
        ('Corrupt y, small deviants', X, y_errors),
        ('Corrupt X, large deviants', X_errors_large, y),
        ('Corrupt y, large deviants', X, y_errors_large)]:
    plt.figure(figsize=(5, 4))
    plt.plot(this_X[:, 0], this_y, 'k+')

    for name, estimator in estimators:
        # Degree-3 polynomial features turn each linear estimator into a
        # cubic polynomial fit.
        model = make_pipeline(PolynomialFeatures(3), estimator)
        model.fit(this_X, this_y)
        mse = metrics.mean_squared_error(model.predict(X_test), y_test)
        y_plot = model.predict(x_plot[:, np.newaxis])
        plt.plot(x_plot, y_plot,
                 label='%s: error = %.3f' % (name, mse))

    # BUGFIX: the legend title previously claimed the error shown was the
    # mean absolute deviation, but the value plotted above is the mean
    # squared error computed by metrics.mean_squared_error.
    plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n to non corrupt data')
    plt.xlim(-4, 10.2)
    plt.ylim(-2, 10.2)
    plt.title(title)
plt.show()
| bsd-3-clause |
TimothyHelton/k2datascience | k2datascience/cluster.py | 1 | 9060 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Clustering Module
.. moduleauthor:: Timothy Helton <timothy.j.helton@gmail.com>
"""
import logging
import os.path as osp
import bokeh.io as bkio
import bokeh.models as bkm
import bokeh.plotting as bkplt
from bokeh.sampledata.us_states import data as states
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as spch
import seaborn as sns
import sklearn
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from k2datascience.utils import ax_formatter, size, save_fig
# Module-wide logging configuration.
log_format = ('%(asctime)s %(levelname)8s -> %(name)s <- '
              '(line: %(lineno)d) %(message)s\n')
date_format = '%m/%d/%Y %I:%M:%S'
logging.basicConfig(format=log_format, datefmt=date_format,
                    level=logging.INFO)
# Paths to the bundled clustering datasets (../data/clustering relative
# to this module).
current_dir = osp.dirname(osp.realpath(__file__))
data_dir = osp.realpath(osp.join(current_dir, '..', 'data', 'clustering'))
arrests_data = osp.join(data_dir, 'us_arrests.csv')
genes_data = osp.join(data_dir, 'genes.csv')
class Cluster:
    """
    General-purpose clustering helper for a tabular dataset.

    :Attributes:
    - **clusters**: *pd.Series* flat cluster label per observation
    - **data**: *DataFrame* data
    - **data_file**: *str* path of the file the data came from
    - **data_types**: *dict* column name to dtype mapping
    - **linkage**: *ndarray* agglomerative clustering linkage matrix
    - **n_components**: *int* number of principle components to find
    - **pca**: *sklearn.PCA* scikit-learn instance of PCA class
    - **std_x**: *DataFrame* standardized data
    - **var_pct**: *pd.Series* principle components variance percentage
    - **var_pct_cum**: *pd.Series* principle components cumulative variance
        percentage
    """

    def __init__(self):
        # Everything starts unset; subclasses load the data and the calc_*
        # methods fill in the PCA / clustering attributes.
        for attr in ('clusters', 'data', 'data_file', 'linkage',
                     'pca', 'std_x', 'var_pct', 'var_pct_cum'):
            setattr(self, attr, None)
        self.data_types = {}
        self.n_components = 3

    def __repr__(self):
        return 'Cluster()'

    def calc_pca(self):
        """
        Fit PCA on the standardized data and record the explained variance
        (per component and cumulative).
        """
        model = PCA(n_components=self.n_components)
        model.fit(self.std_x)
        self.pca = model
        self.var_pct = pd.Series(model.explained_variance_ratio_)
        self.var_pct_cum = self.var_pct.cumsum()

    def calc_pca_eq(self):
        """
        Calculate the PCA Proportion of Variance Explained (PVE) directly
        from its defining equation.

        :return: PVE per principle component
        :rtype: ndarray
        """
        if self.pca is None:
            self.calc_pca()
        # Sum of squared projections onto each component, divided by the
        # total sum of squares of the standardized data.
        scores = np.matmul(self.std_x, self.pca.components_.T)
        explained = np.square(scores).sum(axis=0)
        total = np.sum(np.square(self.std_x))
        return explained / total

    def hierarchical_cluster(self, n_clusters, criterion='maxclust',
                             method='ward', metric='euclidean'):
        """
        Run agglomerative hierarchical clustering and store the flat
        cluster assignment, indexed like the data.

        :param int n_clusters: number of clusters
        :param str criterion: criterion to use in forming flat clusters
        :param str method: agglomerative clustering method to be used
        :param str metric: distance metric
        """
        self.linkage = spch.linkage(self.data, method=method, metric=metric)
        labels = spch.fcluster(self.linkage, n_clusters, criterion)
        self.clusters = pd.Series(labels, index=self.data.index)
class Arrests(Cluster):
    """
    Attributes and methods related to the US Arrests dataset.
    """
    def __init__(self):
        super().__init__()
        self.data_file = arrests_data
        # Column dtypes for the CSV load below; 'state' becomes the index.
        self.data_types = {
            'state': 'category',
            'murder': np.float,
            'assault': np.int,
            'urban_pop': np.int,
            'rape': np.float,
        }
        self.load_data()

    def __repr__(self):
        return 'Arrests()'

    def load_data(self):
        """
        Load the dataset and standardize it (zero mean, unit variance).
        """
        self.data = pd.read_csv(self.data_file,
                                dtype=self.data_types,
                                header=None,
                                index_col=0,
                                names=self.data_types.keys(),
                                skiprows=1,
                                )
        self.std_x = (sklearn.preprocessing
                      .StandardScaler()
                      .fit_transform(self.data))

    def us_map_clusters(self):
        """
        Plot clusters on US map.
        """
        # Drop the District of Columbia from the bokeh sample geometry;
        # ignore when it was already removed on a previous call.
        try:
            del states['DC']
        except KeyError:
            pass
        # Map each cluster label to a fixed fill color by rewriting the
        # categorical's categories in place.
        # NOTE(review): this assumes exactly three distinct clusters --
        # confirm hierarchical_cluster is always run with n_clusters=3.
        arrests_color = (self.clusters
                         .copy()
                         .astype('category'))
        arrests_color.cat.categories = ['#787f51', '#cd5b1b', '#c19408']
        source = bkm.ColumnDataSource({
            'xs': [states[code]['lons'] for code in states],
            'ys': [states[code]['lats'] for code in states],
            'color': list(arrests_color.values),
            'label': [f'Cluster: {x}' for x in self.clusters.values],
        })
        p = bkplt.figure(title='US Arrests', toolbar_location='right',
                         plot_width=800, plot_height=600,
                         x_range=bkm.Range1d(-180, -65))
        p.patches(xs='xs', ys='ys', color='color', legend='label',
                  source=source, fill_alpha=0.8, line_color='#000000',
                  line_width=2, line_alpha=0.3)
        bkio.show(p)
class Genes(Cluster):
    """
    Attributes and methods related to the Genes dataset.
    """

    def __init__(self):
        super().__init__()
        self.data_file = genes_data
        self.load_data()

    def __repr__(self):
        return 'Genes()'

    def load_data(self):
        """
        Read the raw gene expression table and transpose it, so that rows
        are samples and columns are genes.
        """
        raw = pd.read_csv(self.data_file, header=None)
        self.data = raw.T

    def box_plot(self, save=False, title=None):
        """
        Box plot of the dataset.

        :param bool save: if True the figure will be saved
        :param str title: dataset title
        """
        plt.figure('Box Plot', figsize=(16, 8),
                   facecolor='white', edgecolor='black')
        layout = (1, 1)
        axis = plt.subplot2grid(layout, (0, 0))
        sns.boxplot(data=self.data, ax=axis)
        axis.set_title('Genes Dataset', fontsize=size['title'])
        save_fig('genes_box_plot', save)

    def unique_genes(self):
        """
        Return the five genes with the largest total absolute PCA loading.
        """
        if self.pca is None:
            self.calc_pca()
        total_loading = np.abs(self.pca.components_).sum(axis=0)
        ranked = pd.Series(total_loading).sort_values(ascending=False)
        return ranked.head()
class Simulated(Cluster):
    """
    Attributes and methods related to a simulated dataset.

    :Attributes:
    - **kmeans**: *KMeans* sklearn KMeans cluster
    - **trans**: *ndarray* data translated into the Principle Components space
    """
    def __init__(self):
        super().__init__()
        self.kmeans = None
        self.trans = None
        self.load_data()

    def __repr__(self):
        return 'Simulated()'

    def load_data(self):
        """
        Initialize the data attribute.
        """
        # Three 20x50 Gaussian blobs with means 0, 1 and 2, stacked
        # row-wise, so the true labels are 20 zeros, 20 ones, 20 twos.
        np.random.seed(0)
        x1 = np.random.normal(loc=0, scale=1, size=(20, 50))
        x2 = np.random.normal(loc=1, scale=0.5, size=(20, 50))
        x3 = np.random.normal(loc=2, scale=1, size=(20, 50))
        self.data = pd.DataFrame(np.r_[x1, x2, x3])
        self.clusters = np.array([[0] * 20, [1] * 20, [2] * 20]).flatten()
        self.std_x = (sklearn.preprocessing
                      .StandardScaler()
                      .fit_transform(self.data))

    def plot_pca(self):
        """
        Plot first two principle components, colored by true cluster.
        """
        if not self.pca:
            self.calc_pca()
        p = bkplt.figure(title='2nd vs 1st Principle Component')
        colormap = {0: 'red', 1: 'green', 2: 'blue'}
        colors = [colormap[x] for x in self.clusters]
        # Project the raw data into principle-component space.
        self.trans = self.pca.transform(self.data)
        p.circle(self.trans[:, 0], self.trans[:, 1], color=colors,
                 fill_alpha=0.2, size=10)
        bkio.show(p)

    def calc_kmeans(self, data, n_clusters):
        """
        Calculate K Means clusters and plot them in PCA space.

        :param ndarray data: data to be clustered
        :param int n_clusters: number of clusters (at most 4 colors mapped)
        """
        if not self.pca:
            self.calc_pca()
        self.trans = self.pca.transform(self.data)
        self.kmeans = KMeans(n_clusters=n_clusters).fit(data)
        p = bkplt.figure(title='KMeans Clustering')
        colormap = {0: 'red', 1: 'green', 2: 'blue', 3: 'chartreuse'}
        colors = [colormap[x] for x in self.kmeans.labels_]
        p.circle(self.trans[:, 0], self.trans[:, 1], color=colors,
                 fill_alpha=0.2, size=10)
        bkio.show(p)
| bsd-3-clause |
loli/sklearn-ensembletrees | sklearn/svm/tests/test_bounds.py | 42 | 2112 | import nose
from nose.tools import assert_true
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
# Tiny shared fixture: 4 samples x 2 features (dense and sparse views),
# plus a binary and a multi-class label vector.
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)

Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
    """Yield one l1_min_c check per (loss, X, y, intercept) combination."""
    losses = ['l2', 'log']

    Xs = {'sparse': sparse_X, 'dense': dense_X}
    Ys = {'two-classes': Y1, 'multi-class': Y2}
    intercepts = {'no-intercept': {'fit_intercept': False},
                  'fit-intercept': {'fit_intercept': True,
                                    'intercept_scaling': 10}}

    for loss in losses:
        for X_label, X in Xs.items():
            for Y_label, Y in Ys.items():
                for intercept_label, intercept_params in intercepts.items():
                    # Bind the loop variables as lambda defaults: a plain
                    # closure is late-binding, so if the test runner
                    # collects all yielded checks before executing them,
                    # every check would see only the values from the
                    # final loop iteration.
                    check = (lambda X=X, Y=Y, loss=loss,
                             params=intercept_params:
                             check_l1_min_c(X, Y, loss, **params))
                    check.description = ('Test l1_min_c loss=%r %s %s %s' %
                                         (loss, X_label, Y_label,
                                          intercept_label))
                    yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
    """At C = l1_min_c the l1 penalty zeroes every coefficient; just
    above the threshold at least one weight (or intercept) is nonzero."""
    min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)

    clf = {
        'log': LogisticRegression(penalty='l1'),
        'l2': LinearSVC(loss='l2', penalty='l1', dual=False),
    }[loss]

    clf.fit_intercept = fit_intercept
    clf.intercept_scaling = intercept_scaling

    # Exactly at min_c the model must be fully sparse.
    clf.C = min_c
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) == 0).all())
    assert_true((np.asarray(clf.intercept_) == 0).all())

    # Slightly above min_c the model must pick up some weight.
    clf.C = min_c * 1.01
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) != 0).any() or
                (np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
    """l1_min_c rejects degenerate data (an all-zero feature matrix)."""
    X = [[0, 0], [0, 0]]
    y = [0, 1]
    l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
    """l1_min_c only supports 'l2' and 'log' losses; 'l1' must raise."""
    l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
rexshihaoren/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 12 | 10796 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
# Paths to the svmlight fixture files shipped next to this test module.
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    """The classification fixture loads with the expected shape and values."""
    X, y = load_svmlight_file(datafile)

    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)

    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)

    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)

    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    """Loading from an open file descriptor matches loading by path."""
    X_path, y_path = load_svmlight_file(datafile)

    descriptor = os.open(datafile, os.O_RDONLY)
    try:
        X_fd, y_fd = load_svmlight_file(descriptor)
        assert_array_equal(X_path.data, X_fd.data)
        assert_array_equal(y_path, y_fd)
    finally:
        os.close(descriptor)
def test_load_svmlight_file_multilabel():
    """Multilabel targets are returned as per-sample tuples (possibly empty)."""
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
    """Loading several files at once honors the requested dtype."""
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    # Same file twice: train and test halves must be identical.
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)

    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    """n_features widens the matrix; too small a value raises ValueError."""
    X, y = load_svmlight_file(datafile, n_features=22)

    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)

    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """Gzip- and bzip2-compressed copies load to the same data as the original."""
    X, y = load_svmlight_file(datafile)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        # Close the compressed handle explicitly: the previous code let
        # it fall out of scope, relying on CPython refcounting to flush
        # and close it -- on other interpreters that can leave a
        # truncated .gz file and leaks the descriptor.
        with open(datafile, "rb") as f, gzip.open(tmp.name, "wb") as fout:
            shutil.copyfileobj(f, fout)
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f, BZ2File(tmp.name, "wb") as fout:
            shutil.copyfileobj(f, fout)
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    """A malformed svmlight file raises ValueError."""
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    """Out-of-order feature indices raise ValueError."""
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    """zero_based=False rejects input containing a zero feature id."""
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    """zero_based='auto' infers the indexing convention across a file set."""
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")

    # Single file with indices starting at 1: treated as one-based.
    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))

    # Loaded together with a zero-based file, both become zero-based.
    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    """query_id=False drops the qid column; query_id=True returns it."""
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    """One invalid file in a multi-file load fails the whole call."""
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    """Non-string, non-int arguments are rejected with TypeError."""
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    """A path that does not exist raises IOError."""
    load_svmlight_file("trou pic nic douille")
def test_dump():
    """Round-trip dump/load for sparse, dense and sliced-sparse input,
    in both indexing conventions and three dtypes."""
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()

    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]

    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)

                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass

                assert_in("scikit-learn %s" % sklearn.__version__, comment)

                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass

                assert_in(["one", "zero"][zero_based] + "-based", comment)

                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                # Indices must come back sorted even for the sliced input.
                assert_array_equal(X2.sorted_indices().indices, X2.indices)
                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_concise():
    """Dumped values use the most concise decimal representation that
    still round-trips."""
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    """User comments survive a round trip; invalid comments are rejected."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()

    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    # Raw UTF-8 bytes are rejected; the decoded text is accepted.
    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)

    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    # NUL bytes are never allowed in comments.
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """dump_svmlight_file rejects 2-d y and length-mismatched y."""
    X, y = load_svmlight_file(datafile)

    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)

    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    """Round-trip a file dumped together with its query_id column."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2

    buf = BytesIO()
    dump_svmlight_file(X, y, buf, query_id=query_id, zero_based=True)
    buf.seek(0)

    X1, y1, query_id1 = load_svmlight_file(buf, query_id=True,
                                           zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/strings.py | 1 | 45903 | import numpy as np
from pandas.compat import zip
from pandas.core.common import isnull, _values_from_object, is_bool_dtype
import pandas.compat as compat
from pandas.util.decorators import Appender, deprecate_kwarg
import re
import pandas.lib as lib
import warnings
import textwrap
# Shared docstring fragments, filled in by the string-method definitions.
_shared_docs = dict()
def _get_array_list(arr, others):
    """Normalize ``arr`` plus ``others`` into a list of object ndarrays.

    If ``others`` is a non-empty list of list-likes, each one becomes
    its own array; otherwise ``others`` is treated as a single
    array-like.
    """
    from pandas.core.series import Series

    is_list_of_lists = (
        len(others) and
        isinstance(_values_from_object(others)[0],
                   (list, np.ndarray, Series)))
    if is_list_of_lists:
        arrays = [arr] + list(others)
    else:
        arrays = [arr, others]

    return [np.asarray(a, dtype=object) for a in arrays]
def str_cat(arr, others=None, sep=None, na_rep=None):
    """
    Concatenate strings in the Series/Index with given separator.

    Parameters
    ----------
    others : list-like, or list of list-likes
        If None, returns str concatenating strings of the Series
    sep : string or None, default None
    na_rep : string or None, default None
        If None, an NA in any array will propagate

    Returns
    -------
    concat : Series/Index of objects or str

    Examples
    --------
    If ``others`` is specified, corresponding values are
    concatenated with the separator. Result will be a Series of strings.

    >>> Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',')
    0    a,A
    1    b,B
    2    c,C
    dtype: object

    Otherwise, strings in the Series are concatenated. Result will be a string.

    >>> Series(['a', 'b', 'c']).str.cat(sep=',')
    'a,b,c'

    Also, you can pass a list of list-likes.

    >>> Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',')
    0    a,x,1
    1    b,y,2
    dtype: object
    """
    if sep is None:
        sep = ''

    if others is not None:
        arrays = _get_array_list(arr, others)

        n = _length_check(arrays)
        # One boolean mask row per input array, marking its NA positions.
        masks = np.array([isnull(x) for x in arrays])
        cats = None

        if na_rep is None:
            # NA propagates: any NA in a row makes the whole row NA.
            na_mask = np.logical_or.reduce(masks, axis=0)

            result = np.empty(n, dtype=object)
            np.putmask(result, na_mask, np.nan)

            notmask = ~na_mask

            # Join only the fully non-NA rows.
            tuples = zip(*[x[notmask] for x in arrays])
            cats = [sep.join(tup) for tup in tuples]

            result[notmask] = cats
        else:
            # NAs are replaced by na_rep, so every row can be joined.
            for i, x in enumerate(arrays):
                x = np.where(masks[i], na_rep, x)
                if cats is None:
                    cats = x
                else:
                    cats = cats + sep + x

            result = cats

        return result
    else:
        arr = np.asarray(arr, dtype=object)
        mask = isnull(arr)
        if na_rep is None and mask.any():
            # Reducing to a single string: one NA poisons the result.
            return np.nan
        return sep.join(np.where(mask, na_rep, arr))
def _length_check(others):
n = None
for x in others:
if n is None:
n = len(x)
elif len(x) != n:
raise ValueError('All arrays must be same length')
return n
def _na_map(f, arr, na_result=np.nan, dtype=object):
    """Map ``f`` over ``arr``, filling NA positions with ``na_result``."""
    # should really _check_ for NA
    return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
    """Apply ``f`` element-wise over ``arr``.

    With ``na_mask=True``, NA positions are skipped and filled with
    ``na_value``; if ``f`` raises TypeError/AttributeError on some
    element, the map is retried once with a lenient wrapper that
    substitutes ``na_value`` for failing elements.
    """
    from pandas.core.series import Series

    if not len(arr):
        return np.ndarray(0, dtype=dtype)

    if isinstance(arr, Series):
        arr = arr.values
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr, dtype=object)
    if na_mask:
        mask = isnull(arr)
        try:
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8))
        except (TypeError, AttributeError):

            # Fallback: elements on which f fails become na_value
            # instead of aborting the whole map.
            def g(x):
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value

            return _map(g, arr, dtype=dtype)
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
            if result.dtype == object:
                result = lib.maybe_convert_objects(result)
        return result
    else:
        return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
    """
    Count occurrences of pattern in each string of the Series/Index.

    Parameters
    ----------
    pat : string, valid regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    counts : Series/Index of integer values
    """
    # Compile once and reuse the pattern for every element.
    compiled = re.compile(pat, flags=flags)

    def count_matches(value):
        return len(compiled.findall(value))

    return _na_map(count_matches, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
    """
    Return boolean Series/``array`` whether given pattern/regex is
    contained in each string in the Series/Index.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values.
    regex : bool, default True
        If True use re.search, otherwise use Python in operator

    Returns
    -------
    contained : Series/array of boolean values

    See Also
    --------
    match : analagous, but stricter, relying on re.match instead of re.search
    """
    if regex:
        if not case:
            flags |= re.IGNORECASE

        regex = re.compile(pat, flags=flags)

        if regex.groups > 0:
            warnings.warn("This pattern has match groups. To actually get the"
                          " groups, use str.extract.", UserWarning)

        f = lambda x: bool(regex.search(x))
    else:
        if case:
            f = lambda x: pat in x
        else:
            # Case-insensitive literal search: uppercase both sides.
            upper_pat = pat.upper()
            f = lambda x: upper_pat in x
            uppered = _na_map(lambda x: x.upper(), arr)
            return _na_map(f, uppered, na, dtype=bool)
    return _na_map(f, arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
    """
    Return a boolean Series/array that is True where each string in the
    Series/Index starts with ``pat``. Equivalent to
    :meth:`str.startswith`.

    Parameters
    ----------
    pat : string
        Character sequence
    na : bool, default NaN
        Fill value for missing entries

    Returns
    -------
    startswith : Series/array of boolean values
    """
    def starts_with(value):
        return value.startswith(pat)

    return _na_map(starts_with, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
    """
    Return a boolean Series that is True where each string in the
    Series/Index ends with ``pat``. Equivalent to :meth:`str.endswith`.

    Parameters
    ----------
    pat : string
        Character sequence
    na : bool, default NaN
        Fill value for missing entries

    Returns
    -------
    endswith : Series/array of boolean values
    """
    def ends_with(value):
        return value.endswith(pat)

    return _na_map(ends_with, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=True, flags=0):
    """
    Replace occurrences of pattern/regex in the Series/Index with
    some other string. Equivalent to :meth:`str.replace` or
    :func:`re.sub`.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    repl : string
        Replacement sequence
    n : int, default -1 (all)
        Number of replacements to make from start
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    replaced : Series/Index of objects
    """
    # Literal str.replace is only used for a case-sensitive,
    # single-character pattern with no flags; anything else goes
    # through re.sub.
    # NOTE(review): a single-character regex metacharacter (e.g. '.')
    # is therefore replaced literally while '..' is treated as a regex
    # -- long-standing but surprising behavior.
    use_re = not case or len(pat) > 1 or flags

    if use_re:
        if not case:
            flags |= re.IGNORECASE
        regex = re.compile(pat, flags=flags)
        # re.sub uses count=0 to mean "replace all".
        n = n if n >= 0 else 0

        def f(x):
            return regex.sub(repl, x, count=n)
    else:
        f = lambda x: x.replace(pat, repl, n)

    return _na_map(f, arr)
def str_repeat(arr, repeats):
    """
    Duplicate each string in the Series/Index by indicated number
    of times.

    Parameters
    ----------
    repeats : int or array
        Same value for all (int) or different value per (array)

    Returns
    -------
    repeated : Series/Index of objects
    """
    if np.isscalar(repeats):

        def rep(x):
            # bytes and text need their own __mul__; try bytes first.
            try:
                return compat.binary_type.__mul__(x, repeats)
            except TypeError:
                return compat.text_type.__mul__(x, repeats)

        return _na_map(rep, arr)
    else:

        def rep(x, r):
            try:
                return compat.binary_type.__mul__(x, r)
            except TypeError:
                return compat.text_type.__mul__(x, r)

        repeats = np.asarray(repeats, dtype=object)
        result = lib.vec_binop(_values_from_object(arr), repeats, rep)
        return result
def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
    """
    Deprecated: Find groups in each string in the Series/Index
    using passed regular expression.

    If as_indexer=True, determine if each string matches a regular expression.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values.
    as_indexer : False, by default, gives deprecated behavior better achieved
        using str_extract. True return boolean indexer.

    Returns
    -------
    Series/array of boolean values
        if as_indexer=True
    Series/Index of tuples
        if as_indexer=False, default but deprecated

    See Also
    --------
    contains : analagous, but less strict, relying on re.search instead of
        re.match
    extract : now preferred to the deprecated usage of match (as_indexer=False)

    Notes
    -----
    To extract matched groups, which is the deprecated behavior of match, use
    str.extract.
    """
    if not case:
        flags |= re.IGNORECASE

    regex = re.compile(pat, flags=flags)

    if (not as_indexer) and regex.groups > 0:
        # Do this first, to make sure it happens even if the re.compile
        # raises below.
        warnings.warn("In future versions of pandas, match will change to"
                      " always return a bool indexer.", UserWarning)

    if as_indexer and regex.groups > 0:
        warnings.warn("This pattern has match groups. To actually get the"
                      " groups, use str.extract.", UserWarning)

    # If not as_indexer and regex.groups == 0, this returns empty lists
    # and is basically useless, so we will not warn.

    if (not as_indexer) and regex.groups > 0:
        # Deprecated path: return the matched groups (or []) per element.
        dtype = object

        def f(x):
            m = regex.match(x)
            if m:
                return m.groups()
            else:
                return []
    else:
        # This is the new behavior of str_match.
        dtype = bool
        f = lambda x: bool(regex.match(x))

    return _na_map(f, arr, na, dtype=dtype)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def str_extract(arr, pat, flags=0):
    """
    Find groups in each string in the Series using passed regular
    expression.

    Parameters
    ----------
    pat : string
        Pattern or regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    extracted groups : Series (one group) or DataFrame (multiple groups)
    Note that dtype of the result is always object, even when no match is
    found and the result is a Series or DataFrame containing only NaN
    values.

    Examples
    --------
    A pattern with one group will return a Series. Non-matches will be NaN.

    >>> Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
    0      1
    1      2
    2    NaN
    dtype: object

    A pattern with more than one group will return a DataFrame.

    >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
         0    1
    0    a    1
    1    b    2
    2  NaN  NaN

    A pattern may contain optional groups.

    >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
         0  1
    0    a  1
    1    b  2
    2  NaN  3

    Named groups will become column names in the result.

    >>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
      letter digit
    0      a     1
    1      b     2
    2    NaN   NaN
    """
    from pandas.core.series import Series
    from pandas.core.frame import DataFrame
    from pandas.core.index import Index

    regex = re.compile(pat, flags=flags)
    # just to be safe, check this
    if regex.groups == 0:
        raise ValueError("This pattern contains no groups to capture.")
    empty_row = [np.nan] * regex.groups

    def f(x):
        if not isinstance(x, compat.string_types):
            return empty_row
        m = regex.search(x)
        if m:
            # Unmatched optional groups come back as None; use NaN.
            return [np.nan if item is None else item for item in m.groups()]
        else:
            return empty_row

    if regex.groups == 1:
        result = np.array([f(val)[0] for val in arr], dtype=object)
        name = _get_single_group_name(regex)
    else:
        if isinstance(arr, Index):
            raise ValueError("only one regex group is supported with Index")
        name = None
        # Map group numbers to names where names were given; unnamed
        # groups keep their integer position as the column label.
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        columns = [names.get(1 + i, i) for i in range(regex.groups)]
        if arr.empty:
            result = DataFrame(columns=columns, dtype=object)
        else:
            result = DataFrame([f(val) for val in arr],
                               columns=columns,
                               index=arr.index,
                               dtype=object)
    return result, name
def str_get_dummies(arr, sep='|'):
    """
    Split each string in the Series by sep and return a frame of
    dummy/indicator variables.

    Parameters
    ----------
    sep : string, default "|"
        String to split on.

    Returns
    -------
    dummies : DataFrame

    Examples
    --------
    >>> Series(['a|b', 'a', 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  1  0  0
    2  1  0  1

    >>> Series(['a|b', np.nan, 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  0  0  0
    2  1  0  1

    See Also
    --------
    pandas.get_dummies
    """
    from pandas.core.frame import DataFrame
    from pandas.core.index import Index

    # GH9980, Index.str does not support get_dummies() as it returns a frame
    if isinstance(arr, Index):
        raise TypeError("get_dummies is not supported for string methods on Index")

    # TODO remove this hack?
    arr = arr.fillna('')
    try:
        # Surround with separators so every tag is delimited on both
        # sides; membership tests below then use `sep + tag + sep`.
        arr = sep + arr + sep
    except TypeError:
        arr = sep + arr.astype(str) + sep

    tags = set()
    for ts in arr.str.split(sep):
        tags.update(ts)
    # Drop the empty tag produced by the surrounding separators.
    tags = sorted(tags - set([""]))

    dummies = np.empty((len(arr), len(tags)), dtype=np.int64)

    for i, t in enumerate(tags):
        pat = sep + t + sep
        dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
    return DataFrame(dummies, arr.index, tags)
def str_join(arr, sep):
    """
    Join lists contained as elements in the Series/Index with
    passed delimiter. Equivalent to :meth:`str.join`.

    Parameters
    ----------
    sep : string
        Delimiter

    Returns
    -------
    joined : Series/Index of objects
    """
    return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
    """
    Find all occurrences of pattern or regular expression in each
    element of the Series/Index. Equivalent to :func:`re.findall`.

    Parameters
    ----------
    pat : string
        Pattern or regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    matches : Series/Index of lists
    """
    compiled = re.compile(pat, flags=flags)
    return _na_map(compiled.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each string in the Series/Index where the
    substring is fully contained between [start:end]. Return -1 on
    failure.

    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``find`` or ``rfind``

    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))

    methods = {'left': 'find', 'right': 'rfind'}
    if side not in methods:  # pragma: no cover
        raise ValueError('Invalid side')
    method = methods[side]

    # Only pass `end` through when given, mirroring str.find's signature.
    extra = () if end is None else (end,)

    def searcher(value):
        return getattr(value, method)(sub, start, *extra)

    return _na_map(searcher, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each string in the Series/Index where the
    substring is fully contained between [start:end]. Same as
    ``str_find`` except that, instead of returning -1, a ValueError is
    raised when the substring is not found. Equivalent to
    :meth:`str.index` / :meth:`str.rindex`.

    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``index`` or ``rindex``

    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))

    if side == 'left':
        method = 'index'
    elif side == 'right':
        method = 'rindex'
    else:  # pragma: no cover
        raise ValueError('Invalid side')

    if end is None:
        f = lambda x: getattr(x, method)(sub, start)
    else:
        f = lambda x: getattr(x, method)(sub, start, end)

    return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
    """
    Pad strings in the Series/Index with an additional character to
    the specified side.

    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be
        filled with ``fillchar``
    side : {'left', 'right', 'both'}, default 'left'
    fillchar : str
        Additional character for filling, default is whitespace

    Returns
    -------
    padded : Series/Index of objects
    """
    if not isinstance(fillchar, compat.string_types):
        msg = 'fillchar must be a character, not {0}'
        raise TypeError(msg.format(type(fillchar).__name__))
    if len(fillchar) != 1:
        raise TypeError('fillchar must be a character, not str')

    # Padding on the left means right-justifying, and vice versa.
    pad_methods = {'left': 'rjust', 'right': 'ljust', 'both': 'center'}
    if side not in pad_methods:  # pragma: no cover
        raise ValueError('Invalid side')
    method_name = pad_methods[side]

    def padder(value):
        return getattr(value, method_name)(width, fillchar)

    return _na_map(padder, arr)
def str_split(arr, pat=None, n=None):
    """
    Split each string (a la re.split) in the Series/Index by given
    pattern, propagating NA values. Equivalent to :meth:`str.split`.

    Parameters
    ----------
    pat : string, default None
        String or regular expression to split on. If None, splits on whitespace
    n : int, default -1 (all)
        None, 0 and -1 will be interpreted as return all splits
    expand : bool, default False
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.
    return_type : deprecated, use `expand`

    Returns
    -------
    split : Series/Index or DataFrame/MultiIndex of objects
    """
    if pat is None:
        # str.split uses maxsplit=-1 for "no limit".
        if n is None or n == 0:
            n = -1
        f = lambda x: x.split(pat, n)
    else:
        if len(pat) == 1:
            # Single-character separator: plain str.split is faster
            # than going through the regex engine.
            if n is None or n == 0:
                n = -1
            f = lambda x: x.split(pat, n)
        else:
            # re.split uses maxsplit=0 for "no limit".
            if n is None or n == -1:
                n = 0
            regex = re.compile(pat)
            f = lambda x: regex.split(x, maxsplit=n)
    res = _na_map(f, arr)
    return res
def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series/Index.

    Parameters
    ----------
    start : int or None
    stop : int or None
    step : int or None

    Returns
    -------
    sliced : Series/Index of objects
    """
    window = slice(start, stop, step)

    def take_slice(value):
        return value[window]

    return _na_map(take_slice, arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
    """
    Replace a slice of each string in the Series/Index with another
    string.

    Parameters
    ----------
    start : int or None
    stop : int or None
    repl : str or None
        String for replacement

    Returns
    -------
    replaced : Series/Index of objects
    """
    if repl is None:
        repl = ''

    def f(x):
        if x[start:stop] == '':
            # The slice selects nothing (e.g. start beyond the end of
            # the string): insert repl at `start` and keep everything
            # from `start` on, so no characters are dropped.
            local_stop = start
        else:
            local_stop = stop
        y = ''
        if start is not None:
            y += x[:start]
        y += repl
        if stop is not None:
            y += x[local_stop:]
        return y

    return _na_map(f, arr)
def str_strip(arr, to_strip=None, side='both'):
    """
    Strip whitespace (including newlines) -- or the characters in
    ``to_strip`` -- from each string in the Series/Index.

    Parameters
    ----------
    to_strip : str or unicode
    side : {'left', 'right', 'both'}, default 'both'

    Returns
    -------
    stripped : Series/Index of objects
    """
    if side not in ('both', 'left', 'right'):  # pragma: no cover
        raise ValueError('Invalid side')

    strippers = {'both': lambda x: x.strip(to_strip),
                 'left': lambda x: x.lstrip(to_strip),
                 'right': lambda x: x.rstrip(to_strip)}
    return _na_map(strippers[side], arr)
def str_wrap(arr, width, **kwargs):
    """
    Wrap long strings in the Series/Index into paragraphs whose lines
    are no longer than the given width.

    Accepts the same keyword arguments (and defaults) as
    :class:`textwrap.TextWrapper`, notably ``expand_tabs``,
    ``replace_whitespace``, ``drop_whitespace``, ``break_long_words``
    and ``break_on_hyphens``.

    Parameters
    ----------
    width : int
        Maximum line-width

    Returns
    -------
    wrapped : Series/Index of objects

    Notes
    -----
    To match R's stringr ``str_wrap``, pass ``expand_tabs=False``,
    ``replace_whitespace=True``, ``drop_whitespace=True``,
    ``break_long_words=False`` and ``break_on_hyphens=False``.

    Examples
    --------
    >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
    >>> s.str.wrap(12)
    0             line to be\nwrapped
    1    another line\nto be\nwrapped
    """
    kwargs['width'] = width
    # One TextWrapper instance is reused for every element.
    wrapper = textwrap.TextWrapper(**kwargs)

    def wrap_value(value):
        return '\n'.join(wrapper.wrap(value))

    return _na_map(wrap_value, arr)
def str_translate(arr, table, deletechars=None):
    """
    Map all characters in the string through the given mapping table.
    Equivalent to standard :meth:`str.translate`. Note that the optional
    argument deletechars is only valid if you are using python 2. For python 3,
    character deletion should be specified via the table argument.

    Parameters
    ----------
    table : dict (python 3), str or None (python 2)
        In python 3, table is a mapping of Unicode ordinals to Unicode ordinals,
        strings, or None. Unmapped characters are left untouched. Characters
        mapped to None are deleted. :meth:`str.maketrans` is a helper function
        for making translation tables.
        In python 2, table is either a string of length 256 or None. If the
        table argument is None, no translation is applied and the operation
        simply removes the characters in deletechars. :func:`string.maketrans`
        is a helper function for making translation tables.
    deletechars : str, optional (python 2)
        A string of characters to delete. This argument is only valid
        in python 2.

    Returns
    -------
    translated : Series/Index of objects
    """
    if deletechars is None:
        f = lambda x: x.translate(table)
    else:
        from pandas import compat
        if compat.PY3:
            # Python 3's str.translate has no deletechars argument.
            raise ValueError("deletechars is not a valid argument for "
                             "str.translate in python 3. You should simply "
                             "specify character deletions in the table argument")
        f = lambda x: x.translate(table, deletechars)
    return _na_map(f, arr)
def str_get(arr, i):
    """
    Extract element from lists, tuples, or strings in each element in the
    Series/Index.

    Parameters
    ----------
    i : int
        Integer index (location). Negative indices count from the end
        of each element.

    Returns
    -------
    items : Series/Index of objects
        ``NaN`` where ``i`` is out of bounds for that element.
    """
    # Bound-check both directions: the old guard ``len(x) > i`` passed
    # every negative i through, so an out-of-range negative index (e.g.
    # i=-5 on a 3-char string) raised IndexError instead of yielding NaN.
    f = lambda x: x[i] if -len(x) <= i < len(x) else np.nan
    return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
    """
    Decode character string in the Series/Index to unicode
    using indicated encoding. Equivalent to :meth:`str.decode`.

    Parameters
    ----------
    encoding : string
        Name of the codec used to decode each element.
    errors : string
        Error handling scheme, e.g. "strict", "ignore", "replace".

    Returns
    -------
    decoded : Series/Index of objects
    """
    def decoder(x):
        return x.decode(encoding, errors)
    return _na_map(decoder, arr)
def str_encode(arr, encoding, errors="strict"):
    """
    Encode character string in the Series/Index to some other encoding
    using indicated encoding. Equivalent to :meth:`str.encode`.

    Parameters
    ----------
    encoding : string
        Name of the codec used to encode each element.
    errors : string
        Error handling scheme, e.g. "strict", "ignore", "replace".

    Returns
    -------
    encoded : Series/Index of objects
    """
    def encoder(x):
        return x.encode(encoding, errors)
    return _na_map(encoder, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
    """Build a no-argument accessor method that maps ``f`` over the series.

    ``docstring`` is mandatory (ValueError otherwise); any extra keyword
    arguments are forwarded to ``_na_map`` at call time.
    """
    if docstring is None:
        raise ValueError('Provide docstring')

    def wrapper(self):
        mapped = _na_map(f, self.series, **kargs)
        return self._wrap_result(mapped)

    wrapper.__name__ = f.__name__
    wrapper.__doc__ = docstring
    return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
    """Build a pattern-accepting accessor method around ``f``.

    The ``na`` and ``flags`` switches (in that priority order) select
    which optional keyword the generated method exposes beside the
    pattern itself. The wrapper inherits ``f``'s name and docstring.
    """
    def wrapper1(self, pat):
        return self._wrap_result(f(self.series, pat))

    def wrapper2(self, pat, flags=0, **kwargs):
        return self._wrap_result(f(self.series, pat, flags=flags, **kwargs))

    def wrapper3(self, pat, na=np.nan):
        return self._wrap_result(f(self.series, pat, na=na))

    if na:
        wrapper = wrapper3
    elif flags:
        wrapper = wrapper2
    else:
        wrapper = wrapper1
    wrapper.__name__ = f.__name__
    if f.__doc__:
        wrapper.__doc__ = f.__doc__
    return wrapper
def copy(source):
    """Decorator factory: copy ``source``'s docstring onto the target.

    If ``source`` has no docstring, the target is returned unchanged.
    (Note: intentionally shadows the builtin ``copy`` in this module.)
    """
    def do_copy(target):
        doc = source.__doc__
        if doc:
            target.__doc__ = doc
        return target
    return do_copy
class StringMethods(object):
    """
    Vectorized string functions for Series and Index. NAs stay NA unless
    handled otherwise by a particular method. Patterned after Python's string
    methods, with some inspiration from R's stringr package.
    Examples
    --------
    >>> s.str.split('_')
    >>> s.str.replace('_', '')
    """
    def __init__(self, series):
        # The Series or Index whose string elements these methods act on.
        self.series = series
    def __getitem__(self, key):
        # s.str[0] / s.str[0:3] are sugar for .get() / .slice().
        if isinstance(key, slice):
            return self.slice(start=key.start, stop=key.stop,
                              step=key.step)
        else:
            return self.get(key)
    def __iter__(self):
        # Yield the i-th character of every element, advancing i until
        # every element is exhausted (all positions come back NA).
        i = 0
        g = self.get(i)
        while g.notnull().any():
            yield g
            i += 1
            g = self.get(i)
    def _wrap_result(self, result, **kwargs):
        # leave as it is to keep extract and get_dummies results
        # can be merged to _wrap_result_expand in v0.17
        from pandas.core.series import Series
        from pandas.core.frame import DataFrame
        from pandas.core.index import Index
        if not hasattr(result, 'ndim'):
            return result
        # name precedence: explicit kwarg > result's own name > series name
        name = kwargs.get('name') or getattr(result, 'name', None) or self.series.name
        if result.ndim == 1:
            if isinstance(self.series, Index):
                # if result is a boolean np.array, return the np.array
                # instead of wrapping it into a boolean Index (GH 8875)
                if is_bool_dtype(result):
                    return result
                return Index(result, name=name)
            return Series(result, index=self.series.index, name=name)
        else:
            assert result.ndim < 3
            return DataFrame(result, index=self.series.index)
    def _wrap_result_expand(self, result, expand=False):
        # Like _wrap_result, but ``expand=True`` promotes list-like
        # elements to extra columns (DataFrame) / levels (MultiIndex).
        if not isinstance(expand, bool):
            raise ValueError("expand must be True or False")
        from pandas.core.index import Index, MultiIndex
        if not hasattr(result, 'ndim'):
            return result
        if isinstance(self.series, Index):
            name = getattr(result, 'name', None)
            # if result is a boolean np.array, return the np.array
            # instead of wrapping it into a boolean Index (GH 8875)
            if hasattr(result, 'dtype') and is_bool_dtype(result):
                return result
            if expand:
                result = list(result)
                return MultiIndex.from_tuples(result, names=name)
            else:
                return Index(result, name=name)
        else:
            index = self.series.index
            if expand:
                cons_row = self.series._constructor
                cons = self.series._constructor_expanddim
                data = [cons_row(x) for x in result]
                return cons(data, index=index)
            else:
                name = getattr(result, 'name', None)
                cons = self.series._constructor
                return cons(result, name=name, index=index)
    @copy(str_cat)
    def cat(self, others=None, sep=None, na_rep=None):
        result = str_cat(self.series, others=others, sep=sep, na_rep=na_rep)
        return self._wrap_result(result)
    @deprecate_kwarg('return_type', 'expand',
                     mapping={'series': False, 'frame': True})
    @copy(str_split)
    def split(self, pat=None, n=-1, expand=False):
        result = str_split(self.series, pat, n=n)
        return self._wrap_result_expand(result, expand=expand)
    _shared_docs['str_partition'] = ("""
    Split the string at the %(side)s occurrence of `sep`, and return 3 elements
    containing the part before the separator, the separator itself,
    and the part after the separator.
    If the separator is not found, return %(return)s.
    Parameters
    ----------
    pat : string, default whitespace
        String to split on.
    expand : bool, default True
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.
    Returns
    -------
    split : DataFrame/MultiIndex or Series/Index of objects
    See Also
    --------
    %(also)s
    Examples
    --------
    >>> s = Series(['A_B_C', 'D_E_F', 'X'])
    0    A_B_C
    1    D_E_F
    2        X
    dtype: object
    >>> s.str.partition('_')
         0  1    2
    0    A  _  B_C
    1    D  _  E_F
    2    X
    >>> s.str.rpartition('_')
         0  1  2
    0  A_B  _  C
    1  D_E  _  F
    2         X
    """)
    @Appender(_shared_docs['str_partition'] % {'side': 'first',
        'return': '3 elements containing the string itself, followed by two empty strings',
        'also': 'rpartition : Split the string at the last occurrence of `sep`'})
    def partition(self, pat=' ', expand=True):
        f = lambda x: x.partition(pat)
        result = _na_map(f, self.series)
        return self._wrap_result_expand(result, expand=expand)
    @Appender(_shared_docs['str_partition'] % {'side': 'last',
        'return': '3 elements containing two empty strings, followed by the string itself',
        'also': 'partition : Split the string at the first occurrence of `sep`'})
    def rpartition(self, pat=' ', expand=True):
        f = lambda x: x.rpartition(pat)
        result = _na_map(f, self.series)
        return self._wrap_result_expand(result, expand=expand)
    @copy(str_get)
    def get(self, i):
        result = str_get(self.series, i)
        return self._wrap_result(result)
    @copy(str_join)
    def join(self, sep):
        result = str_join(self.series, sep)
        return self._wrap_result(result)
    @copy(str_contains)
    def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
        result = str_contains(self.series, pat, case=case, flags=flags,
                              na=na, regex=regex)
        return self._wrap_result(result)
    @copy(str_match)
    def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=False):
        result = str_match(self.series, pat, case=case, flags=flags,
                           na=na, as_indexer=as_indexer)
        return self._wrap_result(result)
    @copy(str_replace)
    def replace(self, pat, repl, n=-1, case=True, flags=0):
        result = str_replace(self.series, pat, repl, n=n, case=case,
                             flags=flags)
        return self._wrap_result(result)
    @copy(str_repeat)
    def repeat(self, repeats):
        result = str_repeat(self.series, repeats)
        return self._wrap_result(result)
    @copy(str_pad)
    def pad(self, width, side='left', fillchar=' '):
        result = str_pad(self.series, width, side=side, fillchar=fillchar)
        return self._wrap_result(result)
    _shared_docs['str_pad'] = ("""
    Filling %(side)s side of strings in the Series/Index with an
    additional character. Equivalent to :meth:`str.%(method)s`.
    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be filled
        with ``fillchar``
    fillchar : str
        Additional character for filling, default is whitespace
    Returns
    -------
    filled : Series/Index of objects
    """)
    # center/ljust/rjust delegate to pad() with the appropriate side.
    @Appender(_shared_docs['str_pad'] % dict(side='left and right',
                                             method='center'))
    def center(self, width, fillchar=' '):
        return self.pad(width, side='both', fillchar=fillchar)
    @Appender(_shared_docs['str_pad'] % dict(side='right', method='right'))
    def ljust(self, width, fillchar=' '):
        return self.pad(width, side='right', fillchar=fillchar)
    @Appender(_shared_docs['str_pad'] % dict(side='left', method='left'))
    def rjust(self, width, fillchar=' '):
        return self.pad(width, side='left', fillchar=fillchar)
    def zfill(self, width):
        """
        Filling left side of strings in the Series/Index with 0.
        Equivalent to :meth:`str.zfill`.
        Parameters
        ----------
        width : int
            Minimum width of resulting string; additional characters will be
            filled with 0
        Returns
        -------
        filled : Series/Index of objects
        """
        result = str_pad(self.series, width, side='left', fillchar='0')
        return self._wrap_result(result)
    @copy(str_slice)
    def slice(self, start=None, stop=None, step=None):
        result = str_slice(self.series, start, stop, step)
        return self._wrap_result(result)
    @copy(str_slice_replace)
    def slice_replace(self, start=None, stop=None, repl=None):
        result = str_slice_replace(self.series, start, stop, repl)
        return self._wrap_result(result)
    @copy(str_decode)
    def decode(self, encoding, errors="strict"):
        result = str_decode(self.series, encoding, errors)
        return self._wrap_result(result)
    @copy(str_encode)
    def encode(self, encoding, errors="strict"):
        result = str_encode(self.series, encoding, errors)
        return self._wrap_result(result)
    _shared_docs['str_strip'] = ("""
    Strip whitespace (including newlines) from each string in the
    Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    stripped : Series/Index of objects
    """)
    @Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
                                               method='strip'))
    def strip(self, to_strip=None):
        result = str_strip(self.series, to_strip, side='both')
        return self._wrap_result(result)
    @Appender(_shared_docs['str_strip'] % dict(side='left side',
                                               method='lstrip'))
    def lstrip(self, to_strip=None):
        result = str_strip(self.series, to_strip, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['str_strip'] % dict(side='right side',
                                               method='rstrip'))
    def rstrip(self, to_strip=None):
        result = str_strip(self.series, to_strip, side='right')
        return self._wrap_result(result)
    @copy(str_wrap)
    def wrap(self, width, **kwargs):
        result = str_wrap(self.series, width, **kwargs)
        return self._wrap_result(result)
    @copy(str_get_dummies)
    def get_dummies(self, sep='|'):
        result = str_get_dummies(self.series, sep)
        return self._wrap_result(result)
    @copy(str_translate)
    def translate(self, table, deletechars=None):
        result = str_translate(self.series, table, deletechars)
        return self._wrap_result(result)
    # Pattern-based accessors generated from module-level helpers.
    count = _pat_wrapper(str_count, flags=True)
    startswith = _pat_wrapper(str_startswith, na=True)
    endswith = _pat_wrapper(str_endswith, na=True)
    findall = _pat_wrapper(str_findall, flags=True)
    @copy(str_extract)
    def extract(self, pat, flags=0):
        result, name = str_extract(self.series, pat, flags=flags)
        return self._wrap_result(result, name=name)
    _shared_docs['find'] = ("""
    Return %(side)s indexes in each strings in the Series/Index
    where the substring is fully contained between [start:end].
    Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    Returns
    -------
    found : Series/Index of integer values
    See Also
    --------
    %(also)s
    """)
    @Appender(_shared_docs['find'] % dict(side='lowest', method='find',
                                          also='rfind : Return highest indexes in each strings'))
    def find(self, sub, start=0, end=None):
        result = str_find(self.series, sub, start=start, end=end, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['find'] % dict(side='highest', method='rfind',
                                          also='find : Return lowest indexes in each strings'))
    def rfind(self, sub, start=0, end=None):
        result = str_find(self.series, sub, start=start, end=end, side='right')
        return self._wrap_result(result)
    def normalize(self, form):
        """Return the Unicode normal form for the strings in the Series/Index.
        For more information on the forms, see the
        :func:`unicodedata.normalize`.
        Parameters
        ----------
        form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
            Unicode form
        Returns
        -------
        normalized : Series/Index of objects
        """
        import unicodedata
        f = lambda x: unicodedata.normalize(form, compat.u_safe(x))
        result = _na_map(f, self.series)
        return self._wrap_result(result)
    _shared_docs['index'] = ("""
    Return %(side)s indexes in each strings where the substring is
    fully contained between [start:end]. This is the same as ``str.%(similar)s``
    except instead of returning -1, it raises a ValueError when the substring
    is not found. Equivalent to standard ``str.%(method)s``.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    Returns
    -------
    found : Series/Index of objects
    See Also
    --------
    %(also)s
    """)
    @Appender(_shared_docs['index'] % dict(side='lowest', similar='find', method='index',
                                           also='rindex : Return highest indexes in each strings'))
    def index(self, sub, start=0, end=None):
        result = str_index(self.series, sub, start=start, end=end, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex',
                                           also='index : Return lowest indexes in each strings'))
    def rindex(self, sub, start=0, end=None):
        result = str_index(self.series, sub, start=start, end=end, side='right')
        return self._wrap_result(result)
    _shared_docs['len'] = ("""
    Compute length of each string in the Series/Index.
    Returns
    -------
    lengths : Series/Index of integer values
    """)
    # NOTE: deliberately shadows the builtin ``len`` at class scope; the
    # builtin is still reachable inside this statement as the wrapped f.
    len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
    _shared_docs['casemethods'] = ("""
    Convert strings in the Series/Index to %(type)s.
    Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    converted : Series/Index of objects
    """)
    _shared_docs['lower'] = dict(type='lowercase', method='lower')
    _shared_docs['upper'] = dict(type='uppercase', method='upper')
    _shared_docs['title'] = dict(type='titlecase', method='title')
    _shared_docs['capitalize'] = dict(type='be capitalized',
                                      method='capitalize')
    _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
    # Case-conversion accessors generated from the corresponding str methods.
    lower = _noarg_wrapper(lambda x: x.lower(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['lower'])
    upper = _noarg_wrapper(lambda x: x.upper(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['upper'])
    title = _noarg_wrapper(lambda x: x.title(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['title'])
    capitalize = _noarg_wrapper(lambda x: x.capitalize(),
                                docstring=_shared_docs['casemethods'] %
                                _shared_docs['capitalize'])
    swapcase = _noarg_wrapper(lambda x: x.swapcase(),
                              docstring=_shared_docs['casemethods'] %
                              _shared_docs['swapcase'])
    _shared_docs['ismethods'] = ("""
    Check whether all characters in each string in the Series/Index
    are %(type)s. Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    is : Series/array of boolean values
    """)
    _shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
    _shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
    _shared_docs['isdigit'] = dict(type='digits', method='isdigit')
    _shared_docs['isspace'] = dict(type='whitespace', method='isspace')
    _shared_docs['islower'] = dict(type='lowercase', method='islower')
    _shared_docs['isupper'] = dict(type='uppercase', method='isupper')
    _shared_docs['istitle'] = dict(type='titlecase', method='istitle')
    _shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
    _shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
    # Character-class predicate accessors (NA-aware via _na_map).
    isalnum = _noarg_wrapper(lambda x: x.isalnum(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isalnum'])
    isalpha = _noarg_wrapper(lambda x: x.isalpha(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isalpha'])
    isdigit = _noarg_wrapper(lambda x: x.isdigit(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isdigit'])
    isspace = _noarg_wrapper(lambda x: x.isspace(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isspace'])
    islower = _noarg_wrapper(lambda x: x.islower(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['islower'])
    isupper = _noarg_wrapper(lambda x: x.isupper(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isupper'])
    istitle = _noarg_wrapper(lambda x: x.istitle(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['istitle'])
    # isnumeric/isdecimal require unicode semantics; u_safe handles py2 str.
    isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
                               docstring=_shared_docs['ismethods'] %
                               _shared_docs['isnumeric'])
    isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
                               docstring=_shared_docs['ismethods'] %
                               _shared_docs['isdecimal'])
| mit |
anurag313/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    The estimate is the mean, over all (sub)sampled points, of the distance
    to each point's k-th nearest neighbour with k = n_points * quantile.
    Note this takes time at least quadratic in the number of points used;
    for large datasets set ``n_samples`` to a small value.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points.
    quantile : float, default 0.3
        Should be between [0, 1]; 0.5 means that the median of all
        pairwise distances is used.
    n_samples : int, optional
        The number of samples to use. If not given, all samples are used.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    rng = check_random_state(random_state)
    if n_samples is not None:
        # Work on a random subset to keep the cost manageable.
        chosen = rng.permutation(X.shape[0])[:n_samples]
        X = X[chosen]
    knn = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
    knn.fit(X)
    total = 0.
    # Query in chunks of 500 rows to bound the memory used by kneighbors.
    for batch in gen_batches(len(X), 500):
        dist, _ = knn.kneighbors(X[batch, :], return_distance=True)
        total += np.max(dist, axis=1).sum()
    return total / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
    """Climb the density gradient from a single seed.

    Returns ``(mean_tuple, n_points_within)`` once the mean moves less
    than 1e-3 * bandwidth or ``max_iter`` is reached; returns ``None``
    (implicitly) if the kernel around the seed ever becomes empty, which
    can happen depending on the seeding strategy.
    """
    bandwidth = nbrs.get_params()['radius']
    stop_thresh = 1e-3 * bandwidth  # movement below this means convergence
    completed_iterations = 0
    while True:
        # Collect all points inside the flat kernel around the current mean.
        member_idx = nbrs.radius_neighbors([my_mean], bandwidth,
                                           return_distance=False)[0]
        points_within = X[member_idx]
        if len(points_within) == 0:
            break  # empty kernel: give up on this seed
        previous_mean = my_mean
        my_mean = np.mean(points_within, axis=0)
        converged = extmath.norm(my_mean - previous_mean) < stop_thresh
        if converged or completed_iterations == max_iter:
            return tuple(my_mean), len(points_within)
        completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               min_bin_freq=1, cluster_all=True, max_iter=300,
               max_iterations=None, n_jobs=1):
    """Perform mean shift clustering of data using a flat kernel.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input data.
    bandwidth : float, optional
        Kernel bandwidth.
        If bandwidth is not given, it is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.
    seeds : array-like, shape=[n_seeds, n_features] or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.
    bin_seeding : boolean, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.
    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    max_iter : int, default 300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Returns
    -------
    cluster_centers : array, shape=[n_clusters, n_features]
        Coordinates of cluster centers.
    labels : array, shape=[n_samples]
        Cluster labels for each point.
    Notes
    -----
    See examples/cluster/plot_meanshift.py for an example.
    """
    # FIXME To be removed in 0.18
    if max_iterations is not None:
        warnings.warn("The `max_iterations` parameter has been renamed to "
                      "`max_iter` from version 0.16. The `max_iterations` "
                      "parameter will be removed in 0.18", DeprecationWarning)
        max_iter = max_iterations
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X)
    elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,\
            got %f" % bandwidth)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
        else:
            seeds = X
    n_samples, n_features = X.shape
    center_intensity_dict = {}
    nbrs = NearestNeighbors(radius=bandwidth).fit(X)
    # execute iterations on all seeds in parallel
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)
    # copy results in a dictionary; seeds whose kernel emptied return None
    for i in range(len(seeds)):
        if all_res[i] is not None:
            center_intensity_dict[all_res[i][0]] = all_res[i][1]
    if not center_intensity_dict:
        # nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy \
or increase the bandwidth."
                         % bandwidth)
    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    # np.bool / np.int were aliases of the builtins and were removed in
    # NumPy 1.24; using the builtins is equivalent on all NumPy versions.
    unique = np.ones(len(sorted_centers), dtype=bool)
    nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = nbrs.radius_neighbors([center],
                                                  return_distance=False)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]
    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=int)
    distances, idxs = nbrs.kneighbors(X)
    if cluster_all:
        labels = idxs.flatten()
    else:
        # Orphans (farther than bandwidth from every center) get label -1.
        labels.fill(-1)
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift.

    Bins the data onto a grid with spacing ``bin_size`` and returns the
    (scaled-back) centers of the bins that hold at least ``min_bin_freq``
    points. If binning produces as many seeds as there are points, it has
    not reduced anything, so the original points are returned instead.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points, the same points that will be used in mean_shift.
    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.
    min_bin_freq : integer, optional
        Only bins with at least min_bin_freq will be selected as seeds.

    Returns
    -------
    bin_seeds : array-like, shape=[n_samples, n_features]
        Points used as initial kernel positions in clustering.mean_shift.
    """
    # Count how many points land in each grid cell.
    bin_sizes = defaultdict(int)
    for point in X:
        bin_sizes[tuple(np.round(point / bin_size))] += 1
    # Keep only sufficiently populated cells as seed candidates.
    bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
                          freq >= min_bin_freq], dtype=np.float32)
    if len(bin_seeds) == len(X):
        warnings.warn("Binning data failed with provided bin_size=%f,"
                      " using data points as seeds." % bin_size)
        return X
    # Scale grid coordinates back to data space.
    return bin_seeds * bin_size
class MeanShift(BaseEstimator, ClusterMixin):
    """Mean shift clustering using a flat kernel.
    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.
    Seeding is performed using a binning technique for scalability.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    bandwidth : float, optional
        Bandwidth used in the RBF kernel.
        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).
    seeds : array, shape=[n_samples, n_features], optional
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.
    bin_seeding : boolean, optional
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None.
    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.
    labels_ :
        Labels of each point.
    Notes
    -----
    Scalability:
    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will is
    to O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).
    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.
    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.
    References
    ----------
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """
    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 min_bin_freq=1, cluster_all=True, n_jobs=1):
        # Parameters are stored as-is; all validation and work happens in
        # the module-level mean_shift() called from fit().
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs
    def fit(self, X, y=None):
        """Perform clustering.
        Parameters
        -----------
        X : array-like, shape=[n_samples, n_features]
            Samples to cluster.
        """
        X = check_array(X)
        # Delegate to the functional implementation; it returns the final
        # cluster centers and a label per input sample.
        self.cluster_centers_, self.labels_ = \
            mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
                       min_bin_freq=self.min_bin_freq,
                       bin_seeding=self.bin_seeding,
                       cluster_all=self.cluster_all, n_jobs=self.n_jobs)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape=[n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, "cluster_centers_")
        # Nearest-center assignment (always assigns, regardless of
        # cluster_all / bandwidth).
        return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
rmdort/clipper | containers/python/test_sklearn_cifar_container.py | 3 | 1871 | import numpy as np
import os
import pandas as pd
import rpc
import sys
from sklearn_cifar_container import SklearnCifarContainer
from sklearn.metrics import accuracy_score
# CIFAR-10 label names, ordered by their integer class index in the data.
classes = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]
# Binary task used by this test: 'airplane' (index 0) vs 'bird' (index 2).
positive_class = classes.index('airplane')
negative_class = classes.index('bird')
def load_cifar(cifar_location, cifar_filename="train.data", norm=False):
    """Load a CIFAR csv file as (features, labels).

    The first column of each row is the integer label; the remaining
    columns are pixel values. With ``norm=True`` the features are
    standardised (zero-variance components are left unscaled).
    """
    cifar_path = cifar_location + "/" + cifar_filename
    print("Source file: %s" % cifar_path)
    frame = pd.read_csv(cifar_path, sep=",", header=None)
    data = frame.values
    print("Number of image files: %d" % len(data))
    y = data[:, 0]
    X = data[:, 1:]
    if not norm:
        return (X, y)
    mu = np.mean(X.T, 0)
    sigma = np.var(X.T, 0)
    # Avoid division by zero for constant components.
    scale = np.array([np.sqrt(s) if s > 0 else 1. for s in sigma])
    Z = ((X.T - mu) / scale).T
    return (Z, y)
def filter_data(X, y, positive=None, negative=None):
    """Keep only the examples of two classes and relabel them +1 / -1.

    Parameters
    ----------
    X : iterable of feature vectors
    y : iterable of integer class labels (same length as X)
    positive : int, optional
        Label mapped to +1.0. Defaults to the module-level
        ``positive_class`` ('airplane'), preserving the original behavior.
    negative : int, optional
        Label mapped to -1.0. Defaults to the module-level
        ``negative_class`` ('bird').

    Returns
    -------
    (X_train, y_train) : pair of numpy arrays
        Examples of any other class are dropped.
    """
    # Generalized: the two class labels are now parameters; the defaults
    # reproduce the original hard-coded airplane-vs-bird behavior.
    pos = positive_class if positive is None else positive
    neg = negative_class if negative is None else negative
    X_train, y_train = [], []
    for (example, label) in zip(X, y):
        if label == pos:
            X_train.append(example)
            y_train.append(1.0)
        elif label == neg:
            X_train.append(example)
            y_train.append(-1.0)
    X_train = np.array(X_train)
    y_train = np.array(y_train)
    return X_train, y_train
if __name__ == '__main__':
    # Model directory is injected by the Clipper container runtime.
    model_path = os.environ["CLIPPER_MODEL_PATH"]
    pkl_names = [
        l for l in os.listdir(model_path) if os.path.splitext(l)[1] == ".pkl"
    ]
    # Exactly one pickled sklearn model is expected in the directory.
    assert len(pkl_names) == 1
    pkl_path = os.path.join(model_path, pkl_names[0])
    print(pkl_path)
    model = SklearnCifarContainer(pkl_path)
    # Evaluate on the binary (airplane vs bird) CIFAR test split.
    X_test, y_test = load_cifar('data', 'test.data')
    X_test, y_test = filter_data(X_test, y_test)
    # accuracy_score expects {0, 1}; remap the -1 labels to 0.
    y_test[np.where(y_test == -1)] = 0
    preds = model.predict_ints(X_test)
    print("Test accuracy: %f" % accuracy_score(y_test, preds))
| apache-2.0 |
cainiaocome/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is computational not costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly most
computationally costly.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg

from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
    log_likelihood, empirical_covariance
# GridSearchCV lives in sklearn.model_selection; the old sklearn.grid_search
# module was deprecated in 0.18 and removed in 0.20.
from sklearn.model_selection import GridSearchCV

###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))

# Color samples: multiply by a random matrix to induce correlations
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)

###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
                    for s in shrinkages]

# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))

###############################################################################
# Compare different approaches to setting the parameter

# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)

# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)

# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)

###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularizaton parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')

# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
         label="Real covariance likelihood")

# adjust view so the vertical marker lines stay within the axes
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]

# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
           linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
           linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
           -cv.best_estimator_.score(X_test), color='cyan',
           linewidth=3, label='Cross-validation best estimate')

plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()

plt.show()
| bsd-3-clause |
synthicity/activitysim | activitysim/abm/models/trip_destination.py | 2 | 19741 | # ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases() # noqa: E402
from builtins import range
import logging
import numpy as np
import pandas as pd
from activitysim.core import tracing
from activitysim.core import config
from activitysim.core import pipeline
from activitysim.core import simulate
from activitysim.core import inject
from activitysim.core.tracing import print_elapsed_time
from activitysim.core.util import reindex
from activitysim.core.util import assign_in_place
from .util import expressions
from activitysim.core import assign
from activitysim.abm.tables.size_terms import tour_destination_size_terms
from activitysim.core.skim import DataFrameMatrix
from activitysim.core.interaction_sample_simulate import interaction_sample_simulate
from activitysim.core.interaction_sample import interaction_sample
from activitysim.abm.models.util.trip import cleanup_failed_trips
logger = logging.getLogger(__name__)
NO_DESTINATION = -1
def get_spec_for_purpose(model_settings, spec_name, purpose):
    """
    Read the omnibus model spec named by *spec_name* in model_settings and
    slice out the single utility column for *purpose*.

    Rows whose coefficient for this purpose is zero are dropped, since they
    cannot affect the utility of any alternative.
    """
    full_spec = simulate.read_model_spec(file_name=model_settings[spec_name])

    purpose_spec = full_spec[[purpose]]

    # zero-utility rows contribute nothing; ignore them
    nonzero_rows = purpose_spec.iloc[:, 0] != 0
    purpose_spec = purpose_spec[nonzero_rows]

    assert purpose_spec.shape[0] > 0

    return purpose_spec
def trip_destination_sample(
        primary_purpose,
        trips,
        alternatives,
        model_settings,
        size_term_matrix, skims,
        chunk_size, trace_hh_id,
        trace_label):
    """
    Build a sample of candidate destination zones for each chooser trip
    using interaction_sample and the purpose-specific sample spec.

    Returns
    -------
    destination_sample: pandas.dataframe
        choices_df from interaction_sample with (up to) sample_size alts for each chooser row
        index (non unique) is trip_id from trips (duplicated for each alt)
        and columns dest_taz, prob, and pick_count
        dest_taz: int
            alt identifier (dest_taz) from alternatives[<alt_col_name>]
        prob: float
            the probability of the chosen alternative
        pick_count : int
            number of duplicate picks for chooser, alt
    """
    trace_label = tracing.extend_trace_label(trace_label, 'trip_destination_sample')
    # one utility column per purpose; slice out this purpose's column
    spec = get_spec_for_purpose(model_settings, 'DESTINATION_SAMPLE_SPEC', primary_purpose)
    sample_size = model_settings["SAMPLE_SIZE"]
    alt_dest_col_name = model_settings["ALT_DEST"]
    logger.info("Running %s with %d trips", trace_label, trips.shape[0])
    # expose model constants plus size terms and skims to spec expressions
    locals_dict = config.get_model_constants(model_settings).copy()
    locals_dict.update({
        'size_terms': size_term_matrix
    })
    locals_dict.update(skims)
    # allow_zero_probs: some trips may have no viable alternatives at all;
    # such trips are simply absent from the returned sample
    destination_sample = interaction_sample(
        choosers=trips,
        alternatives=alternatives,
        sample_size=sample_size,
        alt_col_name=alt_dest_col_name,
        allow_zero_probs=True,
        spec=spec,
        skims=skims,
        locals_d=locals_dict,
        chunk_size=chunk_size,
        trace_label=trace_label)
    return destination_sample
def compute_ood_logsums(
        choosers,
        logsum_settings,
        od_skims,
        locals_dict,
        chunk_size,
        trace_label):
    """
    Compute one (of two) out-of-direction logsums for destination alternatives

    Will either be trip_origin -> alt_dest or alt_dest -> primary_dest

    Returns a pandas.Series of logsums aligned to choosers.index.
    """
    # NOTE: mutates the caller's locals_dict by merging in the skim wrappers
    locals_dict.update(od_skims)
    # run any preprocessor annotations declared in the logsum settings
    expressions.annotate_preprocessors(
        choosers, locals_dict, od_skims,
        logsum_settings,
        trace_label)
    nest_spec = config.get_logit_model_settings(logsum_settings)
    logsum_spec = simulate.read_model_spec(file_name=logsum_settings['SPEC'])
    logsums = simulate.simple_simulate_logsums(
        choosers,
        logsum_spec,
        nest_spec,
        skims=od_skims,
        locals_d=locals_dict,
        chunk_size=chunk_size,
        trace_label=trace_label)
    # logsums must line up one-to-one with choosers
    assert logsums.index.equals(choosers.index)
    # FIXME not strictly necessary, but would make trace files more legible?
    # logsums = logsums.replace(-np.inf, -999)
    return logsums
def compute_logsums(
        primary_purpose,
        trips,
        destination_sample,
        tours_merged,
        model_settings,
        skims,
        chunk_size, trace_hh_id,
        trace_label):
    """
    Calculate mode choice logsums using the same recipe as for trip_mode_choice, but do it twice
    for each alternative since we need out-of-direction logsum
    (i.e. origin to alt_dest, and alt_dest to half-tour destination)

    Returns
    -------
    adds od_logsum and dp_logsum columns to destination_sample (in place)
    """
    trace_label = tracing.extend_trace_label(trace_label, 'compute_logsums')
    logger.info("Running %s with %d samples", trace_label, destination_sample.shape[0])
    # - trips_merged - merge trips and tours_merged
    trips_merged = pd.merge(
        trips,
        tours_merged,
        left_on='tour_id',
        right_index=True,
        how="left")
    assert trips_merged.index.equals(trips.index)
    # - choosers - merge destination_sample and trips_merged
    # re/set index because pandas merge does not preserve left index if it has duplicate values!
    choosers = pd.merge(destination_sample,
                        trips_merged.reset_index(),
                        left_index=True,
                        right_on='trip_id',
                        how="left",
                        suffixes=('', '_r')).set_index('trip_id')
    assert choosers.index.equals(destination_sample.index)
    logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
    # coefficients file has one column per purpose; pick this purpose's column
    omnibus_coefficient_spec = \
        assign.read_constant_spec(config.config_file_path(logsum_settings['COEFFS']))
    coefficient_spec = omnibus_coefficient_spec[primary_purpose]
    constants = config.get_model_constants(logsum_settings)
    locals_dict = assign.evaluate_constants(coefficient_spec, constants=constants)
    locals_dict.update(constants)
    # - od_logsums: trip origin -> alternative destination
    od_skims = {
        'ORIGIN': model_settings['TRIP_ORIGIN'],
        'DESTINATION': model_settings['ALT_DEST'],
        "odt_skims": skims['odt_skims'],
        "od_skims": skims['od_skims'],
    }
    destination_sample['od_logsum'] = compute_ood_logsums(
        choosers,
        logsum_settings,
        od_skims,
        locals_dict,
        chunk_size,
        trace_label=tracing.extend_trace_label(trace_label, 'od'))
    # - dp_logsums: alternative destination -> tour primary destination
    # the generic ORIGIN/DESTINATION aliases let the same logsum expressions
    # serve both directions
    dp_skims = {
        'ORIGIN': model_settings['ALT_DEST'],
        'DESTINATION': model_settings['PRIMARY_DEST'],
        "odt_skims": skims['dpt_skims'],
        "od_skims": skims['dp_skims'],
    }
    destination_sample['dp_logsum'] = compute_ood_logsums(
        choosers,
        logsum_settings,
        dp_skims,
        locals_dict,
        chunk_size,
        trace_label=tracing.extend_trace_label(trace_label, 'dp'))
def trip_destination_simulate(
        primary_purpose,
        trips,
        destination_sample,
        model_settings,
        size_term_matrix, skims,
        chunk_size, trace_hh_id,
        trace_label):
    """
    Choose destination from destination_sample (with od_logsum and dp_logsum columns added)

    Returns
    -------
    choices - pandas.Series
        destination alt chosen
    """
    trace_label = tracing.extend_trace_label(trace_label, 'trip_destination_simulate')
    spec = get_spec_for_purpose(model_settings, 'DESTINATION_SPEC', primary_purpose)
    alt_dest_col_name = model_settings["ALT_DEST"]
    logger.info("Running trip_destination_simulate with %d trips", len(trips))
    # expose model constants plus size terms and skims to spec expressions
    locals_dict = config.get_model_constants(model_settings).copy()
    locals_dict.update({
        'size_terms': size_term_matrix
    })
    locals_dict.update(skims)
    # zero-prob choosers get the NO_DESTINATION sentinel rather than raising
    destinations = interaction_sample_simulate(
        choosers=trips,
        alternatives=destination_sample,
        spec=spec,
        choice_column=alt_dest_col_name,
        allow_zero_probs=True, zero_prob_choice_val=NO_DESTINATION,
        skims=skims,
        locals_d=locals_dict,
        chunk_size=chunk_size,
        trace_label=trace_label,
        trace_choice_name='trip_dest')
    # drop any failed zero_prob destinations
    if (destinations == NO_DESTINATION).any():
        # logger.debug("dropping %s failed destinations", destinations == NO_DESTINATION).sum()
        destinations = destinations[destinations != NO_DESTINATION]
    return destinations
def choose_trip_destination(
        primary_purpose,
        trips,
        alternatives,
        tours_merged,
        model_settings,
        size_term_matrix, skims,
        chunk_size, trace_hh_id,
        trace_label):
    """
    Full sample-logsum-simulate destination choice for one purpose segment of trips.

    Runs the three stages in order: trip_destination_sample, compute_logsums,
    and trip_destination_simulate. Trips with no viable alternatives are
    dropped along the way (and logged); the returned Series may therefore
    cover fewer trips than were passed in.

    Returns
    -------
    pandas.Series
        chosen destination zone indexed by trip_id (empty if no trips survive sampling)
    """
    logger.info("choose_trip_destination %s with %d trips", trace_label, trips.shape[0])
    t0 = print_elapsed_time()
    # - trip_destination_sample
    destination_sample = trip_destination_sample(
        primary_purpose=primary_purpose,
        trips=trips,
        alternatives=alternatives,
        model_settings=model_settings,
        size_term_matrix=size_term_matrix, skims=skims,
        chunk_size=chunk_size, trace_hh_id=trace_hh_id,
        trace_label=trace_label)
    # trips absent from the sample had no alternative with nonzero probability
    dropped_trips = ~trips.index.isin(destination_sample.index.unique())
    if dropped_trips.any():
        logger.warning("%s trip_destination_sample %s trips "
                       "without viable destination alternatives" %
                       (trace_label, dropped_trips.sum()))
        trips = trips[~dropped_trips]
    t0 = print_elapsed_time("%s.trip_destination_sample" % trace_label, t0)
    if trips.empty:
        return pd.Series(index=trips.index)
    # - compute logsums (adds od_logsum and dp_logsum columns to destination_sample)
    compute_logsums(
        primary_purpose=primary_purpose,
        trips=trips,
        destination_sample=destination_sample,
        tours_merged=tours_merged,
        model_settings=model_settings,
        skims=skims,
        chunk_size=chunk_size, trace_hh_id=trace_hh_id,
        trace_label=trace_label)
    t0 = print_elapsed_time("%s.compute_logsums" % trace_label, t0)
    # - trip_destination_simulate
    destinations = trip_destination_simulate(
        primary_purpose=primary_purpose,
        trips=trips,
        destination_sample=destination_sample,
        model_settings=model_settings,
        size_term_matrix=size_term_matrix, skims=skims,
        chunk_size=chunk_size, trace_hh_id=trace_hh_id,
        trace_label=trace_label)
    # trips may also fail in simulate (all sampled alts had zero probability)
    dropped_trips = ~trips.index.isin(destinations.index)
    if dropped_trips.any():
        logger.warning("%s trip_destination_simulate %s trips "
                       "without viable destination alternatives" %
                       (trace_label, dropped_trips.sum()))
    t0 = print_elapsed_time("%s.trip_destination_simulate" % trace_label, t0)
    return destinations
def wrap_skims(model_settings):
    """
    wrap skims of trip destination using origin, dest column names from model settings.
    Various of these are used by destination_sample, compute_logsums, and destination_simulate
    so we create them all here with canonical names.

    Note that compute_logsums aliases their names so it can use the same equations to compute
    logsums from origin to alt_dest, and from alt_dest to primary destination

    odt_skims - SkimStackWrapper: trip origin, trip alt_dest, time_of_day
    dot_skims - SkimStackWrapper: trip alt_dest, trip origin, time_of_day
    dpt_skims - SkimStackWrapper: trip alt_dest, trip primary_dest, time_of_day
    pdt_skims - SkimStackWrapper: trip primary_dest,trip alt_dest, time_of_day
    od_skims - SkimDictWrapper: trip origin, trip alt_dest
    dp_skims - SkimDictWrapper: trip alt_dest, trip primary_dest

    Parameters
    ----------
    model_settings : dict
        must provide TRIP_ORIGIN, ALT_DEST and PRIMARY_DEST column names

    Returns
    -------
    dict containing skims, keyed by canonical names relative to tour orientation
    """
    skim_dict = inject.get_injectable('skim_dict')
    skim_stack = inject.get_injectable('skim_stack')
    # shorthand for the three column names skims are keyed by
    o = model_settings['TRIP_ORIGIN']
    d = model_settings['ALT_DEST']
    p = model_settings['PRIMARY_DEST']
    skims = {
        "odt_skims": skim_stack.wrap(left_key=o, right_key=d, skim_key='trip_period'),
        "dot_skims": skim_stack.wrap(left_key=d, right_key=o, skim_key='trip_period'),
        "dpt_skims": skim_stack.wrap(left_key=d, right_key=p, skim_key='trip_period'),
        "pdt_skims": skim_stack.wrap(left_key=p, right_key=d, skim_key='trip_period'),
        "od_skims": skim_dict.wrap(o, d),
        "dp_skims": skim_dict.wrap(d, p),
    }
    return skims
def run_trip_destination(
        trips,
        tours_merged,
        chunk_size, trace_hh_id,
        trace_label):
    """
    trip destination - main functionality separated from model step so it can be called iteratively

    Run the trip_destination model, assigning destinations for each (intermediate) trip
    (last trips already have a destination - either the tour primary destination or Home)

    Set trip destination and origin columns, and a boolean failed flag for any failed trips
    (destination for flagged failed trips will be set to -1)

    Parameters
    ----------
    trips : pandas.DataFrame
    tours_merged : pandas.DataFrame
    chunk_size : int
    trace_hh_id : int
    trace_label : str

    Returns
    -------
    pandas.DataFrame
        trips with destination, origin and failed columns set
    """
    model_settings = config.read_model_settings('trip_destination.yaml')
    preprocessor_settings = model_settings.get('preprocessor', None)
    logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
    land_use = inject.get_table('land_use')
    size_terms = inject.get_injectable('size_terms')
    # - initialize trip origin and destination to those of half-tour
    # (we will sequentially adjust intermediate trips origin and destination as we choose them)
    tour_destination = reindex(tours_merged.destination, trips.tour_id).astype(int)
    tour_origin = reindex(tours_merged.origin, trips.tour_id).astype(int)
    trips['destination'] = np.where(trips.outbound, tour_destination, tour_origin)
    trips['origin'] = np.where(trips.outbound, tour_origin, tour_destination)
    trips['failed'] = False
    trips = trips.sort_index()
    # next_trip_id lets us propagate each chosen destination forward as the
    # following trip's origin; final trips of a leg get sentinel 0
    trips['next_trip_id'] = np.roll(trips.index, -1)
    trips.next_trip_id = trips.next_trip_id.where(trips.trip_num < trips.trip_count, 0)
    # - filter tours_merged (AFTER copying destination and origin columns to trips)
    # tours_merged is used for logsums, we filter it here upfront to save space and time
    tours_merged_cols = logsum_settings['TOURS_MERGED_CHOOSER_COLUMNS']
    if 'REDUNDANT_TOURS_MERGED_CHOOSER_COLUMNS' in model_settings:
        redundant_cols = model_settings['REDUNDANT_TOURS_MERGED_CHOOSER_COLUMNS']
        tours_merged_cols = [c for c in tours_merged_cols if c not in redundant_cols]
    tours_merged = tours_merged[tours_merged_cols]
    # - skims
    skims = wrap_skims(model_settings)
    # - size_terms and alternatives
    alternatives = tour_destination_size_terms(land_use, size_terms, 'trip')
    # DataFrameMatrix allows us to treat dataframe as a virtual 2-D array, indexed by TAZ, purpose
    # e.g. size_terms.get(df.dest_taz, df.purpose)
    # returns a series of size_terms for each chooser's dest_taz and purpose with chooser index
    size_term_matrix = DataFrameMatrix(alternatives)
    # don't need size terms in alternatives, just TAZ index
    alternatives = alternatives.drop(alternatives.columns, axis=1)
    alternatives.index.name = model_settings['ALT_DEST']
    # - process intermediate trips in ascending trip_num order
    intermediate = trips.trip_num < trips.trip_count
    if intermediate.any():
        first_trip_num = trips[intermediate].trip_num.min()
        last_trip_num = trips[intermediate].trip_num.max()
        # iterate over trips in ascending trip_num order so each trip's origin
        # has already been fixed by the previous iteration's choice
        for trip_num in range(first_trip_num, last_trip_num + 1):
            nth_trips = trips[intermediate & (trips.trip_num == trip_num)]
            nth_trace_label = tracing.extend_trace_label(trace_label, 'trip_num_%s' % trip_num)
            # - annotate nth_trips
            if preprocessor_settings:
                expressions.assign_columns(
                    df=nth_trips,
                    model_settings=preprocessor_settings,
                    locals_dict=config.get_model_constants(model_settings),
                    trace_label=nth_trace_label)
            logger.info("Running %s with %d trips", nth_trace_label, nth_trips.shape[0])
            # - choose destination for nth_trips, segmented by primary_purpose
            choices_list = []
            for primary_purpose, trips_segment in nth_trips.groupby('primary_purpose'):
                choices = choose_trip_destination(
                    primary_purpose,
                    trips_segment,
                    alternatives,
                    tours_merged,
                    model_settings,
                    size_term_matrix, skims,
                    chunk_size, trace_hh_id,
                    trace_label=tracing.extend_trace_label(nth_trace_label, primary_purpose))
                choices_list.append(choices)
            destinations = pd.concat(choices_list)
            # trips missing from destinations had no viable alternative at all:
            # flag them failed, and pass their (unchanged) origin through to the next trip
            failed_trip_ids = nth_trips.index.difference(destinations.index)
            if failed_trip_ids.any():
                logger.warning("%s sidelining %s trips without viable destination alternatives" %
                               (nth_trace_label, failed_trip_ids.shape[0]))
                next_trip_ids = nth_trips.next_trip_id.reindex(failed_trip_ids)
                trips.loc[failed_trip_ids, 'failed'] = True
                trips.loc[failed_trip_ids, 'destination'] = -1
                trips.loc[next_trip_ids, 'origin'] = trips.loc[failed_trip_ids].origin.values
            # - assign choices to these trips destinations and to next trips origin
            assign_in_place(trips, destinations.to_frame('destination'))
            destinations.index = nth_trips.next_trip_id.reindex(destinations.index)
            assign_in_place(trips, destinations.to_frame('origin'))
    del trips['next_trip_id']
    return trips
@inject.step()
def trip_destination(
        trips,
        tours_merged,
        chunk_size, trace_hh_id):
    """
    Choose a destination for all 'intermediate' trips based on trip purpose.

    Final trips already have a destination (the primary tour destination for outbound trips,
    and home for inbound trips.)

    Failed trips are written to a csv trace file; depending on the CLEANUP
    model setting they are then either removed (along with their dependents)
    by cleanup_failed_trips, or kept sidelined with failed=True.
    """
    trace_label = 'trip_destination'
    model_settings = config.read_model_settings('trip_destination.yaml')
    CLEANUP = model_settings.get('CLEANUP', True)

    trips_df = trips.to_frame()
    tours_merged_df = tours_merged.to_frame()

    logger.info("Running %s with %d trips", trace_label, trips_df.shape[0])

    trips_df = run_trip_destination(
        trips_df,
        tours_merged_df,
        chunk_size, trace_hh_id,
        trace_label)

    if trips_df.failed.any():
        logger.warning("%s %s failed trips", trace_label, trips_df.failed.sum())
        file_name = "%s_failed_trips" % trace_label
        logger.info("writing failed trips to %s", file_name)
        tracing.write_csv(trips_df[trips_df.failed], file_name=file_name, transpose=False)

    if CLEANUP:
        trips_df = cleanup_failed_trips(trips_df)
    elif trips_df.failed.any():
        logger.warning("%s keeping %s sidelined failed trips" %
                       (trace_label, trips_df.failed.sum()))

    pipeline.replace_table("trips", trips_df)

    # was a stray debug print() to stdout; demoted to logger.debug
    logger.debug("%s trips_df.shape: %s", trace_label, trips_df.shape)

    if trace_hh_id:
        tracing.trace_df(trips_df,
                         label=trace_label,
                         slicer='trip_id',
                         index_label='trip_id',
                         warn_if_empty=True)
| agpl-3.0 |
alan-unravel/bokeh | examples/interactions/interactive_bubble/data.py | 49 | 1265 | import numpy as np
from bokeh.palettes import Spectral6
def process_data():
    """Load the gapminder sample data and prepare it for plotting.

    Returns fertility, life_expectancy, population_size (bubble sizes),
    regions (with a region_color column), the list of years, and the
    list of region names.
    """
    from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions

    # Make the column names ints not strings for handling.
    columns = list(fertility.columns)
    # +1 because range() excludes its stop value: without it the last year
    # column would be dropped by zip() and never renamed.
    years = list(range(int(columns[0]), int(columns[-1]) + 1))
    rename_dict = dict(zip(columns, years))
    fertility = fertility.rename(columns=rename_dict)
    life_expectancy = life_expectancy.rename(columns=rename_dict)
    population = population.rename(columns=rename_dict)
    regions = regions.rename(columns=rename_dict)

    # Turn population into bubble sizes. Use min_size and factor to tweak.
    scale_factor = 200
    population_size = np.sqrt(population / np.pi) / scale_factor
    min_size = 3
    population_size = population_size.where(population_size >= min_size).fillna(min_size)

    # Use pandas categories and categorize & color the regions
    regions.Group = regions.Group.astype('category')
    regions_list = list(regions.Group.cat.categories)

    def get_color(r):
        return Spectral6[regions_list.index(r.Group)]
    regions['region_color'] = regions.apply(get_color, axis=1)

    return fertility, life_expectancy, population_size, regions, years, regions_list
| bsd-3-clause |
PiotrGrzybowski/NeuralNetworks | networks/neurons/optimizer.py | 1 | 5209 | import numpy as np
import matplotlib.pyplot as plt
from drawnow import drawnow
import time
from neurons.activations import bipolar
LEAST_MEAN_ERROR = 'least_mean_square'
DISCRETE_ERROR = 'discrete'
class Optimizer:
    """Online (per-sample) gradient trainer for a single neuron.

    Supports two loss modes: DISCRETE_ERROR (perceptron-style integer error)
    and LEAST_MEAN_ERROR (LMS / squared error on the raw output).
    Optionally visualizes the decision boundary during training.
    """

    def __init__(self, loss, learning_rate, epochs, stop_error):
        # loss: one of DISCRETE_ERROR / LEAST_MEAN_ERROR module constants
        self.loss = loss
        self.learning_rate = learning_rate
        self.epochs = epochs
        # flipped to False by check_stop_condition once stop_error is reached
        self.continue_learning = True
        self.stop_error = stop_error
        # per-epoch error histories, pre-sized to the max number of epochs
        self.epochs_train_error = np.zeros(epochs)
        self.epochs_validation_error = np.zeros(epochs)

    def train(self, neuron, training_data, validation_data, visualize):
        """Train neuron in place; return (train_errors, validation_errors, epochs_run).

        NOTE: shuffles training_data in place each epoch.
        """
        if visualize:
            self.initialize_plotting(neuron, training_data, validation_data)
        epoch = 0
        while epoch < self.epochs and self.continue_learning:
            training_error = 0
            validation_error = 0
            np.random.shuffle(training_data)
            if visualize:
                self.plot_output(neuron)
            # online update: weights/bias adjusted after every sample
            for sample in training_data:
                input_values, expected_value = Optimizer.split_sample(neuron, sample)
                error, training_error = self.calculate_error(expected_value, input_values, neuron, training_error)
                self.update_weights(error, input_values, neuron)
                self.update_bias(error, neuron)
            validation_error = self.calculate_validation_error(neuron, validation_data, validation_error)
            epoch_error_training = self.calculate_epoch_error(training_error, len(training_data))
            epoch_error_validation = self.calculate_epoch_error(validation_error, len(validation_data))
            self.epochs_train_error[epoch] = epoch_error_training
            self.epochs_validation_error[epoch] = epoch_error_validation
            # print(("Training Error = {}".format(epoch_error_training))
            # print(("Validation Error = {}".format(epoch_error_validation))
            self.check_stop_condition(epoch_error_training)
            epoch += 1
        if visualize:
            input("Press Enter to continue...")
        return self.epochs_train_error, self.epochs_validation_error, epoch

    def check_stop_condition(self, epoch_error_training):
        # discrete loss stops on exact match; LMS stops once below threshold
        if self.loss == DISCRETE_ERROR:
            if epoch_error_training == self.stop_error:
                self.continue_learning = False
        else:
            if epoch_error_training < self.stop_error:
                self.continue_learning = False

    def calculate_validation_error(self, neuron, validation_data, validation_error):
        """Accumulate (without weight updates) the error over validation_data."""
        for sample in validation_data:
            input_values, expected_value = Optimizer.split_sample(neuron, sample)
            error, validation_error = self.calculate_error(expected_value, input_values, neuron, validation_error)
        return validation_error

    @staticmethod
    def split_sample(neuron, sample):
        # sample layout: input features first, expected label last
        # NOTE(review): assumes neuron.get_number_of_inputs is a property, not
        # a method (it is used unparenthesized) -- confirm against Neuron class
        return sample[:neuron.get_number_of_inputs], sample[-1]

    def update_bias(self, error, neuron):
        neuron.bias += self.learning_rate * error

    def update_weights(self, error, input_values, neuron):
        # classic delta rule: w_i += lr * error * x_i
        for i in range(neuron.get_number_of_inputs):
            neuron.weights[i] += self.learning_rate * error * input_values[i]

    def calculate_error(self, expected_value, input_values, neuron, cumulative_error):
        """Return (per-sample error for the update, updated cumulative error).

        NOTE(review): in DISCRETE mode the signed errors are summed, so
        opposite-sign errors can cancel in the epoch total -- confirm intended.
        """
        if self.loss == DISCRETE_ERROR:
            error = expected_value - neuron.predict(input_values)
            cumulative_error += error
        elif self.loss == LEAST_MEAN_ERROR:
            error = 2 * (expected_value - neuron.raw_output(input_values))
            cumulative_error += np.power(expected_value - neuron.raw_output(input_values), 2)
        else:
            raise ValueError('Wrong loss function')
        return error, cumulative_error

    def calculate_epoch_error(self, error, number_of_samples):
        # discrete error is an absolute count; LMS error is averaged per sample
        return error if self.loss == DISCRETE_ERROR else error / number_of_samples

    def update_line(self, new_x, new_y):
        self.plot.set_xdata(new_x)
        self.plot.set_ydata(new_y)
        plt.draw()

    @staticmethod
    def get_x_axis(neuron):
        # x endpoints of the separating line depend on the activation range
        return [-1, 1] if neuron.get_activation == bipolar else [0, 1]

    @staticmethod
    def get_y_axis(neuron):
        # solve w0*x + w1*y + bias = 0 for y at the two x endpoints
        x = Optimizer.get_x_axis(neuron)
        return [(-neuron.bias - (neuron.weights[0]) * x[0]) / neuron.weights[1],
                (-neuron.bias - neuron.weights[0]) / neuron.weights[1]]

    def draw_fig(self):
        # scatter the validation samples colored by current prediction,
        # then overlay the decision boundary
        plt.xlim((-2.1, 2.1))
        plt.ylim((-2.1, 2.1))
        for sample in self.validation_data:
            result = self.neuron.predict(sample)
            if result == 1:
                plt.plot(sample[0], sample[1], 'or')
            else:
                plt.plot(sample[0], sample[1], 'ob')
        plt.plot(self.x, self.y)

    def plot_output(self, neuron):
        self.x = self.get_x_axis(neuron)
        self.y = self.get_y_axis(neuron)
        drawnow(self.draw_fig)

    def initialize_plotting(self, neuron, training_data, validation_data):
        # interactive-mode setup; stashes the data for draw_fig callbacks
        self.plot, = plt.plot([], [])
        plt.ion()
        self.training_data = training_data
        self.validation_data = validation_data
        self.neuron = neuron
| apache-2.0 |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py | 69 | 42655 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig
<http://www.fontconfig.org>`_ on Unix variant platforms (Linux, OS X,
Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this
file to ``True``. Fontconfig has the advantage that it is the
standard way to look up fonts on X11 platforms, so if a font is
installed, it is much more likely to be found.
"""
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <jdhunter@ace.bsd.uchicago.edu>
Paul Barrett <Barrett@STScI.Edu>
Michael Droettboom <mdroe@STScI.edu>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, glob
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_configdir
from matplotlib.cbook import is_string_like
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
try:
import cPickle as pickle
except ImportError:
import pickle
# When True, font lookup is delegated to the system fontconfig library
# (experimental; see module docstring)
USE_FONTCONFIG = False

verbose = matplotlib.verbose

# CSS-style relative font-size keywords mapped to scale factors
font_scalings = {
    'xx-small' : 0.579,
    'x-small' : 0.694,
    'small' : 0.833,
    'medium' : 1.0,
    'large' : 1.200,
    'x-large' : 1.440,
    'xx-large' : 1.728,
    'larger' : 1.2,
    'smaller' : 0.833,
    None : 1.0}

# CSS font-stretch keywords mapped to numeric values (100-900)
stretch_dict = {
    'ultra-condensed' : 100,
    'extra-condensed' : 200,
    'condensed' : 300,
    'semi-condensed' : 400,
    'normal' : 500,
    'semi-expanded' : 600,
    'expanded' : 700,
    'extra-expanded' : 800,
    'ultra-expanded' : 900}

# CSS font-weight keywords mapped to numeric values (100-900)
weight_dict = {
    'ultralight' : 100,
    'light' : 200,
    'normal' : 400,
    'regular' : 400,
    'book' : 400,
    'medium' : 500,
    'roman' : 500,
    'semibold' : 600,
    'demibold' : 600,
    'demi' : 600,
    'bold' : 700,
    'heavy' : 800,
    'extra bold' : 800,
    'black' : 900}

# generic family names that stand for a configurable list of real fonts
font_family_aliases = set([
    'serif',
    'sans-serif',
    'cursive',
    'fantasy',
    'monospace',
    'sans'])

# OS Font paths
MSFolders = \
    r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'

MSFontDirectories = [
    r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
    r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']

X11FontDirectories = [
    # an old standard installation point
    "/usr/X11R6/lib/X11/fonts/TTF/",
    # here is the new standard location for fonts
    "/usr/share/fonts/",
    # documented as a good place to install new fonts
    "/usr/local/share/fonts/",
    # common application, not really useful
    "/usr/lib/openoffice/share/fonts/truetype/",
    ]

OSXFontDirectories = [
    "/Library/Fonts/",
    "/Network/Library/Fonts/",
    "/System/Library/Fonts/"
]

if not USE_FONTCONFIG:
    # extend the hardcoded search paths with the user's per-home font dirs
    home = os.environ.get('HOME')
    if home is not None:
        # user fonts on OSX
        path = os.path.join(home, 'Library', 'Fonts')
        OSXFontDirectories.append(path)
        path = os.path.join(home, '.fonts')
        X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
    """
    Return the tuple of file extensions that are treated as synonyms
    for the given font file extension *fontext*.
    """
    synonyms = {
        'ttf': ('ttf', 'otf'),
        'otf': ('ttf', 'otf'),
        'afm': ('afm',),
    }
    return synonyms[fontext]
def win32FontDirectory():
    r"""
    Return the user-specified font directory for Win32. This is
    looked up from the registry key::

      \\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts

    If the key is not found, $WINDIR/Fonts will be returned.
    """
    try:
        # _winreg is the Python 2 name of the module (winreg in Python 3)
        import _winreg
    except ImportError:
        pass # Fall through to default
    else:
        try:
            user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders)
            try:
                try:
                    # value is (data, type); [0] is the directory string
                    return _winreg.QueryValueEx(user, 'Fonts')[0]
                except OSError:
                    pass # Fall through to default
            finally:
                # always release the registry handle
                _winreg.CloseKey(user)
        except OSError:
            pass # Fall through to default
    return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
    """
    Search for fonts in the specified font directory, or use the
    system directories if none given. A list of TrueType font
    filenames are returned by default, or AFM fonts if *fontext* ==
    'afm'.
    """
    import _winreg
    if directory is None:
        directory = win32FontDirectory()
    # expand to the tuple of synonymous extensions (e.g. ttf -> (ttf, otf))
    fontext = get_fontext_synonyms(fontext)
    key, items = None, {}
    for fontdir in MSFontDirectories:
        try:
            local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir)
        except OSError:
            continue
        if not local:
            # registry key unavailable: fall back to globbing the directory
            files = []
            for ext in fontext:
                files.extend(glob.glob(os.path.join(directory, '*.'+ext)))
            return files
        try:
            # enumerate every registered font value under this key
            for j in range(_winreg.QueryInfoKey(local)[1]):
                try:
                    key, direc, any = _winreg.EnumValue( local, j)
                    # bare filenames are relative to the font directory
                    if not os.path.dirname(direc):
                        direc = os.path.join(directory, direc)
                    direc = os.path.abspath(direc).lower()
                    if os.path.splitext(direc)[1][1:] in fontext:
                        # dict used as a set to deduplicate paths
                        items[direc] = 1
                except EnvironmentError:
                    continue
                except WindowsError:
                    continue
            return items.keys()
        finally:
            _winreg.CloseKey(local)
    return None
def OSXFontDirectory():
    """
    Return the system font directories for OS X. This is done by
    starting at the list of hardcoded paths in
    :attr:`OSXFontDirectories` and returning all nested directories
    within them.
    """
    fontpaths = []
    # visitor for os.path.walk: record every directory encountered
    def add(arg,directory,files):
        fontpaths.append(directory)
    for fontdir in OSXFontDirectories:
        try:
            if os.path.isdir(fontdir):
                # NOTE: os.path.walk is Python-2-only (removed in Python 3)
                os.path.walk(fontdir, add, None)
        except (IOError, OSError, TypeError, ValueError):
            pass
    return fontpaths
def OSXInstalledFonts(directory=None, fontext='ttf'):
    """
    Return the list of installed font files on OS X.

    Searches every directory in *directory* (defaulting to the system
    font directories) for files matching the synonyms of *fontext*,
    in both lower- and upper-case spellings.
    """
    if directory is None:
        directory = OSXFontDirectory()

    extensions = get_fontext_synonyms(fontext)

    found = []
    for fontpath in directory:
        if extensions is None:
            patterns = ['*']
        else:
            patterns = []
            for ext in extensions:
                patterns.append('*.' + ext)
                patterns.append('*.' + ext.upper())
        for pattern in patterns:
            found.extend(glob.glob(os.path.join(fontpath, pattern)))
    return found
def x11FontDirectory():
    """
    Return the system font directories for X11.  This is done by
    starting at the list of hardcoded paths in
    :attr:`X11FontDirectories` and returning all nested directories
    within them.
    """
    found = []

    def _collect(_arg, dirname, _names):
        # os.path.walk visitor (Python 2): record every directory seen.
        found.append(dirname)

    for root in X11FontDirectories:
        try:
            if not os.path.isdir(root):
                continue
            os.path.walk(root, _collect, None)
        except (IOError, OSError, TypeError, ValueError):
            # Unreadable or bogus entries are silently skipped.
            pass
    return found
def get_fontconfig_fonts(fontext='ttf'):
    """
    Grab a list of all the fonts that are being tracked by fontconfig
    by making a system call to ``fc-list``.  This is an easy way to
    grab all of the fonts the user wants to be made available to
    applications, without needing knowing where all of them reside.
    """
    try:
        import commands
    except ImportError:
        # Python 3 has no ``commands`` module; report no tracked fonts.
        return {}
    fontext = get_fontext_synonyms(fontext)
    tracked = {}
    status, output = commands.getstatusoutput("fc-list file")
    if status != 0:
        # fc-list missing or failed: nothing to report.
        return tracked
    for line in output.split('\n'):
        fname = line.split(':')[0]
        if (os.path.splitext(fname)[1][1:] in fontext
                and os.path.exists(fname)):
            tracked[fname] = 1
    return tracked
def findSystemFonts(fontpaths=None, fontext='ttf'):
    """
    Search for fonts in the specified font paths. If no paths are
    given, will use a standard set of system paths, as well as the
    list of fonts tracked by fontconfig if fontconfig is installed and
    available. A list of TrueType fonts are returned by default with
    AFM fonts as an option.

    *fontpaths* may be None (use system defaults), a single path
    string, or a list of paths.  Returns a list of absolute paths of
    existing font files, de-duplicated via a dict.
    """
    fontfiles = {}
    fontexts = get_fontext_synonyms(fontext)
    if fontpaths is None:
        if sys.platform == 'win32':
            fontdir = win32FontDirectory()
            fontpaths = [fontdir]
            # now get all installed fonts directly...
            for f in win32InstalledFonts(fontdir):
                base, ext = os.path.splitext(f)
                if len(ext)>1 and ext[1:].lower() in fontexts:
                    fontfiles[f] = 1
        else:
            fontpaths = x11FontDirectory()
            # check for OS X & load its fonts if present
            if sys.platform == 'darwin':
                for f in OSXInstalledFonts(fontext=fontext):
                    fontfiles[f] = 1
            # Also merge in whatever fontconfig tracks (no-op when
            # fc-list is unavailable).
            for f in get_fontconfig_fonts(fontext):
                fontfiles[f] = 1
    elif isinstance(fontpaths, (str, unicode)):
        # NOTE: ``unicode`` is Python 2 only.  A single path string is
        # promoted to a one-element list.
        fontpaths = [fontpaths]
    for path in fontpaths:
        files = []
        for ext in fontexts:
            # Match both lower- and upper-cased extensions.
            files.extend(glob.glob(os.path.join(path, '*.'+ext)))
            files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
        for fname in files:
            # Key on absolute path so duplicates collapse.
            fontfiles[os.path.abspath(fname)] = 1
    return [fname for fname in fontfiles.keys() if os.path.exists(fname)]
def weight_as_number(weight):
    """
    Return the weight property as a numeric value.  String values
    are converted to their corresponding numeric value.

    Unrecognized weight names fall back to 400 (normal).  Numeric
    weights must be one of the CSS values 100, 200, ..., 900.

    Raises
    ------
    ValueError
        If *weight* is neither a string nor a valid CSS weight number.
    """
    if isinstance(weight, str):
        try:
            weight = weight_dict[weight.lower()]
        except KeyError:
            # Unknown names are treated as 'normal'.
            weight = 400
    elif weight in range(100, 1000, 100):
        pass
    else:
        # Fixed: the original used the Python 2-only statement form
        # ``raise ValueError, 'msg'`` (a SyntaxError on Python 3).
        raise ValueError('weight not a valid integer')
    return weight
class FontEntry(object):
    """
    A class for storing Font properties. It is used when populating
    the font lookup dictionary.
    """
    def __init__(self, fname='', name='', style='normal',
                 variant='normal', weight='normal', stretch='normal',
                 size='medium'):
        self.fname = fname
        self.name = name
        self.style = style
        self.variant = variant
        self.weight = weight
        self.stretch = stretch
        # Numeric sizes are normalized to a canonical float string
        # (e.g. '12' -> '12.0'); relative names like 'medium' are kept.
        try:
            numeric = float(size)
        except ValueError:
            self.size = size
        else:
            self.size = str(numeric)
def ttfFontProperty(font):
    """
    A function for populating the :class:`FontKey` by extracting
    information from the TrueType font file.
    *font* is a :class:`FT2Font` instance.

    Returns a :class:`FontEntry` describing the font's name, style,
    variant, weight, stretch and size.
    """
    name = font.family_name
    # Styles are: italic, oblique, and normal (default)
    sfnt = font.get_sfnt()
    # SFNT name-table entries: (1,0,0,2) is used below as a style
    # string, (1,0,0,4) as a full-name string.
    sfnt2 = sfnt.get((1,0,0,2))
    sfnt4 = sfnt.get((1,0,0,4))
    if sfnt2:
        sfnt2 = sfnt2.lower()
    else:
        sfnt2 = ''
    if sfnt4:
        sfnt4 = sfnt4.lower()
    else:
        sfnt4 = ''
    if sfnt4.find('oblique') >= 0:
        style = 'oblique'
    elif sfnt4.find('italic') >= 0:
        style = 'italic'
    elif sfnt2.find('regular') >= 0:
        style = 'normal'
    elif font.style_flags & ft2font.ITALIC:
        # Fall back to the FreeType style flags when the name tables
        # are inconclusive.
        style = 'italic'
    else:
        style = 'normal'
    # Variants are: small-caps and normal (default)
    # !!!! Untested
    if name.lower() in ['capitals', 'small-caps']:
        variant = 'small-caps'
    else:
        variant = 'normal'
    # Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
    # 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
    # lighter and bolder are also allowed.
    weight = None
    # First try to find a known weight name inside the full name.
    for w in weight_dict.keys():
        if sfnt4.find(w) >= 0:
            weight = w
            break
    if not weight:
        # Otherwise use the FreeType BOLD flag, defaulting to normal.
        if font.style_flags & ft2font.BOLD:
            weight = 700
        else:
            weight = 400
    weight = weight_as_number(weight)
    # Stretch can be absolute and relative
    # Absolute stretches are: ultra-condensed, extra-condensed, condensed,
    # semi-condensed, normal, semi-expanded, expanded, extra-expanded,
    # and ultra-expanded.
    # Relative stretches are: wider, narrower
    # Child value is: inherit
    # !!!! Incomplete
    if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
       sfnt4.find('cond') >= 0:
        stretch = 'condensed'
    elif sfnt4.find('demi cond') >= 0:
        stretch = 'semi-condensed'
    elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
        stretch = 'expanded'
    else:
        stretch = 'normal'
    # Sizes can be absolute and relative.
    # Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
    # and xx-large.
    # Relative sizes are: larger, smaller
    # Length value is an absolute font size, e.g. 12pt
    # Percentage values are in 'em's.  Most robust specification.
    # !!!! Incomplete
    if font.scalable:
        size = 'scalable'
    else:
        size = str(float(font.get_fontsize()))
    # !!!! Incomplete
    # size_adjust is computed but never used; kept for parity with the
    # CSS property list above.
    size_adjust = None
    return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
    """
    A function for populating a :class:`FontKey` instance by
    extracting information from the AFM font file.
    *font* is a class:`AFM` instance.

    Returns a :class:`FontEntry` for the font at *fontpath*.
    """
    name = font.get_familyname()
    # Styles are: italic, oblique, and normal (default)
    # AFM stores an italic angle; also infer style from the family name.
    if font.get_angle() != 0 or name.lower().find('italic') >= 0:
        style = 'italic'
    elif name.lower().find('oblique') >= 0:
        style = 'oblique'
    else:
        style = 'normal'
    # Variants are: small-caps and normal (default)
    # !!!! Untested
    if name.lower() in ['capitals', 'small-caps']:
        variant = 'small-caps'
    else:
        variant = 'normal'
    # Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
    # 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
    # lighter and bolder are also allowed.
    weight = weight_as_number(font.get_weight().lower())
    # Stretch can be absolute and relative
    # Absolute stretches are: ultra-condensed, extra-condensed, condensed,
    # semi-condensed, normal, semi-expanded, expanded, extra-expanded,
    # and ultra-expanded.
    # Relative stretches are: wider, narrower
    # Child value is: inherit
    # !!!! Incomplete
    stretch = 'normal'
    # Sizes can be absolute and relative.
    # Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
    # and xx-large.
    # Relative sizes are: larger, smaller
    # Length value is an absolute font size, e.g. 12pt
    # Percentage values are in 'em's.  Most robust specification.
    # All AFM fonts are apparently scalable.
    size = 'scalable'
    # !!!! Incomplete
    # size_adjust is computed but never used; kept for parity with
    # ttfFontProperty.
    size_adjust = None
    return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
    """
    A function to create a font lookup list.  The default is to create
    a list of TrueType fonts.  An AFM font list can optionally be
    created.

    Returns a list of :class:`FontEntry` instances, one per unique
    font-file basename that could be opened and parsed; unreadable or
    malformed files are skipped with a report.
    """
    fontlist = []
    #  Add fonts from list of known font files.
    seen = {}
    for fpath in fontfiles:
        verbose.report('createFontDict: %s' % (fpath), 'debug')
        # De-duplicate on basename: first occurrence wins.
        fname = os.path.split(fpath)[1]
        if fname in seen:
            continue
        else:
            seen[fname] = 1
        if fontext == 'afm':
            try:
                fh = open(fpath, 'r')
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt
                # and SystemExit are no longer swallowed.
                verbose.report("Could not open font file %s" % fpath)
                continue
            try:
                try:
                    font = afm.AFM(fh)
                finally:
                    fh.close()
            except RuntimeError:
                verbose.report("Could not parse font file %s" % fpath)
                continue
            prop = afmFontProperty(fpath, font)
        else:
            try:
                font = ft2font.FT2Font(str(fpath))
            except RuntimeError:
                verbose.report("Could not open font file %s" % fpath)
                continue
            except UnicodeError:
                verbose.report("Cannot handle unicode filenames")
                #print >> sys.stderr, 'Bad file is', fpath
                continue
            try:
                prop = ttfFontProperty(font)
            except Exception:
                # Narrowed from a bare ``except:``; fonts with
                # malformed metadata are simply skipped.
                continue
        fontlist.append(prop)
    return fontlist
class FontProperties(object):
    """
    A class for storing and manipulating font properties.
    The font properties are those described in the `W3C Cascading
    Style Sheet, Level 1
    <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
    specification. The six properties are:
    - family: A list of font names in decreasing order of priority.
      The items may include a generic font family name, either
      'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
      In that case, the actual font to be used will be looked up
      from the associated rcParam in :file:`matplotlibrc`.
    - style: Either 'normal', 'italic' or 'oblique'.
    - variant: Either 'normal' or 'small-caps'.
    - stretch: A numeric value in the range 0-1000 or one of
      'ultra-condensed', 'extra-condensed', 'condensed',
      'semi-condensed', 'normal', 'semi-expanded', 'expanded',
      'extra-expanded' or 'ultra-expanded'
    - weight: A numeric value in the range 0-1000 or one of
      'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
      'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
      'extra bold', 'black'
    - size: Either an relative value of 'xx-small', 'x-small',
      'small', 'medium', 'large', 'x-large', 'xx-large' or an
      absolute font size, e.g. 12
    The default font property for TrueType fonts (as specified in the
    default :file:`matplotlibrc` file) is::
      sans-serif, normal, normal, normal, normal, scalable.
    Alternatively, a font may be specified using an absolute path to a
    .ttf file, by using the *fname* kwarg.
    The preferred usage of font sizes is to use the relative values,
    e.g. 'large', instead of absolute font sizes, e.g. 12. This
    approach allows all text sizes to be made larger or smaller based
    on the font manager's default font size, i.e. by using the
    :meth:`FontManager.set_default_size` method.
    This class will also accept a `fontconfig
    <http://www.fontconfig.org/>`_ pattern, if it is the only argument
    provided. See the documentation on `fontconfig patterns
    <http://www.fontconfig.org/fontconfig-user.html>`_. This support
    does not require fontconfig to be installed. We are merely
    borrowing its pattern syntax for use here.
    Note that matplotlib's internal font manager and fontconfig use a
    different algorithm to lookup fonts, so the results of the same pattern
    may be different in matplotlib than in other applications that use
    fontconfig.
    """
    def __init__(self,
                 family = None,
                 style = None,
                 variant= None,
                 weight = None,
                 stretch= None,
                 size = None,
                 fname = None, # if this is set, it's a hardcoded filename to use
                 _init = None # used only by copy()
                 ):
        # All properties default to None, meaning "defer to rcParams"
        # (see the get_* accessors below).
        self._family = None
        self._slant = None
        self._variant = None
        self._weight = None
        self._stretch = None
        self._size = None
        self._file = None
        # This is used only by copy()
        if _init is not None:
            self.__dict__.update(_init.__dict__)
            return
        if is_string_like(family):
            # Treat family as a fontconfig pattern if it is the only
            # parameter provided.
            if (style is None and
                variant is None and
                weight is None and
                stretch is None and
                size is None and
                fname is None):
                self.set_fontconfig_pattern(family)
                return
        self.set_family(family)
        self.set_style(style)
        self.set_variant(variant)
        self.set_weight(weight)
        self.set_stretch(stretch)
        self.set_file(fname)
        self.set_size(size)
    def _parse_fontconfig_pattern(self, pattern):
        # Thin wrapper around the module-level parser.
        return parse_fontconfig_pattern(pattern)
    def __hash__(self):
        # Python 2: dict.items() returns a list, sorted in place for a
        # deterministic repr.  NOTE(review): the hash changes if any
        # property is mutated after the object is used as a cache key.
        l = self.__dict__.items()
        l.sort()
        return hash(repr(l))
    def __str__(self):
        return self.get_fontconfig_pattern()
    def get_family(self):
        """
        Return a list of font names that comprise the font family.
        """
        if self._family is None:
            # Fall back to the rc default; normalize to a list.
            family = rcParams['font.family']
            if is_string_like(family):
                return [family]
            return family
        return self._family
    def get_name(self):
        """
        Return the name of the font that best matches the font
        properties.
        """
        return ft2font.FT2Font(str(findfont(self))).family_name
    def get_style(self):
        """
        Return the font style. Values are: 'normal', 'italic' or
        'oblique'.
        """
        if self._slant is None:
            return rcParams['font.style']
        return self._slant
    get_slant = get_style
    def get_variant(self):
        """
        Return the font variant. Values are: 'normal' or
        'small-caps'.
        """
        if self._variant is None:
            return rcParams['font.variant']
        return self._variant
    def get_weight(self):
        """
        Set the font weight. Options are: A numeric value in the
        range 0-1000 or one of 'light', 'normal', 'regular', 'book',
        'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
        'heavy', 'extra bold', 'black'
        """
        if self._weight is None:
            return rcParams['font.weight']
        return self._weight
    def get_stretch(self):
        """
        Return the font stretch or width. Options are: 'ultra-condensed',
        'extra-condensed', 'condensed', 'semi-condensed', 'normal',
        'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
        """
        if self._stretch is None:
            return rcParams['font.stretch']
        return self._stretch
    def get_size(self):
        """
        Return the font size.
        """
        if self._size is None:
            return rcParams['font.size']
        return self._size
    def get_size_in_points(self):
        # Absolute sizes convert directly; relative names are scaled
        # against the font manager's default size.
        if self._size is not None:
            try:
                return float(self._size)
            except ValueError:
                pass
        default_size = fontManager.get_default_size()
        # NOTE(review): font_scalings.get(...) returns None for an
        # unknown size name, which would raise TypeError here — TODO
        # confirm callers only pass known relative sizes.
        return default_size * font_scalings.get(self._size)
    def get_file(self):
        """
        Return the filename of the associated font.
        """
        return self._file
    def get_fontconfig_pattern(self):
        """
        Get a fontconfig pattern suitable for looking up the font as
        specified with fontconfig's ``fc-match`` utility.
        See the documentation on `fontconfig patterns
        <http://www.fontconfig.org/fontconfig-user.html>`_.
        This support does not require fontconfig to be installed or
        support for it to be enabled. We are merely borrowing its
        pattern syntax for use here.
        """
        return generate_fontconfig_pattern(self)
    def set_family(self, family):
        """
        Change the font family. May be either an alias (generic name
        is CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
        'fantasy', or 'monospace', or a real font name.
        """
        if family is None:
            self._family = None
        else:
            # Always store a list, even for a single name.
            if is_string_like(family):
                family = [family]
            self._family = family
    set_name = set_family
    def set_style(self, style):
        """
        Set the font style. Values are: 'normal', 'italic' or
        'oblique'.
        """
        if style not in ('normal', 'italic', 'oblique', None):
            raise ValueError("style must be normal, italic or oblique")
        self._slant = style
    set_slant = set_style
    def set_variant(self, variant):
        """
        Set the font variant. Values are: 'normal' or 'small-caps'.
        """
        if variant not in ('normal', 'small-caps', None):
            raise ValueError("variant must be normal or small-caps")
        self._variant = variant
    def set_weight(self, weight):
        """
        Set the font weight. May be either a numeric value in the
        range 0-1000 or one of 'ultralight', 'light', 'normal',
        'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
        'demi', 'bold', 'heavy', 'extra bold', 'black'
        """
        if weight is not None:
            # Numeric weights are range-checked; non-numeric ones must
            # be one of the known weight names.
            try:
                weight = int(weight)
                if weight < 0 or weight > 1000:
                    raise ValueError()
            except ValueError:
                if weight not in weight_dict:
                    raise ValueError("weight is invalid")
        self._weight = weight
    def set_stretch(self, stretch):
        """
        Set the font stretch or width. Options are: 'ultra-condensed',
        'extra-condensed', 'condensed', 'semi-condensed', 'normal',
        'semi-expanded', 'expanded', 'extra-expanded' or
        'ultra-expanded', or a numeric value in the range 0-1000.
        """
        if stretch is not None:
            # Same validation scheme as set_weight.
            try:
                stretch = int(stretch)
                if stretch < 0 or stretch > 1000:
                    raise ValueError()
            except ValueError:
                if stretch not in stretch_dict:
                    raise ValueError("stretch is invalid")
        self._stretch = stretch
    def set_size(self, size):
        """
        Set the font size. Either an relative value of 'xx-small',
        'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
        or an absolute font size, e.g. 12.
        """
        if size is not None:
            # Numeric sizes are stored as floats; otherwise the value
            # must be a known relative size name.
            try:
                size = float(size)
            except ValueError:
                if size is not None and size not in font_scalings:
                    raise ValueError("size is invalid")
        self._size = size
    def set_file(self, file):
        """
        Set the filename of the fontfile to use. In this case, all
        other properties will be ignored.
        """
        self._file = file
    def set_fontconfig_pattern(self, pattern):
        """
        Set the properties by parsing a fontconfig *pattern*.
        See the documentation on `fontconfig patterns
        <http://www.fontconfig.org/fontconfig-user.html>`_.
        This support does not require fontconfig to be installed or
        support for it to be enabled. We are merely borrowing its
        pattern syntax for use here.
        """
        # Dispatch each parsed key to the corresponding set_* method.
        for key, val in self._parse_fontconfig_pattern(pattern).items():
            if type(val) == list:
                getattr(self, "set_" + key)(val[0])
            else:
                getattr(self, "set_" + key)(val)
    def copy(self):
        """Return a deep copy of self"""
        return FontProperties(_init = self)
def ttfdict_to_fnames(d):
    """
    flatten a ttfdict to all the filenames it contains

    The nesting order is name -> style -> variant -> weight ->
    stretch -> size -> filename.
    """
    return [fname
            for named in d.values()
            for styled in named.values()
            for variantd in styled.values()
            for weightd in variantd.values()
            for stretchd in weightd.values()
            for fname in stretchd.values()]
def pickle_dump(data, filename):
    """
    Equivalent to pickle.dump(data, open(filename, 'wb'))
    but closes the file to prevent filehandle leakage.
    """
    # Fixed: pickle streams are binary, so the file must be opened in
    # 'wb' (text mode corrupts the stream on platforms that translate
    # line endings and fails outright on Python 3).
    fh = open(filename, 'wb')
    try:
        pickle.dump(data, fh)
    finally:
        fh.close()
def pickle_load(filename):
    """
    Equivalent to pickle.load(open(filename, 'rb'))
    but closes the file to prevent filehandle leakage.
    """
    # Fixed: pickle streams are binary, so the file must be opened in
    # 'rb' (text mode fails outright on Python 3).
    fh = open(filename, 'rb')
    try:
        data = pickle.load(fh)
    finally:
        fh.close()
    return data
class FontManager:
    """
    On import, the :class:`FontManager` singleton instance creates a
    list of TrueType fonts based on the font properties: name, style,
    variant, weight, stretch, and size.  The :meth:`findfont` method
    does a nearest neighbor search to find the font that most closely
    matches the specification.  If no good enough match is found, a
    default font is returned.
    """
    def __init__(self, size=None, weight='normal'):
        self.__default_weight = weight
        self.default_size = size
        paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
                 os.path.join(rcParams['datapath'], 'fonts', 'afm')]
        #  Create list of font paths
        for pathname in ['TTFPATH', 'AFMPATH']:
            if pathname in os.environ:
                ttfpath = os.environ[pathname]
                if ttfpath.find(';') >= 0: #win32 style
                    paths.extend(ttfpath.split(';'))
                elif ttfpath.find(':') >= 0: # unix style
                    paths.extend(ttfpath.split(':'))
                else:
                    paths.append(ttfpath)
        verbose.report('font search path %s'%(str(paths)))
        #  Load TrueType fonts and create font dictionary.
        self.ttffiles = findSystemFonts(paths) + findSystemFonts()
        for fname in self.ttffiles:
            verbose.report('trying fontname %s' % fname, 'debug')
            if fname.lower().find('vera.ttf')>=0:
                self.defaultFont = fname
                break
        else:
            # use anything
            self.defaultFont = self.ttffiles[0]
        self.ttflist = createFontList(self.ttffiles)
        if rcParams['pdf.use14corefonts']:
            # Load only the 14 PDF core fonts.  These fonts do not need to be
            # embedded; every PDF viewing application is required to have them:
            # Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique,
            # Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique,
            # Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
            # ZapfDingbats.
            afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts')
            afmfiles = findSystemFonts(afmpath, fontext='afm')
            self.afmlist = createFontList(afmfiles, fontext='afm')
        else:
            self.afmfiles = findSystemFonts(paths, fontext='afm') + \
                            findSystemFonts(fontext='afm')
            self.afmlist = createFontList(self.afmfiles, fontext='afm')
        self.ttf_lookup_cache = {}
        self.afm_lookup_cache = {}
    def get_default_weight(self):
        """
        Return the default font weight.
        """
        return self.__default_weight
    def get_default_size(self):
        """
        Return the default font size.
        """
        if self.default_size is None:
            return rcParams['font.size']
        return self.default_size
    def set_default_weight(self, weight):
        """
        Set the default font weight.  The initial value is 'normal'.
        """
        self.__default_weight = weight
    def set_default_size(self, size):
        """
        Set the default font size in points.  The initial value is set
        by ``font.size`` in rc.
        """
        self.default_size = size
    def update_fonts(self, filenames):
        """
        Update the font dictionary with new font files.
        Currently not implemented.
        """
        # !!!! Needs implementing
        raise NotImplementedError
    # Each of the scoring functions below should return a value between
    # 0.0 (perfect match) and 1.0 (terrible match)
    def score_family(self, families, family2):
        """
        Returns a match score between the list of font families in
        *families* and the font family name *family2*.
        An exact match anywhere in the list returns 0.0.
        A match by generic font name will return 0.1.
        No match will return 1.0.
        """
        for i, family1 in enumerate(families):
            if family1.lower() in font_family_aliases:
                if family1 == 'sans':
                    # Fixed: this was ``family1 == 'sans-serif'``, a
                    # no-op comparison, so the 'sans' alias never
                    # resolved and the rcParams lookup below raised.
                    family1 = 'sans-serif'
                options = rcParams['font.' + family1]
                if family2 in options:
                    idx = options.index(family2)
                    return 0.1 * (float(idx) / len(options))
            elif family1.lower() == family2.lower():
                return 0.0
        return 1.0
    def score_style(self, style1, style2):
        """
        Returns a match score between *style1* and *style2*.
        An exact match returns 0.0.
        A match between 'italic' and 'oblique' returns 0.1.
        No match returns 1.0.
        """
        if style1 == style2:
            return 0.0
        elif style1 in ('italic', 'oblique') and \
                style2 in ('italic', 'oblique'):
            return 0.1
        return 1.0
    def score_variant(self, variant1, variant2):
        """
        Returns a match score between *variant1* and *variant2*.
        An exact match returns 0.0, otherwise 1.0.
        """
        if variant1 == variant2:
            return 0.0
        else:
            return 1.0
    def score_stretch(self, stretch1, stretch2):
        """
        Returns a match score between *stretch1* and *stretch2*.
        The result is the absolute value of the difference between the
        CSS numeric values of *stretch1* and *stretch2*, normalized
        between 0.0 and 1.0.
        """
        try:
            stretchval1 = int(stretch1)
        except ValueError:
            stretchval1 = stretch_dict.get(stretch1, 500)
        try:
            stretchval2 = int(stretch2)
        except ValueError:
            stretchval2 = stretch_dict.get(stretch2, 500)
        return abs(stretchval1 - stretchval2) / 1000.0
    def score_weight(self, weight1, weight2):
        """
        Returns a match score between *weight1* and *weight2*.
        The result is the absolute value of the difference between the
        CSS numeric values of *weight1* and *weight2*, normalized
        between 0.0 and 1.0.
        """
        try:
            weightval1 = int(weight1)
        except ValueError:
            weightval1 = weight_dict.get(weight1, 500)
        try:
            weightval2 = int(weight2)
        except ValueError:
            weightval2 = weight_dict.get(weight2, 500)
        return abs(weightval1 - weightval2) / 1000.0
    def score_size(self, size1, size2):
        """
        Returns a match score between *size1* and *size2*.
        If *size2* (the size specified in the font file) is 'scalable', this
        function always returns 0.0, since any font size can be generated.
        Otherwise, the result is the absolute distance between *size1* and
        *size2*, normalized so that the usual range of font sizes (6pt -
        72pt) will lie between 0.0 and 1.0.
        """
        if size2 == 'scalable':
            return 0.0
        # Size values should already have been normalized to numbers;
        # fall back to scaling the default size for relative names.
        try:
            sizeval1 = float(size1)
        except ValueError:
            # Fixed: font_scalings is a dict, so index it rather than
            # calling it (the old code raised TypeError here).
            sizeval1 = self.default_size * font_scalings[size1]
        try:
            sizeval2 = float(size2)
        except ValueError:
            return 1.0
        return abs(sizeval1 - sizeval2) / 72.0
    def findfont(self, prop, fontext='ttf'):
        """
        Search the font list for the font that most closely matches
        the :class:`FontProperties` *prop*.
        :meth:`findfont` performs a nearest neighbor search.  Each
        font is given a similarity score to the target font
        properties.  The first font with the highest score is
        returned.  If no matches below a certain threshold are found,
        the default font (usually Vera Sans) is returned.
        The result is cached, so subsequent lookups don't have to
        perform the O(n) nearest neighbor search.
        See the `W3C Cascading Style Sheet, Level 1
        <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
        for a description of the font finding algorithm.
        """
        debug = False
        if prop is None:
            return self.defaultFont
        if is_string_like(prop):
            prop = FontProperties(prop)
        fname = prop.get_file()
        if fname is not None:
            # An explicit font file bypasses matching entirely.
            verbose.report('findfont returning %s'%fname, 'debug')
            return fname
        if fontext == 'afm':
            font_cache = self.afm_lookup_cache
            fontlist = self.afmlist
        else:
            font_cache = self.ttf_lookup_cache
            fontlist = self.ttflist
        cached = font_cache.get(hash(prop))
        if cached:
            return cached
        best_score = 1e64
        best_font = None
        for font in fontlist:
            # Matching family should have highest priority, so it is multiplied
            # by 10.0
            score = \
                self.score_family(prop.get_family(), font.name) * 10.0 + \
                self.score_style(prop.get_style(), font.style) + \
                self.score_variant(prop.get_variant(), font.variant) + \
                self.score_weight(prop.get_weight(), font.weight) + \
                self.score_stretch(prop.get_stretch(), font.stretch) + \
                self.score_size(prop.get_size(), font.size)
            if score < best_score:
                best_score = score
                best_font = font
            if score == 0:
                # Perfect match; no need to keep scanning.
                break
        if best_font is None or best_score >= 10.0:
            verbose.report('findfont: Could not match %s. Returning %s' %
                           (prop, self.defaultFont))
            result = self.defaultFont
        else:
            verbose.report('findfont: Matching %s to %s (%s) with score of %f' %
                           (prop, best_font.name, best_font.fname, best_score))
            result = best_font.fname
        font_cache[hash(prop)] = result
        return result
# Cache of filename -> bool so each font file is inspected only once.
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
    """
    Returns True if the given font is a Postscript Compact Font Format
    Font embedded in an OpenType wrapper.  Used by the PostScript and
    PDF backends that can not subset these fonts.
    """
    if os.path.splitext(filename)[1].lower() == '.otf':
        result = _is_opentype_cff_font_cache.get(filename)
        if result is None:
            fd = open(filename, 'rb')
            try:
                tag = fd.read(4)
            finally:
                # Fixed: the descriptor leaked if read() raised.
                fd.close()
            # Fixed: the file is read in binary mode, so compare
            # against a byte string (identical to str 'OTTO' on
            # Python 2, but the old str comparison was always False
            # on Python 3).
            result = (tag == b'OTTO')
            _is_opentype_cff_font_cache[filename] = result
        return result
    return False
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
    import re

    def fc_match(pattern, fontext):
        # Ask fc-match for candidates and return the first file whose
        # extension matches; None if nothing suitable was found.
        import commands
        fontexts = get_fontext_synonyms(fontext)
        ext = "." + fontext
        status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern)
        if status == 0:
            for match in _fc_match_regex.finditer(output):
                file = match.group(1)
                if os.path.splitext(file)[1][1:] in fontexts:
                    return file
        return None

    _fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"')
    _fc_match_cache = {}

    def findfont(prop, fontext='ttf'):
        if not is_string_like(prop):
            prop = prop.get_fontconfig_pattern()
        cached = _fc_match_cache.get(prop)
        if cached is not None:
            return cached
        result = fc_match(prop, fontext)
        if result is None:
            # Fall back to fontconfig's overall best match.
            result = fc_match(':', fontext)
        _fc_match_cache[prop] = result
        return result
else:
    _fmcache = os.path.join(get_configdir(), 'fontList.cache')
    fontManager = None

    def _rebuild():
        global fontManager
        fontManager = FontManager()
        pickle_dump(fontManager, _fmcache)
        verbose.report("generated new fontManager")

    try:
        fontManager = pickle_load(_fmcache)
        fontManager.default_size = None
        verbose.report("Using fontManager instance from %s" % _fmcache)
    except Exception:
        # Fixed: this was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit raised while loading the cache.
        # Any unreadable, corrupt or stale cache triggers a rebuild.
        _rebuild()

    def findfont(prop, **kw):
        global fontManager
        font = fontManager.findfont(prop, **kw)
        if not os.path.exists(font):
            # The pickled cache may reference fonts that have since
            # been removed; rebuild and retry once.
            verbose.report("%s returned by pickled fontManager does not exist" % font)
            _rebuild()
            font = fontManager.findfont(prop, **kw)
        return font
| agpl-3.0 |
raoulbq/scipy | scipy/signal/waveforms.py | 17 | 14814 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
    """
    Return a periodic sawtooth or triangle waveform.

    The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
    interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
    ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        Time.
    width : array_like, optional
        Width of the rising ramp as a proportion of the total cycle.
        Default is 1, producing a rising ramp, while 0 produces a falling
        ramp.  `t` = 0.5 produces a triangle wave.
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500)
    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
    """
    t, w = asarray(t), asarray(width)
    # Broadcast t and w against each other so elementwise masks align.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # on the interval 0 to width*2*pi function is
    #   tmod / (pi*w) - 1
    # Fixed idiom: use boolean-array operators (~, &) instead of
    # integer arithmetic on boolean masks, e.g. ``(1 - mask1)``.
    mask2 = ~mask1 & (tmod < w * 2 * pi)
    tsub = extract(mask2, tmod)
    wsub = extract(mask2, w)
    place(y, mask2, tsub / (pi * wsub) - 1)

    # on the interval width*2*pi to 2*pi function is
    #   (pi*(w+1)-tmod) / (pi*(1-w))
    mask3 = ~mask1 & ~mask2
    tsub = extract(mask3, tmod)
    wsub = extract(mask3, w)
    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
    return y
def square(t, duty=0.5):
    """
    Return a periodic square-wave waveform.

    The square wave has a period ``2*pi``, has value +1 from 0 to
    ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
    the interval [0,1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : array_like, optional
        Duty cycle.  Default is 0.5 (50% duty cycle).
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the square waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500, endpoint=False)
    >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
    >>> plt.ylim(-2, 2)

    A pulse-width modulated sine wave:

    >>> plt.figure()
    >>> sig = np.sin(2 * np.pi * t)
    >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
    >>> plt.subplot(2, 1, 1)
    >>> plt.plot(t, sig)
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(t, pwm)
    >>> plt.ylim(-1.5, 1.5)
    """
    t, w = asarray(t), asarray(duty)
    # Broadcast t and w against each other so elementwise masks align.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # on the interval 0 to duty*2*pi function is 1
    tmod = mod(t, 2 * pi)
    # Fixed idiom: use boolean-array operators (~, &) instead of
    # integer arithmetic on boolean masks, e.g. ``(1 - mask1)``.
    mask2 = ~mask1 & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # on the interval duty*2*pi to 2*pi function is -1
    mask3 = ~mask1 & ~mask2
    place(y, mask3, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
               retenv=False):
    """
    Return a Gaussian modulated sinusoid:

        ``exp(-a t^2) exp(1j*2*pi*fc*t).``

    If `retquad` is True, then return the real and imaginary parts
    (in-phase and quadrature).
    If `retenv` is True, then return the envelope (unmodulated signal).
    Otherwise, return the real part of the modulated sinusoid.

    Parameters
    ----------
    t : ndarray or the string 'cutoff'
        Input array.
    fc : int, optional
        Center frequency (e.g. Hz).  Default is 1000.
    bw : float, optional
        Fractional bandwidth in frequency domain of pulse (e.g. Hz).
        Default is 0.5.
    bwr : float, optional
        Reference level at which fractional bandwidth is calculated (dB).
        Default is -6.
    tpr : float, optional
        If `t` is 'cutoff', then the function returns the cutoff
        time for when the pulse amplitude falls below `tpr` (in dB).
        Default is -60.
    retquad : bool, optional
        If True, return the quadrature (imaginary) as well as the real part
        of the signal.  Default is False.
    retenv : bool, optional
        If True, return the envelope of the signal.  Default is False.

    Returns
    -------
    yI : ndarray
        Real part of signal.  Always returned.
    yQ : ndarray
        Imaginary part of signal.  Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of signal.  Only returned if `retenv` is True.

    Raises
    ------
    ValueError
        If `fc` is negative, `bw` is not positive, `bwr` is not negative,
        or `t` is a string other than 'cutoff'.
    """
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)

    # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2)  = g(f)
    ref = pow(10.0, bwr / 20.0)
    # fdel = fc*bw/2:  g(fdel) = ref --- solve this for a
    #
    # pi^2/a * fc^2 * bw^2 /4=-log(ref)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    # Check for the 'cutoff' request with an explicit type test.  The
    # original code compared `t == 'cutoff'` directly, which is unreliable
    # when `t` is an ndarray (elementwise comparison against a string).
    if isinstance(t, str):
        if t == 'cutoff':  # compute cut_off point
            # Solve exp(-a tc**2) = tref  for tc
            #   tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
            if tpr >= 0:
                raise ValueError("Reference level for time cutoff must "
                                 "be < 0 dB")
            tref = pow(10.0, tpr / 20.0)
            return sqrt(-log(tref) / a)
        else:
            raise ValueError("If `t` is a string, it must be 'cutoff'")

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    if not retquad and not retenv:
        return yI
    if not retquad and retenv:
        return yI, yenv
    if retquad and not retenv:
        return yI, yQ
    if retquad and retenv:
        return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
    """Frequency-swept cosine generator.

    'Hz' here means 'cycles per unit'; there is no requirement that the
    unit is one second.  The units of rotation are cycles, not radians,
    and `t` may measure space instead of time.

    Parameters
    ----------
    t : array_like
        Times at which to evaluate the waveform.
    f0 : float
        Frequency (e.g. Hz) at time t=0.
    t1 : float
        Time at which `f1` is specified.
    f1 : float
        Frequency (e.g. Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
        Kind of frequency sweep; 'linear' if not given.  Shorter aliases
        ('lin', 'quad', 'log', 'hyp', ...) are also accepted.
    phi : float, optional
        Phase offset, in degrees.  Default is 0.
    vertex_zero : bool, optional
        Only used when `method` is 'quadratic'; selects whether the vertex
        of the frequency parabola sits at t=0 (True) or t=t1 (False).

    Returns
    -------
    y : ndarray
        ``cos(phase + (pi/180)*phi)`` evaluated at `t`, where `phase` is
        the integral (from 0 to `t`) of ``2*pi*f(t)``.

    See Also
    --------
    sweep_poly

    Notes
    -----
    Instantaneous frequency per method:

    linear:       ``f(t) = f0 + (f1 - f0) * t / t1``
    quadratic:    parabola through (0, f0) and (t1, f1); vertex at (0, f0)
                  unless `vertex_zero` is False.
    logarithmic:  ``f(t) = f0 * (f1/f0)**(t/t1)`` (f0, f1 nonzero, same sign)
    hyperbolic:   ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)`` (f0, f1 nonzero)
    """
    # _chirp_phase integrates 2*pi*f(t); the phase offset arrives in
    # degrees, so scale it to radians before summing.
    return cos(_chirp_phase(t, f0, t1, f1, method, vertex_zero)
               + phi * (pi / 180))
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
    """
    Frequency-swept cosine generator, with a time-dependent frequency.

    Generates a sinusoid whose instantaneous frequency at time `t` is
    given by the polynomial `poly`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency expressed as a polynomial.  A list or
        ndarray of length n gives the coefficients, so the instantaneous
        frequency is
        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``;
        a numpy.poly1d instance `p` gives ``f(t) = p(t)``.
    phi : float, optional
        Phase offset, in degrees, Default: 0.

    Returns
    -------
    sweep_poly : ndarray
        ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
        (from 0 to t) of ``2 * pi * f(t)`` with ``f(t)`` as defined above.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    # _sweep_poly_phase integrates 2*pi*f(t); the phase offset arrives in
    # degrees, so scale it to radians before summing.
    return cos(_sweep_poly_phase(t, poly) + phi * (pi / 180))
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
thomasmeagher/DS-501 | 3/text_analytics/solutions/exercise_02_sentiment.py | 46 | 2798 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV

    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))

    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])

    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
    }
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)

    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search.
    # `grid_scores_` belonged to the removed sklearn.grid_search API; the
    # sklearn.model_selection.GridSearchCV imported above exposes the
    # results through the `cv_results_` dict instead.
    print(grid_search.cv_results_)

    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    y_predicted = grid_search.predict(docs_test)

    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))

    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)

    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| mit |
nrhine1/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
# Scratch directories shared by the tests below; deleted by teardown_module.
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
# Per-test category directories, (re)created by setup_load_files.
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    for scratch_dir in (DATA_HOME, LOAD_FILES_ROOT):
        _remove_dir(scratch_dir)
def setup_load_files():
    """Create two category directories plus one sample file for load_files."""
    global TEST_CATEGORY_DIR1
    global TEST_CATEGORY_DIR2
    TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    # The context manager closes the file; delete=False keeps it on disk
    # so load_files can pick it up afterwards.
    with tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
                                     delete=False) as sample_file:
        sample_file.write(b("Hello World!\n"))
def teardown_load_files():
    """Drop the category directories created by setup_load_files."""
    for category_dir in (TEST_CATEGORY_DIR1, TEST_CATEGORY_DIR2):
        _remove_dir(category_dir)
def test_data_home():
    """Round-trip get_data_home/clear_data_home against a scratch folder."""
    # get_data_home will point to a pre-existing folder
    data_home = get_data_home(data_home=DATA_HOME)
    assert_equal(data_home, DATA_HOME)
    assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder it-self
    clear_data_home(data_home=data_home)
    assert_false(os.path.exists(data_home))
    # if the folder is missing it will be created again
    data_home = get_data_home(data_home=DATA_HOME)
    assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
    """load_files on an empty root yields no files, targets or description."""
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(len(res.filenames), 0)
    assert_equal(len(res.target_names), 0)
    assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
    """Default load_files picks up the single fixture file in two categories."""
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.DESCR, None)
    # Content is loaded as bytes by default.
    assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
    """load_files restricted to one category, with description and encoding."""
    # os.path.basename is the portable way to take the last path component;
    # the original split('/').pop() breaks on Windows path separators.
    category = os.path.basename(os.path.abspath(TEST_CATEGORY_DIR1))
    res = load_files(LOAD_FILES_ROOT, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    # With an encoding given, content is decoded to text.
    assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
    """With load_content=False only filenames/targets are returned, no data."""
    res = load_files(LOAD_FILES_ROOT, load_content=False)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.DESCR, None)
    assert_equal(res.get('data'), None)
def test_load_sample_images():
    """load_sample_images ships two images plus a description."""
    try:
        res = load_sample_images()
        assert_equal(len(res.images), 2)
        assert_equal(len(res.filenames), 2)
        assert_true(res.DESCR)
    except ImportError:
        # PIL is an optional dependency for the image loaders.
        warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
    """Full digits dataset: 1797 samples of 64 features, 10 classes."""
    digits = load_digits()
    assert_equal(digits.data.shape, (1797, 64))
    assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
    """Restricting to 9 classes drops the samples of the tenth digit."""
    digits = load_digits(9)
    assert_equal(digits.data.shape, (1617, 64))
    assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
    """A single named sample image loads as a uint8 HxWx3 array."""
    try:
        china = load_sample_image('china.jpg')
        assert_equal(china.dtype, 'uint8')
        assert_equal(china.shape, (427, 640, 3))
    except ImportError:
        # PIL is an optional dependency for the image loaders.
        warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
    """Requesting an unknown sample image raises AttributeError (needs PIL)."""
    have_PIL = True
    try:
        # scipy moved imread between releases; probe both locations.
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        have_PIL = False
    if have_PIL:
        assert_raises(AttributeError, load_sample_image,
                      'blop.jpg')
    else:
        warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
    """Shape checks for the diabetes regression dataset."""
    res = load_diabetes()
    assert_equal(res.data.shape, (442, 10))
    # The original `assert_true(res.target.size, 442)` passed 442 as the
    # failure *message* and only checked truthiness; assert_equal actually
    # verifies the target vector length.
    assert_equal(res.target.size, 442)
def test_load_linnerud():
    """Linnerud is a 20-sample multi-output dataset with 3 targets."""
    res = load_linnerud()
    assert_equal(res.data.shape, (20, 3))
    assert_equal(res.target.shape, (20, 3))
    assert_equal(len(res.target_names), 3)
    assert_true(res.DESCR)
def test_load_iris():
    """Iris: 150 samples, 4 features, 3 class names, with a description."""
    res = load_iris()
    assert_equal(res.data.shape, (150, 4))
    assert_equal(res.target.size, 150)
    assert_equal(res.target_names.size, 3)
    assert_true(res.DESCR)
def test_load_boston():
    """Boston housing: 506 samples, 13 named features, with a description."""
    res = load_boston()
    assert_equal(res.data.shape, (506, 13))
    assert_equal(res.target.size, 506)
    assert_equal(res.feature_names.size, 13)
    assert_true(res.DESCR)
def test_loads_dumps_bunch():
    """A pickled Bunch keeps attribute and item access in sync."""
    bunch = Bunch(x="x")
    bunch_from_pkl = loads(dumps(bunch))
    bunch_from_pkl.x = "y"
    # Attribute assignment after unpickling must be visible via item access.
    assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
pele-python/mcpele | examples/sfHS_WCA_fluid/radial_distribution_function.py | 2 | 4232 | from __future__ import division
import numpy as np
from scipy.special import gamma
import matplotlib.pyplot as plt
from pele.potentials import HS_WCA
from pele.optimize import LBFGS_CPP
from mcpele.monte_carlo import _BaseMCRunner
from mcpele.monte_carlo import RandomCoordsDisplacement
from mcpele.monte_carlo import RecordPairDistHistogram
from mcpele.monte_carlo import MetropolisTest
class MC(_BaseMCRunner):
    """Monte Carlo runner whose only control parameter is the temperature."""
    def set_control(self, temp):
        # _BaseMCRunner drives the sampling; forward the control value
        # to its temperature setter.
        self.set_temperature(temp)
class ComputeGR():
    """Monte Carlo sampling of the radial distribution function g(r) of a
    soft-finite HS-WCA fluid.

    A periodic box is sized to the requested hard-sphere packing fraction,
    a random initial configuration is relaxed with LBFGS, and g(r) is then
    recorded both for the sampled (equilibrium) configurations and for
    their quenched counterparts.
    """

    def __init__(self, boxdim=2, nr_particles=100, hard_phi=0.4,
                 nr_steps=1e6, epsilon=1, alpha=0.1, verbose=False):
        # Settings.
        np.random.seed(42)
        # Input parameters.
        self.boxdim = boxdim
        self.nr_particles = nr_particles
        self.hard_phi = hard_phi
        self.nr_steps = nr_steps
        self.epsilon = epsilon
        self.alpha = alpha
        self.verbose = verbose
        # Derived quantities: unit hard radii, box sized so the spheres
        # occupy the requested packing fraction hard_phi.
        self.hard_radii = np.ones(self.nr_particles)

        def volume_nball(radius, n):
            # Volume of an n-dimensional ball of the given radius.
            return np.power(np.pi, n / 2) * np.power(radius, n) / gamma(n / 2 + 1)

        self.box_length = np.power(np.sum(np.asarray([volume_nball(r, self.boxdim) for r in self.hard_radii])) / self.hard_phi, 1 / self.boxdim)
        self.box_vector = np.ones(self.boxdim) * self.box_length
        # HS-WCA potential.
        self.potential = HS_WCA(use_periodic=True, use_cell_lists=True,
                                ndim=self.boxdim, eps=self.epsilon,
                                sca=self.alpha, radii=self.hard_radii,
                                boxvec=self.box_vector)
        # Initial configuration by minimization.
        self.nr_dof = self.boxdim * self.nr_particles
        self.x = np.random.uniform(-0.5 * self.box_length, 0.5 * self.box_length, self.nr_dof)
        optimizer = LBFGS_CPP(self.x, self.potential)
        optimizer.run()
        if not optimizer.get_result().success:
            print ("warning: minimization has not converged")
        self.x = optimizer.get_result().coords.copy()
        # Potential and MC rules.
        self.temperature = 1
        self.mc = MC(self.potential, self.x, self.temperature, self.nr_steps)
        self.step = RandomCoordsDisplacement(42, 1, single=True, nparticles=self.nr_particles, bdim=self.boxdim)
        if self.verbose:
            print ("initial MC stepsize")
            # Call-style print (the bare Python 2 `print expr` statement
            # used here originally was inconsistent with the rest of the
            # file and is invalid in Python 3).
            print(self.step.get_stepsize())
        self.mc.set_takestep(self.step)
        # Record g(r) only after the first half of the run (equilibration).
        self.eq_steps = self.nr_steps / 2
        self.mc.set_report_steps(self.eq_steps)
        self.gr_quench = RecordPairDistHistogram(self.box_vector, 50, self.eq_steps, self.nr_particles, optimizer=optimizer)
        self.gr = RecordPairDistHistogram(self.box_vector, 50, self.eq_steps, self.nr_particles)
        self.mc.add_action(self.gr_quench)
        self.mc.add_action(self.gr)
        self.test = MetropolisTest(44)
        self.mc.add_accept_test(self.test)

    def run(self):
        """Run the Monte Carlo sampling with progress reporting."""
        self.mc.set_print_progress()
        if not self.verbose:
            self.mc.disable_input_warnings()
        self.mc.run()
        if self.verbose:
            print ("adapted MC stepsize")
            print(self.step.get_stepsize())

    def show_result(self):
        """Plot g(r) for the equilibrium and quenched configurations."""
        r = self.gr.get_hist_r()
        number_density = self.nr_particles / np.prod(self.box_vector)
        gr = self.gr.get_hist_gr(number_density, self.nr_particles)
        grq = self.gr_quench.get_hist_gr(number_density, self.nr_particles)
        plt.plot(r, gr, "o-", label="Equilibrium")
        plt.plot(r, grq, "x-", label="Quench")
        plt.xlabel(r"Distance $r$")
        plt.ylabel(r"Radial distr. function $g(r)$")
        plt.legend()
        plt.show()
if __name__ == "__main__":
box_dimension = 2
nr_particles = 100
hard_volume_fraction = 0.4
nr_steps = 1e5
alpha = 0.48
verbose = False
simulation = ComputeGR(boxdim=box_dimension,
nr_particles=nr_particles,
hard_phi=hard_volume_fraction,
nr_steps=nr_steps,
alpha=alpha,
verbose=verbose)
simulation.run()
simulation.show_result()
| gpl-3.0 |
gvanhorn38/active_neurofinder | movie.py | 1 | 1737 | """
Create a video out of the images
"""
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import numpy as np
from scipy import interpolate
from glob import glob
from util import load_images, load_regions
import os
from scipy.misc import imread
from PIL import Image
def create_movie(dataset_path, output_path, outline=False, fps=30, dpi=100):
    """
    Creates a mp4 file.

    Reads the sorted ``images/*.tiff`` frames under `dataset_path` and
    writes them to `output_path` using matplotlib's ffmpeg writer.

    NOTE(review): the ``outline=True`` branch looks broken as written:
    `images` holds PIL Image objects (no ``.shape`` attribute), and
    ``np.max``/``np.min`` are applied to that one-element list.  Confirm
    against util.load_images before relying on outlines.
    """
    files = sorted(glob(os.path.join(dataset_path, 'images/*.tiff')))
    # Only the first frame is loaded eagerly; update_img below re-opens
    # each file on demand during the animation.
    images = [Image.open(files[0])]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_aspect('equal')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    if outline:
        regions = load_regions(dataset_path)
        # NOTE(review): PIL images expose .size, not .shape — this line
        # presumably expected numpy arrays; verify.
        boundaries = regions.mask(dims=images[0].shape, stroke='red', fill=None, base=np.zeros(images[0].shape))
        boundaries = (boundaries[:,:,0] > 0) + 0
        # Rescale intensities to 0..255 and paint the region outlines red.
        mx = np.max(images)
        mn = np.min(images)
        f = interpolate.interp1d([mn, mx], [0, 255])
        mod_images = []
        for image in images:
            g_i_r = f(image).astype(np.uint8)
            g_i_r[boundaries > 0] = 255
            g_i_g = f(image).astype(np.uint8)
            g_i_g[boundaries > 0] = 0
            g_i_b = f(image).astype(np.uint8)
            g_i_b[boundaries > 0] = 0
            i = np.dstack([g_i_r, g_i_g, g_i_b])
            mod_images.append(i)
        images = np.array(mod_images)
    im = ax.imshow(images[0] ,cmap='gray')
    fig.set_size_inches([5,5])
    plt.tight_layout()
    def update_img(n):
        # Lazily load frame n; FuncAnimation calls this once per frame.
        tmp = Image.open(files[n])
        im.set_data(tmp)
        return im
    animator = animation.FuncAnimation(fig,update_img,len(files),interval=fps)
    writer = animation.writers['ffmpeg'](fps=fps)
    animator.save(output_path,writer=writer,dpi=dpi)
| mit |
zorroblue/scikit-learn | sklearn/datasets/tests/test_20news.py | 75 | 3266 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Category subsetting of 20newsgroups is consistent with the full set."""
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Extract a reduced dataset
    data2cats = datasets.fetch_20newsgroups(
        subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
    # Check that the ordering of the target_names is the same
    # as the ordering in the full dataset
    assert_equal(data2cats.target_names,
                 data.target_names[-2:])
    # Assert that we have only 0 and 1 as labels
    assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
    # Check that the number of filenames is consistent with data/target
    assert_equal(len(data2cats.filenames), len(data2cats.target))
    assert_equal(len(data2cats.filenames), len(data2cats.data))
    # Check that the first entry of the reduced dataset corresponds to
    # the first entry of the corresponding category in the full dataset
    entry1 = data2cats.data[0]
    category = data2cats.target_names[data2cats.target[0]]
    label = data.target_names.index(category)
    entry2 = data.data[np.where(data.target == label)[0][0]]
    assert_equal(entry1, entry2)
def test_20news_length_consistency():
    """Checks the length consistencies within the bunch

    This is a non-regression test for a bug present in 0.16.1.
    """
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Extract the full dataset
    data = datasets.fetch_20newsgroups(subset='all')
    # Item access and attribute access must agree in length.
    assert_equal(len(data['data']), len(data.data))
    assert_equal(len(data['target']), len(data.target))
    assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
    """Vectorized 20newsgroups subsets are sparse CSR with expected shapes."""
    try:
        datasets.fetch_20newsgroups(subset='all',
                                    download_if_missing=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # test subset = train
    bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314, 130107))
    assert_equal(bunch.target.shape[0], 11314)
    assert_equal(bunch.data.dtype, np.float64)
    # test subset = test
    bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (7532, 130107))
    assert_equal(bunch.target.shape[0], 7532)
    assert_equal(bunch.data.dtype, np.float64)
    # test subset = all: train + test stacked over the same vocabulary
    bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314 + 7532, 130107))
    assert_equal(bunch.target.shape[0], 11314 + 7532)
    assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
sarathid/Learning | Intro_to_ML/tools/email_preprocess.py | 10 | 2628 | #!/usr/bin/python
import pickle
import cPickle
import numpy
from sklearn import cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
def preprocess(words_file = "../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
    """
    this function takes a pre-made list of email texts (by default word_data.pkl)
    and the corresponding authors (by default email_authors.pkl) and performs
    a number of preprocessing steps:
        -- splits into training/testing sets (10% testing)
        -- vectorizes into tfidf matrix
        -- selects/keeps most helpful features

    after this, the feaures and labels are put into numpy arrays, which play nice with sklearn functions

    4 objects are returned:
        -- training/testing features
        -- training/testing labels

    NOTE: this is Python 2 code (print statements, cPickle) and assumes the
    pickle files live relative to the caller's working directory.
    """
    ### the words (features) and authors (labels), already largely preprocessed
    ### this preprocessing will be repeated in the text learning mini-project
    authors_file_handler = open(authors_file, "r")
    authors = pickle.load(authors_file_handler)
    authors_file_handler.close()
    words_file_handler = open(words_file, "r")
    word_data = cPickle.load(words_file_handler)
    words_file_handler.close()
    ### test_size is the percentage of events assigned to the test set
    ### (remainder go into training)
    features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
    ### text vectorization--go from strings to lists of numbers
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    features_train_transformed = vectorizer.fit_transform(features_train)
    features_test_transformed  = vectorizer.transform(features_test)
    ### feature selection, because text is super high dimensional and
    ### can be really computationally chewy as a result
    selector = SelectPercentile(f_classif, percentile=10)
    selector.fit(features_train_transformed, labels_train)
    features_train_transformed = selector.transform(features_train_transformed).toarray()
    features_test_transformed  = selector.transform(features_test_transformed).toarray()
    ### info on the data
    print "no. of Chris training emails:", sum(labels_train)
    print "no. of Sara training emails:", len(labels_train)-sum(labels_train)
    return features_train_transformed, features_test_transformed, labels_train, labels_test
| gpl-3.0 |
icereval/modular-file-renderer | mfr/ext/tabular/configuration.py | 3 | 1191 | # -*- coding: utf-8 -*-
"""Configuration object for the mfr_tabular module."""
from .libs import (
csv_pandas,
tsv_pandas,
dta_pandas,
sav_pandas,
xlsx_xlrd
)
from mfr import Config
"""Defines a list of functions that can handle a particular file type. The
functions will be attempted in order, failing if they do not have the
requirements. Max size is the largest number of columns or rows allowed in a
single table"""
# Module-level configuration shared by the tabular renderers.
config = Config(defaults={
    # Handler functions per file extension, attempted in order.
    'libs': {
        '.csv': [csv_pandas],
        '.tsv': [tsv_pandas],
        '.xlsx': [xlsx_xlrd],
        '.xls': [xlsx_xlrd],
        '.dta': [dta_pandas],
        '.sav': [sav_pandas],
        # '.ods': [ods_ezodf],
    },
    # Largest number of columns or rows allowed in a single table.
    'max_size': 10000,
    'table_width': 700,  # pixels
    'table_height': 600,  # pixels
    # Options forwarded to the SlickGrid widget; big tables skip
    # forceFitColumns so they can scroll horizontally.
    'slick_grid_options': {
        'small_table': {
            'enableCellNavigation': True,
            'enableColumnReorder': False,
            'forceFitColumns': True,
            'syncColumnCellResize': True,
        },
        'big_table': {
            'enableCellNavigation': True,
            'enableColumnReorder': False,
            'syncColumnCellResize': True,
        },
    },
})
| apache-2.0 |
timqian/sms-tools | lectures/6-Harmonic-model/plots-code/f0Twm-piano.py | 19 | 1261 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackman
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import stft as STFT
import sineModel as SM
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.blackman(1501)
N = 2048
t = -90
minf0 = 100
maxf0 = 300
f0et = 1
maxnpeaksTwm = 4
H = 128
x1 = x[1.5*fs:1.8*fs]
plt.figure(1, figsize=(9, 7))
mX, pX = STFT.stftAnal(x, fs, w, N, H)
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et)
f0 = UF.cleaningTrack(f0, 5)
yf0 = UF.sinewaveSynth(f0, .8, H, fs)
f0[f0==0] = np.nan
maxplotfreq = 800.0
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N*maxplotfreq/fs+1]))
plt.autoscale(tight=True)
plt.plot(frmTime, f0, linewidth=2, color='k')
plt.autoscale(tight=True)
plt.title('mX + f0 (piano.wav), TWM')
plt.tight_layout()
plt.savefig('f0Twm-piano.png')
UF.wavwrite(yf0, fs, 'f0Twm-piano.wav')
plt.show()
| agpl-3.0 |
dilawar/moose-full | moose-examples/snippets/insertSpines.py | 2 | 2798 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
# This example illustrates loading a model from an SWC file, inserting
# spines, and viewing it.
import moogli
import moose
from matplotlib.cm import gnuplot
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
import rdesigneur as rd
# Mathematical constant used for the spine-angle ranges below.
PI = 3.14159265358979
# Simulation timing: per-display-frame run time, total run time and solver
# time step (seconds — TODO confirm against MOOSE clock configuration).
frameRunTime = 0.0002
runtime = 1.0
# Current injected into the first compartment (presumably amperes; verify).
inject = 5e-10
simdt = 5e-5
# Passive membrane parameters RM, RA, CM (units per MOOSE convention —
# TODO confirm).
RM = 1.0
RA = 1.0
CM = 0.01
# Spine insertion statistics: mean spacing and spacing spread (metres),
# relative size and size spread, angle and angle spread (radians).
spineSpacing = 2.0e-6
minSpacing = 0.2e-6
spineSize = 1.0
spineSizeDistrib = 0.5
spineAngle = 0
spineAngleDistrib = 2*PI
def main():
    """Load an SWC morphology, insert spines on the apical dendrites, and
    display a voltage-colored animation of a current-injection run."""
    app = QtGui.QApplication(sys.argv)
    filename = 'barrionuevo_cell1zr.CNG.swc'
    #filename = 'h10.CNG.swc'
    moose.Neutral( '/library' )
    # Build the electrical model with spines distributed on apical
    # compartments according to the module-level spacing/size/angle stats.
    rdes = rd.rdesigneur( \
            cellProto = [[ filename, 'elec' ] ],\
            spineProto = [['makeSpineProto()', 'spine' ]] ,\
            spineDistrib = [ \
                ['spine', '#apical#', \
                'spacing', str( spineSpacing ), \
                'spacingDistrib', str( minSpacing ), \
                'angle', str( spineAngle ), \
                'angleDistrib', str( spineAngleDistrib ), \
                'size', str( spineSize ), \
                'sizeDistrib', str( spineSizeDistrib ) ] \
            ] \
        )
    rdes.buildModel( '/model' )
    moose.reinit()
    # Now we set up the display
    compts = moose.wildcardFind( "/model/elec/#[ISA=CompartmentBase]" )
    # Drive the cell by injecting current into the first compartment.
    compts[0].inject = inject
    ecomptPath = map( lambda x : x.path, compts )
    morphology = moogli.read_morphology_from_moose(name = "", path = "/model/elec")
    #morphology.create_group( "group_all", ecomptPath, -0.08, 0.02, \
    #        [0.0, 0.5, 1.0, 1.0], [1.0, 0.0, 0.0, 0.9] )
    morphology.create_group( "group_all", ecomptPath, -0.08, 0.02, \
            gnuplot )
    viewer = moogli.DynamicMorphologyViewerWidget(morphology)
    viewer.set_background_color( 1.0, 1.0, 1.0, 1.0 )
    def callback( morphology, viewer ):
        # Advance the simulation one display frame and recolor by Vm;
        # returning False stops the viewer's idle callback.
        moose.start( frameRunTime )
        Vm = map( lambda x: moose.element( x ).Vm, compts )
        morphology.set_color( "group_all", Vm )
        currTime = moose.element( '/clock' ).currentTime
        #print currTime, compts[0].Vm
        if ( currTime < runtime ):
            return True
        return False
    viewer.set_callback( callback, idletime = 0 )
    viewer.showMaximized()
    viewer.show()
    app.exec_()

if __name__ == '__main__':
    main()
| gpl-2.0 |
kashif/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 83 | 5888 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to as) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
cbertinato/pandas | pandas/tests/plotting/test_datetimelike.py | 1 | 57288 | """ Test cases for time series specific (freq conversion, etc) """
from datetime import date, datetime, time, timedelta
import pickle
import sys
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, NaT, Series, isna
from pandas.core.indexes.datetimes import bdate_range, date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.resample import DatetimeIndex
from pandas.tests.plotting.common import TestPlotBase
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal, ensure_clean
from pandas.tseries.offsets import DateOffset
@td.skip_if_no_mpl
class TestTSPlot(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
self.freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [
period_range('12/31/1999', freq=x, periods=100) for x in self.freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def teardown_method(self, method):
tm.close()
@pytest.mark.slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
df = DataFrame(np.random.randn(10, 9), index=range(10))
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
assert label.get_fontsize() == 2
@pytest.mark.slow
def test_frame_inferred(self):
# inferred freq
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_is_error_nozeroindex(self):
# GH11858
i = np.array([1, 2, 3])
a = DataFrame(i, index=i)
_check_plot_works(a.plot, xerr=a)
_check_plot_works(a.plot, yerr=a)
def test_nonnumeric_exclude(self):
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}, idx)
fig, ax = self.plt.subplots()
df.plot(ax=ax) # it works
assert len(ax.get_lines()) == 1 # B was plotted
self.plt.close(fig)
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
df['A'].plot()
def test_tsplot_deprecated(self):
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
ts = tm.makeTimeSeries()
with tm.assert_produces_warning(FutureWarning):
tsplot(ts, self.plt.Axes.plot, ax=ax)
@pytest.mark.slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
ts = tm.makeTimeSeries()
def f(*args, **kwds):
with tm.assert_produces_warning(FutureWarning):
return tsplot(s, self.plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
_, ax = self.plt.subplots()
ts.plot(style='k', ax=ax)
color = (0., 0., 0., 1)
assert color == ax.get_lines()[0].get_color()
def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
msg = ("Cannot pass 'style' string with a color symbol and 'color' "
"keyword argument. Please use one or the other or pass 'style'"
" without a color symbol")
with pytest.raises(ValueError, match=msg):
ts.plot(style='b-', color='#000099')
s = ts.reset_index(drop=True)
with pytest.raises(ValueError, match=msg):
s.plot(style='b-', color='#000099')
@pytest.mark.slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
_, ax = self.plt.subplots()
rng = date_range('1/1/2012', periods=100, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot, ax=ax)
def test_get_datevalue(self):
from pandas.plotting._matplotlib.converter import get_datevalue
assert get_datevalue(None, 'D') is None
assert get_datevalue(1987, 'A') == 1987
assert (get_datevalue(Period(1987, 'A'), 'M') ==
Period('1987-12', 'M').ordinal)
assert (get_datevalue('1/1/1987', 'D') ==
Period('1987-1-1', 'D').ordinal)
@pytest.mark.slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
assert expected_string == ax.format_coord(first_x, first_y)
except (ValueError):
pytest.skip("skipping test because issue forming "
"test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3,
freq='A-DEC'))
_, ax = self.plt.subplots()
annual.plot(ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
daily.plot(ax=ax)
check_format_of_first_point(ax,
't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
with tm.assert_produces_warning(FutureWarning):
tsplot(annual, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
with tm.assert_produces_warning(FutureWarning):
tsplot(daily, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014-01-01 y = 1.000000')
@pytest.mark.slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@pytest.mark.slow
@pytest.mark.parametrize(
'frqncy', ['1S', '3S', '5T', '7H', '4D', '8W', '11M', '3A'])
def test_line_plot_period_mlt_series(self, frqncy):
# test period index line plot for series with multiples (`mlt`) of the
# frequency (`frqncy`) rule code. tests resolution of issue #14763
idx = period_range('12/31/1999', freq=frqncy, periods=100)
s = Series(np.random.randn(len(idx)), idx)
_check_plot_works(s.plot, s.index.freq.rule_code)
@pytest.mark.slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@pytest.mark.slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@pytest.mark.slow
@pytest.mark.parametrize(
'frqncy', ['1S', '3S', '5T', '7H', '4D', '8W', '11M', '3A'])
def test_line_plot_period_mlt_frame(self, frqncy):
# test period index line plot for DataFrames with multiples (`mlt`)
# of the frequency (`frqncy`) rule code. tests resolution of issue
# #14763
idx = period_range('12/31/1999', freq=frqncy, periods=100)
df = DataFrame(np.random.randn(len(idx), 3), index=idx,
columns=['A', 'B', 'C'])
freq = df.index.asfreq(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@pytest.mark.slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@pytest.mark.slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
_, ax = self.plt.subplots()
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(range(len(rng)), index=rng)
ts = ts[:3].append(ts[5:])
ts.plot(ax=ax)
assert not hasattr(ax, 'freq')
@pytest.mark.slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), index=dr)
_check_plot_works(ser.plot)
@pytest.mark.slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1),
datetime(2000, 1, 6),
datetime(2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), index=dr)
_check_plot_works(ser.plot)
@pytest.mark.slow
def test_uhf(self):
import pandas.plotting._matplotlib.converter as conv
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), index=idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
assert xp == rs
@pytest.mark.slow
def test_irreg_hf(self):
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), index=idx)
irreg = df.iloc[[0, 1, 3, 4]]
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()
_, ax = self.plt.subplots()
df2 = df.copy()
df2.index = df.index.astype(object)
df2.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
def test_irregular_datetime64_repr_bug(self):
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
_, ax = self.plt.subplots()
ret = ser.plot(ax=ax)
assert ret is not None
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
assert rs == xp
def test_business_freq(self):
bts = tm.makePeriodSeries()
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'B'
@pytest.mark.slow
def test_business_freq_convert(self):
bts = tm.makeTimeSeries(300).asfreq('BM')
ts = bts.to_period('M')
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'M'
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(
minutes=30))
df = DataFrame(np.arange(24), index=idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
rs = ax.get_lines()[0].get_xdata()
assert not Index(rs).is_normalized
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
_, ax = self.plt.subplots()
bts.plot(ax=ax)
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
@pytest.mark.slow
def test_axis_limits(self):
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
result = ax.get_xlim()
assert result[0] == xlim[0] - 5
assert result[1] == xlim[1] + 10
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
# datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
fig = ax.get_figure()
self.plt.close(fig)
ser = tm.makeTimeSeries()
_, ax = self.plt.subplots()
ser.plot(ax=ax)
_test(ax)
_, ax = self.plt.subplots()
df = DataFrame({'a': ser, 'b': ser + 1})
df.plot(ax=ax)
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.plotting._matplotlib.converter as conv
assert conv.get_finder('B') == conv._daily_finder
assert conv.get_finder('D') == conv._daily_finder
assert conv.get_finder('M') == conv._monthly_finder
assert conv.get_finder('Q') == conv._quarterly_finder
assert conv.get_finder('A') == conv._annual_finder
assert conv.get_finder('W') == conv._daily_finder
@pytest.mark.slow
def test_finder_daily(self):
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst)
rs1 = []
rs2 = []
for i, n in enumerate(day_lst):
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs1.append(xaxis.get_majorticklocs()[0])
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs1 == xpl1
assert rs2 == xpl2
@pytest.mark.slow
def test_finder_quarterly(self):
yrs = [3.5, 11]
xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs)
rs1 = []
rs2 = []
for i, n in enumerate(yrs):
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs1.append(xaxis.get_majorticklocs()[0])
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs1 == xpl1
assert rs2 == xpl2
@pytest.mark.slow
def test_finder_monthly(self):
yrs = [1.15, 2.5, 4, 11]
xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs)
rs1 = []
rs2 = []
for i, n in enumerate(yrs):
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs1.append(xaxis.get_majorticklocs()[0])
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs1 == xpl1
assert rs2 == xpl2
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
assert rs == xp
@pytest.mark.slow
def test_finder_annual(self):
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
xp = [Period(x, freq='A').ordinal for x in xp]
rs = []
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs == xp
@pytest.mark.slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
assert rs == xp
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
assert rs == xp
@pytest.mark.slow
def test_gaps(self):
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
_, ax = self.plt.subplots()
ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
self.plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
self.plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
_, ax = self.plt.subplots()
ser.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
@pytest.mark.slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
_, ax = self.plt.subplots()
low.plot(ax=ax)
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
assert len(lines) == 1
assert len(ax.right_ax.get_lines()) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
@pytest.mark.slow
def test_secondary_y(self):
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
line = ax.get_lines()[0]
xp = Series(line.get_ydata(), line.get_xdata())
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
assert not hasattr(ax, 'left_ax')
assert hasattr(ax, 'right_ax')
assert hasattr(ax2, 'left_ax')
assert not hasattr(ax2, 'right_ax')
@pytest.mark.slow
def test_secondary_y_ts(self):
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
line = ax.get_lines()[0]
xp = Series(line.get_ydata(), line.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
@pytest.mark.slow
@td.skip_if_no_scipy
def test_secondary_kde(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ax = ser.plot(secondary_y=True, kind='density', ax=ax)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ser.plot(secondary_y=True, kind='bar', ax=ax)
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
def test_mixed_freq_regular_first(self):
# TODO
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
tm.assert_index_equal(idx1, s1.index.to_period('B'))
tm.assert_index_equal(idx2, s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
@pytest.mark.slow
def test_mixed_freq_irregular_first(self):
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
assert idx1.equals(s1.index.to_period('B'))
assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
@pytest.mark.slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
@pytest.mark.slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
ts2.plot(style='r', ax=ax)
assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]
@pytest.mark.slow
def test_mixed_freq_lf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(legend=True, ax=ax)
high.plot(legend=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
leg = ax.get_legend()
assert len(leg.texts) == 2
self.plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'T'
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
ps.plot(ax=ax)
def test_mixed_freq_shared_ax(self):
# GH13341, using sharex=True
idx1 = date_range('2015-01-01', periods=3, freq='M')
idx2 = idx1[:1].union(idx1[2:])
s1 = Series(range(len(idx1)), idx1)
s2 = Series(range(len(idx2)), idx2)
fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert ax1.freq == 'M'
assert ax2.freq == 'M'
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# using twinx
fig, ax1 = self.plt.subplots()
ax2 = ax1.twinx()
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# TODO (GH14330, GH14322)
# plotting the irregular first does not yet work
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# s2.plot(ax=ax1)
# s1.plot(ax=ax2)
# assert (ax1.lines[0].get_xydata()[0, 0] ==
# ax2.lines[0].get_xydata()[0, 0])
def test_nat_handling(self):
_, ax = self.plt.subplots()
dti = DatetimeIndex(['2015-01-01', NaT, '2015-01-03'])
s = Series(range(len(dti)), dti)
s.plot(ax=ax)
xdata = ax.get_lines()[0].get_xdata()
# plot x data is bounded by index values
assert s.index.min() <= Series(xdata).min()
assert Series(xdata).max() <= s.index.max()
@pytest.mark.slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
_, ax = self.plt.subplots()
from pandas.tseries.plotting import tsplot
with tm.assert_produces_warning(FutureWarning):
tsplot(high, self.plt.Axes.plot, ax=ax)
with tm.assert_produces_warning(FutureWarning):
lines = tsplot(low, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
@pytest.mark.slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
expected_h = idxh.to_period().asi8.astype(np.float64)
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562], dtype=np.float64)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
tm.close()
_, ax = self.plt.subplots()
from pandas.tseries.plotting import tsplot
with tm.assert_produces_warning(FutureWarning):
tsplot(low, self.plt.Axes.plot, ax=ax)
with tm.assert_produces_warning(FutureWarning):
lines = tsplot(high, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
@pytest.mark.slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
low.plot(kind=kind1, stacked=True, ax=ax)
high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
line = ax.lines[i]
assert PeriodIndex(line.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(line.get_xdata(orig=False),
expected_x)
# check stacked values are correct
expected_y += low[i].values
tm.assert_numpy_array_equal(line.get_ydata(orig=False),
expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
line = ax.lines[3 + i]
assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(line.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(line.get_ydata(orig=False),
expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
high.plot(kind=kind1, stacked=True, ax=ax)
low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
line = ax.lines[i]
assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(line.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(line.get_ydata(orig=False),
expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
lines = ax.lines[3 + i]
assert PeriodIndex(data=lines.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(lines.get_xdata(orig=False),
expected_x)
expected_y += low[i].values
tm.assert_numpy_array_equal(lines.get_ydata(orig=False),
expected_y)
@pytest.mark.slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
tm.close()
# low to high
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
@pytest.mark.slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].astype(object)
df = DataFrame(np.random.randn(len(idx), 3), idx)
_, ax = self.plt.subplots()
_check_plot_works(df.plot, ax=ax)
@pytest.mark.slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
fig, ax = self.plt.subplots()
df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime('%H:%M:%S')
else:
xp = time(h, m, s).strftime('%H:%M')
assert xp == rs
@pytest.mark.slow
@pytest.mark.xfail(strict=False, reason="Unreliable test")
def test_time_change_xlim(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
fig, ax = self.plt.subplots()
df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime('%H:%M:%S')
else:
xp = time(h, m, s).strftime('%H:%M')
assert xp == rs
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime('%H:%M:%S')
else:
xp = time(h, m, s).strftime('%H:%M')
assert xp == rs
@pytest.mark.slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
fig, ax = self.plt.subplots()
ax = df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
us = int(round((t - int(t)) * 1e6))
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if (us % 1000) != 0:
xp = time(h, m, s, us).strftime('%H:%M:%S.%f')
elif (us // 1000) != 0:
xp = time(h, m, s, us).strftime('%H:%M:%S.%f')[:-3]
elif s != 0:
xp = time(h, m, s, us).strftime('%H:%M:%S')
else:
xp = time(h, m, s, us).strftime('%H:%M')
assert xp == rs
@pytest.mark.slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
ax = high.plot(secondary_y=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
for l in ax.left_ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
@pytest.mark.slow
def test_secondary_legend(self):
fig = self.plt.figure()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B (right)'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df.plot(secondary_y=['A', 'C'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
# non-ts
df = tm.makeDataFrame()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
assert l.get_rotation() == 30
@pytest.mark.slow
def test_ax_plot(self):
x = date_range(start='2012-01-02', periods=10, freq='D')
y = list(range(len(x)))
_, ax = self.plt.subplots()
lines = ax.plot(x, y, label='Y')
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
@pytest.mark.slow
def test_mpl_nopandas(self):
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
_, ax = self.plt.subplots()
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
@pytest.mark.slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
assert left <= ts_irregular.index.min().toordinal()
assert right >= ts_irregular.index.max().toordinal()
@pytest.mark.slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before >= left_after
assert right_before < right_after
@pytest.mark.slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before >= left_after
assert right_before < right_after
@pytest.mark.slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
_, ax = self.plt.subplots()
ts.plot(ax=ax)
left_before, right_before = ax.get_xlim()
ts.resample('D').mean().plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
assert left_before == left_after
assert right_before == right_after
@pytest.mark.slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
assert left <= ts_irregular.index.min().toordinal()
assert right >= ts_irregular.index.max().toordinal()
def test_plot_outofbounds_datetime(self):
# 2579 - checking this does not raise
values = [date(1677, 1, 1), date(1677, 1, 2)]
_, ax = self.plt.subplots()
ax.plot(values)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
ax.plot(values)
def test_format_timedelta_ticks_narrow(self):
expected_labels = (['00:00:00.0000000{:0>2d}'.format(i)
for i in range(10)])
rng = timedelta_range('0', periods=10, freq='ns')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
self.plt.draw()
labels = ax.get_xticklabels()
result_labels = [x.get_text() for x in labels]
assert len(result_labels) == len(expected_labels)
assert result_labels == expected_labels
def test_format_timedelta_ticks_wide(self):
expected_labels = [
'00:00:00',
'1 days 03:46:40',
'2 days 07:33:20',
'3 days 11:20:00',
'4 days 15:06:40',
'5 days 18:53:20',
'6 days 22:40:00',
'8 days 02:26:40',
'9 days 06:13:20',
]
rng = timedelta_range('0', periods=10, freq='1 d')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
ax = df.plot(fontsize=2, ax=ax)
self.plt.draw()
labels = ax.get_xticklabels()
result_labels = [x.get_text() for x in labels]
assert len(result_labels) == len(expected_labels)
assert result_labels == expected_labels
def test_timedelta_plot(self):
# test issue #8711
s = Series(range(5), timedelta_range('1day', periods=5))
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test long period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 d')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test short period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 ns')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
def test_hist(self):
# https://github.com/matplotlib/matplotlib/issues/8459
rng = date_range('1/1/2011', periods=10, freq='H')
x = rng
w1 = np.arange(0, 1, .1)
w2 = np.arange(0, 1, .1)[::-1]
_, ax = self.plt.subplots()
ax.hist([x, x], weights=[w1, w2])
@pytest.mark.slow
def test_overlapping_datetime(self):
# GB 6608
s1 = Series([1, 2, 3], index=[datetime(1995, 12, 31),
datetime(2000, 12, 31),
datetime(2005, 12, 31)])
s2 = Series([1, 2, 3], index=[datetime(1997, 12, 31),
datetime(2003, 12, 31),
datetime(2008, 12, 31)])
# plot first series, then add the second series to those axes,
# then try adding the first series again
_, ax = self.plt.subplots()
s1.plot(ax=ax)
s2.plot(ax=ax)
s1.plot(ax=ax)
@pytest.mark.xfail(reason="GH9053 matplotlib does not use"
" ax.xaxis.converter")
def test_add_matplotlib_datetime64(self):
# GH9053 - ensure that a plot with PeriodConverter still understands
# datetime64 data. This still fails because matplotlib overrides the
# ax.xaxis.converter with a DatetimeConverter
s = Series(np.random.randn(10),
index=date_range('1970-01-02', periods=10))
ax = s.plot()
ax.plot(s.index, s.values, color='g')
l1, l2 = ax.lines
tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata())
def test_matplotlib_scatter_datetime64(self):
# https://github.com/matplotlib/matplotlib/issues/11391
df = DataFrame(np.random.RandomState(0).rand(10, 2),
columns=["x", "y"])
df["time"] = date_range("2018-01-01", periods=10, freq="D")
fig, ax = self.plt.subplots()
ax.scatter(x="time", y="y", data=df)
self.plt.draw()
label = ax.get_xticklabels()[0]
if self.mpl_ge_3_0_0:
expected = "2017-12-08"
else:
expected = "2017-12-12"
assert label.get_text() == expected
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
# GH18439
# this is supported only in Python 3 pickle since
# pickle in Python2 doesn't support instancemethod pickling
# TODO(statsmodels 0.10.0): Remove the statsmodels check
# https://github.com/pandas-dev/pandas/issues/24088
# https://github.com/statsmodels/statsmodels/issues/4772
if 'statsmodels' not in sys.modules:
with ensure_clean(return_filelike=True) as path:
pickle.dump(fig, path)
finally:
plt.close(fig)
| bsd-3-clause |
dbarbier/ot-svn | python/doc/sphinxext/numpydoc/tests/test_docscrape.py | 3 | 18530 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
import textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal(
[n for n, _, _ in doc['Parameters']], ['mean', 'cov', 'shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N, N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n, _, _ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert_equal(len(doc['Returns']), 2)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert_equal(arg, 'list of str')
assert_equal(arg_type, '')
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a, b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n, line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n, line, b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name, _, desc = doc5['Raises'][0]
assert_equal(name, 'LinAlgException')
assert_equal(desc, ['If array is singular.'])
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name, _, desc = doc5['Warns'][0]
assert_equal(name, 'SomeWarning')
assert_equal(desc, ['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
    cfg = dict(use_plots=True)

    # With use_plots on, a doctest-style matplotlib example is wrapped in
    # a "plot::" directive...
    doc = SphinxDocString("""
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> plt.plot([1,2,3],[4,5,6])
    >>> plt.show()
    """, config=cfg)
    assert 'plot::' in str(doc), str(doc)

    # ...but an example that already contains a plot directive must not
    # be wrapped a second time.
    doc = SphinxDocString("""
    Examples
    --------
    .. plot::

        import matplotlib.pyplot as plt
        plt.plot([1,2,3],[4,5,6])
        plt.show()
    """, config=cfg)
    assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
    # show_class_members controls whether methods and properties of the
    # documented class are listed in the rendered docstring.
    class Dummy(object):
        """
        Dummy class.

        """
        def spam(self, a, b):
            """Spam\n\nSpam spam."""
            pass

        def ham(self, c, d):
            """Cheese\n\nNo cheese."""
            pass

        @property
        def spammity(self):
            """Spammity index"""
            return 0.95

        class Ignorable(object):
            """local class, to be ignored"""
            pass

    for cls in (ClassDoc, SphinxClassDoc):
        # Members suppressed: none of them may appear in the output.
        doc = cls(Dummy, config=dict(show_class_members=False))
        assert 'Methods' not in str(doc), (cls, str(doc))
        assert 'spam' not in str(doc), (cls, str(doc))
        assert 'ham' not in str(doc), (cls, str(doc))
        assert 'spammity' not in str(doc), (cls, str(doc))
        assert 'Spammity index' not in str(doc), (cls, str(doc))

        # Members shown: methods and the property are listed.
        doc = cls(Dummy, config=dict(show_class_members=True))
        assert 'Methods' in str(doc), (cls, str(doc))
        assert 'spam' in str(doc), (cls, str(doc))
        assert 'ham' in str(doc), (cls, str(doc))
        assert 'spammity' in str(doc), (cls, str(doc))

        if cls is SphinxClassDoc:
            # Sphinx output lists members via an autosummary table...
            assert '.. autosummary::' in str(doc), str(doc)
        else:
            # ...plain output inlines the property docstring.
            assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
    # Duplicate function signatures occur e.g. in ufuncs, when the
    # automatic mechanism adds one, and a more detailed comes from the
    # docstring itself.  The later (more detailed) one must win.
    doc = NumpyDocString(
        """
        z(x1, x2)

        z(a, theta)
        """)

    assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
    # Plain ClassDoc rendering: Examples is moved before Attributes and
    # Methods; comparison ignores blank lines.
    doc = ClassDoc(None, class_doc_txt)
    non_blank_line_by_line_compare(str(doc),
    """
    Foo

    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Aaa.
    jac : callable ``jac(t, y, *jac_args)``
        Bbb.

    Examples
    --------
    For usage examples, see `ode`.

    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.

    Methods
    -------
    a
    b
    c

    .. index::

    """)
def test_class_members_doc_sphinx():
    # Sphinx rendering of the same fixture: parameters become a field
    # list, sections become rubrics, and attributes become a table.
    doc = SphinxClassDoc(None, class_doc_txt)
    non_blank_line_by_line_compare(str(doc),
    """
    Foo

    :Parameters:

        **f** : callable ``f(t, y, *f_args)``

            Aaa.

        **jac** : callable ``jac(t, y, *jac_args)``

            Bbb.

    .. rubric:: Examples

    For usage examples, see `ode`.

    .. rubric:: Attributes

    ===  ==========
      t  (float) Current time.
      y  (ndarray) Current variable values.
    ===  ==========

    .. rubric:: Methods

    ===  ==========
      a
      b
      c
    ===  ==========

    """)
if __name__ == "__main__":
    # Allow running this test module directly through nose.
    import nose
    nose.run()
| gpl-3.0 |
AnasGhrab/scikit-learn | sklearn/tests/test_cross_validation.py | 27 | 41664 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: dummy hyper-parameter, only used by score().
        # allow_nd: when True, fit/predict flatten >2-d inputs instead of
        # rejecting them.
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            # Lets the test inspect the classifier mid-fit.
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            # fold-sliced sample_weight must align with the fold's X.
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): `y` here is the module-level fixture, not the
            # `Y` argument -- the prior is checked against the full label
            # set, not this fold's labels.  Confirm this is intentional.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            # Checked against the module-level P_sparse fixture: sparse
            # fit params are passed through whole, not fold-sliced.
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # "Prediction" is simply the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Data-independent score in (0, 1], decreasing with |a|.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below.
X = np.ones((10, 2))          # trivial dense feature matrix
X_sparse = coo_matrix(X)      # the same data, sparse
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))   # sparse sample-weight column
P_sparse = coo_matrix(np.eye(5))       # sparse square matrix (fit param)
y = np.arange(10) // 2        # 5 classes, 2 samples each
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert a (train, test) split is disjoint and, optionally, complete."""
    # Sets give far more informative assertion-failure messages than
    # raw index arrays.
    train_set = set(train)
    test_set = set(test)
    # The two halves must never share an index.
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # Together they must account for every sample.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check each split is valid and every sample lands in some test fold."""
    if expected_n_iter is None:
        expected_n_iter = len(cv)
    else:
        assert_equal(len(cv), expected_n_iter)

    seen_test_indices = set()
    n_iterations = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test_indices.update(test)

    assert_equal(n_iterations, expected_n_iter)
    if n_samples is not None:
        # The accumulated test folds must cover the whole dataset.
        assert_equal(seen_test_indices, set(range(n_samples)))
def test_kfold_valueerrors():
    # Not enough samples for the requested number of folds.
    assert_raises(ValueError, cval.KFold, 3, 4)

    # A warning (not an error) is raised if the least populated class has
    # too few members...
    y = [3, 3, -1, -1, 2]

    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)

    # ...and despite the warning the folds are still computed, even
    # though not every class can appear on both sides of every split.
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))

    # Error when number of folds is <= 1.
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    assert_raises(ValueError, cval.StratifiedKFold, y, 0)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1)

    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)

    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    # Every index must appear in some test fold, both when the dataset
    # divides evenly into folds (300/3) and when it cannot (17/3).
    for n, n_folds in ((300, 3), (17, 3)):
        kf = cval.KFold(n, n_folds)
        check_cv_coverage(kf, expected_n_iter=n_folds, n_samples=n)
def test_kfold_no_shuffle():
    # Without shuffling, KFold must hand out contiguous folds in order,
    # on both an evenly divisible (n=4) and an uneven (n=5) toy dataset.
    expected_splits = {
        4: [([2, 3], [0, 1]), ([0, 1], [2, 3])],
        5: [([3, 4], [0, 1, 2]), ([0, 1, 2], [3, 4])],
    }
    for n, folds in expected_splits.items():
        for (exp_train, exp_test), (train, test) in zip(folds,
                                                        cval.KFold(n, 2)):
            assert_array_equal(train, exp_train)
            assert_array_equal(test, exp_test)
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as
    # much as possible on toy datasets in order to avoid hiding sample
    # dependencies when possible.
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])

    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])

    # Unbalanced class counts (3 vs 4): folds still interleave classes.
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])

    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    # Stratified k-fold must preserve per-class ratios in both halves of
    # every split, with shuffling off and on.
    n_samples = 1000
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    expected_ratios = ((4, 0.10), (0, 0.89), (1, 0.01))

    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            for label, ratio in expected_ratios:
                assert_almost_equal(
                    np.sum(labels[train] == label) / len(train), ratio, 2)
                assert_almost_equal(
                    np.sum(labels[test] == label) / len(test), ratio, 2)
def test_kfold_balance():
    # Fold sizes may differ by at most one sample and must sum to n,
    # for a range of n that are not multiples of the fold count.
    for n in range(11, 17):
        kf = cval.KFold(n, 5)
        fold_sizes = [len(test) for _, test in kf]
        assert_true(max(fold_sizes) - min(fold_sizes) <= 1)
        assert_equal(sum(fold_sizes), kf.n)
def test_stratifiedkfold_balance():
    # Where stratification is possible, StratifiedKFold must also return
    # folds whose sizes differ by at most one (shuffled or not).
    labels = [0] * 3 + [1] * 14
    for shuffle in [False, True]:
        for i in range(11, 17):
            skf = cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
            fold_sizes = [len(test) for _, test in skf]
            assert_true(max(fold_sizes) - min(fold_sizes) <= 1)
            assert_equal(sum(fold_sizes), skf.n)
def test_shuffle_kfold():
    """Check that shuffled KFold actually shuffles and still covers all
    indices.

    Each shuffled test fold must differ from the contiguous block an
    unshuffled KFold would produce, and the union of all test folds must
    be exactly the full index range.
    """
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)

    all_folds = None
    for fold, (train, test) in enumerate(kf):
        # BUGFIX: the original compared np.arange(100) (and off-by-one
        # ranges 101-200 / 201-300) against ind[train], which has 200
        # entries -- a shape-mismatched comparison that never actually
        # tested the shuffling.  Compare each 100-element test fold
        # against the contiguous chunk an unshuffled split would yield.
        contiguous_chunk = np.arange(fold * 100, (fold + 1) * 100)
        assert_true(np.any(contiguous_chunk != ind[test]))

        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))

    # All indices must be returned across the different test folds.
    all_folds.sort()
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    # Two different seeds must produce different test folds, and the
    # shuffled splitter must still cover every sample.
    labels = [0] * 20 + [1] * 20
    seeded_runs = [list(cval.StratifiedKFold(labels, 5, shuffle=True,
                                             random_state=seed))
                   for seed in (0, 1)]
    for (_, test_a), (_, test_b) in zip(*seeded_runs):
        assert_true(set(test_a) != set(test_b))
    check_cv_coverage(seeded_runs[0], expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    # The digits samples are dependent: they are apparently grouped by
    # authors although we don't have any information on the groups segment
    # locations for this data. We can highlight this fact by computing
    # k-fold cross-validation with and without shuffling: we observe that
    # the shuffling case wrongly makes the IID assumption and is therefore
    # too optimistic: it estimates a much higher accuracy (around 0.96)
    # than the non-shuffling variant (around 0.86).

    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)

    # Unshuffled KFold: honest (lower) score band.
    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the
    # authors by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold.
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_shuffle_split():
    # Equivalent test_size specifications -- a fraction, a plain int, a
    # numpy int, and each native integer type -- must all produce
    # identical splits for the same seed.
    ss_float = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss_int = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss_np = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss_typ = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for splits in zip(ss_float, ss_int, ss_np, ss_typ):
            trains = [tr for tr, _ in splits]
            tests = [te for _, te in splits]
            for a, b in zip(trains, trains[1:]):
                assert_array_equal(a, b)
            for a, b in zip(tests, tests[1:]):
                assert_array_equal(a, b)
def test_stratified_shuffle_split_init():
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample.
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)

    # Check that error is raised if the test set size is smaller than
    # n_classes.
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes.
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)

    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples.
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)

    # Train size or test size too small.
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    # Label vectors covering balanced, unbalanced and heavily skewed
    # class distributions.
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            # Every class must appear on both sides of the split.
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions.
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            # Train and test are a disjoint, complete partition.
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test),
                               [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance.
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial.
        # NOTE: closes over n_splits, which is assigned below in the
        # outer loop before this helper is first called.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)

        # Count how often each index lands in train vs. test.
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1

        assert_equal(n_splits, n_iter)
        # Size/disjointness invariants on the last split.
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)

        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)

        # Each index's draw frequency must match the expected binomial.
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # A PredefinedSplit built from KFold's fold assignment must reproduce
    # exactly the same train/test partitions, in the same order.
    folds = -1 * np.ones(10)
    kfold_splits = []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5,
                                                         shuffle=True)):
        kfold_splits.append((train_ind, test_ind))
        folds[test_ind] = i

    predefined_splits = list(cval.PredefinedSplit(folds))

    kf_train = [tr for tr, _ in kfold_splits]
    kf_test = [te for _, te in kfold_splits]
    ps_train = [tr for tr, _ in predefined_splits]
    ps_test = [te for _, te in predefined_splits]
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
    # Mutating the labels array after constructing the CV object must not
    # change the splits: LeaveOneLabelOut / LeavePLabelOut must copy.
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    cv_pairs = [
        (cval.LeaveOneLabelOut(labels),
         cval.LeaveOneLabelOut(labels_changing)),
        (cval.LeavePLabelOut(labels, p=2),
         cval.LeavePLabelOut(labels_changing, p=2)),
    ]
    labels_changing[:] = 0
    for cv, cv_changing in cv_pairs:
        for (train, test), (train_chan, test_chan) in zip(cv, cv_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test: dense, sparse, and 2-d ("multioutput") targets.
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))

        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))

        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))

        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))

    # test with X and y as list: list inputs must be passed through to the
    # estimator unchanged.
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())

    # An unknown scoring string must be rejected.
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")

    # test with 3d X: accepted only when the estimator allows nd input.
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)

    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    # cross_val_score must pass dataframe-like X and series-like y through
    # to the estimator unconverted; exercised with the mock types and,
    # when available, real pandas.
    frame_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        frame_types.append((Series, DataFrame))

    for TargetType, InputFeatureType in frame_types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        clf = CheckingClassifier(
            check_X=lambda df: isinstance(df, InputFeatureType),
            check_y=lambda ser: isinstance(ser, TargetType))
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """Check cross_val_score gives the same result for index-based and
    boolean-mask-based CV iterators."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target

    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)

    # Rebuild the same folds as boolean masks.
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUGFIX: the original appended the index arrays (train, test),
        # so the boolean masks were built but never exercised and the
        # test was vacuously true.  Append the masks themselves.
        cv_masks.append((mask_train, mask_test))

    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    # An SVC given a precomputed linear kernel must score exactly like an
    # SVC computing the linear kernel itself; malformed kernel inputs
    # must be rejected.
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)

    score_precomputed = cval.cross_val_score(
        SVC(kernel="precomputed"), linear_kernel, y)
    score_linear = cval.cross_val_score(SVC(kernel="linear"), X, y)
    assert_array_equal(score_precomputed, score_linear)

    # A non-square X cannot be a precomputed kernel...
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)

    # ...and the kernel must be array-like or sparse.
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))

    # Sentinel non-array fit params that must reach fit() untouched.
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()

    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type.
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)

    # MockClassifier.fit asserts the array-like params are sliced (or
    # passed whole for the sparse ones) as expected; the callback checks
    # the scalar/object params.
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    # A custom scorer must be invoked exactly once per fold and its
    # return value used as the fold score.
    clf = MockClassifier()
    recorded_calls = []

    def score_func(y_test, y_predict):
        recorded_calls.append((y_test, y_predict))
        return 1.0

    with warnings.catch_warnings(record=True):
        score = cval.cross_val_score(clf, X, y,
                                     scoring=make_scorer(score_func))
    assert_array_equal(score, [1.0, 1.0, 1.0])
    assert len(recorded_calls) == 3
def test_cross_val_score_errors():
    # An object with no fit/score interface must be rejected with a
    # TypeError.
    class BrokenEstimator:
        pass

    assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
    # Every malformed argument combination must raise the right error.
    data = range(3)

    # No arrays at all.
    assert_raises(ValueError, cval.train_test_split)

    bad_size_kwargs = [
        dict(train_size=1.1),
        dict(test_size=0.6, train_size=0.6),
        dict(test_size=np.float32(0.6), train_size=np.float32(0.6)),
        dict(test_size="wrong_type"),
        dict(test_size=2, train_size=4),
    ]
    for kwargs in bad_size_kwargs:
        assert_raises(ValueError, cval.train_test_split, data, **kwargs)

    # Unknown keyword argument.
    assert_raises(TypeError, cval.train_test_split, data,
                  some_argument=1.1)
    # Arrays of inconsistent lengths.
    assert_raises(ValueError, cval.train_test_split, data, range(42))
def test_train_test_split():
    X = np.arange(100).reshape((10, 10))
    X_sparse = coo_matrix(X)
    y = np.arange(10)

    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y: rows and labels move together.
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)

    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_sparse, y.tolist(),
                                      allow_lists=False)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())

    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_sparse, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))

    # allow nd-arrays: only the first axis is split.
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))

    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
# NOTE(review): this function lacks the `test_` prefix, so nose/pytest
# never collect or run it.  Renaming would fix that but changes the
# public name -- flagged for a follow-up.
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
# NOTE(review): missing `test_` prefix -- never collected by nose/pytest.
# Renaming would fix that but changes the public name; flagged for a
# follow-up.
def train_test_split_mock_pandas():
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    # With allow_lists=False the frame is converted to a plain array.
    X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
    assert_true(isinstance(X_train_arr, np.ndarray))
    assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
    # On the (balanced) iris problem, the default score, explicit
    # "accuracy" scoring and weighted F1 must all agree.
    iris = load_iris()
    clf = SVC(kernel='linear')
    expected = [0.97, 1., 0.97, 0.97, 1.]

    # Default score (should be the accuracy score).
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, expected, 2)

    # Explicit accuracy ("zero/one") scoring matches the default
    # estimator score.
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, expected, 2)

    # Classes are balanced, so weighted F1 equals accuracy as well.
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, expected, 2)
def test_cross_val_score_with_score_func_regression():
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()

    # Default score of the Ridge regression estimator.
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score.
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # Mean squared error; this is a loss function, so "scores" are
    # negative.
    mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                      scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)

    # Explained variance via a custom scorer object.
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)

    # On real labels the score is high and the permutation p-value low.
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)

    # A constant labels grouping must not change the result.
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)

    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)

    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)

    # test with custom scoring object
    def custom_score(y_true, y_pred):
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])

    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv,
        random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)

    # set random y: the score must drop to chance level and the p-value
    # become insignificant.
    y = np.mod(np.arange(len(y)), 3)

    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")

    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """All CV generators must yield integer index arrays (not boolean
    masks) that are usable for fancy indexing."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # BUGFIX: the original checked `train` twice and never
            # checked `test`.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            # Integer indices must support fancy indexing.
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """Same as test_cross_val_generator_with_indices, but relying on the
    generators' default (no explicit indices argument)."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # BUGFIX: the original checked `train` twice and never
            # checked `test`.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            # Integer indices must support fancy indexing.
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    # Every invalid (test_size, train_size) combination must be rejected.
    invalid_kwargs = [
        dict(test_size=2.0),
        dict(test_size=1.0),
        dict(test_size=0.1, train_size=0.95),
        dict(test_size=11),
        dict(test_size=10),
        dict(test_size=8, train_size=3),
        dict(train_size=1j),
        dict(test_size=None, train_size=None),
    ]
    for kwargs in invalid_kwargs:
        assert_raises(ValueError, cval.ShuffleSplit, 10, **kwargs)
def test_shufflesplit_reproducible():
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test when the random_state is given
    ss = cval.ShuffleSplit(10, random_state=21)
    # Compare the train-index sequences from two independent iterations.
    assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice a precomputed kernel matrix on both axes so
    that it matches the kernel recomputed from the sliced raw data."""
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    # Linear kernel computed up-front.
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    # Train block of K == kernel of the train rows.
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    # Test-vs-train block of K == kernel between test and train rows.
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # FIX: use floor division -- this module imports true division, so `/`
    # yields a float, which np.repeat rejects as a repeat count.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
    # Check that train_test_split allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # FIX: use floor division -- this module imports true division, so `/`
    # yields a float, which np.repeat rejects as a repeat count.
    y = np.repeat([0, 1], X.shape[0] // 2)
    cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # FIX: use floor division -- this module imports true division, so `/`
    # yields a float, which np.repeat rejects as a repeat count.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv must pick StratifiedKFold for classification with 1-D
    targets and plain KFold otherwise."""
    X = np.ones((9, 2))
    # Not a classifier -> plain KFold.
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    X = np.ones((5, 2))
    # Multilabel / multioutput targets cannot be stratified -> KFold.
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    """cross_val_score with multilabel targets and precision scorers
    (micro / macro / samples averaging) must return the expected
    per-fold scores."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    # Expected values rely on true division being in effect.
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict must match a hand-rolled fit/predict loop and
    accept dense, sparse and unsupervised estimators."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    # Sparse input: zero out values below the median, then wrap in COO.
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    # NOTE(review): comparing two lengths with assert_array_almost_equal
    # works but assert_equal would express the intent better.
    assert_array_almost_equal(len(preds), len(y))
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    def bad_cv():
        # Test indices out of range -> not a partition of the samples.
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """cross_val_predict must accept sparse matrices, Python lists and
    3-D arrays, and preserve the output shape."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        # Also exercise real pandas containers when available.
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        # CheckingClassifier asserts the containers reach fit() unchanged.
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """Sparse fit_params must be sliced per fold and forwarded to fit()."""
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    # One sparse weight per sample; cross_val_score must index it per fold.
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))
def test_check_is_partition():
    """_check_is_partition is True only for an exact cover of range(n)."""
    p = np.arange(100)
    assert_true(cval._check_is_partition(p, 100))
    # Missing one element -> not a partition.
    assert_false(cval._check_is_partition(np.delete(p, 23), 100))
    # Duplicated element (23 appears twice) -> not a partition.
    p[0] = 23
    assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
lgarren/spack | var/spack/repos/builtin/packages/py-yt/package.py | 3 | 4127 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyYt(PythonPackage):
    """Volumetric Data Analysis
    yt is a python package for analyzing and visualizing
    volumetric, multi-resolution data from astrophysical
    simulations, radio telescopes, and a burgeoning
    interdisciplinary community.
    """
    homepage = "http://yt-project.org"
    url = "https://bitbucket.org/yt_analysis/yt"
    # Released versions: the first two are md5-checksummed tarballs; the
    # rest are pinned to mercurial tags/commits of the upstream repo.
    version("3.3.5", "2ad314ff3d3261e41825d15db027b0e7",
            url="https://bitbucket.org/yt_analysis/yt/get/yt-3.3.5.tar.gz")
    version("3.3.4", "3a84e56dfd82f9dd923f3fb8490e679c",
            url="https://bitbucket.org/yt_analysis/yt/get/yt-3.3.4.tar.gz")
    version("3.3.1", hg="https://bitbucket.org/yt_analysis/yt",
            tag="yt-3.3.1", commit="9bc3d0e9b750c923d44d73c447df64fc431f5838")
    version("3.2.3", hg="https://bitbucket.org/yt_analysis/yt",
            tag="yt-3.2.3", commit="83d2c1e9313e7d83eb5b96888451ff2646fd8ff3")
    version("3.1.0", hg="https://bitbucket.org/yt_analysis/yt",
            tag="yt-3.1.0", commit="fd7cdc4836188a3badf81adb477bcc1b9632e485")
    version("3.0.2", hg="https://bitbucket.org/yt_analysis/yt",
            tag="yt-3.0.2", commit="511887af4c995a78fe606e58ce8162c88380ecdc")
    version("2.6.3", hg="https://bitbucket.org/yt_analysis/yt",
            tag="yt-2.6.3", commit="816186f16396a16853810ac9ebcde5057d8d5b1a")
    version("development", hg="https://bitbucket.org/yt_analysis/yt",
            branch="yt")
    # Optional features, toggled on the spec (e.g. ``py-yt +rockstar``).
    variant("astropy", default=True, description="enable astropy support")
    variant("h5py", default=True, description="enable h5py support")
    variant("scipy", default=True, description="enable scipy support")
    variant("rockstar", default=False, description="enable rockstar support")
    # Conditional dependencies follow the variants above.
    depends_on("py-astropy", type=('build', 'run'), when="+astropy")
    depends_on("py-cython", type=('build', 'run'))
    depends_on("py-h5py", type=('build', 'run'), when="+h5py")
    depends_on("py-ipython", type=('build', 'run'))
    depends_on("py-matplotlib", type=('build', 'run'))
    depends_on("py-numpy", type=('build', 'run'))
    depends_on("py-scipy", type=('build', 'run'), when="+scipy")
    depends_on("py-setuptools", type=('build', 'run'))
    depends_on("py-sympy", type=('build', 'run'))
    depends_on("rockstar@yt", type=('build', 'run'), when="+rockstar")
    depends_on("python@2.7:2.8,3.4:")
    @run_before('install')
    def prep_yt(self):
        # yt's build reads rockstar.cfg to locate the rockstar prefix, so
        # write it into the build directory before installation starts.
        if '+rockstar' in self.spec:
            with open('rockstar.cfg', 'w') as rockstar_cfg:
                rockstar_cfg.write(self.spec['rockstar'].prefix)
    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_install(self):
        # The Python interpreter path can be too long for this
        # yt = Executable(join_path(prefix.bin, "yt"))
        # yt("--help")
        python(join_path(self.prefix.bin, "yt"), "--help")
| lgpl-2.1 |
YihaoLu/statsmodels | statsmodels/datasets/statecrime/data.py | 25 | 3128 | #! /usr/bin/env python
"""Statewide Crime Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
# suggested notes
# FIX: corrected typos in the variable descriptions below
# ("Precent" -> "Percent", "prsent" -> "present", and a duplicated
# "obtained obtained").
NOTE = """::

    Number of observations: 51
    Number of variables: 8
    Variable name definitions:

    state
        All 50 states plus DC.
    violent
        Rate of violent crimes / 100,000 population. Includes murder, forcible
        rape, robbery, and aggravated assault. Numbers for Illinois and
        Minnesota do not include forcible rapes. Footnote included with the
        American Statistical Abstract table reads:
        "The data collection methodology for the offense of forcible
        rape used by the Illinois and the Minnesota state Uniform Crime
        Reporting (UCR) Programs (with the exception of Rockford, Illinois,
        and Minneapolis and St. Paul, Minnesota) does not comply with
        national UCR guidelines. Consequently, their state figures for
        forcible rape and violent crime (of which forcible rape is a part)
        are not published in this table."
    murder
        Rate of murders / 100,000 population.
    hs_grad
        Percent of population having graduated from high school or higher.
    poverty
        % of individuals below the poverty line
    white
        Percent of population that is one race - white only. From 2009 American
        Community Survey
    single
        Calculated from 2009 1-year American Community Survey obtained
        from Census. Variable is Male householder, no wife present, family
        household combined with Female household, no husband present, family
        household, divided by the total number of Family households.
    urban
        % of population in Urbanized Areas as of 2010 Census. Urbanized
        Areas are area of 50,000 or more people."""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the statecrime data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Column 2 (murder rate) is the endogenous variable; columns
    # [7, 4, 3, 5] are the regressors.  Passing exog_idx explicitly --
    # None would mean "all columns other than endog".
    raw = _get_data()
    return du.process_recarray(raw, endog_idx=2, exog_idx=[7, 4, 3, 5],
                               dtype=float)
def load_pandas():
    """Load the statecrime data as a pandas-backed Dataset instance."""
    # Same column selection as load(); index_idx=0 makes the state name
    # the DataFrame index.
    raw = _get_data()
    return du.process_recarray_pandas(raw, endog_idx=2, exog_idx=[7, 4, 3, 5],
                                      dtype=float, index_idx=0)
def _get_data():
    """Read statecrime.csv (shipped alongside this module) into a recarray."""
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    # FIX: use a context manager so the file handle is closed
    # deterministically (the original left it open until GC).
    with open(filepath + '/statecrime.csv', 'rb') as f:
        data = np.recfromtxt(f, delimiter=",", names=True, dtype=None)
    return data
| bsd-3-clause |
JRock007/boxxy | dist/Boxxy.app/Contents/Resources/lib/python2.7/numpy/lib/twodim_base.py | 37 | 26758 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types
)
from numpy.core import iinfo
# Public API of this module.
__all__ = [
    'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
    'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
    'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
# Cached limits of the signed integer types; used by _min_int to pick
# the smallest dtype able to represent a given index range.
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
    """
    Flip array in the left/right direction.

    Reverse the order of elements along axis 1 (columns); rows are
    preserved but their entries appear in reverse order.  Equivalent to
    ``m[:, ::-1]``; a view is returned, so the operation is O(1).

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.

    Raises
    ------
    ValueError
        If the input has fewer than two dimensions.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.

    Examples
    --------
    >>> np.fliplr(np.diag([1., 2., 3.]))
    array([[ 0.,  0.,  1.],
           [ 0.,  2.,  0.],
           [ 3.,  0.,  0.]])
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must be >= 2-d.")
    return arr[:, ::-1]
def flipud(m):
    """
    Flip array in the up/down direction.

    Reverse the order of elements along axis 0 (rows); columns are
    preserved but their entries appear in reverse order.  Equivalent to
    ``m[::-1, ...]``; a view is returned, so the operation is O(1).
    Does not require the array to be two-dimensional.

    Parameters
    ----------
    m : array_like
        Input array.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.

    Raises
    ------
    ValueError
        If the input has fewer than one dimension.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.

    Examples
    --------
    >>> np.flipud([1, 2])
    array([2, 1])
    """
    arr = asanyarray(m)
    if arr.ndim < 1:
        raise ValueError("Input must be >= 1-d.")
    return arr[::-1, ...]
def rot90(m, k=1):
    """
    Rotate an array by 90 degrees in the counter-clockwise direction.

    The rotation happens in the first two dimensions, so the input must
    be at least 2-D.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.

    Returns
    -------
    y : ndarray
        Rotated array.

    See Also
    --------
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.

    Examples
    --------
    >>> np.rot90(np.array([[1, 2], [3, 4]]))
    array([[2, 4],
           [1, 3]])
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must >= 2-d.")
    k %= 4
    # Each quarter-turn is a combination of axis reversals and a swap of
    # the first two axes (fliplr == [:, ::-1], flipud == [::-1]).
    if k == 0:
        return arr
    if k == 1:
        return arr[:, ::-1].swapaxes(0, 1)
    if k == 2:
        return arr[::-1, ::-1]
    # k == 3
    return arr.swapaxes(0, 1)[:, ::-1]
def eye(N, M=None, k=0, dtype=float):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        An array where all elements are equal to zero, except for the
        `k`-th diagonal, whose values are equal to one.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.

    Examples
    --------
    >>> np.eye(2, dtype=int)
    array([[1, 0],
           [0, 1]])
    """
    if M is None:
        M = N
    out = zeros((N, M), dtype=dtype)
    if k >= M:
        # Requested diagonal lies entirely to the right of the array.
        return out
    # Flat offset of the diagonal's first element: column k for upper
    # diagonals, row -k (i.e. (-k)*M flat elements in) for lower ones.
    start = k if k >= 0 else (-k) * M
    # Restrict to the first M-k rows so the stride-(M+1) walk cannot
    # wrap around past the end of the diagonal.
    out[:M - k].flat[start::M + 1] = 1
    return out
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    See the more detailed documentation for ``numpy.diagonal`` if you use
    this function to extract a diagonal and wish to write to the resulting
    array; whether it returns a copy or a view depends on what version of
    numpy you are using.

    Parameters
    ----------
    v : array_like
        If `v` is a 2-D array, return a copy of its `k`-th diagonal.
        If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
        diagonal.
    k : int, optional
        Diagonal in question. The default is 0. Use `k>0` for diagonals
        above the main diagonal, and `k<0` for diagonals below the main
        diagonal.

    Returns
    -------
    out : ndarray
        The extracted diagonal or constructed diagonal array.

    See Also
    --------
    diagonal : Return specified diagonals.
    diagflat : Create a 2-D array with the flattened input as a diagonal.
    trace : Sum along diagonals.

    Examples
    --------
    >>> np.diag(np.arange(9).reshape((3, 3)))
    array([0, 4, 8])
    """
    v = asarray(v)
    if v.ndim == 2:
        # 2-D input: extraction.
        return v.diagonal(k)
    if v.ndim != 1:
        raise ValueError("Input must be 1- or 2-d.")
    # 1-D input: construction.  Same flat-stride trick as eye().
    n = v.shape[0] + abs(k)
    out = zeros((n, n), v.dtype)
    start = k if k >= 0 else (-k) * n
    out[:n - k].flat[start::n + 1] = v
    return out
def diagflat(v, k=0):
    """
    Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data, which is flattened and set as the `k`-th
        diagonal of the output.
    k : int, optional
        Diagonal to set; 0, the default, corresponds to the "main"
        diagonal, a positive (negative) `k` giving the number of the
        diagonal above (below) the main.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
    diagonal : Return specified diagonals.
    trace : Sum along diagonals.

    Examples
    --------
    >>> np.diagflat([1, 2], 1)
    array([[0, 1, 0],
           [0, 0, 2],
           [0, 0, 0]])
    """
    # Remember subclass wrapping (e.g. np.matrix) before flattening.
    wrap = getattr(v, "__array_wrap__", None)
    flat = asarray(v).ravel()
    n = len(flat) + abs(k)
    res = zeros((n, n), flat.dtype)
    # Flat indices of the k-th diagonal: row i, column i+k (k >= 0) or
    # row i-k, column i (k < 0).
    if k >= 0:
        rows = arange(0, n - k)
        targets = rows + k + rows * n
    else:
        rows = arange(0, n + k)
        targets = rows + (rows - k) * n
    res.flat[targets] = flat
    return wrap(res) if wrap else res
def tri(N, M=None, k=0, dtype=float):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array.
        By default, `M` is taken equal to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        `k` = 0 is the main diagonal, while `k` < 0 is below it,
        and `k` > 0 is above. The default is 0.
    dtype : dtype, optional
        Data type of the returned array. The default is float.

    Returns
    -------
    tri : ndarray of shape (N, M)
        Array with its lower triangle filled with ones and zero
        elsewhere; in other words ``T[i,j] == 1`` for ``i <= j + k``,
        0 otherwise.

    Examples
    --------
    >>> np.tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
    """
    if M is None:
        M = N
    # T[i, j] = (i >= j - k); build via an outer comparison of the row
    # and (shifted) column indices, using the smallest integer dtypes
    # able to hold the index ranges.
    rows = arange(N, dtype=_min_int(0, N))
    cols = arange(-k, M - k, dtype=_min_int(-k, M - k))
    mask = greater_equal.outer(rows, cols)
    # astype(copy=False) avoids a copy when bool output was requested.
    return mask.astype(dtype, copy=False)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal
    zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is
        the main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle

    Examples
    --------
    >>> np.tril([[1, 2], [3, 4]], -1)
    array([[0, 0],
           [3, 0]])
    """
    arr = asanyarray(m)
    # Boolean mask of the kept (lower-triangular) positions over the
    # trailing two axes; everything else becomes a zero of arr's dtype.
    keep = tri(*arr.shape[-2:], k=k, dtype=bool)
    return where(keep, arr, zeros(1, arr.dtype))
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th
    diagonal zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array

    Examples
    --------
    >>> np.triu([[1, 2], [3, 4]], -1)
    array([[1, 2],
           [3, 4]])
    """
    arr = asanyarray(m)
    # Positions strictly below diagonal k (i.e. in tri(..., k=k-1)) are
    # zeroed; the rest of arr is kept.
    drop = tri(*arr.shape[-2:], k=k - 1, dtype=bool)
    return where(drop, zeros(1, arr.dtype), arr)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
    """
    Generate a Vandermonde matrix.

    The columns of the output matrix are powers of the input vector. The
    order of the powers is determined by the `increasing` boolean
    argument.  Specifically, when `increasing` is False, the `i`-th
    output column is the input vector raised element-wise to the power
    of ``N - i - 1``.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns in the output. If `N` is not specified, a
        square array is returned (``N = len(x)``).
    increasing : bool, optional
        Order of the powers of the columns. If True, the powers increase
        from left to right, if False (the default) they are reversed.

        .. versionadded:: 1.9.0

    Returns
    -------
    out : ndarray
        Vandermonde matrix. If `increasing` is False, the first column
        is ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If
        `increasing` is True, the columns are ``x^0, x^1, ...,
        x^(N-1)``.

    See Also
    --------
    polynomial.polynomial.polyvander

    Examples
    --------
    >>> np.vander(np.array([1, 2, 3, 5]), 3)
    array([[ 1,  1,  1],
           [ 4,  2,  1],
           [ 9,  3,  1],
           [25,  5,  1]])
    """
    x = asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    if N is None:
        N = len(x)
    out = empty((len(x), N), dtype=promote_types(x.dtype, int))
    # Work on an increasing-powers view; for decreasing powers the view
    # is the column-reversed alias of `out`, so writes land in `out`.
    view = out if increasing else out[:, ::-1]
    if N > 0:
        view[:, 0] = 1
    if N > 1:
        # Broadcast x down every remaining column, then turn column j
        # into x**j with a running product along axis 1.
        view[:, 1:] = x[:, None]
        multiply.accumulate(view[:, 1:], out=view[:, 1:], axis=1)
    return out
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x : array_like, shape (N,)
        An array containing the x coordinates of the points to be
        histogrammed.
    y : array_like, shape (N,)
        An array containing the y coordinates of the points to be
        histogrammed.
    bins : int or [int, int] or array_like or [array, array], optional
        The bin specification: a single int (nx=ny=bins), a pair of ints
        (nx, ny), a single array of edges shared by both dimensions, or
        a pair of edge arrays (x_edges, y_edges).
    range : array_like, shape(2,2), optional
        The leftmost and rightmost edges of the bins along each
        dimension (if not specified explicitly in the `bins`
        parameters): ``[[xmin, xmax], [ymin, ymax]]``. Values outside
        this range are treated as outliers and not tallied.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density ``bin_count / sample_count / bin_area``.
    weights : array_like, shape(N,), optional
        An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
        Weights are normalized to 1 if `normed` is True. If `normed` is
        False, the returned histogram is the sum of the weights of the
        samples falling into each bin.

    Returns
    -------
    H : ndarray, shape(nx, ny)
        The bi-dimensional histogram of samples `x` and `y`. Values in
        `x` are histogrammed along the first dimension and values in `y`
        along the second dimension.
    xedges : ndarray, shape(nx,)
        The bin edges along the first dimension.
    yedges : ndarray, shape(ny,)
        The bin edges along the second dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram

    Notes
    -----
    `x` is histogrammed along the *first* dimension of the output and
    `y` along the second (not the Cartesian plotting convention); this
    keeps the result compatible with `histogramdd`.
    """
    from numpy import histogramdd

    # Decide whether `bins` describes both dimensions at once (a scalar
    # or a single edge array) or each dimension separately.  len() raises
    # TypeError on scalars, which maps to the "one spec" case.
    try:
        n_specs = len(bins)
    except TypeError:
        n_specs = 1
    if n_specs not in (1, 2):
        # A single array of edges, shared by both dimensions.
        xedges = yedges = asarray(bins, float)
        bins = [xedges, yedges]
    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    Assume `mask_func` is a function that, for a square array a of size
    ``(n, n)`` with a possible offset argument `k`, when called as
    ``mask_func(a, k)`` returns a new array with zeros in certain
    locations (functions like `triu` or `tril` do precisely this). Then
    this function returns the indices where the non-zero values would be
    located.

    Parameters
    ----------
    n : int
        The returned indices will be valid to access arrays of shape
        (n, n).
    mask_func : callable
        A function whose call signature is similar to that of `triu`,
        `tril`. That is, ``mask_func(x, k)`` returns a boolean array,
        shaped like `x`. `k` is an optional argument to the function.
    k : scalar
        An optional argument which is passed through to `mask_func`.
        Functions like `triu`, `tril` take a second argument that is
        interpreted as an offset.

    Returns
    -------
    indices : tuple of arrays.
        The `n` arrays of indices corresponding to the locations where
        ``mask_func(np.ones((n, n)), k)`` is True.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    >>> iu = np.mask_indices(3, np.triu)
    >>> np.arange(9).reshape(3, 3)[iu]
    array([0, 1, 2, 4, 5, 8])
    """
    # Apply the mask function to an all-ones template; whatever survives
    # non-zero marks the positions the caller wants to index.
    template = ones((n, n), int)
    masked = mask_func(template, k)
    return where(masked != 0)
def tril_indices(n, k=0, m=None):
    """
    Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned
        indices will be valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension of the arrays for which the returned
        arrays will be valid.
        By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple of arrays
        The indices for the triangle. The returned tuple contains two
        arrays, each with the indices along one dimension of the array.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    >>> il1 = np.tril_indices(4)
    >>> np.arange(16).reshape(4, 4)[il1]
    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])
    """
    # The lower triangle is exactly the True region of a boolean tri()
    # mask; where() converts that mask into (row, col) index arrays.
    return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
    """Return the indices for the lower triangle of `arr`.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        A 2-D array; the indices are valid for arrays of the same shape.
    k : int, optional
        Diagonal offset (see `tril` for details).

    See Also
    --------
    tril_indices, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2:]
    return tril_indices(n_rows, k=k, m=n_cols)
def triu_indices(n, k=0, m=None):
    """Return the row/column indices of the upper triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        Row dimension of the arrays the returned indices address.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        Column dimension; defaults to `n`.

        .. versionadded:: 1.9.0

    Returns
    -------
    inds : tuple, shape(2) of ndarrays
        One integer index array per dimension; together they select every
        element on or above the `k`-th diagonal.

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    >>> iu1 = np.triu_indices(4)
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[iu1]
    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])
    """
    # The upper triangle at offset k is the complement of the lower
    # triangle at offset k - 1.
    upper_mask = ~tri(n, m, k=k - 1, dtype=bool)
    return where(upper_mask)
def triu_indices_from(arr, k=0):
    """Return the indices for the upper triangle of `arr`.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, N)
        A 2-D array; the indices are valid for arrays of the same shape.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray
        Indices for the upper triangle of `arr`.

    See Also
    --------
    triu_indices, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2:]
    return triu_indices(n_rows, k=k, m=n_cols)
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/sparse/scipy_sparse.py | 18 | 5516 | """
Interaction with scipy.sparse matrices.
Currently only includes SparseSeries.to_coo helpers.
"""
from pandas.core.index import MultiIndex, Index
from pandas.core.series import Series
from pandas.compat import OrderedDict, lmap
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError(
'Is not a partition because intersection is not null.')
if set.union(*parts) != whole:
raise ValueError('Is not a partition because union is not the whole.')
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
    """ For arbitrary (MultiIndexed) SparseSeries return
    (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
    passing to scipy.sparse.coo constructor. """
    # index and column levels must be a partition of the index
    _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
    # from the SparseSeries: get the labels and data for non-null entries
    # (relies on pandas-internal BlockManager/SparseArray attributes)
    values = ss._data.internal_values()._valid_sp_values
    nonnull_labels = ss.dropna()
    def get_indexers(levels):
        """ Return sparse coords and dense labels for subset levels """
        # TODO: how to do this better? cleanly slice nonnull_labels given the
        # coord
        # One tuple of index values per non-null entry, restricted to the
        # requested levels.
        values_ilabels = [tuple(x[i] for i in levels)
                          for x in nonnull_labels.index]
        if len(levels) == 1:
            # Unwrap 1-tuples so single-level labels stay scalar.
            values_ilabels = [x[0] for x in values_ilabels]
        # # performance issues with groupby ###################################
        # TODO: these two lines can replace the code below but
        # groupby is too slow (in some cases at least)
        # labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
        # labels_to_i[:] = np.arange(labels_to_i.shape[0])
        def _get_label_to_i_dict(labels, sort_labels=False):
            """ Return OrderedDict of unique labels to number.
            Optionally sort by label.
            """
            labels = Index(lmap(tuple, labels)).unique().tolist() # squish
            if sort_labels:
                labels = sorted(list(labels))
            d = OrderedDict((k, i) for i, k in enumerate(labels))
            return (d)
        def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
            # Map each distinct label tuple (over `subset` levels) to a
            # dense 0-based coordinate, returned as a Series.
            def robust_get_level_values(i):
                # if index has labels (that are not None) use those,
                # else use the level location
                try:
                    return index.get_level_values(index.names[i])
                except KeyError:
                    return index.get_level_values(i)
            ilabels = list(zip(*[robust_get_level_values(i) for i in subset]))
            labels_to_i = _get_label_to_i_dict(ilabels,
                                               sort_labels=sort_labels)
            labels_to_i = Series(labels_to_i)
            if len(subset) > 1:
                labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
                labels_to_i.index.names = [index.names[i] for i in subset]
            labels_to_i.name = 'value'
            return (labels_to_i)
        labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels,
                                                      sort_labels=sort_labels)
        # #####################################################################
        # #####################################################################
        # Look up the dense coordinate of every non-null entry's label.
        i_coord = labels_to_i[values_ilabels].tolist()
        i_labels = labels_to_i.index.tolist()
        return i_coord, i_labels
    i_coord, i_labels = get_indexers(row_levels)
    j_coord, j_labels = get_indexers(column_levels)
    return values, i_coord, j_coord, i_labels, j_labels
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
sort_labels=False):
""" Convert a SparseSeries to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
raise ValueError('to_coo requires MultiIndex with nlevels > 2')
if not ss.index.is_unique:
raise ValueError('Duplicate index entries are not allowed in to_coo '
'transformation.')
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels,
column_levels=column_levels,
sort_labels=sort_labels)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns)))
return sparse_matrix, rows, columns
def _coo_to_sparse_series(A, dense_index=False):
    """Convert a scipy.sparse.coo_matrix into a SparseSeries.

    The series is built with SparseSeries constructor defaults; when
    ``dense_index`` is True the result is reindexed over the full
    Cartesian product of row and column positions.
    """
    result = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
    result = result.sort_index()
    result = result.to_sparse() # TODO: specify kind?
    if dense_index:
        # is there a better constructor method to use here?
        full_index = MultiIndex.from_product([range(A.shape[0]),
                                              range(A.shape[1])])
        result = result.reindex_axis(full_index)
    return result
| mit |
tim777z/seaborn | doc/sphinxext/plot_generator.py | 38 | 10035 | """
Sphinx plugin to run example scripts and create a gallery page.
Lightly modified from the mpld3 project.
"""
from __future__ import division
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import image
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
position: absolue;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
                     width=300, height=300,
                     cx=0.5, cy=0.5, border=4):
    """Crop a ``width x height`` thumbnail out of ``infile`` centred at the
    fractional position ``(cx, cy)`` and save it to ``thumbfile``.

    Parameters
    ----------
    infile : str
        Path of the source image.
    thumbfile : str
        Path the thumbnail is written to.
    width, height : int, optional
        Size of the crop, in pixels.
    cx, cy : float, optional
        Fractional (column, row) centre of the crop within the source.
    border : int, optional
        Thickness of the black frame painted around the thumbnail.

    Returns
    -------
    fig : matplotlib Figure holding the rendered thumbnail.
    """
    # (removed: unused ``baseout, extout = op.splitext(thumbfile)``)
    im = image.imread(infile)
    rows, cols = im.shape[:2]
    x0 = int(cx * cols - .5 * width)
    y0 = int(cy * rows - .5 * height)
    xslice = slice(x0, x0 + width)
    yslice = slice(y0, y0 + height)
    thumb = im[yslice, xslice]
    # Paint a black border on all four sides (RGB channels only, so an
    # alpha channel, if present, is untouched).
    thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
    thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
    dpi = 100
    fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
    # A single borderless axes filling the figure.
    ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
                      frameon=False, xticks=[], yticks=[])
    ax.imshow(thumb, aspect='auto', resample=True,
              interpolation='bilinear')
    fig.savefig(thumbfile, dpi=dpi)
    return fig
def indent(s, N=4):
    """Return `s` with every line after the first indented by N spaces."""
    pad = '\n' + ' ' * N
    return s.replace('\n', pad)
class ExampleGenerator(object):
    """Tools for generating an example page from a file.

    Reads an example script, extracts its module docstring, executes it to
    produce a PNG figure plus thumbnail, and exposes the metadata needed to
    render the RST page and gallery index entry.

    NOTE(review): relies on Python-2-only constructs (``iterator.next`` in
    extract_docstring, ``execfile`` in exec_file); will not run on Python 3.
    """
    def __init__(self, filename, target_dir):
        self.filename = filename
        self.target_dir = target_dir
        # Default thumbnail crop centre; may be overridden by a
        # "_thumb: x, y" line found in the example's docstring.
        self.thumbloc = .5, .5
        self.extract_docstring()
        with open(filename, "r") as fid:
            self.filetext = fid.read()
        outfilename = op.join(target_dir, self.rstfilename)
        # Only actually run it if the output RST file doesn't
        # exist or it was modified less recently than the example
        if (not op.exists(outfilename)
                or (op.getmtime(outfilename) < op.getmtime(filename))):
            self.exec_file()
        else:
            print("skipping {0}".format(self.filename))
    @property
    def dirname(self):
        # Directory part of the example's path.
        return op.split(self.filename)[0]
    @property
    def fname(self):
        # Bare file name of the example script.
        return op.split(self.filename)[1]
    @property
    def modulename(self):
        return op.splitext(self.fname)[0]
    @property
    def pyfilename(self):
        return self.modulename + '.py'
    @property
    def rstfilename(self):
        return self.modulename + ".rst"
    @property
    def htmlfilename(self):
        return self.modulename + '.html'
    @property
    def pngfilename(self):
        pngfile = self.modulename + '.png'
        return "_images/" + pngfile
    @property
    def thumbfilename(self):
        pngfile = self.modulename + '_thumb.png'
        return pngfile
    @property
    def sphinxtag(self):
        return self.modulename
    @property
    def pagetitle(self):
        # First line of the module docstring serves as the page title.
        return self.docstring.strip().split('\n')[0].strip()
    @property
    def plotfunc(self):
        # Best-effort guess of the seaborn entry point the example uses:
        # the first sns.*plot, sns.*map or sns.*Grid call in the source.
        match = re.search(r"sns\.(.+plot)\(", self.filetext)
        if match:
            return match.group(1)
        match = re.search(r"sns\.(.+map)\(", self.filetext)
        if match:
            return match.group(1)
        match = re.search(r"sns\.(.+Grid)\(", self.filetext)
        if match:
            return match.group(1)
        return ""
    def extract_docstring(self):
        """ Extract a module-level docstring
        """
        lines = open(self.filename).readlines()
        start_row = 0
        if lines[0].startswith('#!'):
            # Skip a shebang line; remember the offset for line numbering.
            lines.pop(0)
            start_row = 1
        docstring = ''
        first_par = ''
        # Python 2 only: feeds the iterator's .next method to tokenize.
        tokens = tokenize.generate_tokens(lines.__iter__().next)
        for tok_type, tok_content, _, (erow, _), _ in tokens:
            tok_type = token.tok_name[tok_type]
            if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
                continue
            elif tok_type == 'STRING':
                docstring = eval(tok_content)
                # If the docstring is formatted with several paragraphs,
                # extract the first one:
                paragraphs = '\n'.join(line.rstrip()
                                       for line in docstring.split('\n')
                                       ).split('\n\n')
                if len(paragraphs) > 0:
                    first_par = paragraphs[0]
            break
        # Look for an optional "_thumb: x, y" crop-centre directive.
        thumbloc = None
        for i, line in enumerate(docstring.split("\n")):
            m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
            if m:
                thumbloc = float(m.group(1)), float(m.group(2))
                break
        if thumbloc is not None:
            self.thumbloc = thumbloc
            # Strip the directive from the published docstring.
            docstring = "\n".join([l for l in docstring.split("\n")
                                   if not l.startswith("_thumb")])
        self.docstring = docstring
        self.short_desc = first_par
        # Line after the docstring: where the RST source listing starts.
        self.end_line = erow + 1 + start_row
    def exec_file(self):
        """Execute the example script; save its figure and thumbnail."""
        print("running {0}".format(self.filename))
        plt.close('all')
        my_globals = {'pl': plt,
                      'plt': plt}
        # Python 2 only: execfile was removed in Python 3.
        execfile(self.filename, my_globals)
        fig = plt.gcf()
        fig.canvas.draw()
        pngfile = op.join(self.target_dir, self.pngfilename)
        thumbfile = op.join("example_thumbs", self.thumbfilename)
        self.html = "<img src=../%s>" % self.pngfilename
        fig.savefig(pngfile, dpi=75, bbox_inches="tight")
        cx, cy = self.thumbloc
        create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
    def toctree_entry(self):
        # Entry for the hidden toctree in the gallery index page.
        return " ./%s\n\n" % op.splitext(self.htmlfilename)[0]
    def contents_entry(self):
        # HTML snippet for the thumbnail grid on the gallery index page.
        return (".. raw:: html\n\n"
                " <div class='figure align-center'>\n"
                " <a href=./{0}>\n"
                " <img src=../_static/{1}>\n"
                " <span class='figure-label'>\n"
                " <p>{2}</p>\n"
                " </span>\n"
                " </a>\n"
                " </div>\n\n"
                "\n\n"
                "".format(self.htmlfilename,
                          self.thumbfilename,
                          self.plotfunc))
def main(app):
    """Build the example gallery: run every example script under
    ``../examples``, write its RST page and thumbnail, then emit the
    gallery index page."""
    static_dir = op.join(app.builder.srcdir, '_static')
    target_dir = op.join(app.builder.srcdir, 'examples')
    image_dir = op.join(app.builder.srcdir, 'examples/_images')
    thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
    source_dir = op.abspath(op.join(app.builder.srcdir,
                                    '..', 'examples'))
    # Create every output directory that does not exist yet.
    if not op.exists(static_dir):
        os.makedirs(static_dir)
    if not op.exists(target_dir):
        os.makedirs(target_dir)
    if not op.exists(image_dir):
        os.makedirs(image_dir)
    if not op.exists(thumb_dir):
        os.makedirs(thumb_dir)
    if not op.exists(source_dir):
        os.makedirs(source_dir)
    banner_data = []
    toctree = ("\n\n"
               ".. toctree::\n"
               " :hidden:\n\n")
    contents = "\n\n"
    # Write individual example files
    for filename in glob.glob(op.join(source_dir, "*.py")):
        ex = ExampleGenerator(filename, target_dir)
        banner_data.append({"title": ex.pagetitle,
                            "url": op.join('examples', ex.htmlfilename),
                            "thumb": op.join(ex.thumbfilename)})
        shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
        output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
                                     docstring=ex.docstring,
                                     end_line=ex.end_line,
                                     fname=ex.pyfilename,
                                     img_file=ex.pngfilename)
        with open(op.join(target_dir, ex.rstfilename), 'w') as f:
            f.write(output)
        toctree += ex.toctree_entry()
        contents += ex.contents_entry()
    if len(banner_data) < 10:
        # Repeat the entries so the banner rotation has at least 10 items.
        banner_data = (4 * banner_data)[:10]
    # write index file
    index_file = op.join(target_dir, 'index.rst')
    with open(index_file, 'w') as index:
        index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
                                          toctree=toctree,
                                          contents=contents))
def setup(app):
    # Sphinx extension hook: regenerate the gallery when the builder starts.
    app.connect('builder-inited', main)
| bsd-3-clause |
jonpdx/gsoc2014 | contour sem tiff down sample.py | 1 | 2985 | # -*- coding: utf-8 -*-
# <nbformat>2</nbformat>
# <codecell>
###############################################################################
# Import Libraries
import time as time
import numpy as np
import scipy as sp
import pylab as pl
import Image
#
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import Ward
# <codecell>
###############################################################################
# Get SEM Data
#
#get the file path
tifPath = '/home/jon/Desktop/gsoc2014/semImages/'
tifFile = 'CML0615(13).tif'
tifPathAndFile = tifPath + tifFile
#
#import in the tif
im = Image.open(tifPathAndFile)
im.show()
# <codecell>
###############################################################################
#Check out the shape and size of the tif
imarray = np.array(im)
#
print imarray.shape
#(44, 330)
print im.size
#(330, 44)
# <codecell>
###############################################################################
#crop the tiff
box=(50, 50, 200, 200)
#
im_crop=im.crop(box)
#
im_crop.show()
###############################################################################
#reassign im
im = im_crop
imarray = np.array(im)
#
print imarray.shape
#
print im.size
# <codecell>
###############################################################################
# Convert Tif into "LENA"
lena = im
# <codecell>
###############################################################################
# Show Tiff
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
pl.xticks(())
pl.yticks(())
pl.show()
# <codecell>
###############################################################################
# Generate data
#lena = sp.misc.lena()
# Downsample the image by a factor of 4
#lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
#X = np.reshape(lena, (-1, 1))
# <codecell>
X = np.reshape(lena, (-1, 1))
# <codecell>
print imarray.shape
# <codecell>
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*imarray.shape)
# <codecell>
###############################################################################
# Compute clustering
print "Compute structured hierarchical clustering..."
#
st = time.time()
#
n_clusters = 5 # number of regions
#
ward = Ward(n_clusters=n_clusters, connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, imarray.shape)
#
print "Elaspsed time: ", time.time() - st
print "Number of pixels: ", label.size
print "Number of clusters: ", np.unique(label).size
# <codecell>
###############################################################################
# Plot the results on an image
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
#
for l in range(n_clusters):
pl.contour(label == l, contours=1,
colors=[pl.cm.spectral(l / float(n_clusters)), ])
#
pl.xticks(())
pl.yticks(())
#
pl.show()
# <codecell>
| gpl-2.0 |
cjermain/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
    """Flip array in the left/right direction.

    Reverses the order of the columns; rows are preserved.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed; O(1), no copy is made.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``A[:, ::-1]``.
    """
    arr = asanyarray(m)
    if arr.ndim >= 2:
        return arr[:, ::-1]
    raise ValueError("Input must be >= 2-d.")
def flipud(m):
    """Flip array in the up/down direction.

    Reverses the order of the rows; columns are preserved.

    Parameters
    ----------
    m : array_like
        Input array, at least 1-D.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed; O(1), no copy is made.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``A[::-1, ...]``.
    """
    arr = asanyarray(m)
    if arr.ndim >= 1:
        return arr[::-1, ...]
    raise ValueError("Input must be >= 1-d.")
def rot90(m, k=1):
    """Rotate an array by 90 degrees counter-clockwise, `k` times.

    Only the first two dimensions rotate, so `m` must be at least 2-D.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of 90-degree rotations to apply.

    Returns
    -------
    y : ndarray
        Rotated array.

    See Also
    --------
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must >= 2-d.")
    k %= 4
    if k == 0:
        return arr
    if k == 2:
        return fliplr(flipud(arr))
    if k == 1:
        return fliplr(arr).swapaxes(0, 1)
    # k == 3: one clockwise quarter-turn
    return fliplr(arr.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns; defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (default) is the main diagonal, positive
        for diagonals above it, negative for diagonals below.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N, M)
        Zeros everywhere except ones along the `k`-th diagonal.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.
    """
    cols = N if M is None else M
    out = zeros((N, cols), dtype=dtype)
    if k < cols:
        # Flat offset of the diagonal's first in-bounds element.
        start = k if k >= 0 else (-k) * cols
        # Stride cols+1 walks the diagonal in the flattened array.
        out[:cols - k].flat[start::cols + 1] = 1
    return out
def diag(v, k=0):
    """Extract a diagonal or construct a diagonal array.

    See ``numpy.diagonal`` for details on copy-vs-view semantics when
    extracting and writing to the result.

    Parameters
    ----------
    v : array_like
        If 2-D, a copy of its `k`-th diagonal is returned.  If 1-D, a
        2-D array with `v` on the `k`-th diagonal is returned.
    k : int, optional
        Diagonal in question; default 0.  Use ``k>0`` for diagonals above
        the main diagonal and ``k<0`` for those below.

    Returns
    -------
    out : ndarray
        The extracted diagonal or constructed diagonal array.

    See Also
    --------
    diagonal, diagflat, trace, triu, tril
    """
    v = asanyarray(v)
    if v.ndim == 1:
        n = v.shape[0] + abs(k)
        out = zeros((n, n), v.dtype)
        start = k if k >= 0 else (-k) * n
        out[:n - k].flat[start::n + 1] = v
        return out
    if v.ndim == 2:
        return diagonal(v, k)
    raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
    """Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data, flattened and placed on the `k`-th diagonal.
    k : int, optional
        Diagonal to set; 0 (default) is the main diagonal, positive values
        are above it, negative below.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag, diagonal, trace
    """
    # Preserve subclass wrapping when the input offers __array_wrap__.
    wrap = getattr(v, '__array_wrap__', None)
    flat = asarray(v).ravel()
    n = len(flat) + abs(k)
    res = zeros((n, n), flat.dtype)
    if k >= 0:
        idx = arange(0, n - k)
        res.flat[idx + k + idx * n] = flat
    else:
        idx = arange(0, n + k)
        res.flat[idx + (idx - k) * n] = flat
    return wrap(res) if wrap else res
def tri(N, M=None, k=0, dtype=float):
    """Return an (N, M) array of ones at and below the `k`-th diagonal.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns; defaults to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled; 0 is the
        main diagonal, negative below, positive above.
    dtype : dtype, optional
        Data type of the returned array; default float.

    Returns
    -------
    tri : ndarray of shape (N, M)
        ``T[i, j] == 1`` for ``j <= i + k``, 0 otherwise.
    """
    if M is None:
        M = N
    row_idx = arange(N, dtype=_min_int(0, N))
    col_idx = arange(-k, M - k, dtype=_min_int(-k, M - k))
    mask = greater_equal.outer(row_idx, col_idx)
    # astype(copy=False) hands the mask itself back when bool was requested.
    return mask.astype(dtype, copy=False)
def tril(m, k=0):
    """Lower triangle of an array.

    Return a copy of `m` with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements; 0 (default) is the main
        diagonal, negative below it, positive above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, same shape and dtype as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    arr = asanyarray(m)
    keep = tri(*arr.shape[-2:], k=k, dtype=bool)
    return where(keep, arr, zeros(1, arr.dtype))
def triu(m, k=0):
    """Upper triangle of an array.

    Return a copy of `m` with elements below the `k`-th diagonal zeroed.
    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array
    """
    arr = asanyarray(m)
    # Everything on or below diagonal k-1 gets zeroed out.
    drop = tri(*arr.shape[-2:], k=k - 1, dtype=bool)
    return where(drop, zeros(1, arr.dtype), arr)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
    """Generate a Vandermonde matrix.

    Each column is the input vector raised element-wise to a power.  With
    ``increasing=False`` (default) column ``i`` holds ``x**(N - i - 1)``;
    with ``increasing=True`` the powers run ``x**0 .. x**(N-1)``.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns; defaults to ``len(x)`` (square output).
    increasing : bool, optional
        If True, powers increase from left to right.

        .. versionadded:: 1.9.0

    Returns
    -------
    out : ndarray
        The (len(x), N) Vandermonde matrix.

    See Also
    --------
    polynomial.polynomial.polyvander
    """
    x = asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    if N is None:
        N = len(x)
    out = empty((len(x), N), dtype=promote_types(x.dtype, int))
    # Work through a reversed view for decreasing powers so the cumulative
    # product below always runs left-to-right.
    view = out if increasing else out[:, ::-1]
    if N > 0:
        view[:, 0] = 1
    if N > 1:
        view[:, 1:] = x[:, None]
        multiply.accumulate(view[:, 1:], out=view[:, 1:], axis=1)
    return out
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x, y : array_like, shape (N,)
        Coordinates of the points to histogram.  `x` is binned along the
        *first* output dimension and `y` along the second (note this is
        not the Cartesian plotting convention; it matches `histogramdd`).
    bins : int or array_like or [int, int] or [array, array], optional
        Bin specification per dimension: a single int or edge array is
        shared by both dimensions; a pair gives each dimension its own
        count and/or edges.
    range : array_like, shape (2, 2), optional
        Outermost bin edges ``[[xmin, xmax], [ymin, ymax]]`` where not
        given explicitly in `bins`; samples outside are ignored.
    normed : bool, optional
        If True, return the bin density ``bin_count/sample_count/bin_area``
        instead of counts.
    weights : array_like, shape (N,), optional
        Weight per sample; summed per bin (normalized when `normed`).

    Returns
    -------
    H : ndarray, shape (nx, ny)
        The bi-dimensional histogram.
    xedges : ndarray, shape (nx,)
        Bin edges along the first dimension.
    yedges : ndarray, shape (ny,)
        Bin edges along the second dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram
    """
    from numpy import histogramdd
    try:
        n_given = len(bins)
    except TypeError:
        # A scalar bin count applies to both dimensions.
        n_given = 1
    if n_given != 1 and n_given != 2:
        # A lone sequence of edges is shared by both dimensions.
        xedges = yedges = asarray(bins, float)
        bins = [xedges, yedges]
    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    `mask_func` is expected to behave like `triu` or `tril`: called as
    ``mask_func(a, k)`` on a square array ``a`` of shape ``(n, n)``, it
    returns an array that is zero except at the locations of interest.
    This function reports exactly those locations as an index tuple.

    Parameters
    ----------
    n : int
        The returned indices will be valid to access arrays of shape (n, n).
    mask_func : callable
        A function whose call signature is similar to that of `triu`, `tril`.
        That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
        `k` is an optional argument to the function.
    k : scalar
        An optional argument which is passed through to `mask_func`. Functions
        like `triu`, `tril` take a second argument that is interpreted as an
        offset.

    Returns
    -------
    indices : tuple of arrays.
        The `n` arrays of indices corresponding to the locations where
        ``mask_func(np.ones((n, n)), k)`` is True.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    Indices of the upper triangular part of a 3x3 array:

    >>> iu = np.mask_indices(3, np.triu)
    >>> a = np.arange(9).reshape(3, 3)
    >>> a[iu]
    array([0, 1, 2, 4, 5, 8])

    An offset can be passed to the masking function as well; here only the
    part right of the main diagonal is selected:

    >>> iu1 = np.mask_indices(3, np.triu, 1)
    >>> a[iu1]
    array([1, 2, 5])
    """
    # Apply the mask to an all-ones square template and report every
    # position that survives (i.e. is nonzero).
    template = ones((n, n), int)
    return where(mask_func(template, k) != 0)
def tril_indices(n, k=0, m=None):
    """
    Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned
        indices will be valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension of the arrays for which the returned
        arrays will be valid.
        By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple of arrays
        The indices for the triangle: two arrays, one of row indices and
        one of column indices.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    Two sets of indices for 4x4 arrays: the lower triangle starting at the
    main diagonal, and one starting two diagonals further right:

    >>> il1 = np.tril_indices(4)
    >>> il2 = np.tril_indices(4, 2)
    >>> a = np.arange(16).reshape(4, 4)

    Indexing:

    >>> a[il1]
    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])

    Assignment:

    >>> a[il1] = -1
    >>> a[il2] = -10
    """
    # tri() builds a boolean mask that is True on and below the k-th
    # diagonal; where() turns it into coordinate arrays.
    lower_mask = tri(n, m, k=k, dtype=bool)
    return where(lower_mask)
def tril_indices_from(arr, k=0):
    """
    Return the indices for the lower-triangle of arr.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        The returned indices are valid for arrays with the same
        dimensions as `arr`.
    k : int, optional
        Diagonal offset (see `tril` for details).

    Raises
    ------
    ValueError
        If `arr` is not 2-dimensional.

    See Also
    --------
    tril_indices, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    # Only 2-D arrays have a well-defined lower triangle.
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return tril_indices(n_rows, k=k, m=n_cols)
def triu_indices(n, k=0, m=None):
    """
    Return the indices for the upper-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The size of the arrays for which the returned indices will
        be valid.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension of the arrays for which the returned
        arrays will be valid.
        By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple, shape(2) of ndarrays, shape(`n`)
        The indices for the triangle: two arrays, one of row indices and
        one of column indices.  Can be used to slice a ndarray of
        shape(`n`, `n`).

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    Two sets of indices for 4x4 arrays: the upper triangle starting at the
    main diagonal, and one starting two diagonals further right:

    >>> iu1 = np.triu_indices(4)
    >>> iu2 = np.triu_indices(4, 2)
    >>> a = np.arange(16).reshape(4, 4)

    Indexing:

    >>> a[iu1]
    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])

    Assignment:

    >>> a[iu1] = -1
    >>> a[iu2] = -10
    """
    # The upper triangle (from diagonal k upward) is the complement of the
    # strictly-lower triangle, i.e. everything on or below diagonal k-1.
    strictly_lower = tri(n, m, k=k - 1, dtype=bool)
    return where(~strictly_lower)
def triu_indices_from(arr, k=0):
    """
    Return the indices for the upper-triangle of arr.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, M)
        The indices will be valid for arrays whose dimensions are the same
        as those of `arr`.  The array need not be square: the column
        dimension passed to `triu_indices` is taken from ``arr.shape[-1]``.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray
        Indices for the upper-triangle of `arr`.

    Raises
    ------
    ValueError
        If `arr` is not 2-dimensional.

    See Also
    --------
    triu_indices, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    # Only 2-D arrays have a well-defined upper triangle.
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
wangyum/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 20 | 12315 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train: # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
TNick/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py | 44 | 3208 | from __future__ import print_function
from optparse import OptionParser
import warnings
try:
from sklearn.metrics import classification_report
except ImportError:
classification_report = None
warnings.warn("couldn't find sklearn.metrics.classification_report")
try:
from sklearn.metrics import confusion_matrix
except ImportError:
confusion_matrix = None
warnings.warn("couldn't find sklearn.metrics.metrics.confusion_matrix")
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np
def test(model, X, y):
    """Evaluate a fitted classifier on (X, y) and print its accuracy.

    Parameters
    ----------
    model : object
        Any object exposing a scikit-learn style ``predict(X)`` method
        (here typically a pickled SVM).
    X : ndarray
        Feature matrix to predict on.
    y : ndarray
        Ground-truth labels aligned with the rows of `X`.
    """
    print("Evaluating svm")
    y_pred = model.predict(X)
    # The mean of the element-wise comparison is the classification accuracy.
    # (The original code wrapped this in a dead `if True:` left over from a
    # commented-out try/except plus an unused triple-quoted debug string;
    # both were no-ops and have been removed.)
    acc = (y == y_pred).mean()
    print("Accuracy ", acc)
def get_test_labels(cifar10, cifar100, stl10):
    """Load just the ground-truth labels of the requested test set.

    Exactly one of the three boolean flags must be set.  The datasets do
    not expose labels separately, so the full test split is loaded and
    only its label array is returned (fine labels for CIFAR-100).
    """
    assert cifar10 + cifar100 + stl10 == 1
    if stl10:
        print('loading entire stl-10 test set just to get the labels')
        dataset = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
        return dataset.y
    if cifar10:
        print('loading entire cifar10 test set just to get the labels')
        return np.asarray(CIFAR10(which_set='test').y)
    if cifar100:
        print('loading entire cifar100 test set just to get the fine labels')
        return np.asarray(CIFAR100(which_set='test').y_fine)
    assert False
def main(model_path,
         test_path,
         dataset,
         **kwargs):
    """Load a pickled model and precomputed features, then report accuracy.

    Parameters
    ----------
    model_path : str
        Path to a pickled classifier with a ``predict`` method.
    test_path : str
        Path to the precomputed test-set feature file.
    dataset : str
        One of 'cifar10', 'cifar100' or 'stl10'.
    """
    model = serial.load(model_path)
    # Translate the dataset name into the one-hot flag triple that
    # get_test_labels() expects.
    cifar10 = dataset == 'cifar10'
    cifar100 = dataset == 'cifar100'
    stl10 = dataset == 'stl10'
    assert cifar10 + cifar100 + stl10 == 1
    y = get_test_labels(cifar10, cifar100, stl10)
    X = get_features(test_path, False, False)
    # Each supported test split has a fixed, known size.
    num_examples = 8000 if stl10 else 10000
    if X.shape[0] != num_examples:
        raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))
    assert y.shape[0] == num_examples
    test(model, X, y)
if __name__ == '__main__':
    """
    Useful for quick tests.
    Usage: python train_bilinear.py
    """
    # NOTE(review): the option parser below is built but never consulted --
    # parse_args() is commented out further down and main() is called with
    # hard-coded arguments instead (quick-test mode).  Re-enable
    # parse_args() to drive this script from the command line.
    parser = OptionParser()
    parser.add_option("-m", "--model",
                      action="store", type="string", dest="model_path")
    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-o", action="store", dest="output", default = None, help="path to write the report to")
    parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
main(model_path='final_model.pkl',
test_path='test_features.npy',
dataset = 'cifar100',
)
| bsd-3-clause |
weigq/pytorch-pose | evaluation/utils.py | 2 | 1239 |
def visualize(oriImg, points, pa):
    """Draw the skeleton defined by ``pa`` over ``oriImg`` and save it.

    Parameters
    ----------
    oriImg : ndarray
        BGR image (OpenCV channel order); it is drawn on in place.
    points : ndarray, shape (num_joints, 2)
        Pixel (x, y) coordinates of each joint.
    pa : sequence of int
        1-based parent index of each joint; 0 marks a root joint that
        has no limb to draw.

    The annotated image is written to ``data/mpii/result/test_images``
    with a timestamped filename.
    """
    import matplotlib
    import cv2 as cv
    import matplotlib.pyplot as plt
    # One distinct BGR color per limb.
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255]]
    canvas = oriImg
    x = points[:, 0]
    y = points[:, 1]
    # Draw one line per (child, parent) joint pair.  The original code
    # repeated this whole pass len(x) times inside an outer loop, which
    # only redrew identical lines; a single pass produces the same image.
    # It also compared parent indices with `is 0`, which relies on CPython
    # small-int interning -- `== 0` is the correct comparison.
    for child in range(len(pa)):
        if pa[child] == 0:
            continue  # root joint: no parent limb to draw
        x1 = x[pa[child] - 1]
        y1 = y[pa[child] - 1]
        x2 = x[child]
        y2 = y[child]
        cv.line(canvas, (x1, y1), (x2, y2), colors[child], 8)
    # Reorder channels BGR -> RGB for matplotlib display.
    plt.imshow(canvas[:, :, [2, 1, 0]])
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(12, 12)
    from time import gmtime, strftime
    import os
    directory = 'data/mpii/result/test_images'
    if not os.path.exists(directory):
        os.makedirs(directory)
    fn = os.path.join(directory, strftime("%Y-%m-%d-%H_%M_%S", gmtime()) + '.jpg')
plt.savefig(fn) | gpl-3.0 |
sangwook236/SWDT | sw_dev/python/rnd/test/machine_vision/opencv/opencv_transformation.py | 2 | 12844 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import math
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# REF [site] >> https://docs.opencv.org/master/da/d6e/tutorial_py_geometric_transformations.html
def geometric_transformation():
    """Demonstrate OpenCV geometric transforms on sample images.

    Walks through scaling, translation, rotation, affine warping and
    perspective warping, mirroring the OpenCV-Python geometric
    transformations tutorial.  Results are shown via cv.imshow /
    matplotlib; the function returns nothing.
    """
    #--------------------
    # Scaling.
    img = cv.imread('../../../data/machine_vision/messi5.jpg', cv.IMREAD_COLOR)
    height, width = img.shape[:2]
    # Two equivalent ways to double the image size: by scale factors ...
    res = cv.resize(img, None, fx=2, fy=2, interpolation=cv.INTER_CUBIC)
    # ... or by an explicit destination size.  The second call deliberately
    # overwrites the first; both forms are shown for illustration.
    res = cv.resize(img, (2 * width, 2 * height), interpolation=cv.INTER_CUBIC)
    #--------------------
    # Translation.
    img = cv.imread('../../../data/machine_vision/messi5.jpg', cv.IMREAD_GRAYSCALE)
    rows, cols = img.shape
    # 2x3 affine matrix shifting 100 px right and 50 px down.
    M = np.float32([[1, 0, 100], [0, 1, 50]])
    dst = cv.warpAffine(img, M, (cols, rows), flags=cv.INTER_LINEAR)
    cv.imshow('img', dst)
    cv.waitKey(0)
    cv.destroyAllWindows()
    #--------------------
    # Rotation.
    img = cv.imread('../../../data/machine_vision/messi5.jpg', cv.IMREAD_GRAYSCALE)
    rows, cols = img.shape
    # Rotate 90 degrees about the image center.
    # cols-1 and rows-1 are the coordinate limits.
    M = cv.getRotationMatrix2D(((cols - 1) / 2.0, (rows - 1) / 2.0), 90, scale=1)
    dst = cv.warpAffine(img, M, (cols, rows), flags=cv.INTER_LINEAR)
    #--------------------
    # Affine transformation.
    img = cv.imread('../../../data/machine_vision/drawing.png', cv.IMREAD_COLOR)
    rows, cols, ch = img.shape
    # Three point correspondences fully determine an affine map.
    pts1 = np.float32([[50, 50], [200, 50], [50, 200]])
    pts2 = np.float32([[10, 100], [200, 50], [100, 250]])
    M = cv.getAffineTransform(pts1, pts2)
    dst = cv.warpAffine(img, M, (cols, rows), flags=cv.INTER_LINEAR)
    plt.subplot(121), plt.imshow(img), plt.title('Input')
    plt.subplot(122), plt.imshow(dst), plt.title('Output')
    plt.show()
    #--------------------
    # Perspective transformation.
    img = cv.imread('../../../data/machine_vision/sudoku.png', cv.IMREAD_COLOR)
    rows, cols, ch = img.shape
    # Four point correspondences determine the perspective (homography) map.
    pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
    pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    M = cv.getPerspectiveTransform(pts1, pts2, solveMethod=cv.DECOMP_LU)
    dst = cv.warpPerspective(img, M, (300, 300), flags=cv.INTER_LINEAR)
    plt.subplot(121), plt.imshow(img), plt.title('Input')
    plt.subplot(122), plt.imshow(dst), plt.title('Output')
    plt.show()
def ellipse_transformation_example():
    """Visualize bounding-box fitting and deskewing of a rotated ellipse.

    For a sweep of ellipse angles, draws the minimum-area (oriented)
    bounding rectangle (OBB) and the axis-aligned bounding rectangle
    (AABB), then shows the ellipse patch brought upright via several
    alternative routes: cv.warpAffine rotation, scipy.ndimage.rotate, an
    affine transform fit to three OBB corners, and a perspective
    transform fit to all four corners.  Display-only; returns nothing.
    """
    #for rangle in range(0, 360, 10):
    for rangle in range(-90, 90, 10):
        rgb = np.zeros((500, 500, 3), np.uint8)
        cv.ellipse(rgb, (300, 200), (100, 50), rangle, 0, 360, (127, 127, 127), cv.FILLED, cv.LINE_8)
        # Coordinates of every non-black pixel, reordered to (x, y) pairs.
        pts = np.stack(np.nonzero(rgb)).transpose()[:,[1, 0]]
        # Rotated rectangle (OBB).
        obb = cv.minAreaRect(pts) # Tuple: (center, size, angle).
        obb_center, obb_size, obb_angle = obb
        obb_pts = cv.boxPoints(obb) # 4 x 2. np.float32.
        #obb_pts = np.int0(obb_pts)
        # Straight bounding rectangle (AABB).
        aabb = cv.boundingRect(pts) # x (left), y (top), width, height.
        aabb_x1, aabb_y1, aabb_x2, aabb_y2 = aabb[0], aabb[1], aabb[0] + aabb[2], aabb[1] + aabb[3]
        # The largest bbox.
        (left, top), (right, bottom) = np.min(obb_pts, axis=0), np.max(obb_pts, axis=0)
        cv.rectangle(rgb, (left, top), (right, bottom), (255, 255, 0), 2, cv.LINE_8)
        cv.rectangle(rgb, (aabb_x1, aabb_y1), (aabb_x2, aabb_y2), (0, 255, 255), 2, cv.LINE_8)
        if False:
            cv.drawContours(rgb, [np.int0(obb_pts)], 0, (0, 0, 255), 2, cv.LINE_8)
        else:
            # Draw the OBB edge-by-edge in distinct colors so the corner
            # ordering returned by cv.boxPoints can be inspected visually.
            cv.line(rgb, (math.floor(obb_pts[1,0]), math.floor(obb_pts[1,1])), (math.floor(obb_pts[2,0]), math.floor(obb_pts[2,1])), (0, 0, 255), 2, cv.LINE_8)
            cv.line(rgb, (math.floor(obb_pts[2,0]), math.floor(obb_pts[2,1])), (math.floor(obb_pts[3,0]), math.floor(obb_pts[3,1])), (0, 255, 0), 2, cv.LINE_8)
            cv.line(rgb, (math.floor(obb_pts[3,0]), math.floor(obb_pts[3,1])), (math.floor(obb_pts[0,0]), math.floor(obb_pts[0,1])), (255, 0, 0), 2, cv.LINE_8)
            cv.line(rgb, (math.floor(obb_pts[0,0]), math.floor(obb_pts[0,1])), (math.floor(obb_pts[1,0]), math.floor(obb_pts[1,1])), (255, 0, 255), 2, cv.LINE_8)
        cv.imshow('Input Image', rgb)
        #--------------------
        # Rotation.
        rot_size, rot_angle = obb_size, obb_angle
        # TODO [check] >>
        # Swap width/height and shift the angle by 90 degrees so the long
        # side ends up horizontal -- presumably; flagged for verification
        # by the original author.
        if obb_size[0] >= obb_size[1]:
        #if obb_angle < -10 or obb_angle > 10:
            rot_size, rot_angle = obb_size[1::-1], obb_angle + 90
        if False:
            # Too big.
            dia = math.ceil(math.sqrt(rgb.shape[0]**2 + rgb.shape[1]**2))
            R = cv.getRotationMatrix2D(obb_center, angle=rot_angle, scale=1)
            rotated = cv.warpAffine(rgb, R, (dia, dia), flags=cv.INTER_LINEAR)
            rotated_patch = rotated[math.floor(obb_center[1] - rot_size[1] / 2):math.ceil(obb_center[1] + rot_size[1] / 2), math.floor(obb_center[0] - rot_size[0] / 2):math.ceil(obb_center[0] + rot_size[0] / 2)]
        elif False:
            # Trimmed.
            patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(aabb_x1) - 1), max(0, math.floor(aabb_y1) - 1), min(rgb.shape[1], math.ceil(aabb_x2) + 1), min(rgb.shape[0], math.ceil(aabb_y2) + 1)
            #patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(aabb_x1)), max(0, math.floor(aabb_y1)), min(rgb.shape[1], math.ceil(aabb_x2) + 1), min(rgb.shape[0], math.ceil(aabb_y2) + 1)
            patch = rgb[patch_y1:patch_y2, patch_x1:patch_x2]
            ctr = patch.shape[1] / 2, patch.shape[0] / 2
            dia = math.ceil(math.sqrt(patch.shape[0]**2 + patch.shape[1]**2))
            R = cv.getRotationMatrix2D(ctr, angle=rot_angle, scale=1)
            rotated = cv.warpAffine(patch, R, (dia, dia), flags=cv.INTER_LINEAR)
            rotated_patch = rotated[:patch.shape[0],:patch.shape[1]]
        else:
            # Crop a patch large enough to contain the OBB at any rotation
            # (radius = half the OBB diagonal), rotate about the patch
            # center, then cut out the now axis-aligned OBB-sized region.
            radius = math.sqrt(rot_size[0]**2 + rot_size[1]**2) / 2
            dia = math.ceil(radius * 2)
            patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(obb_center[0] - radius) - 1), max(0, math.floor(obb_center[1] - radius) - 1), min(rgb.shape[1], math.ceil(obb_center[0] + radius) + 1), min(rgb.shape[0], math.ceil(obb_center[1] + radius) + 1)
            #patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(obb_center[0] - radius)), max(0, math.floor(obb_center[1] - radius)), min(rgb.shape[1], math.ceil(obb_center[0] + radius) + 1), min(rgb.shape[0], math.ceil(obb_center[1] + radius) + 1)
            patch = rgb[patch_y1:patch_y2, patch_x1:patch_x2]
            ctr = patch.shape[1] / 2, patch.shape[0] / 2
            R = cv.getRotationMatrix2D(ctr, angle=rot_angle, scale=1)
            rotated = cv.warpAffine(patch, R, (dia, dia), flags=cv.INTER_LINEAR)
            rotated_patch = rotated[math.floor(ctr[1] - rot_size[1] / 2):math.ceil(ctr[1] + rot_size[1] / 2), math.floor(ctr[0] - rot_size[0] / 2):math.ceil(ctr[0] + rot_size[0] / 2)]
        cv.imshow('Rotated Image (OBB)', rotated)
        cv.imshow('Rotated Image (OBB, Patched)', rotated_patch)
        # Same deskew, but starting from the AABB crop instead of the OBB.
        patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(aabb_x1) - 1), max(0, math.floor(aabb_y1) - 1), min(rgb.shape[1], math.ceil(aabb_x2) + 1), min(rgb.shape[0], math.ceil(aabb_y2) + 1)
        #patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(aabb_x1)), max(0, math.floor(aabb_y1)), min(rgb.shape[1], math.ceil(aabb_x2) + 1), min(rgb.shape[0], math.ceil(aabb_y2) + 1)
        patch = rgb[patch_y1:patch_y2, patch_x1:patch_x2]
        ctr = patch.shape[1] / 2, patch.shape[0] / 2
        #ctr = tuple(np.array(patch.shape[1::-1]) / 2)
        R = cv.getRotationMatrix2D(ctr, angle=rot_angle, scale=1)
        rotated = cv.warpAffine(patch, R, (300, 150), flags=cv.INTER_LINEAR)
        cv.imshow('Rotated Image (AABB)', rotated)
        import scipy.ndimage
        # Alternative deskew via scipy, which grows the canvas to fit the
        # rotated result (reshape=True).
        rotated_scipy = scipy.ndimage.rotate(patch, rot_angle, reshape=True)
        ctr = rotated_scipy.shape[1] / 2, rotated_scipy.shape[0] / 2
        rotated_scipy_patch = rotated_scipy[math.floor(ctr[1] - rot_size[1] / 2):math.ceil(ctr[1] + rot_size[1] / 2), math.floor(ctr[0] - rot_size[0] / 2):math.ceil(ctr[0] + rot_size[0] / 2)]
        cv.imshow('Rotated Image (scipy)', rotated_scipy)
        cv.imshow('Rotated Image (scipy, Patch)', rotated_scipy_patch)
        #--------------------
        # Affine transformation.
        if False:
            if obb_size[0] >= obb_size[1]:
                target_pts = np.float32([[0, 100], [0, 0], [100, 0], [100, 100]])
            else:
                target_pts = np.float32([[100, 100], [0, 100], [0, 0], [100, 0]])
            canvas_size = 300, 150
        else:
            # Map the OBB corners onto an upright rectangle of the OBB's
            # own size, long side horizontal.
            if obb_size[0] >= obb_size[1]:
                target_pts = np.float32([[0, obb_size[1]], [0, 0], [obb_size[0], 0], [obb_size[0], obb_size[1]]])
                canvas_size = round(obb_size[0]), round(obb_size[1])
            else:
                target_pts = np.float32([[obb_size[1], obb_size[0]], [0, obb_size[0]], [0, 0], [obb_size[1], 0]])
                canvas_size = round(obb_size[1]), round(obb_size[0])
        A = cv.getAffineTransform(obb_pts[:3], target_pts[:3]) # Three points.
        warped = cv.warpAffine(rgb, A, canvas_size, flags=cv.INTER_LINEAR)
        cv.imshow('Affinely Warped Image', warped)
        #--------------------
        # Perspective transformation.
        T = cv.getPerspectiveTransform(obb_pts, target_pts, solveMethod=cv.DECOMP_LU) # Four points.
        warped = cv.warpPerspective(rgb, T, canvas_size, flags=cv.INTER_LINEAR)
        cv.imshow('Perspectively Warped Image', warped)
cv.waitKey(0)
cv.destroyAllWindows()
def measure_time():
    """Benchmark four ways of extracting an oriented box from an image:
    patch rotation with OpenCV, patch rotation with scipy.ndimage, an
    affine warp, and a perspective warp.  Timings are printed to stdout.

    NOTE(review): assumes ``cv`` (OpenCV), ``np`` and ``math`` are imported
    at module level -- not visible in this chunk; confirm before reuse.
    """
    import scipy.ndimage

    # Synthetic test image: a filled magenta ellipse on a black canvas.
    rgb = np.zeros((500, 500, 3), np.uint8)
    cv.ellipse(rgb, (300, 200), (100, 50), 30, 0, 360, (255, 0, 255), cv.FILLED, cv.LINE_8)
    # Coordinates of all non-zero pixels as (x, y) pairs.
    pts = np.stack(np.nonzero(rgb)).transpose()[:,[1, 0]]

    # Rotated rectangle (OBB).
    obb = cv.minAreaRect(pts)  # Tuple: (center, size, angle).
    obb_pts = cv.boxPoints(obb)  # 4 x 2. np.float32.
    #obb_pts = np.int0(obb_pts)
    obb_center, obb_size, obb_angle = obb
    #obb_size, obb_angle = obb_size[1::-1], obb_angle + 90

    import time
    num_iterations = 1000

    print('Start measuring rotation (opencv)...')
    start_time = time.time()
    for _ in range(num_iterations):
        # Crop a square patch large enough to contain the OBB at any angle.
        radius = math.sqrt(obb_size[0]**2 + obb_size[1]**2) / 2
        dia = math.ceil(radius * 2)
        patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(obb_center[0] - radius) - 1), max(0, math.floor(obb_center[1] - radius) - 1), min(rgb.shape[1], math.ceil(obb_center[0] + radius) + 1), min(rgb.shape[0], math.ceil(obb_center[1] + radius) + 1)
        #patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(obb_center[0] - radius)), max(0, math.floor(obb_center[1] - radius)), min(rgb.shape[1], math.ceil(obb_center[0] + radius) + 1), min(rgb.shape[0], math.ceil(obb_center[1] + radius) + 1)
        patch = rgb[patch_y1:patch_y2, patch_x1:patch_x2]
        # Rotate the patch about its own center, then slice out the box.
        ctr = patch.shape[1] / 2, patch.shape[0] / 2
        R = cv.getRotationMatrix2D(ctr, angle=obb_angle, scale=1)
        rotated = cv.warpAffine(patch, R, (dia, dia), flags=cv.INTER_LINEAR)
        rotated_patch = rotated[math.floor(ctr[1] - obb_size[1] / 2):math.ceil(ctr[1] + obb_size[1] / 2), math.floor(ctr[0] - obb_size[0] / 2):math.ceil(ctr[0] + obb_size[0] / 2)]
    print('End measuring rotation (opencv): {} secs.'.format(time.time() - start_time))

    print('Start measuring rotation (scipy)...')
    start_time = time.time()
    for _ in range(num_iterations):
        # Axis-aligned bounding box of the same point set.
        aabb = cv.boundingRect(pts)  # x (left), y (top), width, height.
        aabb_x1, aabb_y1, aabb_x2, aabb_y2 = aabb[0], aabb[1], aabb[0] + aabb[2], aabb[1] + aabb[3]
        patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(aabb_x1) - 1), max(0, math.floor(aabb_y1) - 1), min(rgb.shape[1], math.ceil(aabb_x2) + 1), min(rgb.shape[0], math.ceil(aabb_y2) + 1)
        #patch_x1, patch_y1, patch_x2, patch_y2 = max(0, math.floor(aabb_x1)), max(0, math.floor(aabb_y1)), min(rgb.shape[1], math.ceil(aabb_x2) + 1), min(rgb.shape[0], math.ceil(aabb_y2) + 1)
        patch = rgb[patch_y1:patch_y2, patch_x1:patch_x2]
        rotated_scipy = scipy.ndimage.rotate(patch, obb_angle, reshape=True)
        ctr = rotated_scipy.shape[1] / 2, rotated_scipy.shape[0] / 2
        rotated_scipy_patch = rotated_scipy[math.floor(ctr[1] - obb_size[1] / 2):math.ceil(ctr[1] + obb_size[1] / 2), math.floor(ctr[0] - obb_size[0] / 2):math.ceil(ctr[0] + obb_size[0] / 2)]
    print('End measuring rotation (scipy): {} secs.'.format(time.time() - start_time))

    print('Start measuring affine transformation...')
    start_time = time.time()
    for _ in range(num_iterations):
        # Map the first three OBB corners onto an upright rectangle.
        target_pts = np.float32([[0, obb_size[1]], [0, 0], [obb_size[0], 0]])
        A = cv.getAffineTransform(obb_pts[:3], target_pts)  # Three points.
        warped = cv.warpAffine(rgb, A, (round(obb_size[0]), round(obb_size[1])), flags=cv.INTER_LINEAR)
    print('End measuring affine transformation: {} secs.'.format(time.time() - start_time))

    print('Start measuring perspective transformation...')
    start_time = time.time()
    for _ in range(num_iterations):
        target_pts = np.float32([[0, obb_size[1]], [0, 0], [obb_size[0], 0], [obb_size[0], obb_size[1]]])
        T = cv.getPerspectiveTransform(obb_pts, target_pts, solveMethod=cv.DECOMP_LU)  # Four points.
        warped = cv.warpPerspective(rgb, T, (round(obb_size[0]), round(obb_size[1])), flags=cv.INTER_LINEAR)
    print('End measuring perspective transformation: {} secs.'.format(time.time() - start_time))
def main():
    """Script entry point: run the ellipse transformation demo."""
    # Other demos kept for manual experimentation:
    #geometric_transformation()
    #measure_time()
    ellipse_transformation_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-3.0 |
magne-max/zipline-ja | zipline/lib/labelarray.py | 1 | 20669 | """
An ndarray subclass for working with arrays of strings.
"""
from functools import partial
from operator import eq, ne
import re
import numpy as np
from numpy import ndarray
import pandas as pd
from toolz import compose
from zipline.utils.compat import unicode
from zipline.utils.preprocess import preprocess
from zipline.utils.sentinel import sentinel
from zipline.utils.input_validation import (
coerce,
expect_kinds,
expect_types,
optional,
)
from zipline.utils.numpy_utils import (
bool_dtype,
int_dtype_with_size_in_bytes,
is_object,
)
from ._factorize import (
factorize_strings,
factorize_strings_known_categories,
)
def compare_arrays(left, right):
    """Element-wise equality test, short-circuiting when both names
    refer to the very same array object."""
    if left is right:
        return True
    return (left.shape == right.shape) and (left == right).all()
def _make_unsupported_method(name):
def method(*args, **kwargs):
raise NotImplementedError(
"Method %s is not supported on LabelArrays." % name
)
method.__name__ = name
method.__doc__ = "Unsupported LabelArray Method: %s" % name
return method
class MissingValueMismatch(ValueError):
    """
    Raised when an operation combines LabelArrays whose ``missing_value``
    sentinels differ.
    """
    def __init__(self, left, right):
        message = ("LabelArray missing_values don't match:"
                   " left={}, right={}".format(left, right))
        super(MissingValueMismatch, self).__init__(message)
class CategoryMismatch(ValueError):
    """
    Raised when an operation combines LabelArrays whose category arrays
    differ; reports exactly which indices disagree.
    """
    def __init__(self, left, right):
        (mismatches,) = np.where(left != right)
        assert len(mismatches), "Not actually a mismatch!"
        message = (
            "LabelArray categories don't match:\n"
            "Mismatched Indices: {mismatches}\n"
            "Left: {left}\n"
            "Right: {right}".format(
                mismatches=mismatches,
                left=left[mismatches],
                right=right[mismatches],
            )
        )
        super(CategoryMismatch, self).__init__(message)
# Sentinel distinguishing "argument not supplied" from an explicit ``None``
# (passing None to ndarray.view has different semantics than omitting the
# argument; see LabelArray.view below).
_NotPassed = sentinel('_NotPassed')
class LabelArray(ndarray):
"""
An ndarray subclass for working with arrays of strings.
Factorizes the input array into integers, but overloads equality on strings
to check against the factor label.
Parameters
----------
values : array-like
Array of values that can be passed to np.asarray with dtype=object.
missing_value : str
Scalar value to treat as 'missing' for operations on ``self``.
categories : list[str], optional
List of values to use as categories. If not supplied, categories will
be inferred as the unique set of entries in ``values``.
sort : bool, optional
Whether to sort categories. If sort is False and categories is
supplied, they are left in the order provided. If sort is False and
categories is None, categories will be constructed in a random order.
Attributes
----------
categories : ndarray[str]
An array containing the unique labels of self.
reverse_categories : dict[str -> int]
Reverse lookup table for ``categories``. Stores the index in
``categories`` at which each entry each unique entry is found.
missing_value : str or None
A sentinel missing value with NaN semantics for comparisons.
Notes
-----
Consumers should be cautious when passing instances of LabelArray to numpy
functions. We attempt to disallow as many meaningless operations as
possible, but since a LabelArray is just an ndarray of ints with some
additional metadata, many numpy functions (for example, trigonometric) will
happily accept a LabelArray and treat its values as though they were
integers.
In a future change, we may be able to disallow more numerical operations by
creating a wrapper dtype which doesn't register an implementation for most
numpy ufuncs. Until that change is made, consumers of LabelArray should
assume that it is undefined behavior to pass a LabelArray to any numpy
ufunc that operates on semantically-numerical data.
See Also
--------
http://docs.scipy.org/doc/numpy-1.10.0/user/basics.subclassing.html
"""
SUPPORTED_SCALAR_TYPES = (bytes, unicode, type(None))
@preprocess(
values=coerce(list, partial(np.asarray, dtype=object)),
categories=coerce(np.ndarray, list),
)
@expect_types(
values=np.ndarray,
missing_value=SUPPORTED_SCALAR_TYPES,
categories=optional(list),
)
@expect_kinds(values=("O", "S", "U"))
def __new__(cls,
values,
missing_value,
categories=None,
sort=True):
# Numpy's fixed-width string types aren't very efficient. Working with
# object arrays is faster than bytes or unicode arrays in almost all
# cases.
if not is_object(values):
values = values.astype(object)
if categories is None:
codes, categories, reverse_categories = factorize_strings(
values.ravel(),
missing_value=missing_value,
sort=sort,
)
else:
codes, categories, reverse_categories = (
factorize_strings_known_categories(
values.ravel(),
categories=categories,
missing_value=missing_value,
sort=sort,
)
)
categories.setflags(write=False)
return cls._from_codes_and_metadata(
codes=codes.reshape(values.shape),
categories=categories,
reverse_categories=reverse_categories,
missing_value=missing_value,
)
@classmethod
def _from_codes_and_metadata(cls,
codes,
categories,
reverse_categories,
missing_value):
"""
View codes as a LabelArray and set LabelArray metadata on the result.
"""
ret = codes.view(type=cls, dtype=np.void)
ret._categories = categories
ret._reverse_categories = reverse_categories
ret._missing_value = missing_value
return ret
@property
def categories(self):
# This is a property because it should be immutable.
return self._categories
@property
def reverse_categories(self):
# This is a property because it should be immutable.
return self._reverse_categories
@property
def missing_value(self):
# This is a property because it should be immutable.
return self._missing_value
@property
def missing_value_code(self):
return self.reverse_categories[self.missing_value]
def has_label(self, value):
return value in self.reverse_categories
def __array_finalize__(self, obj):
"""
Called by Numpy after array construction.
There are three cases where this can happen:
1. Someone tries to directly construct a new array by doing::
>>> ndarray.__new__(LabelArray, ...) # doctest: +SKIP
In this case, obj will be None. We treat this as an error case and
fail.
2. Someone (most likely our own __new__) calls
other_array.view(type=LabelArray).
In this case, `self` will be the new LabelArray instance, and
``obj` will be the array on which ``view`` is being called.
The caller of ``obj.view`` is responsible for setting category
metadata on ``self`` after we exit.
3. Someone creates a new LabelArray by slicing an existing one.
In this case, ``obj`` will be the original LabelArray. We're
responsible for copying over the parent array's category metadata.
"""
if obj is None:
raise TypeError(
"Direct construction of LabelArrays is not supported."
)
# See docstring for an explanation of when these will or will not be
# set.
self._categories = getattr(obj, 'categories', None)
self._reverse_categories = getattr(obj, 'reverse_categories', None)
self._missing_value = getattr(obj, 'missing_value', None)
def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
"""
return self.view(
type=ndarray,
dtype=int_dtype_with_size_in_bytes(self.itemsize),
)
def as_string_array(self):
"""
Convert self back into an array of strings.
This is an O(N) operation.
"""
return self.categories[self.as_int_array()]
def as_categorical(self, name=None):
"""
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
"""
if len(self.shape) > 1:
raise ValueError("Can't convert a 2D array to a categorical.")
return pd.Categorical.from_codes(
self.as_int_array(),
# We need to make a copy because pandas >= 0.17 fails if this
# buffer isn't writeable.
self.categories.copy(),
ordered=False,
name=name,
)
def as_categorical_frame(self, index, columns, name=None):
"""
Coerce self into a pandas DataFrame of Categoricals.
"""
if len(self.shape) != 2:
raise ValueError(
"Can't convert a non-2D LabelArray into a DataFrame."
)
expected_shape = (len(index), len(columns))
if expected_shape != self.shape:
raise ValueError(
"Can't construct a DataFrame with provided indices:\n\n"
"LabelArray shape is {actual}, but index and columns imply "
"that shape should be {expected}.".format(
actual=self.shape,
expected=expected_shape,
)
)
return pd.Series(
index=pd.MultiIndex.from_product([index, columns]),
data=self.ravel().as_categorical(name=name),
).unstack()
def __setitem__(self, indexer, value):
self_categories = self.categories
if isinstance(value, LabelArray):
value_categories = value.categories
if compare_arrays(self_categories, value_categories):
return super(LabelArray, self).__setitem__(indexer, value)
else:
raise CategoryMismatch(self_categories, value_categories)
elif isinstance(value, self.SUPPORTED_SCALAR_TYPES):
value_code = self.reverse_categories.get(value, -1)
if value_code < 0:
raise ValueError("%r is not in LabelArray categories." % value)
self.as_int_array()[indexer] = value_code
else:
raise NotImplementedError(
"Setting into a LabelArray with a value of "
"type {type} is not yet supported.".format(
type=type(value).__name__,
),
)
def __setslice__(self, i, j, sequence):
"""
This method was deprecated in Python 2.0. It predates slice objects,
but Python 2.7.11 still uses it if you implement it, which ndarray
does. In newer Pythons, __setitem__ is always called, but we need to
manuallly forward in py2.
"""
self.__setitem__(slice(i, j), sequence)
def __getitem__(self, indexer):
result = super(LabelArray, self).__getitem__(indexer)
if result.ndim:
# Result is still a LabelArray, so we can just return it.
return result
# Result is a scalar value, which will be an instance of np.void.
# Map it back to one of our category entries.
index = result.view(int_dtype_with_size_in_bytes(self.itemsize))
return self.categories[index]
def is_missing(self):
"""
Like isnan, but checks for locations where we store missing values.
"""
return (
self.as_int_array() == self.reverse_categories[self.missing_value]
)
def not_missing(self):
"""
Like ~isnan, but checks for locations where we store missing values.
"""
return (
self.as_int_array() != self.reverse_categories[self.missing_value]
)
def _equality_check(op):
"""
Shared code for __eq__ and __ne__, parameterized on the actual
comparison operator to use.
"""
def method(self, other):
if isinstance(other, LabelArray):
self_mv = self.missing_value
other_mv = other.missing_value
if self_mv != other_mv:
raise MissingValueMismatch(self_mv, other_mv)
self_categories = self.categories
other_categories = other.categories
if not compare_arrays(self_categories, other_categories):
raise CategoryMismatch(self_categories, other_categories)
return (
op(self.as_int_array(), other.as_int_array())
& self.not_missing()
& other.not_missing()
)
elif isinstance(other, ndarray):
# Compare to ndarrays as though we were an array of strings.
# This is fairly expensive, and should generally be avoided.
return op(self.as_string_array(), other) & self.not_missing()
elif isinstance(other, self.SUPPORTED_SCALAR_TYPES):
i = self._reverse_categories.get(other, -1)
return op(self.as_int_array(), i) & self.not_missing()
return op(super(LabelArray, self), other)
return method
__eq__ = _equality_check(eq)
__ne__ = _equality_check(ne)
del _equality_check
def view(self, dtype=_NotPassed, type=_NotPassed):
if type is _NotPassed and dtype not in (_NotPassed, self.dtype):
raise TypeError("Can't view LabelArray as another dtype.")
# The text signature on ndarray.view makes it look like the default
# values for dtype and type are `None`, but passing None explicitly has
# different semantics than not passing an arg at all, so we reconstruct
# the kwargs dict here to simulate the args not being passed at all.
kwargs = {}
if dtype is not _NotPassed:
kwargs['dtype'] = dtype
if type is not _NotPassed:
kwargs['type'] = type
return super(LabelArray, self).view(**kwargs)
# In general, we support resizing, slicing, and reshaping methods, but not
# numeric methods.
SUPPORTED_NDARRAY_METHODS = frozenset([
'base',
'compress',
'copy',
'data',
'diagonal',
'dtype',
'flat',
'flatten',
'item',
'itemset',
'itemsize',
'nbytes',
'ndim',
'ravel',
'repeat',
'reshape',
'resize',
'setflags',
'shape',
'size',
'squeeze',
'strides',
'swapaxes',
'take',
'trace',
'transpose',
'view'
])
PUBLIC_NDARRAY_METHODS = frozenset([
s for s in dir(ndarray) if not s.startswith('_')
])
# Generate failing wrappers for all unsupported methods.
locals().update(
{
method: _make_unsupported_method(method)
for method in PUBLIC_NDARRAY_METHODS - SUPPORTED_NDARRAY_METHODS
}
)
def __repr__(self):
# This happens if you call a ufunc on a LabelArray that changes the
# dtype. This is generally an indicator that the array has been used
# incorrectly, and it means we're no longer valid for anything.
repr_lines = repr(self.as_string_array()).splitlines()
repr_lines[0] = repr_lines[0].replace('array(', 'LabelArray(', 1)
repr_lines[-1] = repr_lines[-1].rsplit(',', 1)[0] + ')'
# The extra spaces here account for the difference in length between
# 'array(' and 'LabelArray('.
return '\n '.join(repr_lines)
def empty_like(self, shape):
"""
Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``.
"""
return type(self)._from_codes_and_metadata(
codes=np.full(
shape,
self.reverse_categories[self.missing_value],
dtype=int_dtype_with_size_in_bytes(self.itemsize),
),
categories=self.categories,
reverse_categories=self.reverse_categories,
missing_value=self.missing_value,
)
def map_predicate(self, f):
"""
Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False.
"""
# Functions passed to this are of type str -> bool. Don't ever call
# them on None, which is the only non-str value we ever store in
# categories.
if self.missing_value is None:
f_to_use = lambda x: False if x is None else f(x)
else:
f_to_use = f
# Call f on each unique value in our categories.
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
# missing_value should produce False no matter what
results[self.reverse_categories[self.missing_value]] = False
# unpack the results form each unique value into their corresponding
# locations in our indices.
return results[self.as_int_array()]
def startswith(self, prefix):
"""
Element-wise startswith.
Parameters
----------
prefix : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self started with ``prefix``.
"""
return self.map_predicate(lambda elem: elem.startswith(prefix))
def endswith(self, suffix):
"""
Elementwise endswith.
Parameters
----------
suffix : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self ended with ``suffix``
"""
return self.map_predicate(lambda elem: elem.endswith(suffix))
def has_substring(self, substring):
"""
Elementwise contains.
Parameters
----------
substring : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self ended with ``suffix``.
"""
return self.map_predicate(lambda elem: substring in elem)
@preprocess(pattern=coerce(from_=(bytes, unicode), to=re.compile))
def matches(self, pattern):
"""
Elementwise regex match.
Parameters
----------
pattern : str or compiled regex
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self was matched by ``pattern``.
"""
return self.map_predicate(compose(bool, pattern.match))
# These types all implement an O(N) __contains__, so pre-emptively
# coerce to `set`.
@preprocess(container=coerce((list, tuple, np.ndarray), set))
def element_of(self, container):
"""
Check if each element of self is an of ``container``.
Parameters
----------
container : object
An object implementing a __contains__ to call on each element of
``self``.
Returns
-------
is_contained : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self was an element of ``container``.
"""
return self.map_predicate(container.__contains__)
| apache-2.0 |
giserh/mpld3 | mpld3/__init__.py | 20 | 1109 | """
Interactive D3 rendering of matplotlib images
=============================================
Functions: General Use
----------------------
:func:`fig_to_html`
convert a figure to an html string
:func:`fig_to_dict`
convert a figure to a dictionary representation
:func:`show`
launch a web server to view an d3/html figure representation
:func:`save_html`
save a figure to an html file
:func:`save_json`
save a JSON representation of a figure to file
Functions: IPython Notebook
---------------------------
:func:`display`
display a figure in an IPython notebook
:func:`enable_notebook`
enable automatic D3 display of figures in the IPython notebook.
:func:`disable_notebook`
disable automatic D3 display of figures in the IPython
"""
__all__ = ["__version__",
"fig_to_html", "fig_to_dict", "fig_to_d3", "display_d3",
"display", "show_d3", "show", "save_html", "save_json",
"enable_notebook", "disable_notebook", "plugins", "urls"]
from .__about__ import __version__
from . import plugins
from . import urls
from ._display import *
| bsd-3-clause |
EnvGen/toolbox | scripts/concoct/nr_approved_bins_summary.py | 1 | 1775 | #!/usr/bin/env python
"""
Based on all checkm results, creates a table containing the nr of approved genomes
for all binning runs it can find within binning/*.
@author: alneberg
"""
from __future__ import print_function
import sys
import os
import argparse
import pandas as pd
import glob
def find_checkm_dirs():
    """Locate every checkm ``stats.tsv`` under ``binning/`` and parse run
    metadata out of each path's components.

    Returns a dict mapping each stats-file path to a dict with keys
    ``binner``, ``sample``, ``quant``, ``run_params``, ``SpeedUp`` and
    ``standardize``.
    """
    runs = {}
    pattern = "binning/*/*/output_*/*/checkm_output/stats.tsv"
    for path in glob.glob(pattern):
        parts = path.split('/')
        runs[path] = {
            "binner": parts[1],
            "sample": parts[2],
            # Drop the leading "output_" token and rejoin the rest.
            "quant": "_".join(parts[3].split('_')[1:]),
            "run_params": parts[4],
            'SpeedUp': 'SpeedUp_Mp' in parts[4],
            'standardize': 'standardize' in parts[4],
        }
    return runs
def main(args):
    """Count approved genomes per checkm run and write a TSV summary
    to stdout.

    A bin is approved when Completeness >= args.min_completeness and
    Contamination <= args.max_contamination.
    """
    runs = find_checkm_dirs()
    for path, meta in runs.items():
        # Read in the checkm table for this run.
        stats = pd.read_table(path, index_col=0)
        approved = ((stats['Completeness'] >= args.min_completeness)
                    & (stats['Contamination'] <= args.max_contamination))
        meta['nr_approved'] = len(stats[approved])

    summary = pd.DataFrame.from_dict(runs, orient='index')
    summary.to_csv(sys.stdout, sep='\t',
                   columns=['binner', 'sample', 'quant', 'run_params',
                            'SpeedUp', 'standardize', 'nr_approved'],
                   index_label='path')
if __name__ == "__main__":
    # Command-line entry point: parse the approval thresholds and run
    # the summary over whatever checkm output exists under binning/.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--min_completeness", default=70, type=float, help="default=70")
    parser.add_argument("--max_contamination", default=5, type=float, help="default=5")
    args = parser.parse_args()
    main(args)
| mit |
landryb/QGIS | python/plugins/processing/algs/qgis/VectorLayerScatterplot.py | 15 | 3160 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EquivalentNumField.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class VectorLayerScatterplot(GeoAlgorithm):
    """Processing algorithm that renders a scatterplot of two numeric
    attributes of a vector layer and embeds the image in an HTML report.
    """

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    XFIELD = 'XFIELD'
    YFIELD = 'YFIELD'

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Vector layer scatterplot')
        self.group, self.i18n_group = self.trAlgorithm('Graphics')

        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterTableField(self.XFIELD,
                                              self.tr('X attribute'), self.INPUT,
                                              ParameterTableField.DATA_TYPE_NUMBER))
        self.addParameter(ParameterTableField(self.YFIELD,
                                              self.tr('Y attribute'), self.INPUT,
                                              ParameterTableField.DATA_TYPE_NUMBER))

        self.addOutput(OutputHTML(self.OUTPUT, self.tr('Scatterplot')))

    def processAlgorithm(self, progress):
        """Read the two attribute columns, plot them, and write an HTML
        page referencing the saved plot image."""
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT))
        xfieldname = self.getParameterValue(self.XFIELD)
        yfieldname = self.getParameterValue(self.YFIELD)
        output = self.getOutputValue(self.OUTPUT)

        values = vector.values(layer, xfieldname, yfieldname)
        plt.close()  # discard any figure left over from a previous run
        plt.scatter(values[xfieldname], values[yfieldname])
        plt.ylabel(yfieldname)
        plt.xlabel(xfieldname)
        plotFilename = output + '.png'
        lab.savefig(plotFilename)
        # Fix: use a context manager so the file handle is closed even if
        # the write raises (the original leaked the handle on error).
        with open(output, 'w') as f:
            f.write('<html><img src="' + plotFilename + '"/></html>')
| gpl-2.0 |
saiwing-yeung/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
# Reproducible synthetic data: two concentric circles, which are not
# linearly separable in the original 2-D space.
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)

# Kernel PCA with an RBF kernel; fit_inverse_transform=True lets us map
# projected points back into the original space below.
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
# Plain (linear) PCA for comparison.
pca = PCA()
X_pca = pca.fit_transform(X)

# Plot results

plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0  # boolean masks selecting the two classes
blues = y == 1

plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")

# Dense grid over the input plane, flattened to (n_points, 2) for transform.
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')

plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")

plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")

plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")

plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)

plt.show()
| bsd-3-clause |
nvoron23/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build configuration for the ``sklearn.svm`` C/C++ extensions.

    Compiles the bundled libsvm once as a static helper library
    (``libsvm-skl``) and links both the dense and sparse wrappers against
    it; liblinear is built separately and linked against CBLAS.
    """
    from numpy.distutils.misc_util import Configuration

    config = Configuration('svm', parent_package, top_path)

    config.add_subpackage('tests')

    # Section LibSVM

    # we compile both libsvm and libsvm_sparse
    config.add_library('libsvm-skl',
                       sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
                       depends=[join('src', 'libsvm', 'svm.cpp'),
                                join('src', 'libsvm', 'svm.h')],
                       # Force C++ linking in case gcc is picked up instead
                       # of g++ under windows with some versions of MinGW
                       extra_link_args=['-lstdc++'],
                       )

    libsvm_sources = ['libsvm.c']
    libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
                      join('src', 'libsvm', 'libsvm_template.cpp'),
                      join('src', 'libsvm', 'svm.cpp'),
                      join('src', 'libsvm', 'svm.h')]

    config.add_extension('libsvm',
                         sources=libsvm_sources,
                         include_dirs=[numpy.get_include(),
                                       join('src', 'libsvm')],
                         libraries=['libsvm-skl'],
                         depends=libsvm_depends,
                         )

    ### liblinear module
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        # libm is linked explicitly on POSIX platforms.
        cblas_libs.append('m')

    liblinear_sources = ['liblinear.c',
                         join('src', 'liblinear', '*.cpp')]
    liblinear_depends = [join('src', 'liblinear', '*.h'),
                         join('src', 'liblinear', 'liblinear_helper.c')]

    config.add_extension('liblinear',
                         sources=liblinear_sources,
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         depends=liblinear_depends,
                         # extra_compile_args=['-O0 -fno-inline'],
                         ** blas_info)

    ## end liblinear module

    # this should go *after* libsvm-skl
    libsvm_sparse_sources = ['libsvm_sparse.c']
    config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
                         sources=libsvm_sparse_sources,
                         include_dirs=[numpy.get_include(),
                                       join("src", "libsvm")],
                         depends=[join("src", "libsvm", "svm.h"),
                                  join("src", "libsvm",
                                       "libsvm_sparse_helper.c")])

    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
snfactory/cubefit | cubefit/tests/test_fitting.py | 2 | 12205 | #/usr/bin/env py.test
from __future__ import print_function
import os
import sys
import sysconfig
import numpy as np
from numpy.fft import fft2, ifft2
from numpy.testing import assert_allclose
from scipy.optimize import check_grad, approx_fprime
import cubefit
from cubefit.fitting import (sky_and_sn,
chisq_galaxy_single,
chisq_galaxy_sky_multi,
chisq_position_sky_sn_multi)
# -----------------------------------------------------------------------------
# Helper functions
def assert_real(x):
    """Raise RuntimeError if any element of ``x`` has a significant
    imaginary part (|imag/real| >= 1e-3)."""
    # All-zero arrays are trivially real (and would hit 0/0 below).
    if np.all((x.imag == 0.) & (x.real == 0.)):
        return
    frac = np.abs(x.imag / x.real)
    if not np.all(frac < 1.e-3):
        raise RuntimeError("array not real: max imag/real = {:g}"
                           .format(np.max(frac)))
def fftconvolve(x, kernel):
    """Convolve 2-d array ``x`` with ``kernel``, where the kernel is
    *centered* within its own array."""
    ky, kx = kernel.shape
    center_x, center_y = (kx - 1) / 2., (ky - 1) / 2.

    # Phasor that shifts the kernel's center to the (0, 0) corner.
    fshift = cubefit.fft_shift_phasor_2d(kernel.shape, (-center_x, -center_y))

    return ifft2(fft2(kernel) * fft2(x) * fshift).real
def plot_gradient(im, fname, **kwargs):
    """Debug helper: render ``im`` to ``fname`` and clear the figure."""
    from matplotlib import pyplot

    pyplot.imshow(im, cmap="bone", interpolation="nearest", origin="lower",
                  **kwargs)
    pyplot.colorbar()
    pyplot.savefig(fname)
    pyplot.clf()
# -----------------------------------------------------------------------------
def test_sky_and_sn():
    """sky_and_sn should recover known sky and SN levels from synthetic data."""
    nw, ny, nx = 10, 5, 5
    truesky = np.full(nw, 3.)
    truesn = np.full(nw, 2.)

    galmodel = np.ones((nw, ny, nx))  # fake galaxy, after convolution with PSF
    snmodel = np.zeros((nw, ny, nx))
    snmodel[:, 3, 3] = 1.             # psf is a single pixel

    data = (galmodel
            + truesky[:, None, None]
            + truesn[:, None, None] * snmodel)
    weight = np.ones_like(data)

    sky, sn = cubefit.fitting.sky_and_sn(data, weight, galmodel, snmodel)
    assert_allclose(sky, truesky)
    assert_allclose(sn, truesn)
class TestFitting:
    """End-to-end checks of cubefit's gradients on small synthetic cubes."""

    def setup_class(self):
        """Create some dummy data and a PSF."""
        # some settings
        MODEL_SHAPE = (32, 32)
        nt = 3   # number of epochs / data cubes
        nw = 3   # number of wavelength slices
        ny = 15
        nx = 15
        yoffs = np.array([7, 8, 9])  # offset between model and data
        xoffs = np.array([8, 9, 7])

        # True data yctr, xctr given offset
        self.trueyctrs = yoffs + (ny-1)/2. - (MODEL_SHAPE[0]-1)/2.
        self.truexctrs = xoffs + (nx-1)/2. - (MODEL_SHAPE[1]-1)/2.

        # Create a "true" underlying galaxy. This can be anything, but it
        # should not be all zeros or flat.
        ellip = 4.5 * np.ones(nw)
        alpha = 6.0 * np.ones(nw)
        sigma = 6.0 * np.ones(nw)
        beta = 2. * np.ones(nw)
        eta = 1.04 * np.ones(nw)
        truegal = cubefit.psffuncs.gaussian_moffat_psf(
            sigma, alpha, beta, ellip, eta,
            np.zeros(nw) - 2., np.zeros(nw) - 2., MODEL_SHAPE)

        # Create a PSF.
        ellip = 1.5 * np.ones(nw)
        alpha = 2.0 * np.ones(nw)
        sigma = 2.0 * np.ones(nw)
        beta = 2. * np.ones(nw)
        eta = 1.04 * np.ones(nw)
        yctr = np.zeros(nw)
        xctr = np.zeros(nw)
        A = cubefit.psffuncs.gaussian_moffat_psf(sigma, alpha, beta, ellip,
                                                 eta, yctr, xctr, MODEL_SHAPE)
        self.psf = cubefit.TabularPSF(A)

        # create the data by convolving the true galaxy model with the psf
        # and taking a slice.
        cubes = []
        for j in range(nt):
            data = np.empty((nw, ny, nx), dtype=np.float32)
            for i in range(nw):
                data_2d = fftconvolve(truegal[i], A[i])
                data[i, :, :] = data_2d[yoffs[j]:yoffs[j]+ny,
                                        xoffs[j]:xoffs[j]+nx]
            cubes.append(cubefit.DataCube(data, np.ones_like(data),
                                          np.ones(nw)))
        self.cubes = cubes

        # initialize galaxy model
        self.galaxy = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]))
        self.truegal = truegal

    def test_chisq_galaxy_single_gradient(self):
        """Test that gradient function (used in galaxy fitting) returns value
        close to what you get with a finite differences method.
        """
        EPS = 1.e-7
        data = self.cubes[0].data
        weight = self.cubes[0].weight
        psf = self.psf
        ctr = (0., 0.)

        # analytic gradient is `grad`
        val, grad = chisq_galaxy_single(self.galaxy, data, weight, ctr, psf)

        # save data - model residuals for finite differences chi^2 gradient.
        # need to carry out subtraction in float64 to avoid round-off errors.
        scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3], ctr)
        r0 = data.astype(np.float64) - scene

        # finite differences gradient: alter each element by EPS one
        # at a time and recalculate chisq.
        fdgrad = np.zeros_like(self.galaxy)
        nk, nj, ni = self.galaxy.shape
        for k in range(nk):
            for j in range(nj):
                for i in range(ni):
                    self.galaxy[k, j, i] += EPS
                    scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3],
                                                ctr)
                    self.galaxy[k, j, i] -= EPS  # reset model value.

                    # NOTE: rather than calculating
                    # chisq1 - chisq0 = sum(w * r1^2) - sum(w * r0^2)
                    # we calculate
                    # sum(w * (r1^2 - r0^2))
                    # which is the same quantity but avoids summing large
                    # numbers.
                    r1 = data.astype(np.float64) - scene
                    chisq_diff = np.sum(weight * (r1**2 - r0**2))
                    fdgrad[k, j, i] = chisq_diff / EPS

        assert_allclose(grad, fdgrad, rtol=0.001, atol=0.)

    def test_chisq_galaxy_sky_multi_gradient(self):
        """Test that gradient function (used in galaxy fitting) returns value
        close to what you get with a finite differences method.
        """
        EPS = 1.e-8
        datas = [self.cubes[0].data]
        weights = [self.cubes[0].weight]
        psfs = [self.psf]
        ctrs = [(0., 0.)]

        # analytic gradient is `grad`
        _, grad = chisq_galaxy_sky_multi(self.galaxy, datas, weights, ctrs,
                                         psfs)

        # NOTE: Following is specific to only having one cube!
        data = datas[0]
        weight = weights[0]
        psf = psfs[0]
        ctr = ctrs[0]

        # save data - model residuals for finite differences chi^2 gradient.
        # need to carry out subtraction in float64 to avoid round-off errors.
        scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3], ctr)
        r0 = data.astype(np.float64) - scene
        sky = np.average(r0, weights=weight, axis=(1, 2))
        r0 -= sky[:, None, None]

        # finite differences gradient: alter each element by EPS one
        # at a time and recalculate chisq.
        fdgrad = np.zeros_like(self.galaxy)
        nk, nj, ni = self.galaxy.shape
        for k in range(nk):
            for j in range(nj):
                for i in range(ni):
                    self.galaxy[k, j, i] += EPS
                    scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3],
                                                ctr)
                    self.galaxy[k, j, i] -= EPS  # reset model value.

                    # NOTE: rather than calculating
                    # chisq1 - chisq0 = sum(w * r1^2) - sum(w * r0^2)
                    # we calculate
                    # sum(w * (r1^2 - r0^2))
                    # which is the same quantity but avoids summing large
                    # numbers.
                    r1 = data.astype(np.float64) - scene
                    sky = np.average(r1, weights=weight, axis=(1, 2))
                    r1 -= sky[:, None, None]
                    chisq_diff = np.sum(weight * (r1**2 - r0**2))
                    fdgrad[k, j, i] = chisq_diff / EPS

        assert_allclose(grad, fdgrad, rtol=0.005, atol=0.)

    def pixel_regpenalty_diff(self, regpenalty, galmodel, k, j, i, eps):
        """What is the difference in the regpenalty caused by changing
        galmodel[k, j, i] by EPS?"""

        def galnorm(k, j, i, eps=0.0):
            return ((galmodel[k, j, i] + eps - regpenalty.galprior[k, j, i]) /
                    regpenalty.mean_gal_spec[k])

        dchisq = 0.
        if k > 0:
            d0 = galnorm(k, j, i) - galnorm(k-1, j, i)
            d1 = galnorm(k, j, i, eps) - galnorm(k-1, j, i)
            dchisq += regpenalty.mu_wave * (d1**2 - d0**2)
        if k < galmodel.shape[0] - 1:
            d0 = galnorm(k+1, j, i) - galnorm(k, j, i)
            # BUGFIX: was `galnorm(k, j, i + eps)`, which added the float
            # perturbation to the pixel *index* rather than passing it as
            # the `eps` argument (cf. the other five neighbor cases).
            d1 = galnorm(k+1, j, i) - galnorm(k, j, i, eps)
            dchisq += regpenalty.mu_wave * (d1**2 - d0**2)
        if j > 0:
            d0 = galnorm(k, j, i) - galnorm(k, j-1, i)
            d1 = galnorm(k, j, i, eps) - galnorm(k, j-1, i)
            dchisq += regpenalty.mu_xy * (d1**2 - d0**2)
        if j < galmodel.shape[1] - 1:
            d0 = galnorm(k, j+1, i) - galnorm(k, j, i)
            d1 = galnorm(k, j+1, i) - galnorm(k, j, i, eps)
            dchisq += regpenalty.mu_xy * (d1**2 - d0**2)
        if i > 0:
            d0 = galnorm(k, j, i) - galnorm(k, j, i-1)
            d1 = galnorm(k, j, i, eps) - galnorm(k, j, i-1)
            dchisq += regpenalty.mu_xy * (d1**2 - d0**2)
        if i < galmodel.shape[2] - 1:
            d0 = galnorm(k, j, i+1) - galnorm(k, j, i)
            d1 = galnorm(k, j, i+1) - galnorm(k, j, i, eps)
            dchisq += regpenalty.mu_xy * (d1**2 - d0**2)

        return dchisq

    def test_regularization_penalty_gradient(self):
        """Ensure that regularization penalty gradient matches what you
        get with a finite-differences approach."""
        EPS = 1.e-10
        mu_wave = 0.07
        mu_xy = 0.001

        # set galaxy model to best-fit (so that it is not all zeros!)
        self.galaxy[:, :, :] = self.truegal

        mean_gal_spec = np.average(self.cubes[0].data, axis=(1, 2))
        galprior = np.zeros_like(self.galaxy)
        regpenalty = cubefit.RegularizationPenalty(galprior, mean_gal_spec,
                                                   mu_xy, mu_wave)
        _, grad = regpenalty(self.galaxy)

        fdgrad = np.zeros_like(self.galaxy)
        nk, nj, ni = self.galaxy.shape
        for k in range(nk):
            for j in range(nj):
                for i in range(ni):
                    fdgrad[k, j, i] = self.pixel_regpenalty_diff(
                        regpenalty, self.galaxy, k, j, i, EPS) / EPS

        rtol = 0.001
        atol = 1.e-5 * np.max(np.abs(fdgrad))
        assert_allclose(grad, fdgrad, rtol=rtol, atol=atol)

    def test_point_source(self):
        """Test that evaluate_point_source returns the expected point source.
        """
        # Smoke test only: checks the call succeeds without asserting the
        # returned array's contents.
        psf = self.psf.point_source((0., 0.), (15, 15), (0., 0.))

    def test_fit_position_grad(self):
        """Test the gradient of the sn and sky position fitting function
        """
        def func_part(ctrs, galaxy, datas, weights, psfs):
            # scalar objective only (for finite-difference comparison)
            chisq, grad = chisq_position_sky_sn_multi(ctrs, galaxy,
                                                      datas, weights, psfs)
            return chisq

        def grad_part(ctrs, galaxy, datas, weights, psfs):
            # analytic gradient only
            chisq, grad = chisq_position_sky_sn_multi(ctrs, galaxy,
                                                      datas, weights, psfs)
            return grad

        x0s = np.zeros(8)
        datas = [cube.data for cube in self.cubes]
        weights = [cube.weight for cube in self.cubes]
        psfs = [self.psf for cube in self.cubes]
        code_grad = grad_part(x0s, self.galaxy, datas, weights, psfs)
        test_grad = approx_fprime(x0s, func_part, np.sqrt(np.finfo(float).eps),
                                  self.galaxy, datas, weights, psfs)
        assert_allclose(code_grad[:-2], test_grad[:-2], rtol=0.005)
| mit |
Averroes/statsmodels | statsmodels/iolib/summary2.py | 21 | 19583 | from statsmodels.compat.python import (lrange, iterkeys, iteritems, lzip,
reduce, itervalues, zip, string_types,
range)
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
import datetime
import textwrap
from .table import SimpleTable
from .tableformatting import fmt_latex, fmt_txt
class Summary(object):
    """Accumulate tables and notes, then render them as text, HTML or LaTeX."""

    def __init__(self):
        self.tables = []      # DataFrames to be rendered
        self.settings = []    # per-table rendering options (parallel list)
        self.extra_txt = []   # notes appended below the tables
        self.title = None

    def __str__(self):
        return self.as_text()

    def __repr__(self):
        return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'

    def _repr_html_(self):
        '''Display as HTML in IPython notebook.'''
        return self.as_html()

    def add_df(self, df, index=True, header=True, float_format='%.4f',
               align='r'):
        '''Add the contents of a DataFrame to summary table

        Parameters
        ----------
        df : DataFrame
        header: bool
            Reproduce the DataFrame column labels in summary table
        index: bool
            Reproduce the DataFrame row labels in summary table
        float_format: string
            Formatting to float data columns
        align : string
            Data alignment (l/c/r)
        '''
        settings = {'index': index, 'header': header,
                    'float_format': float_format, 'align': align}
        self.tables.append(df)
        self.settings.append(settings)

    def add_array(self, array, align='r', float_format="%.4f"):
        '''Add the contents of a Numpy array to summary table

        Parameters
        ----------
        array : numpy array (2D)
        float_format: string
            Formatting to array if type is float
        align : string
            Data alignment (l/c/r)
        '''
        table = pd.DataFrame(array)
        self.add_df(table, index=False, header=False,
                    float_format=float_format, align=align)

    def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
        '''Add the contents of a Dict to summary table

        Parameters
        ----------
        d : dict
            Keys and values are automatically coerced to strings with str().
            Users are encouraged to format them before using add_dict.
        ncols: int
            Number of columns of the output table
        align : string
            Data alignment (l/c/r)
        '''
        keys = [_formatter(x, float_format) for x in iterkeys(d)]
        vals = [_formatter(x, float_format) for x in itervalues(d)]
        data = np.array(lzip(keys, vals))

        # Pad with empty cells so rows divide evenly into ncols columns.
        if data.shape[0] % ncols != 0:
            pad = ncols - (data.shape[0] % ncols)
            data = np.vstack([data, np.array(pad * [['', '']])])

        data = np.split(data, ncols)
        data = reduce(lambda x, y: np.hstack([x, y]), data)
        self.add_array(data, align=align)

    def add_text(self, string):
        '''Append a note to the bottom of the summary table. In ASCII tables,
        the note will be wrapped to table width. Notes are not indendented.
        '''
        self.extra_txt.append(string)

    def add_title(self, title=None, results=None):
        '''Insert a title on top of the summary table. If a string is provided
        in the title argument, that string is printed. If no title string is
        provided but a results instance is provided, statsmodels attempts
        to construct a useful title automatically.
        '''
        if isinstance(title, string_types):
            self.title = title
        else:
            try:
                model = results.model.__class__.__name__
                if model in _model_types:
                    model = _model_types[model]
                self.title = 'Results: ' + model
            # Narrowed from a bare `except:` so unrelated errors surface;
            # AttributeError covers `results` being None or model-less.
            except AttributeError:
                self.title = ''

    def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
                 xname=None, yname=None):
        '''Try to construct a basic summary instance.

        Parameters
        ----------
        results : Model results instance
        alpha : float
            significance level for the confidence intervals (optional)
        float_formatting: string
            Float formatting for summary of parameters (optional)
        title : string
            Title of the summary table (optional)
        xname : List of strings of length equal to the number of parameters
            Names of the independent variables (optional)
        yname : string
            Name of the dependent variable (optional)
        '''
        param = summary_params(results, alpha=alpha, use_t=results.use_t)
        info = summary_model(results)
        if xname is not None:
            param.index = xname
        if yname is not None:
            info['Dependent Variable:'] = yname
        self.add_dict(info, align='l')
        self.add_df(param, float_format=float_format)
        self.add_title(title=title, results=results)

    def as_text(self):
        '''Generate ASCII Summary Table
        '''
        tables = self.tables
        settings = self.settings
        title = self.title
        extra_txt = self.extra_txt

        pad_col, pad_index, widest = _measure_tables(tables, settings)

        rule_equal = widest * '='
        #TODO: this isn't used anywhere?
        rule_dash = widest * '-'

        simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
        tab = [x.as_text() for x in simple_tables]

        tab = '\n'.join(tab)
        tab = tab.split('\n')
        tab[0] = rule_equal
        tab.append(rule_equal)
        tab = '\n'.join(tab)

        if title is not None:
            # Center the title over the table. (Removed dead `title = title`.)
            if len(title) < widest:
                title = ' ' * int(widest/2 - len(title)/2) + title
        else:
            title = ''

        txt = [textwrap.wrap(x, widest) for x in extra_txt]
        txt = ['\n'.join(x) for x in txt]
        txt = '\n'.join(txt)

        out = '\n'.join([title, tab, txt])

        return out

    def as_html(self):
        '''Generate HTML Summary Table
        '''
        tables = self.tables
        settings = self.settings
        #TODO: this isn't used anywhere
        title = self.title

        simple_tables = _simple_tables(tables, settings)
        tab = [x.as_html() for x in simple_tables]
        tab = '\n'.join(tab)

        return tab

    def as_latex(self):
        '''Generate LaTeX Summary Table
        '''
        tables = self.tables
        settings = self.settings
        title = self.title

        if title is not None:
            title = '\\caption{' + title + '} \\\\'
        else:
            title = '\\caption{}'

        simple_tables = _simple_tables(tables, settings)
        tab = [x.as_latex_tabular() for x in simple_tables]
        tab = '\n\\hline\n'.join(tab)

        out = '\\begin{table}', title, tab, '\\end{table}'
        out = '\n'.join(out)
        return out
def _measure_tables(tables, settings):
    '''Compare width of ascii tables in a list and calculate padding values.
    We add space to each col_sep to get us as close as possible to the
    width of the largest table. Then, we add a few spaces to the first
    column to pad the rest.

    Returns (pad_sep, pad_index, widest) where pad_sep/pad_index are
    per-table padding amounts and widest is the widest table's width.
    '''
    simple_tables = _simple_tables(tables, settings)
    tab = [x.as_text() for x in simple_tables]

    length = [len(x.splitlines()[0]) for x in tab]
    len_max = max(length)
    pad_sep = []
    pad_index = []

    for i in range(len(tab)):
        # Guard against single-column tables (nsep would be 0, causing a
        # ZeroDivisionError below).
        nsep = max(tables[i].shape[1] - 1, 1)
        pad = int((len_max - length[i]) / nsep)
        pad_sep.append(pad)
        len_new = length[i] + nsep * pad
        pad_index.append(len_max - len_new)

    return pad_sep, pad_index, max(length)
# Useful stuff
_model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
'WLS' : 'Weigthed least squares',
'RLM' : 'Robust linear model',
'NBin': 'Negative binomial model',
'GLM' : 'Generalized linear model'
}
def summary_model(results):
    '''Create a dict with information about the model

    Parameters
    ----------
    results : Model results instance

    Returns
    -------
    OrderedDict mapping display labels to formatted values. Entries whose
    underlying attribute is missing on `results` are silently omitted.
    '''
    def time_now(*args, **kwds):
        now = datetime.datetime.now()
        return now.strftime('%Y-%m-%d %H:%M')

    info = OrderedDict()
    info['Model:'] = lambda x: x.model.__class__.__name__
    # BUGFIX: was `x.family.__class.__name__` (missing trailing underscores),
    # which always raised AttributeError and silently dropped this entry.
    info['Model Family:'] = lambda x: x.family.__class__.__name__
    info['Link Function:'] = lambda x: x.family.link.__class__.__name__
    info['Dependent Variable:'] = lambda x: x.model.endog_names
    info['Date:'] = time_now
    info['No. Observations:'] = lambda x: "%#6d" % x.nobs
    info['Df Model:'] = lambda x: "%#6d" % x.df_model
    info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
    info['Converged:'] = lambda x: x.mle_retvals['converged']
    info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
    info['Method:'] = lambda x: x.method
    info['Norm:'] = lambda x: x.fit_options['norm']
    info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
    info['Cov. Type:'] = lambda x: x.fit_options['cov']
    info['R-squared:'] = lambda x: "%#8.3f" % x.rsquared
    info['Adj. R-squared:'] = lambda x: "%#8.3f" % x.rsquared_adj
    info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
    info['AIC:'] = lambda x: "%8.4f" % x.aic
    info['BIC:'] = lambda x: "%8.4f" % x.bic
    info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
    info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
    info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
    info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
    info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
    info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
    info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
    info['Scale:'] = lambda x: "%#8.5g" % x.scale

    out = OrderedDict()
    # `.items()` is equivalent to the compat `iteritems` on both Python 2/3.
    for key, func in info.items():
        try:
            out[key] = func(results)
        # NOTE: some models don't have loglike defined (RLM), so that's NIE
        except (AttributeError, KeyError, NotImplementedError):
            pass
    return out
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
                   skip_header=False, float_format="%.4f"):
    '''create a summary table of parameters from results instance

    Parameters
    ----------
    res : results instance
        some required information is directly taken from the result
        instance. May also be a 6-tuple
        (results, params, bse, tvalues, pvalues, conf_int).
    yname : string or None
        optional name for the endogenous variable, default is "y"
    xname : list of strings or None
        optional names for the exogenous variables, default is "var_xx"
    alpha : float
        significance level for the confidence intervals
    use_t : bool
        indicator whether the p-values are based on the Student-t
        distribution (if True) or on the normal distribution (if False)
    skip_headers : bool
        If false (default), then the header row is added. If true, then no
        header row is added.
    float_format : string
        float formatting options (e.g. ".3g")

    Returns
    -------
    params_table : SimpleTable instance
    '''
    if isinstance(results, tuple):
        # BUGFIX: the second element was unpacked as `std_err` while the
        # code below uses `bse`, producing a NameError for tuple input.
        results, params, bse, tvalues, pvalues, conf_int = results
    else:
        params = results.params
        bse = results.bse
        tvalues = results.tvalues
        pvalues = results.pvalues
        conf_int = results.conf_int(alpha)

    data = np.array([params, bse, tvalues, pvalues]).T
    data = np.hstack([data, conf_int])
    data = pd.DataFrame(data)

    if use_t:
        data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
                        '[' + str(alpha/2), str(1-alpha/2) + ']']
    else:
        data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
                        '[' + str(alpha/2), str(1-alpha/2) + ']']

    if not xname:
        data.index = results.model.exog_names
    else:
        data.index = xname

    return data
# Vertical summary instance for multiple models
def _col_params(result, float_format='%.4f', stars=True):
    '''Stack coefficients and standard errors in single column
    '''
    # Extract parameters
    res = summary_params(result)
    # Format float
    for col in res.columns[:2]:
        res[col] = res[col].apply(lambda x: float_format % x)
    # Std.Errors in parentheses
    # NOTE: `.ix` was removed in pandas 1.0; use .iloc/.loc instead.
    res.iloc[:, 1] = '(' + res.iloc[:, 1] + ')'
    # Significance stars
    if stars:
        coef_col = res.columns[0]
        idx = res.iloc[:, 3] < .1
        res.loc[idx, coef_col] = res.loc[idx, coef_col] + '*'
        idx = res.iloc[:, 3] < .05
        res.loc[idx, coef_col] = res.loc[idx, coef_col] + '*'
        idx = res.iloc[:, 3] < .01
        res.loc[idx, coef_col] = res.loc[idx, coef_col] + '*'
    # Stack Coefs and Std.Errors
    res = res.iloc[:, :2]
    res = res.stack()
    res = pd.DataFrame(res)
    res.columns = [str(result.model.endog_names)]
    return res
def _col_info(result, info_dict=None):
'''Stack model info in a column
'''
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out
def _make_unique(list_of_names):
if len(set(list_of_names)) == len(list_of_names):
return list_of_names
# pandas does not like it if multiple columns have the same names
from collections import defaultdict
name_counter = defaultdict(str)
header = []
for _name in list_of_names:
name_counter[_name] += "I"
header.append(_name+" " + name_counter[_name])
return header
def summary_col(results, float_format='%.4f', model_names=None, stars=False,
                info_dict=None, regressor_order=None):
    """
    Summarize multiple results instances side-by-side (coefs and SEs)

    Parameters
    ----------
    results : statsmodels results instance or list of result instances
    float_format : string
        float format for coefficients and standard errors
        Default : '%.4f'
    model_names : list of strings of length len(results) if the names are not
        unique, a roman number will be appended to all model names
    stars : bool
        print significance stars
    info_dict : dict
        dict of lambda functions to be applied to results instances to retrieve
        model info. To use specific information for different models, add a
        (nested) info_dict with model name as the key.
        Example: `info_dict = {"N":..., "R2": ..., "OLS":{"R2":...}}` would
        only show `R2` for OLS regression models, but additionally `N` for
        all other results.
        Default : None (use the info_dict specified in
        result.default_model_infos, if this property exists)
    regressor_order : list of strings
        list of names of the regressors in the desired order. All regressors
        not specified will be appended to the end of the list.
    """
    # Normalize mutable defaults (were `[]`, shared across calls).
    if model_names is None:
        model_names = []
    if regressor_order is None:
        regressor_order = []

    if not isinstance(results, list):
        results = [results]

    cols = [_col_params(x, stars=stars, float_format=float_format) for x in
            results]

    # Unique column names (pandas has problems merging otherwise)
    if model_names:
        colnames = _make_unique(model_names)
    else:
        colnames = _make_unique([x.columns[0] for x in cols])
    for i in range(len(cols)):
        cols[i].columns = [colnames[i]]

    merg = lambda x, y: x.merge(y, how='outer', right_index=True,
                                left_index=True)
    summ = reduce(merg, cols)

    if regressor_order:
        varnames = summ.index.get_level_values(0).tolist()
        ordered = [x for x in regressor_order if x in varnames]
        unordered = [x for x in varnames if x not in regressor_order + ['']]
        order = ordered + list(np.unique(unordered))

        # Flatten the (name, coef/stde) MultiIndex, reorder, then restore.
        f = lambda idx: sum([[x + 'coef', x + 'stde'] for x in idx], [])
        summ.index = f(np.unique(varnames))
        summ = summ.reindex(f(order))
        summ.index = [x[:-4] for x in summ.index]

    idx = pd.Series(lrange(summ.shape[0])) % 2 == 1
    summ.index = np.where(idx, '', summ.index.get_level_values(0))

    # add infos about the models.
    if info_dict:
        cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,
                                           info_dict)) for x in results]
    else:
        cols = [_col_info(x, getattr(x, "default_model_infos", None)) for x in
                results]
    # use unique column names, otherwise the merge will not succeed
    for df, name in zip(cols, _make_unique([df.columns[0] for df in cols])):
        df.columns = [name]
    merg = lambda x, y: x.merge(y, how='outer', right_index=True,
                                left_index=True)
    info = reduce(merg, cols)
    dat = pd.DataFrame(np.vstack([summ, info]))  # pd.concat better, but error
    dat.columns = summ.columns
    dat.index = pd.Index(summ.index.tolist() + info.index.tolist())
    summ = dat

    summ = summ.fillna('')

    smry = Summary()
    smry.add_df(summ, header=True, align='l')
    smry.add_text('Standard errors in parentheses.')
    if stars:
        smry.add_text('* p<.1, ** p<.05, ***p<.01')

    return smry
def _formatter(element, float_format='%.4f'):
try:
out = float_format % element
except:
out = str(element)
return out.strip()
def _df_to_simpletable(df, align='r', float_format="%.4f", header=True,
                       index=True, table_dec_above='-', table_dec_below=None,
                       header_dec_below='-', pad_col=0, pad_index=0):
    """Convert a DataFrame into a SimpleTable with the given decoration
    and padding options."""
    dat = df.copy()
    dat = dat.applymap(lambda x: _formatter(x, float_format))
    if header:
        headers = [str(x) for x in dat.columns.tolist()]
    else:
        headers = None
    if index:
        stubs = [str(x) + int(pad_index) * ' ' for x in dat.index.tolist()]
    else:
        # NOTE: `.ix` was removed in pandas 1.0; use positional .iloc.
        dat.iloc[:, 0] = [str(x) + int(pad_index) * ' ' for x in dat.iloc[:, 0]]
        stubs = None
    st = SimpleTable(np.array(dat), headers=headers, stubs=stubs,
                     ltx_fmt=fmt_latex, txt_fmt=fmt_txt)
    st.output_formats['latex']['data_aligns'] = align
    st.output_formats['txt']['data_aligns'] = align
    st.output_formats['txt']['table_dec_above'] = table_dec_above
    st.output_formats['txt']['table_dec_below'] = table_dec_below
    st.output_formats['txt']['header_dec_below'] = header_dec_below
    st.output_formats['txt']['colsep'] = ' ' * int(pad_col + 1)
    return st
def _simple_tables(tables, settings, pad_col=None, pad_index=None):
    """Convert each DataFrame in `tables` into a SimpleTable using its
    recorded per-table settings."""
    simple_tables = []
    if pad_col is None:
        pad_col = [0] * len(tables)
    if pad_index is None:
        pad_index = [0] * len(tables)
    for i, v in enumerate(tables):
        index = settings[i]['index']
        header = settings[i]['header']
        align = settings[i]['align']
        # Honor the per-table float_format recorded by add_df/add_array;
        # it was previously hard-coded to '%.4f', silently ignoring the
        # user's setting.
        float_format = settings[i].get('float_format', '%.4f')
        simple_tables.append(_df_to_simpletable(v, align=align,
                                                float_format=float_format,
                                                header=header, index=index,
                                                pad_col=pad_col[i],
                                                pad_index=pad_index[i]))
    return simple_tables
| bsd-3-clause |
dialounke/pylayers | pylayers/gui/PylayersGui.py | 1 | 23274 | # -*- coding: utf-8 -*-
"""
PyLayers GUI
.. autommodule::
:members:
To run this code. type
python PylayersGui.py
"""
from pylayers.simul.link import *
import pylayers.util.pyutil as pyu
import pylayers.signal.standard as std
from pylayers.util.project import *
import json
# TEST
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure
from pyface.qt import QtGui,QtCore
from traitsui.qt4.editor import Editor
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
# console ipython
from IPython import embed_kernel
from traits.api import HasTraits, Button,Range,Enum, Instance, \
on_trait_change,property_depends_on,Float,Str,Int,Bool,List
from traitsui.api import View, Item,HSplit,VSplit, RangeEditor, \
EnumEditor,Group,spring,HGroup,VGroup,Handler, \
InstanceEditor
from traitsui.menu import Action, ActionGroup, Menu, MenuBar, ToolBar
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, \
MlabSceneModel
from tvtk.pyface.api import Scene
try:
get_ipython
except NameError:
banner=exit_msg=''
else:
banner = '*** Nested interpreter ***'
exit_msg = '*** Back in main IPython ***'
# First import the embed function
from IPython.frontend.terminal.embed import InteractiveShellEmbed
## INIT DLink object
DL = DLink()
filename = pyu.getlong('wstd.json', pstruc['DIRSIMUL'])
# Context manager so the file handle is always closed (it was left open).
with open(filename) as fp:
    stds = json.load(fp)
# list(...) is required on Python 3, where dict.keys() is a view object and
# cannot be concatenated to a list with `+`.
av_wstds = ['None'] + list(stds.keys())
# Per-standard channel labels (as strings); 'None' means "no standard".
dchann = {w: [str(i) for i in std.Wstandard(w).chan.keys()]
          for w in av_wstds if w != 'None'}
dchann.update({'None': ['None']})
from qtconsole.rich_ipython_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from IPython.lib import guisupport
class QIPythonWidget(RichJupyterWidget):
    """ Convenience class for a live IPython console widget. We can replace
    the standard banner using the customBanner argument."""

    def __init__(self, customBanner=None, *args, **kwargs):
        # Idiomatic identity test (was `not customBanner is None`).
        if customBanner is not None:
            self.banner = customBanner
        super(QIPythonWidget, self).__init__(*args, **kwargs)
        self.kernel_manager = kernel_manager = QtInProcessKernelManager()
        kernel_manager.start_kernel()
        kernel_manager.kernel.gui = 'qt4'
        # NOTE(review): `_kernel_manager` is presumably the property backing
        # store written by the assignment above -- confirm against qtconsole.
        self.kernel_client = kernel_client = self._kernel_manager.client()
        kernel_client.start_channels()

        def stop():
            # Tear down channels and kernel, then quit the Qt event loop.
            kernel_client.stop_channels()
            kernel_manager.shutdown_kernel()
            guisupport.get_app_qt4().exit()
        self.exit_requested.connect(stop)

    def pushVariables(self, variableDict):
        """ Given a dictionary containing name / value pairs, push those
        variables to the IPython console widget """
        self.kernel_manager.kernel.shell.push(variableDict)

    def clearTerminal(self):
        """ Clears the terminal """
        self._control.clear()

    def printText(self, text):
        """ Prints some plain text to the console """
        self._append_plain_text(text)

    def executeCommand(self, command):
        """ Execute a command in the frame of the console widget """
        self._execute(command, False)
class JupyterWidget(QtGui.QWidget):
    """ Main GUI Widget including a button and IPython Console widget
    inside vertical layout
    """
    def __init__(self, parent=None):
        super(JupyterWidget, self).__init__(parent)
        # Embed a live IPython console in a vertical layout.
        layout = QtGui.QVBoxLayout(self)
        ipyConsole = QIPythonWidget()
        layout.addWidget(ipyConsole)
        # ipyConsole.pushVariables({'DL':DL})
        # Expose every module-level and local name (including the module's
        # DLink instance `DL`) inside the embedded console namespace.
        allvar = globals()
        allvar.update(locals())
        ipyConsole.pushVariables(allvar)
class _MPLFigureEditor(Editor):
    # TraitsUI editor that embeds a matplotlib Figure (plus its navigation
    # toolbar) inside a Qt widget.
    scrollable = True

    def init(self, parent):
        # `self.value` is the Figure trait being edited.
        self.control = self._create_canvas(parent)
        self.set_tooltip()

    def update_editor(self):
        # Redrawing is driven externally via canvas.draw(); nothing to do here.
        pass

    def _create_canvas(self, parent):
        """ Create the MPL canvas. """
        # matplotlib commands to create a canvas
        frame = QtGui.QWidget()
        mpl_canvas = FigureCanvas(self.value)
        mpl_toolbar = NavigationToolbar2QT(parent=frame,canvas = mpl_canvas)
        # Stack canvas above its toolbar inside a plain QWidget frame.
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(mpl_canvas)
        vbox.addWidget(mpl_toolbar)
        frame.setLayout(vbox)
        # Give the canvas keyboard focus on click so mpl key events work.
        mpl_canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
        mpl_canvas.setFocus()
        return frame#mpl_canvas
class MPLFigureEditor(BasicEditorFactory):
    # TraitsUI editor factory: `klass` names the Editor implementation that
    # is instantiated for each matplotlib Figure item in a View.
    klass = _MPLFigureEditor
class WstdHandler(Handler):
    # Channel labels available for the currently selected wireless standard.
    channels = List(Str)

    def object_Wstd_Enum_changed(self, info):
        """
        Listen for changes of the *Wstd_Enum* trait on the viewed object.

        When this listener method is called, *info.object* is a reference to
        the viewed object (the GUI model exposing `Wstd_Enum` and `chann`).

        """
        # Refresh the channel list from the module-level standard->channels
        # mapping `dchann`.
        self.channels = dchann[info.object.Wstd_Enum]
        # As default value, use the first channel in the list:
        info.object.chann = self.channels[0]
        # info.object.DL.fGHz =
class PylayersGUI(HasTraits):
# slider/dropdown widgets etc
# Layout
laynames = [''] + np.sort(os.listdir(basename +'/struc/lay/')).tolist()#['','DLR.lay','defstr.lay','TC2_METIS.lay']#,
Lay_Enum = Enum(laynames)
## Antenna file :
av_ant = ['Omni','Gauss','aperture']
antext= ['vsh3','sh3']
for fname in os.listdir(basename +'/ant'):
if fname.split('.')[-1] in antext:
av_ant.append(fname)
# Init Positions
xmin = DL.L.ax[0]
xmax = DL.L.ax[1]
ymin = DL.L.ax[2]
ymax = DL.L.ax[3]
zmin = 0.
zmax = DL.L.maxheight-0.1
# Antenna
## position a
aX = Range(low='xmin',high='xmax',value= float(xmin+xmax/2.))
aY = Range(low='ymin',high='ymax',value= float(ymin+ymax/2.))
aZ = Range(low='zmin',high='zmax',value= float(zmin+zmax/2.))
## rotation a
agamma = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
abeta = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
aalpha = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
## file a:
a_ant = Enum(av_ant)
# Antenna B
## position b
bX = Range(low='xmin',high='xmax',value= float(xmin+xmax/2.))
bY = Range(low='ymin',high='ymax',value= float(ymin+ymax/2.))
bZ = Range(low='zmin',high='zmax',value= float(zmin+zmax/2.))
## rotation b
bgamma = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
bbeta = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
balpha = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
## file b:
b_ant = Enum(av_ant)
# frequency
fmmin = 0.
fmmax = 300.
fmin=Range(low = 'fmmin', high = 'fmmax',value = float(DL.Aa.fGHz[0]) )
fmax=Range(low = 'fmmin', high = 'fmmax',value = float(DL.Aa.fGHz[-1]) )
fstep=Range(low = 0,high = 10, value = 0)
# advanced
# init interface
scene = Instance(MlabSceneModel, ())
plot = Instance(PipelineBase)
# @on_trait_change('scene.activated')
# def init_plot(self):
# DL._show3()
# When the scene is activated, or when the parameters are changed, we
# update the plot.
# def _open_changed(self):
# """ Handles the user clicking the 'Open...' button.
# """
# path = pyu.getlong('',pstruc['DIRSTR'])
# file_name = open_file(file_name= path ,extensions = FileInfo())
# if file_name != '':
# self.file_name = file_name
@on_trait_change('Lay_Enum')
def update_L(self):
if self.Lay_Enum != ' ':
mlab.clf()
DL.L=Layout(self.Lay_Enum,bgraphs=True)
self.xmin=DL.L.ax[0]
self.xmax=DL.L.ax[1]
self.ymin=DL.L.ax[2]
self.ymax=DL.L.ax[3]
self.zmin=0.
self.zmax=DL.L.maxheight-0.1
self.aX,self.aY,self.aZ=DL.a
self.bX,self.bY,self.bZ=DL.b
DL.a= np.array([self.aX,self.aY,self.aZ])
DL.b= np.array([self.bX,self.bY,self.bZ])
self.cutoff = DL.cutoff
if not hasattr(DL,'_maya_fig'):
DL._show3()
@on_trait_change('cutoff,threshold')
def update_cutoff_threshold(self):
""" update position ant a
"""
DL.cutoff = self.cutoff
DL.threshold = self.threshold/100.
@on_trait_change('aX,aY,aZ')
def update_a(self):
""" update position ant a
"""
self.clear_fig()
DL.a= np.array([self.aX,self.aY,self.aZ])
self.cutoff = DL.cutoff
@on_trait_change('bX,bY,bZ')
def update_b(self):
""" update position ant b
"""
self.clear_fig()
DL.b= np.array([self.bX,self.bY,self.bZ])
self.cutoff = DL.cutoff
@on_trait_change('aalpha,abeta,agamma')
def update_Ta(self):
""" update rot ant a
"""
T = geu.MEulerAngle(self.aalpha,beta=self.abeta,gamma=self.agamma)
DL.Ta=T
self.clear_fig()
# if DL.dexist['Ct']['exist']:
# DL.C.locbas(Tt=DL.Ta, Tr=DL.Tb)
# #T channel
# DL.H = DL.C.prop2tran(a=DL.Aa,b=DL.Ab,Friis=True)
# self.plt_all()
@on_trait_change('balpha,bbeta,bgamma')
def update_Tb(self):
""" update rot ant b
"""
T = geu.MEulerAngle(self.balpha,beta=self.bbeta,gamma=self.bgamma)
DL.Tb=T
self.clear_fig()
@on_trait_change('a_ant,fmin,fmax,fstep')
def update_Aa(self):
DL.Aa=Antenna(self.a_ant)
self.clear_fig()
# if DL.Aa.fromfile:
# self.fmin=DL.Aa.fGHz[0]
# self.fmax=DL.Aa.fGHz[-1]
# self.fstep=min(1,DL.Aa.fGHz[1]-DL.Aa.fGHz[0])
@on_trait_change('b_ant,fmin,fmax,fstep')
def update_Ab(self):
DL.Ab=Antenna(self.b_ant)
self.clear_fig()
# if DL.Ab.fromfile:
# self.fmin=DL.Ab.fGHz[0]
# self.fmax=DL.Ab.fGHz[-1]
# self.fstep=min(1,DL.Ab.fGHz[1]-DL.Ab.fGHz[0])
@on_trait_change('fmin,fmax,fstep,chann')
def update_fGHz(self):
if self.Wstd_Enum != 'None':
W=std.Wstandard(self.Wstd_Enum)
# DL.fGHz = W.chan[eval(self.chann)].fghz
Wchan = W.chan[eval(self.chann)]
fcGHz = Wchan['fcGHz']
BWGHz = Wchan['BMHz']
GMHz = Wchan['GMHz']
fGHz = Wchan.fghz
DL.fGHz = np.array([fcGHz])
self.BWGHz = BWGHz
self.fmin = float(fGHz[0])
self.fmax = float(fGHz[-1])
self.fstep = float(fGHz[1]-fGHz[0])
else:
if self.fmin < self.fmax:
DL.fGHz = np.arange(self.fmin,
self.fmax,
self.fstep
)
elif self.fmin == self.fmax:
DL.fGHz=np.array([self.fmin])
self.BWGHz = 5
@on_trait_change('Beval')
def DLeval(self):
DL.eval(verbose=False,
force=self.force,
cutoff=self.cutoff,
threshold=self.threshold/100.,
diffraction = self.diffraction,
nD=self.nD,
nT=self.nT,
nR=self.nR,
applywav = self.applywav)
DL._update_show3(delrays=True)
ER = np.squeeze(DL.H.energy())
DL.R._show3(ER=ER)
self.plt_all()
def plt_all(self):
self.plt_cir()
self.plt_doa()
self.plt_dod()
self.plt_dspread()
self.plt_aspread()
def plt_cir(self):
self.figcir.clf()
ax = self.figcir.add_subplot(111)
DL.plt_cir(fig=self.figcir,ax=ax,BWGHz=self.BWGHz,Nf = 5000 )
# ir = DL.H.getcir(BWGHz=5,Nf=1000)
# ir.plot(fig=self.figcir,ax=ax)
# ax.plot(DL.H.taud,20*np.log10(DL.H.y[:,0,0,0]),'or')
self.figcir.canvas.draw()
# DL.plt_doadod(d='doa')
# DL.H.plot(fig=self.figcir,ax=ax)
# self.figcir.canvas.draw()
def plt_doa(self):
self.figdoa.clf()
ax = self.figdoa.add_subplot(111,polar=True)
# DL.L.showG('s',ax=ax,fig=self.figure)
# DL.H.plotd(d='doa',polar=True,fig=self.figdoa,ax=ax)
DL.plt_doa(polar=True,fig=self.figdoa,ax=ax)
self.figdoa.canvas.draw()
def plt_dod(self):
self.figdod.clf()
ax = self.figdod.add_subplot(111,polar=True)
DL.plt_dod(polar=True,fig=self.figdod,ax=ax)
# DL.L.showG('s',ax=ax,fig=self.figure)
# DL.H.plotd(d='dod',polar=True,fig=self.figdod,ax=ax)
self.figdod.canvas.draw()
def plt_dspread(self):
self.figds.clf()
ax = self.figds.add_subplot(111)
DL.plt_dspread(fig=self.figds,ax=ax)
self.figds.canvas.draw()
def plt_aspread(self):
self.figas.clf()
ax = self.figas.add_subplot(111)
DL.plt_aspread(fig=self.figas,ax=ax)
self.figas.canvas.draw()
def clear_fig(self,lf=['cir','doa','dod','as','ds']):
for f in lf:
eval('self.fig'+f+'.clf()')
eval('self.fig'+f+'.canvas.draw()')
#####
##### RENDERING 3D MAYAVI
#####
render3d = Item('scene', editor=SceneEditor(scene_class=Scene),
height=500, width=1500, show_label=False)
# ###
# ### Matplotlib figure
# ###
# figure = Instance(Figure(figsize=(8,20)), ())
#####
##### Layout SELECTION
#####
# Layout
GLay = Group(Item('Lay_Enum',
style='simple',
label='file'),
show_labels=False,
label='Layout')
#####
##### WIRELESS STANDARD
#####
# wireless standard
Wstd_Enum = Enum('None', av_wstds)
chann = Str
# chann = Enum(av_chann)
GWstd_None = Group(Item('fmin',label='fGHz min',style='text'),
Item('fmax',label='fGHz max',style='text'),
Item('fstep',label='fGHz step',style='text'),
label = 'Frequency',
show_border= True,
enabled_when = 'Wstd_Enum == \'None\''
)
GWstd_std = Group(Item(name ='chann',editor=EnumEditor(name='handler.channels')
) ,
label = 'channel',
show_border= True,
enabled_when = 'Wstd_Enum != \'None\''
)
GWstd = Group(
Group(Item (name = 'Wstd_Enum',
label = 'Wireless Standard')),
GWstd_None,
GWstd_std,
label='Wireless Standard',
show_labels=True,
show_border=False)
#####
##### ANTENNA
#####
xmin=Float
xmax = Float
ymin=Float
ymax = Float
zmin=Float
zmax = Float
# Ant A file
Iax = Item('aX',
editor=RangeEditor(low_name='xmin',
high_name='xmax',
format='%.1f',
label_width=28,
mode='auto'),
label='x'
)
Iay = Item('aY',
editor=RangeEditor(low_name='ymin',
high_name='ymax',
format='%.1f',
label_width=28,
mode='auto'),
label='y'
)
Iaz = Item('aZ',
editor=RangeEditor(low_name='zmin',
high_name='zmax',
format='%.1f',
label_width=28,
mode='auto'),
label='z'
)
GPos_a = VGroup(
Iax,
Iay,
Iaz,
id = 'a',
label = 'Position',
show_border=True,
show_labels=True,
layout='split'
)
Ifile_a = Item('a_ant',label='file')
GRot_a = VGroup(
Item('agamma',label='x-roll'),
Item('abeta',label='y-roll'),
Item('aalpha',label='z-roll'),
id = 'Ta',
label = 'Rotation',
show_border=True,
layout='split'
)
G_a = Group(Ifile_a,
GPos_a,
GRot_a,
label='Antenna a',
show_border=False
)
#### ANtenna B
# Ant B positions
Ibx = Item('bX',
editor=RangeEditor(low_name='xmin',
high_name='xmax',
format='%.1f',
label_width=28,
mode='auto'),
label='x'
)
Iby = Item('bY',
editor=RangeEditor(low_name='ymin',
high_name='ymax',
format='%.1f',
label_width=28,
mode='auto'),
label='y'
)
Ibz = Item('bZ',
editor=RangeEditor(low_name='zmin',
high_name='zmax',
format='%.1f',
label_width=28,
mode='auto'),
label='z'
)
GPos_b = Group(
Ibx,
Iby,
Ibz,
id = 'b',
label = 'Position',
show_border=True,
layout='split'
)
# Ant B file
Ifile_b = Item('b_ant',label='file')
GRot_b = Group(
Item('bgamma',label='x-roll'),
Item('bbeta',label='y-roll'),
Item('balpha',label='z-roll'),
id = 'Tb',
label = 'Rotation',
show_border=True,
layout='split'
)
G_b = Group(Ifile_b,
GPos_b,
GRot_b,
label='Antenna b',
show_border=False,
)
####
#### advanced CONFIRGURATION
####
force =Bool
diffraction = Bool
applywav = Bool
applywav = Bool
low_cutoff = 1
high_cutoff = 30
cutoff = Range(low='low_cutoff',high='high_cutoff',value=DL.cutoff)
threshold = Range(0,100,80)
nD=2
nR=10
nT=10
G_advanced = Group(VGroup(
Item('force',
label='force',
resizable=False,
style='simple'),
Item('cutoff',
label='cutoff',
editor=RangeEditor(low_name='low_cutoff',
high_name='high_cutoff',
label_width=28,
mode='auto'),
width=0.2,
style='simple'),
Item('threshold',
label='threshold',
width=0.2,
style='simple'),
Item('diffraction',
label='diffractions',
style='simple'),
Item('nD',
label='max nb Diffractions',
enabled_when='diffraction' ,
style='simple'),
Item('nR',
label='max nb Reflections',
style='simple'),
Item('nT',
label='max nb Transmissions',
style='simple'),
Item('applywav',
label='applywav',
style='simple'),
label='Ray Tracing Configuration',
show_labels=True,
show_border=False))
####
### MANAGING GROUPS
###
# LEFT GROUP WINDOW
Beval = Button('Launch Ray-Tracing')
GLeft = Group(
GLay,
GWstd,
G_advanced
)
# Antenna GRoup
GAnt_ab = HGroup(spring,G_a,spring,G_b,spring)
GAnt_Eval = Group(GAnt_ab,
HGroup(spring,
Item('Beval',
enabled_when='Lay_Enum != \'\''
),
show_labels=False)
)
#### TOP GROUP
GR_0= HSplit(GLeft,
render3d,
layout='split')
# BOTTOM GROUP
figcir= Instance(Figure(figsize=(8,20)), ())
figdoa= Instance(Figure(figsize=(8,20)), ())
figdod= Instance(Figure(figsize=(8,20)), ())
figas= Instance(Figure(figsize=(8,20)), ())
figds= Instance(Figure(figsize=(8,20)), ())
GExploit = Group ( Group(Item('figcir',
editor=MPLFigureEditor(),
),
label='CIR'),
Group(Item('figdoa',
editor=MPLFigureEditor()
),
label='DOA'),
Group(Item('figdod',
editor=MPLFigureEditor()
),
label='DOD'),
Group(Item('figas',
editor=MPLFigureEditor()
),
label='Ang. Spread'),
Group(Item('figds',
editor=MPLFigureEditor()
),
label='Delay Spread'),
layout='tabbed',
)
GR_1 = HGroup(spring,GAnt_Eval,spring,GExploit)
JWidget = JupyterWidget()
JWidget.show()
view = View(VGroup(GR_0,GR_1),
# menubar=MenuBar(Menu_file),
buttons=['Quit'],
title="Pylayers GUI - beta",
resizable=True,
width=1., height=1.,
handler=WstdHandler)
if __name__ == '__main__':
    # Launch the GUI: build the HasTraits object and open its TraitsUI view.
    gui = PylayersGUI()
    gui.configure_traits()
| mit |
mxjl620/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
# NOTE(review): sklearn.grid_search / sklearn.cross_validation are
# pre-0.18 module paths -- confirm the pinned scikit-learn version.
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data: a size x size image whose weights are -1 in the top-left
# ROI, +1 in the bottom-right ROI and 0 elsewhere.
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
# NOTE(review): np.bool is removed in modern NumPy -- use bool there.
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
    x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
# Scale the additive noise to reach the requested SNR (in dB).
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
                            memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
# Map the coefficients of the reduced space back to pixel space.
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
Eric89GXL/scikit-learn | sklearn/metrics/cluster/bicluster/bicluster_metrics.py | 13 | 2597 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_arrays
def _check_rows_and_columns(a, b):
    """Unpack the row/column indicator arrays of two biclusterings.

    Returns (a_rows, a_cols, b_rows, b_cols) after shape validation.
    """
    checked_a = check_arrays(*a)
    checked_b = check_arrays(*b)
    return checked_a[0], checked_a[1], checked_b[0], checked_b[1]
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Computes pairwise similarity matrix.

    result[i, j] is the similarity of a's bicluster i and b's
    bicluster j.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    # Build the (n_a, n_b) matrix directly; the previous np.zeros()
    # preallocation was dead code, immediately overwritten.
    result = np.array([[similarity(a_rows[i], a_cols[i],
                                   b_rows[j], b_cols[j])
                        for j in range(n_b)]
                       for i in range(n_a)])
    return result
def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Pairwise similarities between individual biclusters are computed,
    the best matching between the two sets is found with the Hungarian
    algorithm, and the matched similarities are summed and divided by
    the size of the larger set.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.
    b : (rows, columns)
        Another set of biclusters like ``a``.
    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    References
    ----------
    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
    """
    if similarity == "jaccard":
        similarity = _jaccard
    pairwise = _pairwise_similarity(a, b, similarity)
    # Hungarian algorithm on the cost matrix (1 - similarity).
    best_match = linear_assignment(1. - pairwise)
    larger = max(len(a[0]), len(b[0]))
    return np.trace(pairwise[:, best_match[:, 1]]) / larger
| bsd-3-clause |
alberlab/alabtools | alabtools/plots.py | 1 | 10761 | # Copyright (C) 2017 University of Southern California and
# Nan Hua
#
# Authors: Nan Hua
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
__author__ = "Nan Hua"
__license__ = "GPL"
__version__ = "0.0.1"
__email__ = "nhua@usc.edu"
import numpy as np
import warnings
warnings.simplefilter('ignore', UserWarning)
import matplotlib
matplotlib.use('Agg')
from scipy.ndimage.interpolation import zoom
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from .api import Contactmatrix
from .utils import isiterable
def make_colormap(seq, cmapname='CustomMap'):
    """
    Return a LinearSegmentedColormap built from a sequence of colors.

    Parameters
    ----------
    seq : list
        A sequence of RGB-tuples, optionally separated by floats marking
        segment boundaries. The floats should be increasing and in the
        interval (0,1).
    cmapname : str
        Name given to the resulting colormap.

    Example
    -------
    make_colormap([(1,1,1),(1,0,0)]) is a colormap from white to red
    make_colormap([(1,1,1),(1,0,0),0.5,(1,0,0),(0,0,0)],'wrb') is a
    colormap from white to red to black

    Returns
    -------
    LinearSegmentedColormap
    """
    # Sentinel tuples and the 0.0/1.0 anchors bracket the user sequence.
    extended = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {'red': [], 'green': [], 'blue': []}
    for pos, item in enumerate(extended):
        if isinstance(item, float):
            prev_rgb = extended[pos - 1]
            next_rgb = extended[pos + 1]
            for channel, lo, hi in zip(('red', 'green', 'blue'),
                                       prev_rgb, next_rgb):
                cdict[channel].append([item, lo, hi])
    return mcolors.LinearSegmentedColormap(cmapname, cdict)
# Module-level white-to-red colormap used as a convenient default.
red = make_colormap([(1,1,1),(1,0,0)])
def plotmatrix(figurename, matrix, title=None, dpi=300, **kwargs):
    """Plot a 2D array with a colorbar and save it to *figurename*.

    Parameters
    ----------
    figurename : str
        Output file; the extension selects the writer ('png' or 'pdf').
    matrix : a 2d numpy array
        A 2d array to plot
    title : str, optional
        Figure title.
    dpi : int, optional
        Resolution of the saved figure.
    cmap : matplotlib color map
        Color map used in matrix, e.g cm.Reds, cm.bwr
    clip_min : float, optional
        The lower clipping value. If an element of a matrix is <clip_min, it is
        plotted as clip_min.
    clip_max : float, optional
        The upper clipping value.
    label : str, optional
        Colorbar label
    ticklabels1 : list, optional
        Custom tick labels for the first dimension of the matrix.
    ticklabels2 : list, optional
        Custom tick labels for the second dimension of the matrix.
    max_resolution : int, optional
        Set a maximum resolution for the output file.
    """
    clip_min = kwargs.pop('clip_min', -np.inf)
    clip_max = kwargs.pop('clip_max', np.inf)
    # Default colormap: white -> red -> black.
    cwrb = make_colormap([(1,1,1),(1,0,0),0.5,(1,0,0),(0,0,0)],'wrb')
    cmap = kwargs.pop('cmap',cwrb)
    fig = plt.figure()
    if 'ticklabels1' in kwargs:
        plt.yticks(range(matrix.shape[0]))
        plt.gca().set_yticklabels(kwargs.pop('ticklabels1'))
    if 'ticklabels2' in kwargs:
        plt.xticks(range(matrix.shape[1]))
        plt.gca().set_xticklabels(kwargs.pop('ticklabels2'))
    if 'max_resolution' in kwargs:
        mr = kwargs.pop('max_resolution')
        if len(matrix) > mr:
            # use linear interpolation to avoid negative values
            matrix = zoom(matrix, float(mr) / len(matrix), order=1)
    clipmat = np.clip(matrix, a_min=clip_min, a_max=clip_max)
    cmax = kwargs.pop('cmax', clipmat.max())
    cmin = kwargs.pop('cmin', clipmat.min())
    print("Color Range: ({}, {})".format(cmin, cmax))
    im = plt.imshow(clipmat,
                    interpolation='nearest',
                    cmap=cmap,
                    **kwargs)
    im.set_clim(cmin, cmax)
    # Fixed idiom: identity comparison with None (was "title != None").
    if title is not None:
        plt.title(title)
    # NOTE(review): 'label' is intentionally NOT popped above, so it is also
    # forwarded to imshow() as an artist label -- confirm this is wanted.
    if 'label' not in kwargs:
        plt.colorbar(im)
    else:
        plt.colorbar(im).set_label(kwargs['label'])
    if figurename[-3:] == 'png':
        fig.savefig(figurename, dpi=dpi)
    elif figurename[-3:] == 'pdf':
        from matplotlib.backends.backend_pdf import PdfPages
        pp = PdfPages(figurename)
        pp.savefig(fig, dpi=dpi)
        pp.close()
    plt.show()
    plt.close(fig)
def plotxy(figurename, x, y, color='blue', linewidth=1, points=False, xlab=None, ylab=None,title=None, xlim=None, ylim=None, grid=False, xticklabels=None, yticklabels=None, vline=None, hline=None, **kwargs):
    """xy plot saved to *figurename* ('png' or 'pdf' by extension).

    Parameters:
    -----------
    x,y: dataset used to plot
    color: drawing color
    linewidth:
    points : True or False, if scatter points are required
    xlab/ylab : string, optional
        label for x/y axis
    title : string, optional
        title of the figure
    xlim,ylim : tuples for xlim, ylim
    grid : bool, draw a grid if True
    xticklabels/yticklabels : custom tick labels for x/y axis
    vline/hline : float or array, optional
        draw a vertical/horizontal line at certain position(s)
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    line = ax.plot(x,y,c=color,**kwargs)
    plt.setp(line,linewidth=linewidth)
    if points:
        ax.scatter(x,y, marker='o',c=color,edgecolors=color)
    # Fixed idiom throughout: "is not None" replaces "!= None"; besides
    # PEP 8, this avoids an ambiguous element-wise comparison when an
    # ndarray is passed for vline/hline.
    if xlab is not None:
        ax.set_xlabel(xlab)
    if ylab is not None:
        ax.set_ylabel(ylab)
    if title is not None:
        ax.set_title(title)
    if xticklabels is not None:
        ax.set_xticklabels(xticklabels)
    if yticklabels is not None:
        ax.set_yticklabels(yticklabels)
    if xlim is not None:
        ax.set_xlim(xlim[0],xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0],ylim[1])
    if grid:
        ax.grid(True)
    if vline is not None:
        for l in np.array([vline]).flatten():
            ax.axvline(l, color='c', linestyle='dashed', linewidth=1)
    if hline is not None:
        for l in np.array([hline]).flatten():
            ax.axhline(l, color='c', linestyle='dashed', linewidth=1)
    plt.show()
    if figurename[-3:] == 'png':
        fig.savefig(figurename, dpi=600)
    elif figurename[-3:] == 'pdf':
        from matplotlib.backends.backend_pdf import PdfPages
        pp = PdfPages(figurename)
        pp.savefig(fig, dpi=600)
        pp.close()
    plt.close(fig)
def plot_comparison(m1, m2, chromosome=None, file=None, dpi=300, labels=None, title='', **kwargs):
    """Plot two contact matrices as one heatmap for visual comparison:
    `m1` fills the strict lower triangle, `m2` the strict upper triangle.

    Extra **kwargs are forwarded to `plt.imshow`. If `file` is given the
    figure is also saved there. Returns the matplotlib figure.
    """
    if not isinstance(m1, Contactmatrix):
        m1 = Contactmatrix(m1)
    if not isinstance(m2, Contactmatrix):
        m2 = Contactmatrix(m2)
    # Optionally restrict both matrices to a single chromosome.
    if chromosome is not None:
        m1 = m1[chromosome]
        m2 = m2[chromosome]
    cwrb = make_colormap([(1,1,1),(1,0,0),0.5,(1,0,0),(0,0,0)],'wrb')
    cmap = kwargs.pop('cmap',cwrb)
    # Lower triangle (k=-1) from m1, upper triangle (k=1) from m2; the
    # main diagonal is left at zero.
    m = np.tril( m1.matrix.toarray(), -1 ) + np.triu( m2.matrix.toarray(), 1 )
    fig = plt.figure(figsize=(10,10))
    plt.title(title)
    plt.imshow(m, cmap=cmap, **kwargs)
    if labels:
        plt.text(0.1, 0.1, labels[0], transform=plt.gca().transAxes)
        plt.text(0.9, 0.9, labels[1], transform=plt.gca().transAxes, horizontalalignment='right', verticalalignment='top')
    plt.colorbar()
    if file is not None:
        plt.tight_layout()
        plt.savefig(file, dpi=dpi)
    return fig
def plot_by_chromosome(data, index, xscale=1e-6, ncols=4, subplot_width=2.5, subplot_height=2.5,
                       sharey=True, subtitlesize=20, ticklabelsize=12, xgridstep=50e6,
                       datalabels=None, highlight_zones=None, highlight_colors='red', vmin=None, vmax=None,
                       suptitle=''):
    '''
    Plot tracks by chromosomes as subplots.

    data : one track (1-d) or a list of tracks; index : a genomic index
    (or one index per track) exposing chrom/start/end arrays,
    get_chromosomes() and id_to_chrom(). Returns (figure, axes array).

    Fixes vs. previous revision: the `ncols` argument is now honored
    (n_cols was hard-coded to 4), and the isinstance precedence bug that
    wrapped tuples of indexes as a single index is corrected.
    '''
    if not isinstance(index, (list, tuple)):
        # A single shared index: replicate it per track and normalize
        # data to a 2-d array of tracks.
        index = [index] * len(data)
        data = np.array(data)
        if len(data.shape) == 1:
            data = np.array([data])
        assert data.shape[1] == len(index[0])
    else:
        assert len(data) == len(index)
        for i, d in zip(index, data):
            assert len(i) == len(d)
        for i in range(len(data)):
            data[i] = np.array(data[i])
    if datalabels is not None:
        assert len(data) == len(datalabels)
    if highlight_zones is not None:
        highlight_zones = np.array(highlight_zones)
        if not isiterable(highlight_colors):
            highlight_colors = [highlight_colors] * len(highlight_zones)
        highlight_colors = np.array(highlight_colors)
    chroms = index[0].get_chromosomes() # multiple data better have the same chromosomes
    n_chroms = len(chroms)
    n_cols = ncols
    n_rows = n_chroms // n_cols if n_chroms % n_cols == 0 else n_chroms // n_cols + 1
    # NOTE(review): with n_rows == 1 plt.subplots returns a 1-d axes array
    # and plots[row, col] below would fail -- confirm or pass squeeze=False.
    f, plots = plt.subplots(n_rows, n_cols, figsize=(subplot_width * n_cols, subplot_height * n_rows), sharey=sharey)
    f.suptitle(suptitle)
    # Common y-range across all subplots unless explicitly provided.
    if vmin is None:
        vmin = np.nanmin([ np.nanmin(d) for d in data])
    if vmax is None:
        vmax = np.nanmax([ np.nanmax(d) for d in data])
    for i in range(n_chroms):
        col = i % n_cols
        row = i // n_cols
        if highlight_zones is not None:
            # Highlight zones belonging to this chromosome.
            ii = np.flatnonzero(highlight_zones[:, 0] == index[0].id_to_chrom(i))
            ch = highlight_zones[ii]
            hgcolors = highlight_colors[ii]
            for (c, s, e), color in zip(ch, hgcolors):
                plots[row, col].fill_between([int(s)*xscale, int(e)*xscale], [vmin, vmin], [vmax, vmax], color=color)
        plots[row, col].set_ylim(vmin, vmax)
        plots[row, col].set_title(index[0].id_to_chrom(i), fontsize=subtitlesize)
        for k in range(len(data)):
            jj = index[k].chrom == chroms[i]
            xx = index[k].start[jj] * xscale
            label = datalabels[k] if datalabels else ''
            plots[row, col].plot(xx, data[k][jj], linewidth=2, label=label)
            if datalabels:
                plots[row, col].legend()
            # sets the grid
            if xgridstep is not None:
                xticks = np.arange(index[k].start[jj][0], index[k].end[jj][-1], xgridstep) * xscale
                plots[row, col].set_xticks(xticks)
                plots[row, col].grid(True, color='grey', linestyle='--',)
        # hide tick labels
        plots[row, col].tick_params(axis='both', which='major', labelsize=ticklabelsize, direction='in')
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    return f, plots
| gpl-3.0 |
clovett/MissionPlanner | Lib/site-packages/numpy/lib/twodim_base.py | 70 | 23431 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty
def fliplr(m):
    """
    Flip array in the left/right direction.

    Reverses the order of the columns; rows are preserved. Returns a
    view, so the operation is O(1).

    Parameters
    ----------
    m : array_like
        Input array, at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.

    Raises
    ------
    ValueError
        If `m` has fewer than 2 dimensions.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must be >= 2-d.")
    return arr[:, ::-1]
def flipud(m):
    """
    Flip array in the up/down direction.

    Reverses the order of the rows; columns are preserved. Returns a
    view, so the operation is O(1).

    Parameters
    ----------
    m : array_like
        Input array, at least 1-D.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.

    Raises
    ------
    ValueError
        If `m` has fewer than 1 dimension.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.
    """
    arr = asanyarray(m)
    if arr.ndim < 1:
        raise ValueError("Input must be >= 1-d.")
    return arr[::-1, ...]
def rot90(m, k=1):
    """
    Rotate an array by 90 degrees in the counter-clockwise direction.

    The first two dimensions are rotated; therefore, the array must be
    at least 2-D.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.

    Returns
    -------
    y : ndarray
        Rotated array.

    Raises
    ------
    ValueError
        If `m` has fewer than 2 dimensions.

    See Also
    --------
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must >= 2-d.")
    k %= 4
    if k == 0:
        return arr
    if k == 1:
        return fliplr(arr).swapaxes(0, 1)
    if k == 2:
        return fliplr(flipud(arr))
    # k == 3: quarter turn clockwise three times over.
    return fliplr(arr.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main
        diagonal, a positive value to an upper diagonal, and a negative
        value to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        All elements zero except the `k`-th diagonal, whose values are
        one.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.
    """
    if M is None:
        M = N
    out = zeros((N, M), dtype=dtype)
    if k >= M:
        # Diagonal lies entirely outside the array.
        return out
    # Stepping the flat index by M+1 walks down a diagonal; the row slice
    # bounds the write for positive k.
    start = k if k >= 0 else (-k) * M
    out[:M - k].flat[start::M + 1] = 1
    return out
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.
    Parameters
    ----------
    v : array_like
        If `v` is a 2-D array, return a copy of its `k`-th diagonal.
        If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
        diagonal.
    k : int, optional
        Diagonal in question. The default is 0. Use `k>0` for diagonals
        above the main diagonal, and `k<0` for diagonals below the main
        diagonal.
    Returns
    -------
    out : ndarray
        The extracted diagonal or constructed diagonal array.
    See Also
    --------
    diagonal : Return specified diagonals.
    diagflat : Create a 2-D array with the flattened input as a diagonal.
    trace : Sum along diagonals.
    triu : Upper triangle of an array.
    tril : Lower triange of an array.
    Examples
    --------
    >>> x = np.arange(9).reshape((3,3))
    >>> np.diag(x)
    array([0, 4, 8])
    >>> np.diag(x, k=1)
    array([1, 5])
    >>> np.diag(np.diag(x))
    array([[0, 0, 0],
           [0, 4, 0],
           [0, 0, 8]])
    """
    v = asarray(v)
    s = v.shape
    if len(s) == 1:
        # 1-D input: build an (n, n) array holding v on the k-th diagonal.
        n = s[0]+abs(k)
        res = zeros((n,n), v.dtype)
        if k >= 0:
            i = k
        else:
            i = (-k) * n
        # Stepping a flat index by n+1 walks down a diagonal; the row
        # slice res[:n-k] bounds the write for positive k.
        res[:n-k].flat[i::n+1] = v
        return res
    elif len(s) == 2:
        # 2-D input: extract the k-th diagonal with the same flat-index
        # stride trick.
        if k >= s[1]:
            return empty(0, dtype=v.dtype)
        if v.flags.f_contiguous:
            # faster slicing
            v, k, s = v.T, -k, s[::-1]
        if k >= 0:
            i = k
        else:
            i = (-k) * s[1]
        return v[:s[1]-k].flat[i::s[1]+1]
    else:
        raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
    """
    Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data, which is flattened and set as the `k`-th diagonal of
        the output.
    k : int, optional
        Diagonal to set; 0, the default, corresponds to the "main"
        diagonal, a positive (negative) `k` giving the number of the
        diagonal above (below) the main.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
    diagonal : Return specified diagonals.
    trace : Sum along diagonals.
    """
    # Preserve subclass wrapping if the input provides it.
    wrap = getattr(v, "__array_wrap__", None)
    flat = asarray(v).ravel()
    s = len(flat)
    n = s + abs(k)
    res = zeros((n, n), flat.dtype)
    if k >= 0:
        rows = arange(0, n - k)
        idx = rows + k + rows * n
    else:
        rows = arange(0, n + k)
        idx = rows + (rows - k) * n
    res.flat[idx] = flat
    return wrap(res) if wrap else res
def tri(N, M=None, k=0, dtype=float):
    """
    An array with ones at and below the given diagonal and zeros
    elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array. By default, `M` equals `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        `k` = 0 is the main diagonal, `k` < 0 is below it, `k` > 0 above.
    dtype : dtype, optional
        Data type of the returned array. The default is float.

    Returns
    -------
    T : ndarray of shape (N, M)
        ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
    """
    if M is None:
        M = N
    # Broadcast comparison: row index i >= column index j - k.
    mask = arange(N).reshape(N, 1) >= (arange(M) - k)
    return mask.astype(dtype)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal
    zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is
        the main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    L : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    arr = asanyarray(m)
    mask = tri(arr.shape[0], arr.shape[1], k=k, dtype=int)
    return multiply(mask, arr)
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th
    diagonal zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array
    """
    arr = asanyarray(m)
    # Complement of the lower-triangular mask shifted down by one.
    mask = 1 - tri(arr.shape[0], arr.shape[1], k - 1, int)
    return multiply(mask, arr)
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
    """
    Generate a Van der Monde matrix.

    The columns of the output matrix are decreasing powers of the input
    vector: the `i`-th column is ``x ** (N - i - 1)``, so the last
    column is all ones.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns in the output. If not specified, a square
        array is returned (``N = len(x)``).

    Returns
    -------
    out : ndarray
        Van der Monde matrix of order `N`. The first column is
        ``x^(N-1)``, the second ``x^(N-2)`` and so forth.
    """
    vec = asarray(x)
    if N is None:
        N = len(vec)
    # Start from all ones: the final column (power 0) needs no work.
    out = ones((len(vec), N), vec.dtype)
    for col in range(N - 1):
        out[:, col] = vec ** (N - col - 1)
    return out
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x, y : array_like, shape (N,)
        Values histogrammed along the first / second dimension.
    bins : int or [int, int] or array_like or [array, array], optional
        Bin count(s) or explicit bin edges; a single array of edges is
        shared by both dimensions.
    range : array_like, shape (2, 2), optional
        Outer bin edges per dimension, ``[[xmin, xmax], [ymin, ymax]]``.
    normed : bool, optional
        When True return the bin density instead of counts.
    weights : array_like, shape (N,), optional
        Per-sample weights.

    Returns
    -------
    H : ndarray, shape (nx, ny)
        The 2-D histogram (`x` along axis 0, `y` along axis 1).
    xedges, yedges : ndarray
        The bin edges along each dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram
    """
    from numpy import histogramdd

    try:
        n_spec = len(bins)
    except TypeError:
        n_spec = 1
    # A bare sequence of edge values (length other than 1 or 2) is
    # interpreted as one set of edges shared by both dimensions.
    if n_spec not in (1, 2):
        edges = asarray(bins, float)
        bins = [edges, edges]
    hist, all_edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, all_edges[0], all_edges[1]
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    `mask_func` must behave like `triu` or `tril`: called as
    ``mask_func(a, k)`` on a square array `a`, it returns an array of the
    same shape with some entries zeroed.  This function returns the
    indices of the entries that would remain non-zero.

    Parameters
    ----------
    n : int
        The returned indices are valid for arrays of shape (n, n).
    mask_func : callable
        Masking function with a `triu`/`tril`-style signature.
    k : scalar
        Optional offset, passed through to `mask_func`.

    Returns
    -------
    indices : tuple of arrays
        Indices where ``mask_func(np.ones((n, n)), k)`` is non-zero.

    See Also
    --------
    triu, tril, triu_indices, tril_indices
    """
    template = ones((n, n), int)
    masked = mask_func(template, k)
    return where(masked != 0)
def tril_indices(n, k=0):
    """
    Return the indices for the lower-triangle of an (n, n) array.

    Parameters
    ----------
    n : int
        Row dimension of the square arrays the indices are valid for.
    k : int, optional
        Diagonal offset (see `tril` for details).

    Returns
    -------
    inds : tuple of arrays
        One index array per dimension of the triangle.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu
    """
    # Delegate to the generic masking helper with the lower-triangle mask.
    return mask_indices(n, tril, k)
def tril_indices_from(arr, k=0):
    """
    Return the indices for the lower-triangle of `arr`.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        Must be 2-D and square; the indices match its dimensions.
    k : int, optional
        Diagonal offset (see `tril` for details).

    Raises
    ------
    ValueError
        If `arr` is not a square 2-D array.
    """
    if arr.ndim != 2 or arr.shape[0] != arr.shape[1]:
        raise ValueError("input array must be 2-d and square")
    return tril_indices(arr.shape[0], k)
def triu_indices(n, k=0):
    """
    Return the indices for the upper-triangle of an (n, n) array.

    Parameters
    ----------
    n : int
        Size of the square arrays the indices are valid for.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    inds : tuple of arrays
        One index array per dimension of the triangle.

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril
    """
    # Delegate to the generic masking helper with the upper-triangle mask.
    return mask_indices(n, triu, k)
def triu_indices_from(arr, k=0):
    """
    Return the indices for the upper-triangle of `arr`.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : array_like
        Must be 2-D and square; the indices match its dimensions.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Raises
    ------
    ValueError
        If `arr` is not a square 2-D array.
    """
    if arr.ndim != 2 or arr.shape[0] != arr.shape[1]:
        raise ValueError("input array must be 2-d and square")
    return triu_indices(arr.shape[0], k)
| gpl-3.0 |
matthiasmengel/sealevel | sealevel/print_projection_numbers.py | 1 | 4093 | # This file is part of SEALEVEL - a tool to estimates future sea-level rise
# constrained by past obervations and long-term sea-level commitment
# Copyright (C) 2016 Matthias Mengel working at PIK Potsdam
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE.txt for more details.
""" matthias.mengel@pik
"""
import os
import glob
import sys
import numpy as np
# import netCDF4 as nc
import matplotlib.pyplot as plt
from matplotlib import cm
import itertools
from scipy.io import loadmat
lib_path = os.path.abspath('../src')
sys.path.append(lib_path)
import dimarray as da
import optparse
from mpl_toolkits.axes_grid1 import make_axes_locatable
import timeit
import cPickle as pickle
# make use of -n or -g flags with this script
# Command-line interface: -n limits the number of Monte-Carlo realizations,
# -g (re)loads the projection pickle from disk.
parser = optparse.OptionParser()
parser.add_option('-n', '--realizations', type="int",
                  help='get number of realizations')
parser.add_option(
    '-g',
    help='get new data',
    dest='get_data',
    default=False,
    action='store_true')
(opts, args) = parser.parse_args()

# Sea-level contributions reported by this script, plot colors per RCP
# scenario, and human-readable labels per contribution.
contrib_ids = ["thermexp", "gic", "gis_sid", "gis_smb", "ant_sid", "ant_smb"]
rcpcoldict = {"RCP3PD": "#3b4ba8", "RCP45": "yellow", "RCP85": "#ee1d23"}
labels = {"gic": "Mountain glaciers", "thermexp": "Thermal expansion", "gis_sid": "Greenland SID",
          "gis_smb": "Greenland SMB", "ant_sid": "Antarctica SID", "ant_smb": "Antarctica SMB"}

# NOTE(review): the code further down uses `projection_data`
# unconditionally, so the script appears to require the -g flag — confirm.
if opts.get_data:
    realizations = 10000
    realizations = realizations if opts.realizations is None else opts.realizations
    print "use", realizations, "realizations"
    projection_data = pickle.load(
        open(
            "../data/projection/projected_slr_" +
            str(realizations) +
            "samples.pkl",
            "rb"))
def rd(array, percentile, years):
    """Percentile of `array` at `years`, formatted to 3 significant digits.

    An integer `years` indexes a single year; otherwise the values are
    averaged over the "time" axis first.  Returns a string (values in mm).
    """
    if isinstance(years, int):
        values = array[years]
    else:
        values = array[years].mean(axis="time")
    result = np.percentile(values, percentile)
    # round to 3 significant digits, then render as a plain float string
    return str(float('%.3g' % result))
## switch for the 2081-2100 mean
printyr = 2100
#printyr = np.arange(2081,2101,1)
print "## years", printyr
print "RCP3PD", "RCP45", "RCP85"
# Per contribution and scenario: print the median and the 5-95% range of
# projected sea-level rise (mm) relative to the 1986-2005 mean.
for i, name in enumerate(contrib_ids):
    print labels[name], ":",
    for k, scen in enumerate(["RCP3PD", "RCP45", "RCP85", ]):
        contrib = projection_data[scen][name] * 1.e3
        # anomaly to 1986-2005
        contrib = contrib - contrib[1986:2005, :].mean(axis="time")
        # trailing comma keeps all three scenarios on a single output line
        if scen != "RCP85":
            print rd(contrib, 50, printyr), "(", rd(contrib, 5, printyr), "to", rd(contrib, 95, printyr), ")",
        else:
            print rd(contrib, 50, printyr), "(", rd(contrib, 5, printyr), "to", rd(contrib, 95, printyr), ")"
#### total slr; sum up contributions first ####
def rdd(numb):
    """Format `numb` (mm) to 4 significant digits as a plain float string."""
    return str(float('%.4g' % numb))
print "total", ":",
for k, scen in enumerate(["RCP3PD", "RCP45", "RCP85", ]):
total_slr = da.zeros_like(projection_data[scen]["thermexp"])
for i, name in enumerate(contrib_ids):
# sum up all contributions
single_contrib = projection_data[scen][name]
# if nans occur, clear these contributions
# single_contrib[np.isnan(single_contrib)] = 0.
total_slr += single_contrib
di_total_slr = da.DimArray(total_slr, dims=["time", "runnumber"])
di_total_slr -= di_total_slr[1986:2005, :].mean(axis="time")
if isinstance(printyr, int):
mn = di_total_slr[printyr, :] * 1.e3
else:
mn = di_total_slr[printyr, :].mean(axis="time") * 1.e3
# mn = di_total_slr[2081:2100,:].mean(axis="time")
low = np.percentile(mn, 5)
med = np.percentile(mn, 50)
upp = np.percentile(mn, 95)
print rdd(med), "(", rdd(low), "to", rdd(upp), ")",
| gpl-3.0 |
mattcaldwell/zipline | zipline/transforms/ta.py | 6 | 8006 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import numpy as np
import pandas as pd
import talib
import copy
from six import iteritems
from zipline.transforms import BatchTransform
def zipline_wrapper(talib_fn, key_map, data):
    """Evaluate a TA-Lib abstract function over zipline batch data.

    Parameters
    ----------
    talib_fn : TA-Lib abstract function (exposes input_names/output_names).
    key_map : dict
        Maps TA-Lib input names to zipline data keys.
    data : panel-like object indexed as ``data[key][sid]`` whose
        ``minor_axis`` lists the sids (presumably a pandas Panel —
        TODO confirm).

    Returns
    -------
    The most recent TA-Lib value per sid: a DataFrame (rows = output
    names) when the function has several outputs, otherwise a Series of
    floats.  Sids whose input data is all-NaN keep their NaN result.
    """
    # get required TA-Lib input names
    if 'price' in talib_fn.input_names:
        req_inputs = [talib_fn.input_names['price']]
    elif 'prices' in talib_fn.input_names:
        req_inputs = talib_fn.input_names['prices']
    else:
        req_inputs = []

    # If there are multiple output names then the results are named,
    # if there is only one output name, it usually 'real' is best represented
    # by a float.
    # Use a DataFrame to map sid to named values, and a Series map sid
    # to floats.
    if len(talib_fn.output_names) > 1:
        all_results = pd.DataFrame(index=talib_fn.output_names,
                                   columns=data.minor_axis)
    else:
        all_results = pd.Series(index=data.minor_axis)

    for sid in data.minor_axis:
        # build talib_data from zipline data
        talib_data = dict()
        for talib_key, zipline_key in iteritems(key_map):
            # if zipline_key is found, add it to talib_data
            if zipline_key in data:
                values = data[zipline_key][sid].values
                # Do not include sids that have only nans, passing only nans
                # is incompatible with many of the underlying TALib functions.
                if pd.isnull(values).all():
                    # NOTE(review): `break` abandons the rest of the inputs
                    # for this sid; keys added earlier remain in talib_data,
                    # so talib may still be invoked below with a partial
                    # dict — confirm this is intended.
                    break
                else:
                    talib_data[talib_key] = data[zipline_key][sid].values
            # if zipline_key is not found and not required, add zeros
            elif talib_key not in req_inputs:
                talib_data[talib_key] = np.zeros(data.shape[1])
            # if zipline key is not found and required, raise error
            else:
                raise KeyError(
                    'Tried to set required TA-Lib data with key '
                    '\'{0}\' but no Zipline data is available under '
                    'expected key \'{1}\'.'.format(
                        talib_key, zipline_key))

        # call talib
        if talib_data:
            talib_result = talib_fn(talib_data)

            # keep only the most recent result
            if isinstance(talib_result, (list, tuple)):
                sid_result = tuple([r[-1] for r in talib_result])
            else:
                sid_result = talib_result[-1]

            all_results[sid] = sid_result

    return all_results
def make_transform(talib_fn, name):
    """
    A factory for BatchTransforms based on TALIB abstract functions.

    `talib_fn` is a TA-Lib abstract function; `name` becomes the
    generated class's ``__name__``.  Returns a TALibTransform class.
    """
    # make class docstring: TA-Lib docs + default input mapping + usage notes
    header = '\n#---- TA-Lib docs\n\n'
    talib_docs = getattr(talib, talib_fn.info['name']).__doc__
    divider1 = '\n#---- Default mapping (TA-Lib : Zipline)\n\n'
    mappings = '\n'.join(' {0} : {1}'.format(k, v)
                         for k, v in talib_fn.input_names.items())
    divider2 = '\n\n#---- Zipline docs\n'
    help_str = (header + talib_docs + divider1 + mappings
                + divider2)

    class TALibTransform(BatchTransform):

        __doc__ = help_str + """
        TA-Lib keyword arguments must be passed at initialization. For
        example, to construct a moving average with timeperiod of 5, pass
        "timeperiod=5" during initialization.

        All abstract TA-Lib functions accept a data dictionary containing
        'open', 'high', 'low', 'close', and 'volume' keys, even if they do
        not require those keys to run. For example, talib.MA (moving
        average) is always computed using the data under the 'close'
        key. By default, Zipline constructs this data dictionary with the
        appropriate sid data, but users may overwrite this by passing
        mappings as keyword arguments. For example, to compute the moving
        average of the sid's high, provide "close = 'high'" and Zipline's
        'high' data will be used as TA-Lib's 'close' data. Similarly, if a
        user had a data column named 'Oil', they could compute its moving
        average by passing "close='Oil'".

        **Example**

        A moving average of a data column called 'Oil' with timeperiod 5,

        talib.transforms.ta.MA(close='Oil', timeperiod=5)

        The user could find the default arguments and mappings by calling:

        help(zipline.transforms.ta.MA)

        **Arguments**

        open : string, default 'open'

        high : string, default 'high'

        low : string, default 'low'

        close : string, default 'price'

        volume : string, default 'volume'

        refresh_period : int, default 0

        The refresh_period of the BatchTransform determines the number
        of iterations that pass before the BatchTransform updates its
        internal data.

        \*\*kwargs : any arguments to be passed to the TA-Lib function.
        """

        def __init__(self,
                     close='price',
                     open='open',
                     high='high',
                     low='low',
                     volume='volume',
                     refresh_period=0,
                     bars='daily',
                     **kwargs):

            key_map = {'high': high,
                       'low': low,
                       'open': open,
                       'volume': volume,
                       'close': close}

            self.call_kwargs = kwargs

            # Make deepcopy of talib abstract function.
            # This is necessary because talib abstract functions remember
            # state, including parameters, and we need to set the parameters
            # in order to compute the lookback period that will determine the
            # BatchTransform window_length. TALIB has no way to restore default
            # parameters, so the deepcopy lets us change this function's
            # parameters without affecting other TALibTransforms of the same
            # function.
            self.talib_fn = copy.deepcopy(talib_fn)

            # set the parameters
            for param in self.talib_fn.get_parameters().keys():
                if param in kwargs:
                    self.talib_fn.set_parameters({param: kwargs[param]})

            # get the lookback
            self.lookback = self.talib_fn.lookback

            # NOTE(review): `lookback` is only assigned for 'daily' and
            # 'minute' bars; any other value raises UnboundLocalError at
            # the max() below — confirm whether that is intended.
            self.bars = bars
            if bars == 'daily':
                lookback = self.lookback + 1
            elif bars == 'minute':
                # convert a daily lookback into 6.5-hour trading days
                lookback = int(math.ceil(self.lookback / (6.5 * 60)))

            # Ensure that window_length is at least 1 day's worth of data.
            window_length = max(lookback, 1)

            transform_func = functools.partial(
                zipline_wrapper, self.talib_fn, key_map)

            super(TALibTransform, self).__init__(
                func=transform_func,
                refresh_period=refresh_period,
                window_length=window_length,
                compute_only_full=False,
                bars=bars)

        def __repr__(self):
            return 'Zipline BatchTransform: {0}'.format(
                self.talib_fn.info['name'])

    TALibTransform.__name__ = name
    # return class
    return TALibTransform
# add all TA-Lib functions to locals
# (at module scope, locals() is the module dict, so each generated
# transform class becomes a module-level attribute named after its
# TA-Lib function)
for name in talib.abstract.__FUNCTION_NAMES:
    fn = getattr(talib.abstract, name)
    locals()[name] = make_transform(fn, name)
| apache-2.0 |
alliemacleay/MachineLearning_CS6140 | Tests/hw4_tests.py | 1 | 4429 | from sklearn.metrics import roc_auc_score
__author__ = 'Allison MacLeay'
import CS6140_A_MacLeay.utils.Adaboost as adab
import CS6140_A_MacLeay.utils as utils
import CS6140_A_MacLeay.Homeworks.HW4 as decTree
import CS6140_A_MacLeay.Homeworks.HW3 as hw3
import CS6140_A_MacLeay.Homeworks.hw4 as hw4
import CS6140_A_MacLeay.Homeworks.HW4.plots as plt
import CS6140_A_MacLeay.Homeworks.HW4.data_load as dl
from sklearn import tree
from sklearn.datasets import load_iris, make_classification
import numpy as np
import os
def UnitTests():
    # Manual test driver: enable individual checks by (un)commenting.
    #AdaboostErrorTest()
    #AdaboostWrongTest()
    #TestAbstract()
    #changeWeight()
    TreeTest2()
    #TreeTest()
    #testPlot()
    #testBranchOptimal()
    #dataloads()
def dataloads():
    # Smoke-test the dataset loaders (crx/vote plus the question-4 data).
    crx_data()
    dl.data_q4()
def testPlot():
    # Render a trivial error plot to a throwaway PDF.
    # NOTE(review): hard-coded user-specific path; adjust before running.
    directory = '/Users/Admin/Dropbox/ML/MachineLearning_CS6140/CS6140_A_MacLeay/Homeworks'
    path= os.path.join(directory, 'test.pdf')
    plot = plt.Errors([[1,2,3]]).plot_all_errors(path)
def TestAbstract():
    # Run AdaboostOptimal for a single round on an always-correct dataset
    # and print its statistics.
    d = get_test_always_right()
    ada = adab.AdaboostOptimal(1)
    ada.run(d)
    ada.print_stats()
def TreeTest():
    # k-fold evaluation of the homemade decision tree on spambase:
    # folds 0..k-2 are each fit-and-scored (training error), the last
    # fold is used as the held-out test set.
    spamDat = spamData()
    k = 10
    all_folds = hw3.partition_folds(spamDat, k)
    num_in_fold = []
    err_in_fold = []
    for i in range(len(all_folds) - 1):
        spam = all_folds[i]
        num_in_fold.append(len(spam))
        truth, f_data = decTree.split_truth_from_data(spam)
        tree = decTree.TreeOptimal(max_depth=2)
        #tree = decTree.TreeRandom()
        tree.fit(f_data, truth)
        print 'Prediction...\n'
        # NOTE(review): predictions are made on the same fold the tree was
        # fit on, so this measures training error per fold.
        predict = tree.predict(f_data)
        print predict
        print truth
        error = 1. - hw3.get_accuracy(predict, truth)
        err_in_fold.append(error)
        print 'Tree error is: {}'.format(error)
    # final fold: fit and score once more as the "testing" error
    spam = all_folds[k -1]
    truth, f_data = decTree.split_truth_from_data(spam)
    tree = decTree.TreeOptimal(max_depth=2)
    #tree = decTree.TreeRandom()
    tree.fit(f_data, truth)
    predict = tree.predict(f_data)
    error = 1. - hw3.get_accuracy(predict, truth)
    sum_training_err = 0
    for i in range(len(num_in_fold)):
        sum_training_err += err_in_fold[i]
        #sum_training_err += float(err_in_fold)/num_in_fold
    average_training_error = float(sum_training_err)/len(num_in_fold)
    print 'Average training error: {}\nAverage testing error: {}'.format(average_training_error, error)
def TreeTest2():
    # Compare sklearn's DecisionTreeClassifier with the homemade
    # TreeOptimal on the same synthetic data; print both training AUCs.
    # NOTE(review): `iris` is loaded but never used.
    iris = load_iris()
    X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                               random_state=1, n_clusters_per_class=1)
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(X, y)
    print(roc_auc_score(y, clf.predict(X)))
    clf2 = decTree.TreeOptimal()
    clf2.fit(X, y)
    print(roc_auc_score(y, clf2.predict(X)))
def testBranchOptimal():
    # The optimal split threshold for the toy data in get_test_theta()
    # is expected to be 5.5 (between feature values 3 and 8).
    data, truth = get_test_theta()
    branch = decTree.BranchOptimal(data, truth, np.ones(len(data)))
    theta = branch.choose_theta(data, truth)
    if theta != 5.5:
        print 'Optimal is broken! {} != 5.5'.format(theta)
    else:
        print 'Optimal works'
def AdaboostErrorTest():
    # Adaboost on data the weak learner always classifies correctly.
    # NOTE(review): the local name shadows the module-level spamData().
    print 'Always right'
    spamData = get_test_always_right()
    adaboost_run(spamData)
def AdaboostWrongTest():
    # Adaboost on data the weak learner always misclassifies.
    print 'Always wrong'
    d = get_test_always_wrong()
    adaboost_run(d)
def changeWeight():
    # Three boosting rounds on half-right data force the sample
    # weights to be updated between rounds.
    d = get_test_half_right()
    adaboost_run(d, 3)
def adaboost_run(data, num_rounds=2):
    # Train AdaboostOptimal for `num_rounds` rounds on `data` and print stats.
    adaboost = adab.AdaboostOptimal(num_rounds)
    adaboost.run(data)
    adaboost.print_stats()
def get_test_always_right():
    """Return a 100x2 all-ones matrix: a dataset that is always 'right'."""
    return np.ones((100, 2))
def get_test_theta():
    """Toy 1-D feature values and +/-1 labels for threshold selection."""
    features = [10, 8, 8, 2, 2, 3, 0, 0, 0]
    labels = [-1, -1, -1, 1, 1, 1, -1, -1, -1]
    return features, labels
def get_test_always_wrong():
    """Return a 100x2 all-zeros matrix: a dataset that is always 'wrong'."""
    return np.zeros((100, 2))
def get_test_half_right():
    """Return a 100x2 ones matrix with the label column zeroed for the
    first half of the rows (a dataset that is half right, half wrong)."""
    d = np.ones(shape=(100, 2))
    # BUG FIX: use floor division — under Python 3, len(d)/2 is a float
    # and range() would raise TypeError.
    for i in range(len(d) // 2):
        d[i][-1] = 0
    return d
def testData():
    """Return a noisy sum-of-sines regression sample as (X, y).

    X is a (100, 1) grid over [0, 6]; y adds two sine components and
    seeded Gaussian noise so the result is reproducible.
    """
    # Create the dataset
    rng = np.random.RandomState(1)
    grid = np.linspace(0, 6, 100).reshape(-1, 1)
    noise = rng.normal(0, 0.1, grid.shape[0])
    targets = np.sin(grid).ravel() + np.sin(6 * grid).ravel() + noise
    return grid, targets
def spamData():
    # Load and normalize the spambase dataset, converted to row lists.
    return hw3.pandas_to_data(hw3.load_and_normalize_spambase())
def crx_data():
    # Load the crx and house-votes datasets used in question 3.
    dl.data_q3_crx()
    dl.data_q3_vote()
if __name__ == '__main__':
    # Select the homework question to run by (un)commenting.
    #decTree.q1()
    #hw4.q1()
    #UnitTests()
    #hw4.q2()
    #hw4.q3()
    hw4.q4()
    #hw4.q6()
    #hw4.q7()
| mit |
antoinecarme/pyaf | tests/HourOfWeek/test_Business_Hourly_LunchTime.py | 1 | 2087 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
np.random.seed(seed=1960)
#get_ipython().magic('matplotlib inline')

# Build a synthetic hourly signal whose level depends on hour-of-day
# (lunch time, 12-13h) and day-of-week (business days 0-3), then train a
# pyaf forecast engine on it and print/plot the 24-hour forecast.
df = pd.DataFrame()
lTimeVar = 'Time'
lSignalVar = 'Signal'
N = 10000
df[lTimeVar + '_Hourly'] = pd.date_range('2000-1-1', periods=N, freq='1h')
df['Hour'] = df[lTimeVar + '_Hourly'].dt.hour
df['Day'] = df[lTimeVar + '_Hourly'].dt.dayofweek
df[lSignalVar] = 5 + np.random.randn(N) + 10 * df['Hour'].apply(lambda x : x if (12 <= x and x < 14) else 23) * df['Day'].apply(lambda x : x if (x < 4) else 12)
print(df.head())
print(df.info())

#df.to_csv("outputs/ozone_WDHMS.csv");
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()

for k in [1]:
    for timevar in [lTimeVar + '_Hourly']:
        lEngine = autof.cForecastEngine()
        lEngine
        H = 24;
        # lEngine.mOptions.enable_slow_mode();
        lEngine.mOptions.mDebugPerformance = True;
        lEngine.mOptions.mFilterSeasonals = True;
        lEngine.mOptions.mDebugCycles = False;
        # autoregression disabled: only trend/cycle/seasonal components
        lEngine.mOptions.set_active_autoregressions([]);
        lEngine.train(df , timevar , lSignalVar, H);
        lEngine.getModelInfo();
        print(lEngine.mSignalDecomposition.mTrPerfDetails.head());

        lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution

        dfapp_in = df.copy();
        dfapp_in.tail()

        # H = 12
        dfapp_out = lEngine.forecast(dfapp_in, H);
        #dfapp_out.to_csv("outputs/ozone_" + timevar + "apply_out.csv")
        dfapp_out.tail(2 * H)
        print("Forecast Columns " , dfapp_out.columns);
        Forecast_DF = dfapp_out[[timevar , lSignalVar, lSignalVar + '_Forecast']]
        print(Forecast_DF.info())
        print("Forecasts\n" , Forecast_DF.tail(H));

        print("\n\n<ModelInfo>")
        print(lEngine.to_json());
        print("</ModelInfo>\n\n")
        print("\n\n<Forecast>")
        print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
        print("</Forecast>\n\n")

        lEngine.standardPlots(name = "outputs/ozone_LunchTime_" + timevar)
| bsd-3-clause |
antworteffekt/EDeN | eden/iterated_semisupervised_feature_selection.py | 1 | 7729 | import random
import logging
import numpy as np
from sklearn.semi_supervised import LabelSpreading
from sklearn.feature_selection import RFECV
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
def semisupervised_target(target=None,
                          unknown_fraction=None,
                          known_fraction=None,
                          random_state=1):
    """Simulates partial knowledge on the targets by randomly masking
    targets with the value -1.

    Parameters
    ----------
    target : array-like, shape = (n_samples)
        Class labels.

    unknown_fraction : float
        Fraction of desired unknown labels.

    known_fraction : float
        Fraction of desired known labels.

    random_state : int (default 1)
        Seed for the random number generator.

    Returns
    -------
    target : array-like
        Encoded class labels, using -1 for masked entries.  When labels
        are masked the result is a column vector of shape (n_samples, 1);
        with ``unknown_fraction == 0`` the 1-D encoded target is returned
        unchanged, and with ``unknown_fraction == 1`` None is returned.
    """
    if unknown_fraction is not None and known_fraction is not None:
        if unknown_fraction != 1 - known_fraction:
            raise Exception('unknown_fraction and known_fraction are inconsistent. bailing out')
    target = LabelEncoder().fit_transform(target)
    if known_fraction is not None:
        unknown_fraction = 1 - known_fraction
    if unknown_fraction == 0:
        return target
    elif unknown_fraction == 1:
        return None
    else:
        # BUG FIX: the known fraction was previously hard-coded to 0.2,
        # silently ignoring the requested unknown_fraction/known_fraction.
        known_fraction = 1 - unknown_fraction
        # BUG FIX: random_state was accepted but never used; a private
        # Random instance makes the masking reproducible without touching
        # the global random module state.
        rng = random.Random(random_state)
        class_set = set(target)
        class_samples = {}
        for c in class_set:
            cs = [idx for idx in range(len(target)) if target[idx] == c]
            # keep at least one known sample per class
            n_desired = int(max(1, len(cs) * known_fraction))
            # store as a set so the membership tests below are O(1)
            class_samples[c] = set(rng.sample(cs, n_desired))
        labels = []
        for idx in range(len(target)):
            if idx in class_samples[target[idx]]:
                labels.append(target[idx])
            else:
                labels.append(-1)
        return np.array(labels).reshape(-1, 1)
class IteratedSemiSupervisedFeatureSelection(object):
    """Feature selection estimator that uses an iterated approach in a
    semi-supervised setting.

    Alternates label spreading (to extend partially known labels) with
    recursive feature elimination, stopping when the feature count stops
    shrinking or falls below ``min_feature_ratio`` of the original.

    Parameters
    ----------
    estimator: scikit-learn estimator (default SGDClassifier)
        Estimator used in the recursive feature elimination algorithm.

    n_iter : int (default 30)
        The maximum number of iterations.

    step : int or float (default 0.1)
        Features (count or fraction) removed at each RFECV step.

    cv : int (default 5)
        Number of cross-validation folds used by RFECV.

    min_feature_ratio : float (default 0.1)
        The ratio between the initial number of features and the number
        of features after the selection.

    n_neighbors : int (default 5)
        Neighborhood size for the knn kernel of LabelSpreading.
    """

    # NOTE(review): the default estimator instance is created once at class
    # definition time and shared by all instances using the default —
    # confirm this sharing is acceptable.
    def __init__(self,
                 estimator=SGDClassifier(average=True, shuffle=True, penalty='elasticnet'),
                 n_iter=30,
                 step=0.1,
                 cv=5,
                 min_feature_ratio=0.1,
                 n_neighbors=5):
        self.estimator = estimator
        self.n_iter = n_iter
        self.step = step
        self.cv = cv
        self.min_feature_ratio = min_feature_ratio
        self.n_neighbors = n_neighbors
        # One fitted RFECV per accepted iteration, applied in order by
        # transform().
        self.feature_selectors = []

    def fit(self, data_matrix=None, target=None):
        """Fit the estimator on the samples.

        Parameters
        ----------
        data_matrix : array-like, shape = (n_samples, n_features)
            Samples.

        target : array-like, shape = (n_samples, )
            Array containing partial class information (use -1 to indicate unknown class).

        Returns
        -------
        self
        """
        n_features_orig = data_matrix.shape[1]
        for i in range(self.n_iter):
            n_features_input = data_matrix.shape[1]
            target_new = self._semi_supervised_learning(data_matrix, target)
            # Fewer than two classes left: spreading collapsed the labels,
            # so discard the last selector and stop.
            # NOTE(review): when _semi_supervised_learning returns a column
            # vector, set(target_new) iterates 1-element arrays — confirm
            # the expected shape of `target` here.
            if len(set(target_new)) < 2:
                # remove last feature_selector since it does not satisfy conditions
                self.feature_selectors.pop(-1)
                break
            data_matrix = self._feature_selection(data_matrix, target_new)
            n_features_output = data_matrix.shape[1]
            if self._terminate(n_features_orig, n_features_input, n_features_output):
                if len(self.feature_selectors) > 0:
                    # remove last feature_selector since it does not satisfy conditions
                    self.feature_selectors.pop(-1)
                break
        return self

    def _terminate(self, n_features_orig, n_features_input, n_features_output):
        # Stop when selection made no progress, removed too much relative
        # to the original feature count, or left too few features.
        if n_features_output == n_features_input:
            return True
        if n_features_output < n_features_orig * self.min_feature_ratio:
            return True
        if n_features_output < 3:
            return True
        return False

    def transform(self, data_matrix=None):
        """Reduce the data matrix to the features selected in the fit phase.

        Parameters
        ----------
        data_matrix : array, shape = (n_samples, n_features)
            Samples.

        Returns
        -------
        data_matrix : array, shape = (n_samples, n_features_new)
            Transformed array.
        """
        data_matrix_new = data_matrix.copy()
        # Apply the selectors in fit order; each narrows the matrix further.
        for feature_selector in self.feature_selectors:
            data_matrix_new = feature_selector.transform(data_matrix_new)
        return data_matrix_new

    def fit_transform(self, data_matrix=None, target=None):
        """Fit the estimator on the samples and reduce the data matrix to
        the selected features.

        Iterate semi supervised label spreading and feature selection:
        due to feature selection the metric space changes and with it so does
        the result of label spreading.

        Parameters
        ----------
        data_matrix : array-like, shape = (n_samples, n_features)
            Samples.

        target : array-like, shape = (n_samples, )
            Array containing class information.

        Returns
        -------
        data_matrix : array, shape = (n_samples, n_features_new)
            Transformed array.
        """
        # fit() mutates its local reference to data_matrix, so keep a copy
        # to transform afterwards.
        data_matrix_copy = data_matrix.copy()
        self.fit(data_matrix, target)
        return self.transform(data_matrix_copy)

    def _semi_supervised_learning(self, data_matrix, target):
        n_classes = len(set(target))
        # if there are too few classes (e.g. less than -1 and at least 2 other classes)
        # then just bail out and return the original target
        # otherwise one cannot meaningfully spread the information of only one class
        if n_classes > 2:
            semi_supervised_estimator = LabelSpreading(kernel='knn', n_neighbors=self.n_neighbors)
            semi_supervised_estimator.fit(data_matrix, target)
            predicted_target = semi_supervised_estimator.predict(data_matrix)
            predicted_target = self._clamp(target, predicted_target)
            return predicted_target
        else:
            return target

    def _clamp(self, target, predicted_target):
        # Known labels win over spread predictions; returns a column vector.
        extended_target = []
        for pred_label, label in zip(predicted_target, target):
            if label != -1 and pred_label != label:
                extended_target.append(label)
            else:
                extended_target.append(pred_label)
        return np.array(extended_target).reshape(-1, 1)

    def _feature_selection(self, data_matrix, target):
        try:
            # perform recursive feature elimination
            feature_selector = RFECV(self.estimator, step=self.step, cv=self.cv)
            data_matrix_out = feature_selector.fit_transform(data_matrix, target)
            self.feature_selectors.append(feature_selector)
            return data_matrix_out
        except Exception as e:
            # best-effort: on RFECV failure keep the matrix unchanged
            logger.debug(e)
            return data_matrix
jjhelmus/scipy | scipy/fftpack/basic.py | 2 | 21565 | """
Discrete Fourier Transforms - basic.py
"""
# Created by Pearu Peterson, August,September 2002
from __future__ import division, print_function, absolute_import
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2']
from numpy import zeros, swapaxes
import numpy
from . import _fftpack
import atexit
# Free the internal FFTPACK work-array caches at interpreter shutdown.
atexit.register(_fftpack.destroy_zfft_cache)
atexit.register(_fftpack.destroy_zfftnd_cache)
atexit.register(_fftpack.destroy_drfft_cache)
atexit.register(_fftpack.destroy_cfft_cache)
atexit.register(_fftpack.destroy_cfftnd_cache)
atexit.register(_fftpack.destroy_rfft_cache)
# keep the module namespace clean
del atexit
def istype(arr, typeclass):
    """Return True when `arr`'s dtype scalar type derives from `typeclass`."""
    scalar_type = arr.dtype.type
    return issubclass(scalar_type, typeclass)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
# XXX: single precision FFTs partially disabled due to accuracy issues
# for large prime-sized inputs.
#
# See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834
# ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010,
# @ scipy-dev)
#
# These should be re-enabled once the problems are resolved
def _is_safe_size(n):
"""
Is the size of FFT such that FFTPACK can handle it in single precision
with sufficient accuracy?
Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those
"""
n = int(n)
if n == 0:
return True
# Divide by 3 until you can't, then by 5 until you can't
for c in (3, 5):
while n % c == 0:
n //= c
# Return True if the remainder is a power of 2
return not n & (n-1)
def _fake_crfft(x, n, *a, **kw):
    # Single-precision complex->real FFT wrapper: use the float32 routine
    # only for sizes FFTPACK computes accurately in single precision;
    # otherwise compute in double precision and downcast the result.
    if _is_safe_size(n):
        return _fftpack.crfft(x, n, *a, **kw)
    else:
        return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64)
def _fake_cfft(x, n, *a, **kw):
    # Single-precision complex FFT wrapper: fall back to the double
    # precision routine (and downcast) for sizes FFTPACK handles poorly
    # in single precision.
    if _is_safe_size(n):
        return _fftpack.cfft(x, n, *a, **kw)
    else:
        return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64)
def _fake_rfft(x, n, *a, **kw):
    # Single-precision real FFT wrapper: fall back to the double precision
    # routine (and downcast) for sizes FFTPACK handles poorly in single
    # precision.
    if _is_safe_size(n):
        return _fftpack.rfft(x, n, *a, **kw)
    else:
        return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32)
def _fake_cfftnd(x, shape, *a, **kw):
    # Single-precision n-D complex FFT wrapper: every axis length must be a
    # "safe" size, otherwise compute in double precision and downcast.
    if numpy.all(list(map(_is_safe_size, shape))):
        return _fftpack.cfftnd(x, shape, *a, **kw)
    else:
        return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64)
# Dispatch tables mapping an input dtype to the FFTPACK work function that
# implements the transform for it.  Single-precision dtypes route through
# the _fake_* wrappers above (double-precision fallback for inaccurate
# sizes); the commented entries are the direct routines they replace.
_DTYPE_TO_FFT = {
    # numpy.dtype(numpy.float32): _fftpack.crfft,
    numpy.dtype(numpy.float32): _fake_crfft,
    numpy.dtype(numpy.float64): _fftpack.zrfft,
    # numpy.dtype(numpy.complex64): _fftpack.cfft,
    numpy.dtype(numpy.complex64): _fake_cfft,
    numpy.dtype(numpy.complex128): _fftpack.zfft,
}
# Real-to-half-spectrum transforms (rfft/irfft).
_DTYPE_TO_RFFT = {
    # numpy.dtype(numpy.float32): _fftpack.rfft,
    numpy.dtype(numpy.float32): _fake_rfft,
    numpy.dtype(numpy.float64): _fftpack.drfft,
}
# n-dimensional transforms (fftn/ifftn); real inputs use the complex
# routines here.
_DTYPE_TO_FFTN = {
    # numpy.dtype(numpy.complex64): _fftpack.cfftnd,
    numpy.dtype(numpy.complex64): _fake_cfftnd,
    numpy.dtype(numpy.complex128): _fftpack.zfftnd,
    # numpy.dtype(numpy.float32): _fftpack.cfftnd,
    numpy.dtype(numpy.float32): _fake_cfftnd,
    numpy.dtype(numpy.float64): _fftpack.zfftnd,
}
def _asfarray(x):
"""Like numpy asfarray, except that it does not modify x dtype if x is
already an array with a float dtype, and do not cast complex types to
real."""
if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]:
# 'dtype' attribute does not ensure that the
# object is an ndarray (e.g. Series class
# from the pandas library)
if x.dtype == numpy.half:
# no half-precision routines, so convert to single precision
return numpy.asarray(x, dtype=numpy.float32)
return numpy.asarray(x, dtype=x.dtype)
else:
# We cannot use asfarray directly because it converts sequences of
# complex to sequence of real
ret = numpy.asarray(x)
if ret.dtype == numpy.half:
return numpy.asarray(ret, dtype=numpy.float32)
elif ret.dtype.char not in numpy.typecodes["AllFloat"]:
return numpy.asfarray(x)
return ret
def _fix_shape(x, n, axis):
""" Internal auxiliary function for _raw_fft, _raw_fftnd."""
s = list(x.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
x = x[index]
return x, False
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s,x.dtype.char)
z[index] = x
return z, True
def _raw_fft(x, n, axis, direction, overwrite_x, work_function):
""" Internal auxiliary function for fft, ifft, rfft, irfft."""
if n is None:
n = x.shape[axis]
elif n != x.shape[axis]:
x, copy_made = _fix_shape(x,n,axis)
overwrite_x = overwrite_x or copy_made
if n < 1:
raise ValueError("Invalid number of FFT data points "
"(%d) specified." % n)
if axis == -1 or axis == len(x.shape)-1:
r = work_function(x,n,direction,overwrite_x=overwrite_x)
else:
x = swapaxes(x, axis, -1)
r = work_function(x,n,direction,overwrite_x=overwrite_x)
r = swapaxes(r, axis, -1)
return r
def fft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return discrete Fourier transform of real or complex sequence.
    The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
    ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.
    Parameters
    ----------
    x : array_like
        Array to Fourier transform.
    n : int, optional
        Length of the Fourier transform.  If ``n < x.shape[axis]``, `x` is
        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the fft's are computed; the default is over the
        last axis (i.e., ``axis=-1``).
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    z : complex ndarray
        with the elements::
            [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)]        if n is even
            [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)]  if n is odd
        where::
            y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1
        Note that ``y(-j) = y(n-j).conjugate()``.
    See Also
    --------
    ifft : Inverse FFT
    rfft : FFT of a real sequence
    Notes
    -----
    The packing of the result is "standard": If ``A = fft(a, n)``, then
    ``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the
    positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency
    terms, in order of decreasingly negative frequency. So for an 8-point
    transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1].
    To rearrange the fft output so that the zero-frequency component is
    centered, like [-4, -3, -2, -1, 0, 1, 2, 3], use `fftshift`.
    For `n` even, ``A[n/2]`` contains the sum of the positive and
    negative-frequency terms.  For `n` even and `x` real, ``A[n/2]`` will
    always be real.
    Both single and double precision routines are implemented.  Half precision
    inputs will be converted to single precision.  Non floating-point inputs
    will be converted to double precision.  Long-double precision inputs are
    not supported.
    This function is most efficient when `n` is a power of two, and least
    efficient when `n` is prime.
    If the data type of `x` is real, a "real FFT" algorithm is automatically
    used, which roughly halves the computation time.  To increase efficiency
    a little further, use `rfft`, which does the same calculation, but only
    outputs half of the symmetrical spectrum.  If the data is both real and
    symmetrical, the `dct` can again double the efficiency, by generating
    half of the spectrum from half of the signal.
    Examples
    --------
    >>> from scipy.fftpack import fft, ifft
    >>> x = np.arange(5)
    >>> np.allclose(fft(ifft(x)), x, atol=1e-15)  # within numerical accuracy.
    True
    """
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # Real input gets converted to complex by the work function anyway, so
    # the temporary may always be overwritten.
    if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
        overwrite_x = 1
    # If _asfarray made a fresh copy, that copy may be destroyed regardless
    # of the caller's flag.
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    if n is None:
        n = tmp.shape[axis]
    elif n != tmp.shape[axis]:
        # Truncate or zero-pad along `axis`; padding implies a copy was made.
        tmp, copy_made = _fix_shape(tmp,n,axis)
        overwrite_x = overwrite_x or copy_made
    if n < 1:
        raise ValueError("Invalid number of FFT data points "
                         "(%d) specified." % n)
    # The work functions operate on the last axis; swap axes around the call
    # when a different axis was requested.  Trailing args: direction=+1,
    # normalize=0, overwrite flag.
    if axis == -1 or axis == len(tmp.shape) - 1:
        return work_function(tmp,n,1,0,overwrite_x)
    tmp = swapaxes(tmp, axis, -1)
    tmp = work_function(tmp,n,1,0,overwrite_x)
    return swapaxes(tmp, axis, -1)
def ifft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return discrete inverse Fourier transform of real or complex sequence.
    The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
    ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.
    Parameters
    ----------
    x : array_like
        Transformed data to invert.
    n : int, optional
        Length of the inverse Fourier transform.  If ``n < x.shape[axis]``,
        `x` is truncated.  If ``n > x.shape[axis]``, `x` is zero-padded.
        The default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the ifft's are computed; the default is over the
        last axis (i.e., ``axis=-1``).
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    ifft : ndarray of floats
        The inverse discrete Fourier transform.
    See Also
    --------
    fft : Forward FFT
    Notes
    -----
    Both single and double precision routines are implemented.  Half precision
    inputs will be converted to single precision.  Non floating-point inputs
    will be converted to double precision.  Long-double precision inputs are
    not supported.
    This function is most efficient when `n` is a power of two, and least
    efficient when `n` is prime.
    If the data type of `x` is real, a "real IFFT" algorithm is automatically
    used, which roughly halves the computation time.
    """
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # Same overwrite reasoning as in fft(): non-complex temporaries (and
    # fresh copies) may always be destroyed.
    if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
        overwrite_x = 1
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    if n is None:
        n = tmp.shape[axis]
    elif n != tmp.shape[axis]:
        tmp, copy_made = _fix_shape(tmp,n,axis)
        overwrite_x = overwrite_x or copy_made
    if n < 1:
        raise ValueError("Invalid number of FFT data points "
                         "(%d) specified." % n)
    # direction=-1 selects the inverse transform; normalize=1 applies the
    # 1/n scaling inside the work function.
    if axis == -1 or axis == len(tmp.shape) - 1:
        return work_function(tmp,n,-1,1,overwrite_x)
    tmp = swapaxes(tmp, axis, -1)
    tmp = work_function(tmp,n,-1,1,overwrite_x)
    return swapaxes(tmp, axis, -1)
def rfft(x, n=None, axis=-1, overwrite_x=False):
    """
    Discrete Fourier transform of a real sequence.
    Parameters
    ----------
    x : array_like, real-valued
        The data to transform.
    n : int, optional
        Defines the length of the Fourier transform.  If `n` is not specified
        (the default) then ``n = x.shape[axis]``.  If ``n < x.shape[axis]``,
        `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
    axis : int, optional
        The axis along which the transform is applied.  The default is the
        last axis.
    overwrite_x : bool, optional
        If set to true, the contents of `x` can be overwritten. Default is
        False.
    Returns
    -------
    z : real ndarray
        The returned real array contains::
          [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]              if n is even
          [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]   if n is odd
        where::
          y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
          j = 0..n-1
        Note that ``y(-j) == y(n-j).conjugate()``.
    See Also
    --------
    fft, irfft, scipy.fftpack.basic
    Notes
    -----
    Within numerical accuracy, ``y == rfft(irfft(y))``.
    Both single and double precision routines are implemented.  Half precision
    inputs will be converted to single precision.  Non floating-point inputs
    will be converted to double precision.  Long-double precision inputs are
    not supported.
    Examples
    --------
    >>> from scipy.fftpack import fft, rfft
    >>> a = [9, -9, 1, 3]
    >>> fft(a)
    array([  4. +0.j,   8.+12.j,  16. +0.j,   8.-12.j])
    >>> rfft(a)
    array([  4.,   8.,  12.,  16.])
    """
    tmp = _asfarray(x)
    # The real-transform work functions cannot accept complex input.
    if not numpy.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")
    try:
        work_function = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # A copy made by _asfarray may be destroyed by the work function.
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    # direction=+1: forward transform.
    return _raw_fft(tmp,n,axis,1,overwrite_x,work_function)
def irfft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return inverse discrete Fourier transform of real sequence x.
    The contents of `x` are interpreted as the output of the `rfft`
    function.
    Parameters
    ----------
    x : array_like
        Transformed data to invert.
    n : int, optional
        Length of the inverse Fourier transform.
        If n < x.shape[axis], x is truncated.
        If n > x.shape[axis], x is zero-padded.
        The default results in n = x.shape[axis].
    axis : int, optional
        Axis along which the ifft's are computed; the default is over
        the last axis (i.e., axis=-1).
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    irfft : ndarray of floats
        The inverse discrete Fourier transform.
    See Also
    --------
    rfft, ifft
    Notes
    -----
    The returned real array contains::
        [y(0),y(1),...,y(n-1)]
    where for n is even::
        y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
                                     * exp(sqrt(-1)*j*k* 2*pi/n)
                    + c.c. + x[0] + (-1)**(j) x[n-1])
    and for n is odd::
        y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
                                     * exp(sqrt(-1)*j*k* 2*pi/n)
                    + c.c. + x[0])
    c.c. denotes complex conjugate of preceding expression.
    For details on input parameters, see `rfft`.
    """
    tmp = _asfarray(x)
    # Input is the packed real half-spectrum produced by rfft; complex
    # arrays are rejected.
    if not numpy.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")
    try:
        work_function = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    # direction=-1: inverse transform (normalization handled internally).
    return _raw_fft(tmp,n,axis,-1,overwrite_x,work_function)
def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function):
    """ Internal auxiliary function for fftnd, ifftnd.

    Applies `work_function` over the requested `axes` with per-axis target
    lengths `s`, truncating/zero-padding each axis first.  The requested
    axes are rotated to the end of the array for the call and restored
    afterwards.
    """
    if s is None:
        if axes is None:
            s = x.shape
        else:
            s = numpy.take(x.shape, axes)
    s = tuple(s)
    if axes is None:
        noaxes = True
        axes = list(range(-x.ndim, 0))
    else:
        noaxes = False
    if len(axes) != len(s):
        raise ValueError("when given, axes and shape arguments "
                         "have to be of the same length")
    for dim in s:
        if dim < 1:
            raise ValueError("Invalid number of FFT data points "
                             "(%s) specified." % (s,))
    # No need to swap axes, array is in C order
    if noaxes:
        for i in axes:
            # i is negative (-ndim..-1); s is ndim long so s[i] lines up
            # with axis i.
            x, copy_made = _fix_shape(x, s[i], i)
            overwrite_x = overwrite_x or copy_made
        return work_function(x,s,direction,overwrite_x=overwrite_x)
    # We ordered axes, because the code below to push axes at the end of the
    # array assumes axes argument is in ascending order.
    a = numpy.array(axes, numpy.intc)
    abs_axes = numpy.where(a < 0, a + x.ndim, a)
    id_ = numpy.argsort(abs_axes)
    axes = [axes[i] for i in id_]
    s = [s[i] for i in id_]
    # Swap the request axes, last first (i.e. First swap the axis which ends up
    # at -1, then at -2, etc...), such as the request axes on which the
    # operation is carried become the last ones
    for i in range(1, len(axes)+1):
        x = numpy.swapaxes(x, axes[-i], -i)
    # We can now operate on the axes waxes, the p last axes (p = len(axes)), by
    # fixing the shape of the input array to 1 for any axis the fft is not
    # carried upon.
    waxes = list(range(x.ndim - len(axes), x.ndim))
    # NOTE(review): numpy.ones() yields a float array here, so `shape` is
    # passed to the work function as floats -- presumably the C wrapper
    # coerces it; confirm before changing.
    shape = numpy.ones(x.ndim)
    shape[waxes] = s
    for i in range(len(waxes)):
        x, copy_made = _fix_shape(x, s[i], waxes[i])
        overwrite_x = overwrite_x or copy_made
    r = work_function(x, shape, direction, overwrite_x=overwrite_x)
    # reswap in the reverse order (first axis first, etc...) to get original
    # order
    for i in range(len(axes), 0, -1):
        r = numpy.swapaxes(r, -i, axes[-i])
    return r
def fftn(x, shape=None, axes=None, overwrite_x=False):
    """
    Return multidimensional discrete Fourier transform.
    The returned array contains::
      y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)
    where d = len(x.shape) and n = x.shape.
    Note that ``y[..., -j_i, ...] = y[..., n_i-j_i, ...].conjugate()``.
    Parameters
    ----------
    x : array_like
        The (n-dimensional) array to transform.
    shape : tuple of ints, optional
        The shape of the result.  If both `shape` and `axes` (see below) are
        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
        not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
        If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros.
        If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to
        length ``shape[i]``.
    axes : array_like of ints, optional
        The axes of `x` (`y` if `shape` is not None) along which the
        transform is applied.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed.  Default is False.
    Returns
    -------
    y : complex-valued n-dimensional numpy array
        The (n-dimensional) DFT of the input array.
    See Also
    --------
    ifftn
    Notes
    -----
    Both single and double precision routines are implemented.  Half precision
    inputs will be converted to single precision.  Non floating-point inputs
    will be converted to double precision.  Long-double precision inputs are
    not supported.
    Examples
    --------
    >>> from scipy.fftpack import fftn, ifftn
    >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
    >>> np.allclose(y, fftn(ifftn(y)))
    True
    """
    # Dispatch on dtype and delegate to the n-D driver (direction=+1).
    return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1)
def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction):
    # Resolve the cached work function for the input dtype, then delegate
    # to the n-dimensional driver.
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFTN[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # Non-complex input is converted by the work function, so the temporary
    # may always be overwritten; likewise when _asfarray made a copy.
    if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
        overwrite_x = 1
    if not overwrite_x:
        overwrite_x = _datacopied(tmp, x)
    return _raw_fftnd(tmp, shape, axes, direction, overwrite_x, work_function)
def ifftn(x, shape=None, axes=None, overwrite_x=False):
    """
    Return inverse multi-dimensional discrete Fourier transform of
    arbitrary type sequence x.
    The returned array contains::
      y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)
    where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``.
    For description of parameters see `fftn`.
    See Also
    --------
    fftn : for detailed information.
    """
    # Same dispatch as fftn, with direction=-1 (inverse, normalized).
    return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1)
def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
    """
    2-D discrete Fourier transform.
    Return the two-dimensional discrete Fourier transform of the 2-D argument
    `x`.
    See Also
    --------
    fftn : for detailed information.
    """
    # Thin wrapper: fftn restricted to the last two axes by default.
    return fftn(x,shape,axes,overwrite_x)
def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
    """
    2-D discrete inverse Fourier transform of real or complex sequence.
    Return inverse two-dimensional discrete Fourier transform of
    arbitrary type sequence x.
    See `ifft` for more information.
    See also
    --------
    fft2, ifft
    """
    # Thin wrapper: ifftn restricted to the last two axes by default.
    return ifftn(x,shape,axes,overwrite_x)
| bsd-3-clause |
Ninad998/deepstylometry-python | crosslingual/MLModelCreatorWord.py | 1 | 4039 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
# Seed NumPy's RNG so any sampling below is reproducible across runs.
np.random.seed(123)
# Host of the PostgreSQL server reached through the SSH tunnel in loadData.
databaseConnectionServer = 'srn01.cs.cityu.edu.hk'
# Default table queried for documents.
documentTable = 'document'
def loadData(documentTable, samples = 30000):
    """Fetch per-document feature rows from the remote database.

    Opens an SSH tunnel to the PostgreSQL server, pulls the document table,
    and returns (features, labels, groups, samples) as numpy arrays, where
    `groups` holds doc_ids (used later for leave-one-group-out CV) and
    `samples` is capped at the smallest per-author row count.
    """
    texts = []  # list of text samples
    labels_index = {}  # dictionary mapping label name to numeric id
    labels = []  # list of label ids
    import DatabaseQuery
    from sshtunnel import SSHTunnelForwarder
    # Forward local port 5400 to the remote PostgreSQL port 5432.
    with SSHTunnelForwarder((databaseConnectionServer, 22),
                            ssh_username='stylometry',
                            ssh_password='stylometry',
                            remote_bind_address=('localhost', 5432),
                            local_bind_address=('localhost', 5400)):
        textToUse = DatabaseQuery.getDocData(5400, documentTable = documentTable)
        labels = []
        groups = []
        features = []
        size = []
        # First pass: report per-author row counts and find the min/max.
        authorList = textToUse.author_id.unique()
        for auth in authorList:
            current = textToUse.loc[textToUse['author_id'] == auth]
            size.append(current.shape[0])
            print("Author: %5s  Size: %5s" % (auth, current.shape[0]))
        print("Min: %s" % (min(size)))
        print("Max: %s" % (max(size)))
        authorList = authorList.tolist()
        # Second pass: collect the fixed feature columns, labels and doc ids.
        for auth in authorList:
            current = textToUse.loc[textToUse['author_id'] == auth]
            # Cap the sample budget at the smallest author's row count.
            if (samples > min(size)):
                samples = min(size)
            # current = current.sample(n = samples)
            # NOTE(review): assumes these eight feature columns exist in the
            # table -- confirm against DatabaseQuery.getDocData.
            feat = current[["feature1", "feature2", "feature3", "feature4",
                            "feature5", "feature6", "feature7", "feature8"]].values.tolist()
            features = features + feat
            author = current["author_id"].tolist()
            labels = labels + author
            doc = current["doc_id"].tolist()
            groups = groups + doc
        del textToUse
    print('Authors %s.' % (str(authorList)))
    print('Found %s texts.' % len(features))
    print('Found %s features.' % len(labels))
    print('Found %s groups.' % len(groups))
    return (np.array(features), np.array(labels), np.array(groups), samples)
def preProcessTrainVal(features, labels, groups, K_FOLD = 2):
    """Build a leave-one-group-out cross-validation splitter.

    Validation folds never share a document (group) with the training fold.
    K_FOLD is kept for interface compatibility but is unused by this
    strategy.  Prints and returns the splitter.
    """
    from sklearn.model_selection import LeaveOneGroupOut
    splitter = LeaveOneGroupOut()
    print(splitter.get_n_splits(features, labels, groups))
    return splitter
def compileModel():
    # Build a fresh default-parameter random forest (class imported at
    # module top).
    rf = RandomForestClassifier()
    return rf
def recompileModel():
    # Restore a previously pickled RandomForestClassifier from the current
    # working directory.  NOTE: cPickle makes this Python 2 only.
    import cPickle as pickle
    algoloadname = str("RandomForestClassifier" + '.pickle')
    with open(algoloadname, 'rb') as handle:
        model = pickle.load(handle)
    return model
def fitModel(model, trainX, trainY, valX, valY):
    """Fit `model` on the training split, print train/validation accuracy,
    and return (model, train_accuracy, validation_accuracy)."""
    model.fit(trainX, trainY)
    accuracies = (model.score(trainX, trainY), model.score(valX, valY))
    print("\n\nFinal Train Accuracy: %.2f" % (accuracies[0] * 100))
    print("\nFinal Validation Accuracy: %.2f" % (accuracies[1] * 100))
    return (model, accuracies[0], accuracies[1])
def predictModel(model, testX, authorList):
    # Function to take input of data and return prediction model
    #
    # Averages per-sample class-probability rows into a single prediction
    # vector.  When every probability is strictly positive, only the 50%
    # lowest-entropy (most confident) rows are averaged; otherwise all rows
    # are used.  Returns (all_row_probabilities, averaged_probabilities).
    predY = np.array(model.predict_proba(testX))
    predYList = predY[:]
    entro = []
    flag = False
    import math
    for row in predY:
        # Shannon entropy (base 2) of one probability row.
        entroval = 0
        for i in row:
            if(i <= 0):
                # Zero probability: log undefined -- fall back to plain mean.
                flag = True
                pass
            else:
                entroval += (i * (math.log(i , 2)))
        entroval = -1 * entroval
        entro.append(entroval)
    if(flag == False):
        # Sort rows by ascending entropy and keep the most confident half.
        yx = zip(entro, predY)
        yx = sorted(yx, key = lambda t: t[0])
        newPredY = [x for y, x in yx]
        predYEntroList = newPredY[:int(len(newPredY)*0.5)]
        predY = np.mean(predYEntroList, axis=0)
    else:
        predY = np.mean(predYList, axis=0)
    return (predYList, predY)
| mit |
exa-analytics/atomic | exatomic/nwchem/inputs.py | 2 | 9896 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Input Generator and Parser
#############################
Every attempt is made to follow the Documentation on the
NWChem `website`_ with a general theme of the input Generator
accepting keyword arguments mirroring the keywords accepted by
NWChem and values corresponding to the parameters in a calculation.
.. _website: http://www.nwchem-sw.org/index.php/Release66:NWChem_Documentation
"""
# """
# Due to the complexity of the NWChem program and the innumerable
# permutations of input file formats, this is in no way meant to be
# an exhaustive wrapper of NWChem input files. Alternatively,
# valid key words are handled according to the types of the
# arguments being passed to it. If the argument is a string, it should
# be formatted how you want with new line breaks (see default argument
# for geomopts). Multiple options for the same keyword are handled as
# lists of tuples (example: basis=[('C', '3-21G'), ('H', '6-31G**')]).
# Similarly, convergence criteria may be specified with convergence =
# ['nolevelshifting', 'ncydp 30', 'damp 70']. The closer your string
# formatting is to what NWChem expects, the less likely it is that you
# will obtain syntax errors in the written input file.
# """
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
#import pandas as pd
#import numpy as np
from .editor import Editor
#from exa.util.units import Length as L
#from exatomic import Universe
_template = """echo
start {{name}}
title {{title}}
charge {{charge}}
{{memory}}
geometry {{geomopts}}
{{atom}}
end
basis {{basisopts}}
{{basis}}
end
{{set}}
{extras}
{calc}{{prop}}
{{task}}"""
_calcscf = """scf
nopen {mult}
maxiter {iterations}
end"""
_calcdft = """dft
direct
mult {mult}
xc {xc}
iterations {iterations}
tolerances {tolerances}
{grid}
{convergence}
{dft_other}
{restart}
end"""
class Input(Editor):
    """Editor subclass that generates an NWChem input deck from a universe."""

    @classmethod
    def from_universe(cls, uni, task='scf', fp=None, name=None, title=None,
                      charge=0, geomopts='units bohr\nsymmetry c1',
                      basisopts='spherical', basis='* library 6-31G',
                      mult=1, xc='b3lyp', iterations=100,
                      convergence='nolevelshifting', prop=' nbofile 2',
                      relativistic='', tddft='', ecp='', sets=None, tasks='property',
                      dft_other='', grid='xfine', tolerances='tight', memory=''):
        """Build an NWChem input file from `uni`.

        Writes to `fp` (optionally suffixed by `name`) when `fp` is given,
        otherwise returns the populated Input editor.
        """
        # Choose the task block and collect any optional directive blocks
        # (ecp/property/relativistic/tddft) that were actually supplied.
        calc = _calcdft if task == 'dft' else _calcscf
        extras = ''
        extradict = {}
        for arg, extra in [('ecp', ecp), ('property', prop),
                           ('relativistic', relativistic), ('tddft', tddft)]:
            if extra:
                extras += '{' + arg + '}'
                extradict[arg] = _handle_arg(arg, extra)
        # First format pass fills {calc}/{extras}; double-braced fields
        # remain as single-braced placeholders for the pass below.
        fl = cls(_template.format(calc=calc, extras=extras))
        # Collect every placeholder present in the templates and default
        # them all to the empty string.
        keys = [key.split('}')[0].split(':')[0] for key in _template.split('{')[1:]]
        keys += [key.split('}')[0].split(':')[0] for key in _calcscf.split('{')[1:]]
        keys += [key.split('}')[0].split(':')[0] for key in _calcdft.split('{')[1:]]
        kwargs = {key: '' for key in keys}
        kwargs['atom'] = uni.atom.to_xyz()[:-1]
        if name is not None:
            kwargs['name'] = name
        else:
            # NOTE(review): `atom` is not defined in this scope -- this
            # branch raises NameError; presumably meant
            # ''.join(uni.atom['symbol']).  Confirm before relying on it.
            kwargs['name'] = ''.join(atom['symbol'])
        kwargs['title'] = title if title is not None else kwargs['name']
        kwargs['charge'] = charge
        kwargs['geomopts'] = _handle_arg('geomopts', geomopts)
        kwargs['basisopts'] = _handle_arg('basisopts', basisopts)
        kwargs['basis'] = _handle_arg('basis', basis)
        # DFT uses the multiplicity directly; SCF uses nopen = mult - 1 and
        # switches to UHF for open shells.
        if task == 'dft':
            kwargs['mult'] = mult
        elif mult - 1 > 0:
            kwargs['mult'] = str(mult - 1) + '\n uhf'
        else:
            kwargs['mult'] = mult - 1
        kwargs['xc'] = xc
        kwargs['iterations'] = iterations
        kwargs['tolerances'] = tolerances
        kwargs['convergence'] = _handle_arg('convergence', convergence)
        kwargs['grid'] = _handle_arg('grid', grid)
        kwargs['dft_other'] = _handle_arg('dft_other', dft_other)
        kwargs['memory'] = memory
        if sets != None:
            kwargs['set'] = _handle_arg('set', sets)
        # One "task <theory> <what>" line per requested task.
        kwargs['task'] = ''
        if isinstance(tasks, list):
            for i in tasks:
                kwargs['task'] += '\ntask '+task+' '+i
        else:
            kwargs['task'] += 'task '+task+' '+tasks
        #extras = {'ecp': _handle_arg('ecp', ecp),
        #          'tddft': _handle_arg('tddft', tddft),
        #          'property': _handle_arg('property', prop),
        #          'relativistic': _handle_arg('relativistic', relativistic)}
        kwargs.update(extradict)
        #### TASK AND EXTRAS
        #kwargs['prop'] = '\n\nproperty\n nbofile 2\nend'
        #kwargs['task'] = 'property'
        #kwargs['calc'] = calc
        #if options is not None:
        #    for opt, info in options.items():
        #        if opt in extras:
        #            _handle_info(opt, info, extras)
        #        elif kind == 'scf' and opt == 'mult':
        #            kwargs['mult'] = str(int(info) - 1) + '\n uhf' if int(info) > 1 else info
        #        else:
        #            _handle_info(opt, info, kwargs)
        #extras = ['\n' + key + '\n' + val for key, val in extras.items() if val]
        #kwargs['extras'] = '\n'.join([extra + '\nend' for extra in extras])
        fl.format(inplace=True, **kwargs)
        if fp is not None:
            if name is not None:
                fl.write(fp+name)
            else:
                fl.write(fp)
        else:
            return fl

    def __init__(self, *args, **kwargs):
        super(Input, self).__init__(*args, **kwargs)
def _handle_arg(opt, info):
type1 = {'basis': 'library', 'ecp': 'library'}
type2 = ['convergence', 'set', 'grid']
type3 = ['ecp', 'property', 'tddft', 'relativistic']
if isinstance(info, str):
if opt in type3:
return '\n{0}\n{1}\n{2}\n'.format(opt, info, 'end')
return info
if opt in type1:
ret = ''
for i, tup in enumerate(info):
if i == len(info) - 1:
ret = ' '.join([ret, tup[0], type1[opt], tup[1]])
else:
ret = ' '.join([ret, tup[0], type1[opt], tup[1], '\n'])
if opt in type3:
return '\n{0}\n{1}\n{2}\n'.format(opt, ret, 'end')
return ret
elif opt in type2:
ret = ''
if not isinstance(info, list):
info = [info]
for i, arg in enumerate(info):
if i == len(info) - 1:
ret = ' '.join([ret, opt, arg])
else:
ret = ' '.join([ret, opt, arg, '\n'])
if opt in type3:
return '\n{0}\n{1}\n{2}\n'.format(opt, ret, 'end')
return ret
else:
if isinstance(info, list):
return ' '.join([item for item in info])
else:
print('{} keyword not handled correctly with value {}'.format(opt, info))
def tuning_inputs(uni, name, mult, charge, basis, gammas, alphas,
                  route=None, link0=None,
                  field=None, writedir=None, deep=False):
    """
    Provided a universe, generate input files for functional tuning.
    Includes input keywords for orbital visualization within exatomic.
    Assumes you will copy restart checkpoint files to have the same
    names as the input files.
    Args
        uni (exatomic.container.Universe): molecular specification
        name (str): prefix for job names
        mult (int): spin multiplicity
        charge (int): charge of the system
        basis (list): tuples of atomic symbol, string of basis name
        gammas (iter): values of range separation parameter (omega)
        alphas (iter): fractions of Hartree-Fock in the short range
        route (list): strings or tuples of keyword, value pairs (default [("Pop", "full")])
        link0 (list): strings or tuples of keyword, value pairs
        writedir (str): directory path to write input files
    Returns
        editors (list): input files as exa.Editors

    Note: link0, field, writedir and deep are accepted for interface
    compatibility but are not used in the body below.
    """
    if route is None:
        route = [("Pop", "full")]
    # CAM-style PBE/HF exchange mix: gamma is the range-separation
    # parameter, alpha the short-range HF fraction, beta = 1 - alpha.
    fnstr = 'xcampbe96 1.0 cpbe96 1.0 HFexch 1.0\n' \
            ' cam {gam:.4f} cam_alpha {alp:.4f} cam_beta {bet:.4f}'.format
    jbnm = '{name}-{{gam:.2f}}-{{alp:.2f}}-{{chg}}'.format(name=name).format
    # One job each for the cation, neutral and anion at every (gamma, alpha).
    chgnms = ['cat', 'neut', 'an']
    chgs = [charge + 1, charge, charge - 1]
    mults = [2, 1, 2] if mult == 1 else [mult - 1, mult, mult + 1]
    fls = []
    for gam in gammas:
        for alp in alphas:
            #bet = 1 - alp
            for chgnm, chg, mult in zip(chgnms, chgs, mults):
                fnc = fnstr(gam=gam, alp=alp, bet=1-alp)
                jnm = jbnm(gam=gam, alp=alp, bet=1-alp, chg=chgnm)
                opts = {'charge': chg, 'mult': mult, 'task': 'dft',
                        'title': jnm, 'name': jnm, 'xc': fnc,
                        'basis': basis, 'prop': ''} #, 'writedir': writedir}
                fls.append(Input.from_universe(uni, **opts))
                fls[-1].name = jnm + '.nw'
    return fls
# def tuning_inputs(uni, name, mult, charge, basis, gammas, alphas,
# route=[('Pop', 'full')], link0=None, nproc=4, mem=4,
# field=None, writedir=None, deep=False):
# def from_universe(cls, uni, task='scf', fp=None, name=None, title=None,
# charge=0, geomopts='units bohr\nsymmetry c1',
# basisopts='spherical', basis='* library 6-31G',
# mult=1, xc='b3lyp', iterations=100,
# convergence='nolevelshifting', prop=' nbofile 2',
# relativistic='', tddft='', ecp=''):
| apache-2.0 |
kcompher/BuildingMachineLearningSystemsWithPython | ch06/01_start.py | 6 | 3961 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script trains multinomial Naive Bayes on the tweet corpus
# to find two different results:
# - How well can we distinguis positive from negative tweets?
# - How well can we detect whether a tweet contains sentiment at all?
#
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
def create_ngram_model():
    # TF-IDF over word 1- to 3-grams feeding a multinomial Naive Bayes
    # classifier, wrapped in a single sklearn Pipeline.
    tfidf_ngrams = TfidfVectorizer(ngram_range=(1, 3),
                                   analyzer="word", binary=False)
    clf = MultinomialNB()
    pipeline = Pipeline([('vect', tfidf_ngrams), ('clf', clf)])
    return pipeline
def train_model(clf_factory, X, Y, name="NB ngram", plot=False):
    # Evaluate `clf_factory` with 10 shuffled 70/30 splits, collecting
    # accuracy and precision/recall AUC; optionally plot the P/R curve of
    # the median run.  Returns (mean train error, mean test error).
    cv = ShuffleSplit(
        n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
    train_errors = []
    test_errors = []
    scores = []
    pr_scores = []
    precisions, recalls, thresholds = [], [], []
    for train, test in cv:
        X_train, y_train = X[train], Y[train]
        X_test, y_test = X[test], Y[test]
        # Fresh classifier per fold so folds do not leak state.
        clf = clf_factory()
        clf.fit(X_train, y_train)
        train_score = clf.score(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        train_errors.append(1 - train_score)
        test_errors.append(1 - test_score)
        scores.append(test_score)
        # Probability of the positive class drives the ROC and P/R curves.
        proba = clf.predict_proba(X_test)
        fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
        precision, recall, pr_thresholds = precision_recall_curve(
            y_test, proba[:, 1])
        pr_scores.append(auc(recall, precision))
        precisions.append(precision)
        recalls.append(recall)
        thresholds.append(pr_thresholds)
    scores_to_sort = pr_scores
    # Python 2 integer division picks the median fold by P/R AUC.
    median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
    if plot:
        plot_pr(pr_scores[median], name, "01", precisions[median],
                recalls[median], label=name)
    summary = (np.mean(scores), np.std(scores),
               np.mean(pr_scores), np.std(pr_scores))
    print "%.3f\t%.3f\t%.3f\t%.3f\t" % summary
    return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
    """Print every sample that clf misclassifies, with predicted vs. true
    label (debug helper; Python 2: xrange/print statement)."""
    Y_hat = clf.predict(X)
    wrong_idx = Y_hat != Y   # boolean mask of misclassified samples
    X_wrong = X[wrong_idx]
    Y_wrong = Y[wrong_idx]
    Y_hat_wrong = Y_hat[wrong_idx]
    for idx in xrange(len(X_wrong)):
        print "clf.predict('%s')=%i instead of %i" %\
            (X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx])
if __name__ == "__main__":
    # Load the Sanders Twitter sentiment corpus and report class counts.
    X_orig, Y_orig = load_sanders_data()
    classes = np.unique(Y_orig)
    for c in classes:
        print "#%s: %i" % (c, sum(Y_orig == c))
    # Task 1: distinguish positive from negative tweets only.
    print "== Pos vs. neg =="
    pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
    X = X_orig[pos_neg]
    Y = Y_orig[pos_neg]
    Y = tweak_labels(Y, ["positive"])
    train_model(create_ngram_model, X, Y, name="pos vs neg", plot=True)
    # Task 2: does a tweet carry any sentiment at all?
    print "== Pos/neg vs. irrelevant/neutral =="
    X = X_orig
    Y = tweak_labels(Y_orig, ["positive", "negative"])
    train_model(create_ngram_model, X, Y, name="sent vs rest", plot=True)
    # Task 3/4: one-vs-rest for each sentiment class.
    print "== Pos vs. rest =="
    X = X_orig
    Y = tweak_labels(Y_orig, ["positive"])
    train_model(create_ngram_model, X, Y, name="pos vs rest", plot=True)
    print "== Neg vs. rest =="
    X = X_orig
    Y = tweak_labels(Y_orig, ["negative"])
    train_model(create_ngram_model, X, Y, name="neg vs rest", plot=True)
    print "time spent:", time.time() - start_time
| mit |
IndraVikas/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    """The Hungarian algorithm must reach known optimal assignment costs,
    for each matrix and for its transpose."""
    cases = [
        # (cost matrix, expected optimal total cost)
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]], 850),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]], 452),
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]], 18),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]], 15),
        # Degenerate n == 2, m == 0 matrix
        ([[], []], 0),
    ]
    for matrix, expected_total in cases:
        matrix = np.array(matrix)
        total = sum(matrix[r, c] for r, c in _hungarian(matrix))
        assert expected_total == total
        # The transposed problem has the same optimum (indices swap roles).
        total_t = sum(matrix[r, c] for c, r in _hungarian(matrix.T))
        assert expected_total == total_t
| bsd-3-clause |
HaydenFaulkner/phd | processing/VEN/S2S/s2vt_captioner_old.py | 1 | 26012 | DEVICE_ID = 0
""" Script to generate captions from video features"""
from collections import OrderedDict
import argparse
import cPickle as pickle
import h5py
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import sys
sys.path.append('/home/hayden/caffe-recurrent/python')
# libcudart.so.7.5: cannot open shared object file: No such file or directory
# export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
sys.path.insert(0, '/home/hayden/caffe-recurrent/python')
import caffe
from framefc7_text_to_hdf5_data_old import *
def vocab_inds_to_sentence(vocab, inds):
    """Render a sequence of vocabulary indices as a display sentence.

    The first character is capitalized. A trailing EOS token (vocab[0])
    becomes '.', otherwise '...' is appended to mark truncation.
    """
    words = [vocab[i] for i in inds]
    sentence = ' '.join(words)
    sentence = sentence[0].upper() + sentence[1:]
    eos = vocab[0]
    if sentence.endswith(' ' + eos):
        # Drop the separating space plus the EOS token, end with a period.
        sentence = sentence[:-(len(eos) + 1)] + '.'
    else:
        sentence = sentence + '...'
    return sentence
def video_to_descriptor(video_id, fsg):
    """Fetch one video's per-frame features from the sequence generator.

    Each stored feature line is parsed via fsg.float_line_to_stream and
    returned as a (1, feature_dim) numpy array; order follows the stored
    frame order.
    """
    assert video_id in fsg.vid_framefeats
    features = []
    for line in fsg.vid_framefeats[video_id]:
        values = fsg.float_line_to_stream(line)
        features.append(np.array(values).reshape(1, len(values)))
    return features
def encode_video_frames(net, video_features, previous_word=-1):
    """Feed every frame feature through the recurrent net's encoding stage.

    The continuation flag is 0 (reset LSTM state) only on the first frame
    when previous_word == -1; after the first iteration previous_word is
    forced to 0 (EOS), so all later frames run with cont = 1. The word
    input is always EOS during encoding; stage indicator 0 marks encoding.
    """
    for frame_feature in video_features:
        cont_input = 0 if previous_word == -1 else 1
        previous_word = 0  # subsequent frames continue the sequence
        cont = np.array([cont_input])
        data_en = np.array([previous_word])
        stage_ind = np.array([0])  # encoding stage
        image_features = np.zeros_like(net.blobs['frames_fc7'].data)
        image_features[:] = frame_feature
        net.forward(frames_fc7=image_features, cont_sentence=cont,
                    input_sentence=data_en, stage_indicator=stage_ind)
def predict_single_word(net, pad_img_feature, previous_word, output='probs'):
    """Advance the decoder one step: feed the previous word (plus the
    padding image feature) and return the flattened contents of the
    requested output blob (word probabilities by default).

    cont is always 1: the LSTM state from encoding/previous steps is kept.
    """
    cont_input = 1
    cont = np.array([cont_input])
    data_en = np.array([previous_word])
    stage_ind = np.array([1])  # decoding stage
    image_features = np.zeros_like(net.blobs['frames_fc7'].data)
    image_features[:] = pad_img_feature
    net.forward(frames_fc7=image_features, cont_sentence=cont,
                input_sentence=data_en, stage_indicator=stage_ind)
    output_preds = net.blobs[output].data.reshape(-1)
    return output_preds
def predict_single_word_from_all_previous(net, pad_img_feature, previous_words):
    """Replay a whole word history through the (stateful) decoder and return
    the distribution after the last word.

    The initial call with EOS (0) primes the net; each loop iteration
    overwrites `probs`, so only the final step's distribution is returned.
    """
    probs = predict_single_word(net, pad_img_feature, 0)
    for index, word in enumerate(previous_words):
        probs = predict_single_word(net, pad_img_feature, word)
    return probs
# Strategy must be either 'beam' or 'sample'.
# If 'beam', do a max likelihood beam search with beam size num_samples.
# Otherwise, sample with temperature temp.
def predict_image_caption(net, pad_img_feature, vocab_list, strategy={'type': 'beam'}):
    """Dispatch caption generation to beam search or temperature sampling.

    Returns (captions, per-word-probability lists), one entry per beam or
    per drawn sample.
    """
    assert 'type' in strategy
    assert strategy['type'] in ('beam', 'sample')
    if strategy['type'] == 'beam':
        return predict_image_caption_beam_search(net, pad_img_feature,
                                                 vocab_list, strategy)
    num_samples = strategy['num'] if 'num' in strategy else 1
    samples, sample_probs = [], []
    for _ in range(num_samples):
        caption, caption_prob = sample_image_caption(net, pad_img_feature, strategy)
        samples.append(caption)
        sample_probs.append(caption_prob)
    return samples, sample_probs
def softmax(softmax_inputs, temp):
    """Temperature-scaled softmax with explicit NaN/overflow handling.

    A NaN sum propagates NaN everywhere; an overflowing (+inf) sum yields
    an all-zero vector; otherwise the sum is clamped below by a small
    epsilon before normalizing.
    """
    exps = np.exp(temp * softmax_inputs)
    total = exps.sum()
    if math.isnan(total):
        return exps * float('nan')
    if math.isinf(total):
        assert total > 0  # should not be -inf
        return np.zeros_like(exps)
    return exps / max(total, 1e-8)
def random_choice_from_probs(softmax_inputs, temp=1.0, already_softmaxed=False):
    """Draw an index from a probability distribution by inverse-CDF sampling.

    If already_softmaxed, the inputs are taken as probabilities (temp must
    stay 1.0); otherwise they are softmaxed at the given temperature.
    Falls back to index 1 (UNK?) when rounding loses all the mass.
    """
    if already_softmaxed:
        assert temp == 1.0
        probs = softmax_inputs
    else:
        probs = softmax(softmax_inputs, temp)
    threshold = random.random()
    running = 0.
    for index, p in enumerate(probs):
        running += p
        if running >= threshold:
            return index
    return 1  # return UNK?
def sample_image_caption(net, image, strategy, net_output='predict', max_length=128):
    """Sample one caption word-by-word until EOS (0) or max_length.

    Returns (word indices, per-word probabilities). Probabilities are
    recomputed with a plain (temp=1) softmax of the net outputs, while the
    sampling itself uses strategy['temp'] if present.
    NOTE(review): eps_prob is never used.
    """
    sentence = []
    probs = []
    eps_prob = 1e-8
    temp = strategy['temp'] if 'temp' in strategy else 1.0
    if max_length < 0: max_length = float('inf')
    # Stop at max_length or as soon as the last sampled word is EOS (0).
    while len(sentence) < max_length and (not sentence or sentence[-1] != 0):
        previous_word = sentence[-1] if sentence else 0
        softmax_inputs = \
            predict_single_word(net, image, previous_word, output=net_output)
        word = random_choice_from_probs(softmax_inputs, temp)
        sentence.append(word)
        probs.append(softmax(softmax_inputs, 1.0)[word])
    return sentence, probs
def predict_image_caption_beam_search(net, pad_img_feature, vocab_list, strategy, max_length=128):
    """Max-likelihood beam search decoding.

    Returns (beams, beam_probs): word-index lists and matching per-word
    probabilities. Beams end at EOS (word 0) or max_length.
    """
    # Note: This code support S2VT only for beam-width 1 (the stateful net
    # cannot hold several beam histories at once).
    beam_size = 1
    beams = [[]]
    beams_complete = 0
    beam_probs = [[]]
    beam_log_probs = [0.]
    current_input_word = 0  # first input is EOS
    while beams_complete < len(beams):
        expansions = []
        for beam_index, beam_log_prob, beam in \
                zip(range(len(beams)), beam_log_probs, beams):
            if beam:
                previous_word = beam[-1]
                if len(beam) >= max_length or previous_word == 0:
                    exp = {'prefix_beam_index': beam_index, 'extension': [],
                           'prob_extension': [], 'log_prob': beam_log_prob}
                    expansions.append(exp)
                    # Don't expand this beam; it was already ended with an EOS,
                    # or is the max length.
                    continue
            else:
                previous_word = 0  # EOS is first word
            if beam_size == 1:
                # Stateful fast path: one step from the net's current state.
                probs = predict_single_word(net, pad_img_feature, previous_word)
            else:
                probs = predict_single_word_from_all_previous(net, pad_img_feature, beam)
            assert len(probs.shape) == 1
            assert probs.shape[0] == len(vocab_list)
            # Top-beam_size candidate extensions by probability.
            expansion_inds = probs.argsort()[-beam_size:]
            for ind in expansion_inds:
                prob = probs[ind]
                extended_beam_log_prob = beam_log_prob + math.log(prob)
                exp = {'prefix_beam_index': beam_index, 'extension': [ind],
                       'prob_extension': [prob], 'log_prob': extended_beam_log_prob}
                expansions.append(exp)
        # Sort expansions in decreasing order of probability, keep the best.
        expansions.sort(key=lambda expansion: -1 * expansion['log_prob'])
        expansions = expansions[:beam_size]
        new_beams = \
            [beams[e['prefix_beam_index']] + e['extension'] for e in expansions]
        new_beam_probs = \
            [beam_probs[e['prefix_beam_index']] + e['prob_extension'] for e in expansions]
        beam_log_probs = [e['log_prob'] for e in expansions]
        beams_complete = 0
        for beam in new_beams:
            if beam[-1] == 0 or len(beam) >= max_length: beams_complete += 1
        beams, beam_probs = new_beams, new_beam_probs
    return beams, beam_probs
def run_pred_iter(net, pad_image_feature, vocab_list, strategies=[{'type': 'beam'}]):
    """Run each decoding strategy once and collect all generated captions.

    Each result dict carries the caption, its per-word probabilities, a
    gt=False flag, and the originating strategy.
    """
    outputs = []
    for strategy in strategies:
        captions, probs = predict_image_caption(
            net, pad_image_feature, vocab_list, strategy=strategy)
        for caption, prob in zip(captions, probs):
            outputs.append({'caption': caption,
                            'prob': prob,
                            'gt': False,
                            'source': strategy})
    return outputs
def score_caption(net, image, caption, is_gt=True, caption_source='gt'):
    """Score an existing caption under the model, word by word.

    At each position the probability of the actual next word (given the
    history fed so far) is recorded.
    """
    word_probs = []
    probs = predict_single_word(net, image, 0)  # prime with EOS
    for word in caption:
        word_probs.append(probs[word])
        probs = predict_single_word(net, image, word)
    return {'caption': caption,
            'gt': is_gt,
            'source': caption_source,
            'prob': word_probs}
def next_video_gt_pair(tsg):
    """Pull the next (video_id, ground-truth sentence stream) pair from the
    sequence generator; line_index-1 is the line just consumed by
    get_streams()."""
    streams = tsg.get_streams()
    video_id = tsg.lines[tsg.line_index-1][0]
    gt = streams['target_sentence']
    return video_id, gt
def all_video_gt_pairs(fsg):
    """Collect every video's ground-truth captions into an OrderedDict.

    Consumes the generator until it wraps around (a video id repeats after
    a different one, meaning the pass is complete). When no caption lines
    exist, maps every known video id to an empty list.
    NOTE(review): the print reports len(data.values()) — that is the number
    of videos, not of captions.
    """
    data = OrderedDict()
    if len(fsg.lines) > 0:
        prev_video_id = None
        while True:
            video_id, gt = next_video_gt_pair(fsg)
            if video_id in data:
                if video_id != prev_video_id:
                    # Wrapped back to an earlier video: one full pass done.
                    break
                data[video_id].append(gt)
            else:
                data[video_id] = [gt]
            prev_video_id = video_id
        print 'Found %d videos with %d captions' % (len(data.keys()), len(data.values()))
    else:
        data = OrderedDict(((key, []) for key in fsg.vid_framefeats.keys()))
    return data
def gen_stats(prob, normalizer=None):
    """Summarize a sequence of per-word probabilities.

    Returns a dict with length, total/per-word log probability and
    perplexity. Probabilities are clamped below at 1e-12 before taking the
    log; exp overflow yields inf perplexity. If a normalizer sequence is
    given, its perplexities divide ours ('normed_*' keys).
    """
    eps = 1e-12
    stats = {'length': len(prob), 'log_p': 0.0}
    for p in prob:
        assert 0.0 <= p <= 1.0
        stats['log_p'] += math.log(max(eps, p))
    stats['log_p_word'] = stats['log_p'] / stats['length']
    for key, src in (('perplex', 'log_p'), ('perplex_word', 'log_p_word')):
        try:
            stats[key] = math.exp(-stats[src])
        except OverflowError:
            stats[key] = float('inf')
    if normalizer is not None:
        norm_stats = gen_stats(normalizer)
        stats['normed_perplex'] = stats['perplex'] / norm_stats['perplex']
        stats['normed_perplex_word'] = \
            stats['perplex_word'] / norm_stats['perplex_word']
    return stats
def run_pred_iters(pred_net, vidids, video_gt_pairs, fsg,
strategies=[{'type': 'beam'}], display_vocab=None):
outputs = OrderedDict()
num_pairs = 0
descriptor_video_id = ''
pad_img_feature = None
for video_id in vidids:
gt_captions = video_gt_pairs[video_id] # gets the target stream
assert video_id not in outputs
num_pairs += 1
if descriptor_video_id != video_id:
# get fc7 feature for the video
video_features = video_to_descriptor(video_id, fsg)
print 'Num video features: %d ' % len(video_features)
print 'Dimension of video features: {0}'.format(video_features[0].shape)
# run lstm on all the frames of video before predicting
encode_video_frames(pred_net, video_features)
# use the last frame from the video as padding
pad_img_feature = video_features[-1]
# Make padding all 0 when predicting
pad_img_feature[pad_img_feature > 0] = 0
desciptor_video_id = video_id
outputs[video_id] = \
run_pred_iter(pred_net, pad_img_feature, display_vocab, strategies=strategies)
# for gt_caption in gt_captions:
# outputs[image_path].append(
# score_caption(pred_net, pad_img_feature, gt_caption))
if display_vocab is not None:
for output in outputs[video_id]:
caption, prob, gt, source = \
output['caption'], output['prob'], output['gt'], output['source']
caption_string = vocab_inds_to_sentence(display_vocab, caption)
if gt:
tag = 'Actual'
else:
tag = 'Generated'
stats = gen_stats(prob)
print '%s caption (length %d, log_p = %f, log_p_word = %f):\n%s' % \
(tag, stats['length'], stats['log_p'], stats['log_p_word'], caption_string)
return outputs
def to_html_row(columns, header=False):
    """Format one HTML table row.

    Values that convert cleanly to a small integer render via %d, other
    numerics via %0.04f, and everything else (including strings) via %s.
    """
    open_tag, close_tag = ('<th>', '</th>') if header else ('<td>', '</td>')
    cells = []
    for column in columns:
        # Bare except on purpose: both the int()/float() conversions and
        # the %-formatting may fail for non-numeric values.
        try:
            if int(column) < 1e8 and int(column) == float(column):
                text = '%d' % column
            else:
                text = '%0.04f' % column
        except:
            text = '%s' % column
        cells.append(open_tag + text + close_tag)
    return '<tr>' + ''.join(cells) + '</tr>'
def to_html_output(outputs, vocab):
    """Render all captions as one HTML report string.

    Per video: an <img> tag plus a table of captions sorted by (normalized)
    perplexity. Ground-truth captions are italicized (or colored by their
    'correct' flag); generated ones are bolded with their strategy named.
    """
    out = ''
    for video_id, captions in outputs.iteritems():
        for c in captions:
            if not 'stats' in c:
                c['stats'] = gen_stats(c['prob'])
        # Sort captions by log probability.
        if 'normed_perplex' in captions[0]['stats']:
            captions.sort(key=lambda c: c['stats']['normed_perplex'])
        else:
            captions.sort(key=lambda c: -c['stats']['log_p_word'])
        out += '<img src="%s"><br>\n' % video_id
        out += '<table border="1">\n'
        column_names = ('Source', '#Words', 'Perplexity/Word', 'Caption')
        out += '%s\n' % to_html_row(column_names, header=True)
        for c in captions:
            caption, gt, source, stats = \
                c['caption'], c['gt'], c['source'], c['stats']
            caption_string = vocab_inds_to_sentence(vocab, caption)
            if gt:
                source = 'ground truth'
                if 'correct' in c:
                    caption_string = '<font color="%s">%s</font>' % \
                        ('green' if c['correct'] else 'red', caption_string)
                else:
                    caption_string = '<em>%s</em>' % caption_string
            else:
                if source['type'] == 'beam':
                    source = 'beam (size %d)' % source['beam_size']
                elif source['type'] == 'sample':
                    source = 'sample (temp %f)' % source['temp']
                else:
                    raise Exception('Unknown type: %s' % source['type'])
                caption_string = '<strong>%s</strong>' % caption_string
            columns = (source, stats['length'] - 1,
                       stats['perplex_word'], caption_string)
            out += '%s\n' % to_html_row(columns)
        out += '</table>\n'
        out += '<br>\n\n'
        out += '<br>' * 2
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so '<unk>' was never sanitized.
    out = out.replace('<unk>', 'UNK')
    return out
def to_text_output(outputs, vocab):
    """Group caption lines ('source\\tvideo_id\\tcaption\\n') by strategy.

    A first pass over the first video's captions discovers the strategy
    keys; a second pass over all videos appends one line per caption.
    NOTE(review): the second pass always builds the key from
    source['beam_size'], which would raise KeyError for 'sample' sources —
    only beam strategies appear to be used in practice. Also note the odd
    space in 'sample_temp_ %f'. Python 2 (keys()[0], iteritems).
    """
    out_types = {}
    caps = outputs[outputs.keys()[0]]
    for c in caps:
        caption, gt, source = \
            c['caption'], c['gt'], c['source']
        if source['type'] == 'beam':
            source_meta = 'beam_size_%d' % source['beam_size']
        elif source['type'] == 'sample':
            source_meta = 'sample_temp_ %f' % source['temp']
        else:
            raise Exception('Unknown type: %s' % source['type'])
        if source_meta not in out_types:
            out_types[source_meta] = []
    num_videos = 0
    out = ''
    for video_id, captions in outputs.iteritems():
        num_videos += 1
        for c in captions:
            if not 'stats' in c:
                c['stats'] = gen_stats(c['prob'])
        # Sort captions by log probability.
        if 'normed_perplex' in captions[0]['stats']:
            captions.sort(key=lambda c: c['stats']['normed_perplex'])
        else:
            captions.sort(key=lambda c: -c['stats']['log_p_word'])
        for c in captions:
            caption, gt, source, stats = \
                c['caption'], c['gt'], c['source'], c['stats']
            caption_string = vocab_inds_to_sentence(vocab, caption)
            source_meta = 'beam_size_%d' % source['beam_size']
            out = '%s\t%s\t%s\n' % (source_meta, video_id, caption_string)
            # if len(out_types[source_meta]) < num_videos:
            out_types[source_meta].append(out)
    return out_types
def retrieval_image_list(dataset, cache_dir):
    """Return the dataset's image paths, cached as a text file in cache_dir.

    On a cache hit the stored list is read back and checked against the
    dataset's keys; on a miss the keys are written one per line.
    """
    list_path = '%s/image_paths.txt' % cache_dir
    if os.path.exists(list_path):
        with open(list_path, 'r') as list_file:
            image_paths = [line.strip() for line in list_file.readlines()]
        assert set(image_paths) == set(dataset.keys())
    else:
        image_paths = dataset.keys()
        with open(list_path, 'w') as list_file:
            list_file.write('\n'.join(image_paths) + '\n')
    return image_paths
def compute_descriptors(net, image_list, output_name='fc7'):
    """Run the CNN over image_list in batches and stack the requested
    output blob into a (num_images, ...) array.

    NOTE(review): relies on a `preprocess_image` helper that is not defined
    in this file — presumably imported via the framefc7 helper module;
    confirm. The final partial batch may carry stale entries from the
    previous batch, but only valid rows are copied out. Python 2 prints.
    """
    batch = np.zeros_like(net.blobs['data'].data)
    batch_shape = batch.shape
    batch_size = batch_shape[0]
    descriptors_shape = (len(image_list), ) + net.blobs[output_name].data.shape[1:]
    descriptors = np.zeros(descriptors_shape)
    for batch_start_index in range(0, len(image_list), batch_size):
        batch_list = image_list[batch_start_index:(batch_start_index + batch_size)]
        for batch_index, image_path in enumerate(batch_list):
            batch[batch_index:(batch_index + 1)] = preprocess_image(net, image_path)
        print 'Computing descriptors for images %d-%d of %d' % \
            (batch_start_index, batch_start_index + batch_size - 1, len(image_list))
        net.forward(data=batch)
        print 'Done'
        descriptors[batch_start_index:(batch_start_index + batch_size)] = \
            net.blobs[output_name].data
    return descriptors
def retrieval_descriptors(net, image_list, cache_dir):
    """Compute CNN descriptors for image_list, caching them as an .npz file
    in cache_dir so subsequent calls load instead of recompute."""
    cache_path = '%s/descriptors.npz' % cache_dir
    if os.path.exists(cache_path):
        return np.load(cache_path)['descriptors']
    descriptors = compute_descriptors(net, image_list)
    np.savez_compressed(cache_path, descriptors=descriptors)
    return descriptors
def retrieval_caption_list(dataset, image_list, cache_dir):
    """Flatten dataset captions into [{'source_image', 'caption'}, ...],
    sorted by caption length, caching the result as a pickle in cache_dir."""
    cache_path = '%s/captions.pkl' % cache_dir
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as cache_file:
            return pickle.load(cache_file)
    captions = [{'source_image': image, 'caption': caption}
                for image in image_list
                for caption in dataset[image]]
    # Sort by length for performance (stable sort keeps input order within
    # equal lengths).
    captions.sort(key=lambda c: len(c['caption']))
    with open(cache_path, 'wb') as cache_file:
        pickle.dump(captions, cache_file)
    return captions
def sample_captions(net, image_features,
                    prob_output_name='probs', output_name='samples', caption_source='sample'):
    """Sample captions for a whole batch of images in lockstep.

    Steps the net one word at a time; each still-open caption (no EOS yet)
    gets the net's sampled word appended along with its probability. Stops
    once every caption in the batch has emitted EOS (word 0). Python 2
    print statements.
    """
    cont_input = np.zeros_like(net.blobs['cont_sentence'].data)
    word_input = np.zeros_like(net.blobs['input_sentence'].data)
    batch_size = image_features.shape[0]
    outputs = []
    output_captions = [[] for b in range(batch_size)]
    output_probs = [[] for b in range(batch_size)]
    caption_index = 0
    num_done = 0
    while num_done < batch_size:
        # cont is 0 on the first step (reset state), 1 afterwards.
        if caption_index == 0:
            cont_input[:] = 0
        elif caption_index == 1:
            cont_input[:] = 1
        if caption_index == 0:
            word_input[:] = 0
        else:
            for index in range(batch_size):
                word_input[index] = \
                    output_captions[index][caption_index - 1] if \
                    caption_index <= len(output_captions[index]) else 0
        net.forward(image_features=image_features,
                    cont_sentence=cont_input, input_sentence=word_input)
        net_output_samples = net.blobs[output_name].data
        net_output_probs = net.blobs[prob_output_name].data
        for index in range(batch_size):
            # If the caption is empty, or non-empty but the last word isn't EOS,
            # predict another word.
            if not output_captions[index] or output_captions[index][-1] != 0:
                next_word_sample = net_output_samples[index]
                assert next_word_sample == int(next_word_sample)
                next_word_sample = int(next_word_sample)
                output_captions[index].append(next_word_sample)
                output_probs[index].append(net_output_probs[index, next_word_sample])
                if next_word_sample == 0: num_done += 1
        print '%d/%d done after word %d' % (num_done, batch_size, caption_index)
        caption_index += 1
    for prob, caption in zip(output_probs, output_captions):
        output = {}
        output['caption'] = caption
        output['prob'] = prob
        output['gt'] = False
        output['source'] = caption_source
        outputs.append(output)
    return outputs
def print_top_samples(vocab, samples, out_filename=None):
top_sample = OrderedDict()
for sample in samples:
stats = gen_stats(sample['prob'])
image_path = sample['source']
if image_path not in top_sample:
top_sample[image_path] = (None, -float('inf'))
if stats['log_p_word'] > top_sample[image_path][1]:
top_sample[image_path] = (sample['caption'], stats['log_p_word'])
out_file = open(out_filename, 'w') if out_filename is not None else sys.stdout
for image_path, sample in top_sample.iteritems():
image_id = os.path.split(image_path)[1]
out_file.write("%s\t%s\n" % (image_id, vocab_inds_to_sentence(vocab, sample[0])))
out_file.close()
print 'Wrote top samples to:', out_filename
def main():
    """Entry point: load a trained S2VT LSTM from hard-coded paths and write
    generated captions (text, optionally HTML) for each dataset split.

    NOTE(review): the configuration constants are assigned twice — the
    second ('002') group of assignments overrides the first ('001') group,
    and MODEL_FILE / NET_TAG from the CLI are immediately shadowed by
    hard-coded values. Python 2 print statements throughout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--modelname", type=str, #required=True,
                        help='Name of model without ".caffemodel" extension')
    parser.add_argument("-t", "--testset", action='store_true',
                        help='Evaluate on test set. If unspecified then val set.')
    parser.add_argument("-o", "--htmlout", action='store_true',
                        help='output sentences as html to visually compare')
    parser.add_argument("-g", "--gold", action='store_true',
                        help='groundtruth sentences for scoring/retrieval')
    args = parser.parse_args()
    # TODO: Input the snapshot directory, vocab path, frames (and sents) path
    # --- first ('001') configuration block: fully overridden below ---
    LAYER = 'fc7'
    DIR = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/001/snapshots'
    VOCAB_FILE = '/media/hayden/Storage1/UBUNTU/DATASETS/VIDEO/TENNIS/POINTS/VOCABULARY/CAFFE/vocabulary.txt'
    FRAMEFEAT_FILE_PATTERN = '/media/hayden/Storage1/UBUNTU/DATASETS/VIDEO/TENNIS/POINTS/FEATURES/TENNIS_NET/002/'+LAYER+'/txtV/tennis_allframes_tennis002_'+LAYER+'_{0}.txt'
    LSTM_NET_FILE = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/001/s2vt.words_to_preds.deploy.prototxt'
    RESULTS_DIR = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/001/RESULTS2/'+LAYER
    MODEL_FILE = '%s/%s.caffemodel' % (DIR, args.modelname)
    MODEL_FILE = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/001/SNAPSHOTS/'+LAYER+'/s2vt_tennis_tennis002_iter_50000.caffemodel'
    SENTS_FILE = args.gold if args.gold else None  # optional
    NET_TAG = args.modelname
    NET_TAG = 's2vt_tennis_tennis002_iter_50000'
    # --- second ('002') configuration block: the values actually used ---
    LAYER = 'fc7'
    DIR = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/002/snapshots'
    VOCAB_FILE = '/media/hayden/Storage1/UBUNTU/DATASETS/VIDEO/TENNIS/POINTS/VOCABULARY/CAFFE/vocabulary.txt'
    FRAMEFEAT_FILE_PATTERN = '/media/hayden/Storage1/UBUNTU/DATASETS/VIDEO/TENNIS/POINTS/FEATURES/TENNIS_NET/002/' + LAYER + '/txtV/tennis_allframes_tennis002_' + LAYER + '_{0}.txt'
    LSTM_NET_FILE = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/002/s2vt.words_to_preds.deploy.prototxt'
    RESULTS_DIR = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/002/RESULTS'
    MODEL_FILE = '%s/%s.caffemodel' % (DIR, args.modelname)
    MODEL_FILE = '/media/hayden/Storage1/UBUNTU/MODELS/SENTENCE/CAFFE/VEN/S2S/002/SNAPSHOTS/s2vt_tennis_tennis002_iter_5000.caffemodel'
    SENTS_FILE = args.gold if args.gold else None  # optional
    NET_TAG = args.modelname
    NET_TAG = 's2vt_tennis_tennis002_iter_5000'
    ######################### ABOVE WORKS PERFECTLY ##############################
    ######################## CAN'T GET BELOW TO WORK #############################
    # LAYER = 'fc7'
    # CNN_MODEL = 'MVCD004_02'
    # RNN_MODEL = 'MSCV003_02'
    # NET_TAG = 's2vt_tennis_' + RNN_MODEL + 'd_iter_50000'
    # NET_TAG = 'tennis_iter_5000'
    # DIR = DRIVE + 'MODELS/SENTENCE/CAFFE/VEN/S2S/' + RNN_MODEL + '/' + LAYER + '/SNAPSHOTS/'
    # VOCAB_FILE = DRIVE + 'DATASETS/VIDEO/TENNIS/S2S/VOCABULARY/CAFFE/vocabulary.txt'
    # FRAMEFEAT_FILE_PATTERN = DRIVE + 'DATASETS/VIDEO/TENNIS/FEATURES/CLASSES/Point/' + CNN_MODEL + '/' + LAYER + '/txt2/{0}.txt'
    #
    # LSTM_NET_FILE = DRIVE + 'MODELS/SENTENCE/CAFFE/VEN/S2S/' + RNN_MODEL + '/' + LAYER + '/s2vt.words_to_preds.deploy.prototxt'
    # print LSTM_NET_FILE
    # print os.path.exists(LSTM_NET_FILE)
    # RESULTS_DIR = DRIVE + 'MODELS/SENTENCE/CAFFE/VEN/S2S/' + RNN_MODEL + '/' + LAYER + '/RESULTS'
    # # MODEL_FILE = '%s/%s.caffemodel' % (DIR, args.modelname)
    # # MODEL_FILE = DIR+'s2vt_tennis_tennis002_iter_50000.caffemodel'
    # SENTS_FILE = None # DRIVE + 'DATASETS/VIDEO/TENNIS/COMMENTARY/ALIGNED/CLASSES/Point/S005/sents_test_lc_nopunc.txt'#args.gold if args.gold else None # optional
    # # NET_TAG = args.modelname
    # MODEL_FILE = DIR + NET_TAG + '.caffemodel'
    ###################################################################################
    # Select GPU or CPU mode from the module-level DEVICE_ID constant.
    if DEVICE_ID >= 0:
        caffe.set_mode_gpu()
        caffe.set_device(DEVICE_ID)
    else:
        caffe.set_mode_cpu()
    print "Setting up LSTM NET"
    lstm_net = caffe.Net(LSTM_NET_FILE, MODEL_FILE, caffe.TEST)
    print "Done"
    nets = [lstm_net]
    STRATEGIES = [
        {'type': 'beam', 'beam_size': 1},
    ]
    NUM_OUT_PER_CHUNK = 30
    START_CHUNK = 0
    vocab_file = VOCAB_FILE
    DATASETS = []  # split_name, data_split_name, aligned
    # if args.testset:
    #     DATASETS.append(('test', 'test', False))
    # else:
    #     DATASETS.append(('valid', 'val', False))
    DATASETS.append(('train', 'train', False))
    DATASETS.append(('valid', 'val', False))
    DATASETS.append(('test', 'test', False))
    for split_name, data_split_name, aligned in DATASETS:
        filenames = [(FRAMEFEAT_FILE_PATTERN.format(data_split_name),
                      SENTS_FILE)]
        fsg = fc7FrameSequenceGenerator(filenames, BUFFER_SIZE,
            vocab_file, max_words=MAX_WORDS, align=aligned, shuffle=False,
            pad=aligned, truncate=aligned)
        video_gt_pairs = all_video_gt_pairs(fsg)
        print 'Read %d videos pool feats' % len(fsg.vid_framefeats)
        NUM_CHUNKS = (len(fsg.vid_framefeats)/NUM_OUT_PER_CHUNK) + 1
        eos_string = '<EOS>'
        # add english inverted vocab
        vocab_list = [eos_string] + fsg.vocabulary_inverted
        offset = 0
        # Process the split in chunks of NUM_OUT_PER_CHUNK videos.
        for c in range(START_CHUNK, NUM_CHUNKS):
            chunk_start = c * NUM_OUT_PER_CHUNK
            chunk_end = (c + 1) * NUM_OUT_PER_CHUNK
            chunk = video_gt_pairs.keys()[chunk_start:chunk_end]
            html_out_filename = '%s/%s.%s.%d_to_%d.html' % \
                (RESULTS_DIR, data_split_name, NET_TAG, chunk_start, chunk_end)
            text_out_filename = '%s/%s.%s_' % \
                (RESULTS_DIR, data_split_name, NET_TAG)
            if not os.path.exists(RESULTS_DIR): os.makedirs(RESULTS_DIR)
            outputs = run_pred_iters(lstm_net, chunk, video_gt_pairs,
                fsg, strategies=STRATEGIES, display_vocab=vocab_list)
            if args.htmlout:
                html_out = to_html_output(outputs, vocab_list)
                html_out_file = open(html_out_filename, 'w')
                html_out_file.write(html_out)
                html_out_file.close()
            text_out_types = to_text_output(outputs, vocab_list)
            for strat_type in text_out_types:
                text_out_fname = text_out_filename + strat_type + '.txt'
                text_out_file = open(text_out_fname, 'a')
                text_out_file.write(''.join(text_out_types[strat_type]))
                text_out_file.close()
            offset += NUM_OUT_PER_CHUNK
            print '(%d-%d) Appending to file: %s' % (chunk_start, chunk_end,
                                                     text_out_fname)
if __name__ == "__main__":
main() | mit |
sourabhdattawad/BuildingMachineLearningSystemsWithPython | ch08/norm.py | 23 | 2242 | import numpy as np
class NormalizePositive(object):
    """Mean/std normalization that treats zero entries as missing ratings.

    Statistics are estimated only from strictly positive entries; after
    transformation, missing entries remain exactly zero. `axis=0`
    normalizes per column, `axis=1` per row (implemented by transposing).
    """

    def __init__(self, axis=0):
        # Axis along which each position's statistics are computed.
        self.axis = axis

    def fit(self, features, y=None):
        """Estimate per-position mean and regularized std from the
        non-zero ("rated") entries. Returns self."""
        if self.axis == 1:
            features = features.T
        rated = (features > 0)
        counts = rated.sum(axis=0)
        # Positions with no ratings would divide by zero; force count 1.
        counts[counts == 0] = 1.
        self.mean = features.sum(axis=0) / counts
        # Squared deviation, masked so missing entries contribute nothing.
        deviation = (features - self.mean) * rated
        # Regularize the std estimate by adding 0.1 before the sqrt.
        self.std = np.sqrt(0.1 + (deviation ** 2).sum(axis=0) / counts)
        return self

    def transform(self, features):
        """Standardize the rated entries; missing entries stay at zero."""
        if self.axis == 1:
            features = features.T
        rated = (features > 0)
        features = (features - self.mean) / self.std * rated
        if self.axis == 1:
            features = features.T
        return features

    def inverse_transform(self, features, copy=True):
        """Undo transform(); with copy=False the input array is mutated."""
        if copy:
            features = features.copy()
        if self.axis == 1:
            features = features.T
        # In-place ops so copy=False genuinely updates the caller's array.
        features *= self.std
        features += self.mean
        if self.axis == 1:
            features = features.T
        return features

    def fit_transform(self, features):
        """Convenience: fit on features, then transform them."""
        self.fit(features)
        return self.transform(features)
def predict(train):
    """Baseline predictor: normalize, zero out everything, and invert —
    i.e. predict every rating as its per-position mean."""
    norm = NormalizePositive()
    normalized = norm.fit_transform(train)
    return norm.inverse_transform(normalized * 0.)
def main(transpose_inputs=False):
    """Evaluate the mean-rating baseline on MovieLens 100k with R2.

    transpose_inputs=False normalizes per user (rows), True per movie.
    """
    from load_ml100k import get_train_test
    from sklearn import metrics
    train, test = get_train_test(random_state=12)
    if transpose_inputs:
        train = train.T
        test = test.T
    predicted = predict(train)
    rated = test > 0  # score only positions that actually have ratings
    r2 = metrics.r2_score(test[rated], predicted[rated])
    label = 'movie' if transpose_inputs else 'user'
    print('R2 score ({} normalization): {:.1%}'.format(label, r2))
if __name__ == '__main__':
    main()                        # per-user normalization
    main(transpose_inputs=True)   # per-movie normalization
| mit |
pianomania/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 36 | 6957 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
def dist_func(x1, x2, p):
    """Reference Minkowski p-distance used by the pyfunc metric tests.

    Note: raw (not absolute) differences, as in the original test helper.
    """
    diffs = x1 - x2
    return np.sum(diffs ** p) ** (1. / p)
def cmp_version(version1, version2):
    """Three-way compare of the major.minor parts of two version strings.

    Returns -1, 0 or 1; components beyond the first two are ignored.
    """
    v1 = tuple(int(part) for part in version1.split('.')[:2])
    v2 = tuple(int(part) for part in version2.split('.')[:2])
    if v1 < v2:
        return -1
    if v1 > v2:
        return 1
    return 0
class TestMetrics:
    """Nose-style (yield-based) comparison of sklearn's DistanceMetric
    implementations against scipy.spatial.distance.cdist, for both float
    and boolean metrics, plus pickle round-trips."""

    def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
                 rseed=0, dtype=np.float64):
        # NOTE(review): zero_frac is accepted but never used.
        np.random.seed(rseed)
        self.X1 = np.random.random((n1, d)).astype(dtype)
        self.X2 = np.random.random((n2, d)).astype(dtype)
        # make boolean arrays: ones and zeros
        self.X1_bool = self.X1.round(0)
        self.X2_bool = self.X2.round(0)
        V = np.random.random((d, d))
        VI = np.dot(V, V.T)  # symmetric positive semi-definite for mahalanobis
        # metric name -> dict of parameter name -> tuple of values to sweep
        self.metrics = {'euclidean': {},
                        'cityblock': {},
                        'minkowski': dict(p=(1, 1.5, 2, 3)),
                        'chebyshev': {},
                        'seuclidean': dict(V=(np.random.random(d),)),
                        'wminkowski': dict(p=(1, 1.5, 3),
                                           w=(np.random.random(d),)),
                        'mahalanobis': dict(VI=(VI,)),
                        'hamming': {},
                        'canberra': {},
                        'braycurtis': {}}
        self.bool_metrics = ['matching', 'jaccard', 'dice',
                             'kulsinski', 'rogerstanimoto', 'russellrao',
                             'sokalmichener', 'sokalsneath']

    def test_cdist(self):
        """Yield one cross-distance check per metric/parameter combination."""
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X2, metric, **kwargs)
                yield self.check_cdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X2_bool, metric)
            yield self.check_cdist_bool, metric, D_true

    def check_cdist(self, metric, kwargs, D_true):
        """pairwise(X1, X2) must match scipy's cdist reference."""
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1, self.X2)
        assert_array_almost_equal(D12, D_true)

    def check_cdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool, self.X2_bool)
        assert_array_almost_equal(D12, D_true)

    def test_pdist(self):
        """Yield one self-distance check per metric/parameter combination
        (computed as cdist(X1, X1) for the reference)."""
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X1, metric, **kwargs)
                yield self.check_pdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X1_bool, metric)
            yield self.check_pdist_bool, metric, D_true

    def check_pdist(self, metric, kwargs, D_true):
        """pairwise(X1) (single-argument form) must match the reference."""
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1)
        assert_array_almost_equal(D12, D_true)

    def check_pdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool)
        assert_array_almost_equal(D12, D_true)

    def test_pickle(self):
        """Yield one pickle round-trip check per metric configuration."""
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                yield self.check_pickle, metric, kwargs
        for metric in self.bool_metrics:
            yield self.check_pickle_bool, metric

    def check_pickle_bool(self, metric):
        """A pickled/unpickled metric must compute identical distances."""
        dm = DistanceMetric.get_metric(metric)
        D1 = dm.pairwise(self.X1_bool)
        dm2 = pickle.loads(pickle.dumps(dm))
        D2 = dm2.pairwise(self.X1_bool)
        assert_array_almost_equal(D1, D2)

    def check_pickle(self, metric, kwargs):
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D1 = dm.pairwise(self.X1)
        dm2 = pickle.loads(pickle.dumps(dm))
        D2 = dm2.pairwise(self.X1)
        assert_array_almost_equal(D1, D2)
def test_haversine_metric():
    """The optimized haversine metric must match a direct formula, and its
    reduced-distance conversion must equal sin^2(d/2)."""
    def reference_haversine(p, q):
        half_dlat = np.sin(0.5 * (p[0] - q[0]))
        half_dlon = np.sin(0.5 * (p[1] - q[1]))
        return 2 * np.arcsin(np.sqrt(half_dlat ** 2 +
                                     np.cos(p[0]) * np.cos(q[0]) * half_dlon ** 2))
    X = np.random.random((10, 2))
    haversine = DistanceMetric.get_metric("haversine")
    D_fast = haversine.pairwise(X)
    D_slow = np.array([[reference_haversine(a, b) for b in X] for a in X])
    assert_array_almost_equal(D_fast, D_slow)
    assert_array_almost_equal(haversine.dist_to_rdist(D_fast),
                              np.sin(0.5 * D_slow) ** 2)
def test_pyfunc_metric():
    """A python-callable metric must agree with the builtin euclidean metric,
    and both kinds of metric must survive pickling."""
    X = np.random.random((10, 3))
    euclidean = DistanceMetric.get_metric("euclidean")
    pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
    # Both callable-based and predefined DistanceMetric objects are picklable.
    euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
    pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
    for builtin, custom in ((euclidean, pyfunc), (euclidean_pkl, pyfunc_pkl)):
        assert_array_almost_equal(builtin.pairwise(X), custom.pairwise(X))
def test_bad_pyfunc_metric():
    """A custom metric returning a non-float must raise a clear TypeError."""
    def wrong_distance(x, y):
        return "1"

    X = np.ones((5, 2))
    assert_raises_regex(
        TypeError,
        "Custom distance function must accept two vectors",
        BallTree,
        X,
        metric=wrong_distance,
    )
def test_input_data_size():
    # Regression test for #6288
    # Previously, a metric requiring a particular input dimension would fail.
    # The dimension-checking metric must actually be installed as the pyfunc
    # metric for the regression to be exercised; the original code passed
    # dist_func instead and left custom_metric as dead code.
    def custom_metric(x, y):
        assert x.shape[0] == 3
        return np.sum((x - y) ** 2)

    rng = np.random.RandomState(0)
    X = rng.rand(10, 3)
    pyfunc = DistanceMetric.get_metric("pyfunc", func=custom_metric)
    eucl = DistanceMetric.get_metric("euclidean")
    # custom_metric returns the *squared* euclidean distance.
    assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X) ** 2)
| bsd-3-clause |
madgik/exareme | Exareme-Docker/src/mip-algorithms/CART/cart_lib.py | 1 | 34421 | from __future__ import division
from __future__ import print_function
import sys
from os import path
import numpy as np
import pandas as pd
import json
import itertools
import logging
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))) + '/utils/')
from algorithm_utils import TransferData, PRIVACY_MAGIC_NUMBER, init_logger
def add_vals(a, b):
    """Add two optional numbers, treating None as 0.

    Returns None only when both operands are None, so "no data from any
    worker" stays distinguishable from a genuine zero.
    """
    if a is None and b is None:
        return None
    return (a or 0) + (b or 0)
def DataFrameFilter(df, key, operator, value):
    """Return the rows of df where ``df[key] <operator> value``.

    Supported operators: '==', '>', '<', '>=', '<='.  Raises ValueError for
    anything else (the original silently returned None, which surfaced later
    as a confusing AttributeError on the caller's side).
    """
    if operator == '==':
        return df[df[key] == value]
    if operator == '>':
        return df[df[key] > value]
    if operator == '<':
        return df[df[key] < value]
    if operator == '>=':
        return df[df[key] >= value]
    if operator == '<=':
        return df[df[key] <= value]
    raise ValueError("DataFrameFilter: unsupported operator %r" % operator)
def add_dict(dict1, dict2):
    """Key-wise sum of two dicts; a key present in only one dict keeps its value."""
    merged = dict(dict2)
    for key, value in dict1.items():
        if key in dict2:
            merged[key] = value + merged[key]
        else:
            merged[key] = value
    return merged
def comp_min_val(a, b):
    """Minimum of two optional values, ignoring None; None when both are None."""
    if a is None:
        return b
    if b is None:
        return a
    return min(a, b)
def comp_max_val(a, b):
    """Maximum of two optional values, ignoring None; None when both are None."""
    if a is None:
        return b
    if b is None:
        return a
    return max(a, b)
class Node:
    """A node of the distributed CART decision tree (classification or regression)."""

    def __init__(self, criterion, colName, gain, threshold, samples, samplesPerClass, classValue):
        # criterion: "gini" (classification) or "mse" (regression).
        self.criterion = criterion
        # Split variable and threshold chosen at this node (None for leaves).
        self.colName = colName
        # Impurity value of the node (Gini or MSE of the parent at split time).
        self.gain = gain
        self.threshold = threshold
        # Number of samples reaching this node.
        self.samples = samples
        # Per-class counts (classification only).
        self.samplesPerClass = samplesPerClass
        # Mean target value (regression only).
        self.classValue = classValue
        self.left = None
        self.right = None

    def tree_to_json(self):
        """Serialise the subtree to nested dicts, masking privacy-sensitive counts."""
        # TODO: do not return samplesPerClass from tree_to_json for privacy reasons
        # raise ValueError(self.criterion,self.gain,self.colName,self.threshold,self.samples,self.samplesPerClass,self.left,self.right )
        rightvalue = None
        if self.right is not None:
            rightvalue = self.right.tree_to_json()
        leftvalue = None
        if self.left is not None:
            leftvalue = self.left.tree_to_json()
        myclass = None
        samplesPerClass = None
        myclassValue = None
        if self.criterion == "gini":  # TODO: what happens when classes tie!!!!!!
            # Majority class of this node; keys are stringified for JSON.
            # NOTE(review): the comparison below indexes self.samplesPerClass
            # with str(key) — presumably the keys are already strings (they
            # come from json.loads); confirm against the local step.
            samplesPerClass = dict()
            for key in self.samplesPerClass:
                if myclass is None or self.samplesPerClass[key] > self.samplesPerClass[myclass]:
                    myclass = str(key)
                samplesPerClass[str(key)] = self.samplesPerClass[key]
        elif self.criterion == "mse":
            myclassValue = self.classValue
        if self.right is not None or self.left is not None:
            # Internal nodes expose no class label; only leaves predict.
            myclass = None
        return {"colName": None if self.gain == 0 or (self.right is None and self.left is None) else str(self.colName),
                "threshold": None if self.gain == 0 or (self.right is None and self.left is None) else self.threshold,
                "criterion": self.criterion,
                "gain": self.gain,
                # Exact small counts are hidden for privacy.
                "samples": "Less than " + str(PRIVACY_MAGIC_NUMBER) if self.samples <= PRIVACY_MAGIC_NUMBER else self.samples,
                # "samplesPerClass" : samplesPerClass,
                "classValue": myclassValue,
                "class": myclass,
                "right": rightvalue,
                "left": leftvalue}

    def grow_tree(self, path, criterion, bestColName, parentNodeGain, bestThreshold, samples, samplesPerClass, classValue):
        """Descend along ``path`` (a list of filter predicates) and attach a new
        node where the path ends; '<=' predicates descend left, '>' right."""
        if len(path) > 0:
            if path[0]["operator"] == '<=':
                if self.left == None:
                    self.left = Node(criterion, bestColName, parentNodeGain, bestThreshold, samples, samplesPerClass, classValue)
                else:
                    self.left.grow_tree(path[1:], criterion, bestColName, parentNodeGain, bestThreshold, samples, samplesPerClass, classValue)
            if path[0]["operator"] == '>':
                if self.right == None:
                    self.right = Node(criterion, bestColName, parentNodeGain, bestThreshold, samples, samplesPerClass, classValue)
                else:
                    self.right.grow_tree(path[1:], criterion, bestColName, parentNodeGain, bestThreshold, samples, samplesPerClass, classValue)
class CartInit_Loc2Glob_TD(TransferData):
    """Local-to-global transfer payload of the initialisation step."""

    def __init__(self, *args):
        if len(args) != 4:
            raise ValueError('Illegal number of arguments.')
        # Predictor variable names.
        self.args_X = args[0]
        # Target variable name (one-element list).
        self.args_Y = args[1]
        # {variable: list of observed levels} for categorical variables.
        self.CategoricalVariables = args[2]
        self.t1 = args[3]

    def get_data(self):
        return self.args_X, self.args_Y, self.CategoricalVariables, self.t1

    def __add__(self, other):
        # NOTE(review): merging ignores `other` entirely — presumably the
        # schema/metadata is identical on every worker; confirm this is intended.
        return CartInit_Loc2Glob_TD(
            self.args_X,
            self.args_Y,
            self.CategoricalVariables,
            self.t1
        )
class Cart_Glob2Loc_TD(TransferData):
    """Global-to-local transfer payload: the current global tree and the
    still-active (unfinished) paths."""

    def __init__(self, *args):
        if len(args) != 2:
            raise ValueError('Illegal number of arguments.')
        self.globalTree, self.activePaths = args

    def get_data(self):
        return self.globalTree, self.activePaths
class CartIter1_Loc2Glob_TD(TransferData):
    """Local-to-global payload of iteration step 1: merges per-path metadata
    (numeric domains, candidate thresholds, sample counts and target
    aggregates) coming from two workers."""

    def __init__(self, *args):
        if len(args) != 1:
            raise ValueError('Illegal number of arguments.')
        self.activePaths = args[0]

    def get_data(self):
        return self.activePaths

    def __add__(self, other):
        # run ./FINALME2.py
        A1 = self.activePaths
        A2 = other.activePaths
        activePathsNew = dict()
        for no in A1:
            activePathsNew[no] = dict()
            # 1. ADD ["filter"] — identical on every worker, copy from A1.
            activePathsNew[no]["filter"] = A1[no]["filter"]
            # 2. ADD ["domainJ"] — element-wise min/max across workers
            #    (comp_min_val/comp_max_val treat None as "no data").
            activePathsNew[no]["domainJ"] = dict()
            for key in A1[no]["domainJ"]:
                activePathsNew[no]["domainJ"][key] = dict()
                activePathsNew[no]["domainJ"][key]["min"] = comp_min_val(A1[no]["domainJ"][key]["min"], A2[no]["domainJ"][key]["min"])
                activePathsNew[no]["domainJ"][key]["max"] = comp_max_val(A1[no]["domainJ"][key]["max"], A2[no]["domainJ"][key]["max"])
            # 2. ADD ["thresholds"] — sorted union of candidate thresholds.
            activePathsNew[no]["thresholdsJ"] = dict()
            for key in A1[no]["thresholdsJ"]:
                activePathsNew[no]["thresholdsJ"][key] = sorted(np.unique(A1[no]["thresholdsJ"][key] + A2[no]["thresholdsJ"][key]))
            # 3. ADD ["samples"] — sample counts are additive.
            activePathsNew[no]["samples"] = A1[no]["samples"] + A2[no]["samples"]
            # 4. If args_Y[0] not in CategoricalVariables: ADD ["classNumbersJ"]["parentNode"]
            if "classNumbersJ" in A1[no] and "classNumbersJ" in A2[no]:
                # Classification: per-class counts are summed key-wise.
                activePathsNew[no]["classNumbersJ"] = dict()
                activePathsNew[no]["classNumbersJ"]["parentNode"] = dict()
                activePathsNew[no]["classNumbersJ"]["parentNode"]["counts"] = add_dict(A1[no]["classNumbersJ"]["parentNode"]["counts"], A2[no]["classNumbersJ"]["parentNode"]["counts"])
            if "statisticsJ" in A1[no] and "statisticsJ" in A2[no]:
                # Regression: target sum and sample count are additive.
                activePathsNew[no]["statisticsJ"] = dict()
                activePathsNew[no]["statisticsJ"]["parentNode"] = dict()
                activePathsNew[no]["statisticsJ"]["parentNode"]["ss_argsY"] = add_vals(A1[no]["statisticsJ"]["parentNode"]["ss_argsY"], A2[no]["statisticsJ"]["parentNode"]["ss_argsY"])
                activePathsNew[no]["statisticsJ"]["parentNode"]["nn_argsY"] = add_vals(A1[no]["statisticsJ"]["parentNode"]["nn_argsY"], A2[no]["statisticsJ"]["parentNode"]["nn_argsY"])
                # TODO: ADD {"ss_argsY" : np.sum(X[args_Y[0]]), "nn_argsY" : X.shape[0] }
        return CartIter1_Loc2Glob_TD(activePathsNew)
class CartIter2_Loc2Glob_TD(TransferData):
    """Local-to-global payload of iteration step 2: sums the per-threshold
    left/right class counts (classification) or target sums and sample counts
    (regression) computed on each worker."""

    def __init__(self, *args):
        if len(args) != 1:
            raise ValueError('Illegal number of arguments.')
        self.activePaths = args[0]

    def get_data(self):
        return self.activePaths

    def __add__(self, other):
        # run ./FINALME2.py
        A1 = self.activePaths
        A2 = other.activePaths
        activePathsNew = dict()
        for no in A1:
            activePathsNew[no] = dict()
            # 1. ADD ["filter"],["thresholds"],["samples"] — already merged in
            #    step 1, so they are simply carried over from A1.
            activePathsNew[no]["filter"] = A1[no]["filter"]
            activePathsNew[no]["thresholdsJ"] = A1[no]["thresholdsJ"]
            activePathsNew[no]["domainJ"] = A1[no]["domainJ"]
            activePathsNew[no]["samples"] = A1[no]["samples"]
            # 2. ADD ["classNumbersJ"]["parentNode"] — carried over as well.
            if "classNumbersJ" in A1[no] and "classNumbersJ" in A2[no]:
                activePathsNew[no]["classNumbersJ"] = dict()
                activePathsNew[no]["classNumbersJ"]["parentNode"] = A1[no]["classNumbersJ"]["parentNode"]
            if "statisticsJ" in A1[no] and "statisticsJ" in A2[no]:
                activePathsNew[no]["statisticsJ"] = dict()
                activePathsNew[no]["statisticsJ"]["parentNode"] = A1[no]["statisticsJ"]["parentNode"]
            # 3. ADD ["classNumbersJ"][key] or ["statisticsJ"][key] — the
            #    per-threshold aggregates are additive across workers.
            for key in A1[no]["thresholdsJ"]:
                if "classNumbersJ" in A1[no] and "classNumbersJ" in A2[no]:
                    if key in A1[no]["classNumbersJ"]:
                        activePathsNew[no]["classNumbersJ"][key] = dict()
                        activePathsNew[no]["classNumbersJ"][key]["countsRight"] = list()
                        activePathsNew[no]["classNumbersJ"][key]["countsLeft"] = list()
                        for i in xrange(len(activePathsNew[no]["thresholdsJ"][key])):
                            activePathsNew[no]["classNumbersJ"][key]["countsRight"].append(add_dict(A1[no]["classNumbersJ"][key]["countsRight"][i], A2[no]["classNumbersJ"][key]["countsRight"][i]))
                            activePathsNew[no]["classNumbersJ"][key]["countsLeft"].append(add_dict(A1[no]["classNumbersJ"][key]["countsLeft"][i], A2[no]["classNumbersJ"][key]["countsLeft"][i]))
                if "statisticsJ" in A1[no] and "statisticsJ" in A2[no]:
                    if key in A1[no]["statisticsJ"]:
                        activePathsNew[no]["statisticsJ"][key] = dict()
                        activePathsNew[no]["statisticsJ"][key]["ssLeft"] = [add_vals(A1[no]["statisticsJ"][key]["ssLeft"][i], A2[no]["statisticsJ"][key]["ssLeft"][i]) for i in xrange(len(A1[no]["statisticsJ"][key]["ssLeft"]))]
                        activePathsNew[no]["statisticsJ"][key]["ssRight"] = [add_vals(A1[no]["statisticsJ"][key]["ssRight"][i], A2[no]["statisticsJ"][key]["ssRight"][i]) for i in xrange(len(A1[no]["statisticsJ"][key]["ssRight"]))]
                        activePathsNew[no]["statisticsJ"][key]["nnLeft"] = [add_vals(A1[no]["statisticsJ"][key]["nnLeft"][i], A2[no]["statisticsJ"][key]["nnLeft"][i]) for i in xrange(len(A1[no]["statisticsJ"][key]["nnLeft"]))]
                        activePathsNew[no]["statisticsJ"][key]["nnRight"] = [add_vals(A1[no]["statisticsJ"][key]["nnRight"][i], A2[no]["statisticsJ"][key]["nnRight"][i]) for i in xrange(len(A1[no]["statisticsJ"][key]["nnRight"]))]
        return CartIter2_Loc2Glob_TD(activePathsNew)
class CartIter3_Loc2Glob_TD(TransferData):
    """Local-to-global payload of iteration step 3: merges per-node MSE
    partial sums (regression) computed on each worker."""

    def __init__(self, *args):
        if len(args) != 1:
            raise ValueError('Illegal number of arguments.')
        self.activePaths = args[0]

    def get_data(self):
        return self.activePaths

    def __add__(self, other):
        # run ./FINALME2.py
        A1 = self.activePaths
        A2 = other.activePaths
        activePathsNew = dict()
        for no in A1:
            activePathsNew[no] = dict()
            # Metadata already merged in earlier steps: carry over from A1.
            activePathsNew[no]["filter"] = A1[no]["filter"]
            activePathsNew[no]["thresholdsJ"] = A1[no]["thresholdsJ"]
            activePathsNew[no]["domainJ"] = A1[no]["domainJ"]
            activePathsNew[no]["samples"] = A1[no]["samples"]
            if "classNumbersJ" in A1[no] and "classNumbersJ" in A2[no]:
                activePathsNew[no]["classNumbersJ"] = A1[no]["classNumbersJ"]
            if "statisticsJ" in A1[no] and "statisticsJ" in A2[no]:
                activePathsNew[no]["statisticsJ"] = dict()
                activePathsNew[no]["statisticsJ"]["parentNode"] = A1[no]["statisticsJ"]["parentNode"]
                # Sum of squared deviations is additive across workers.
                activePathsNew[no]["statisticsJ"]["parentNode"]["mse"] = add_vals(A1[no]["statisticsJ"]["parentNode"]["mse"], A2[no]["statisticsJ"]["parentNode"]["mse"])
                for key in A1[no]["thresholdsJ"]:
                    if key in A1[no]["statisticsJ"]:
                        src1 = A1[no]["statisticsJ"][key]
                        src2 = A2[no]["statisticsJ"][key]
                        entry = dict()
                        # BUG FIX: ssRight, nnLeft and nnRight previously all
                        # copied ssLeft (copy-paste error); each field is now
                        # carried over from its own slot.
                        entry["ssLeft"] = src1["ssLeft"]
                        entry["ssRight"] = src1["ssRight"]
                        entry["nnLeft"] = src1["nnLeft"]
                        entry["nnRight"] = src1["nnRight"]
                        entry["meanLeft"] = src1["meanLeft"]
                        entry["meanRight"] = src1["meanRight"]
                        # Per-threshold squared deviations are additive.
                        entry["mseLeft"] = [add_vals(src1["mseLeft"][i], src2["mseLeft"][i]) for i in range(len(src1["mseLeft"]))]
                        entry["mseRight"] = [add_vals(src1["mseRight"][i], src2["mseRight"][i]) for i in range(len(src1["mseRight"]))]
                        activePathsNew[no]["statisticsJ"][key] = entry
        return CartIter3_Loc2Glob_TD(activePathsNew)
##########################################################################################################################
def cart_init_1_local(dataFrame, dataSchema, CategoricalVariables):
    """Drop incomplete rows and record the observed levels of each categorical
    variable; returns the cleaned frame and the updated levels mapping."""
    # Remove rows with any null value.
    dataFrame = dataFrame.dropna()
    # Drop rows whose categorical value is falsy (e.g. the empty string).
    for column in dataSchema:
        if column in CategoricalVariables:
            dataFrame = dataFrame[dataFrame[column].astype(bool)]
    # Record the distinct levels actually present in the filtered data.
    for column in CategoricalVariables:
        CategoricalVariables[column] = list(np.unique(dataFrame[column]))
    # if len(dataFrame) < PRIVACY_MAGIC_NUMBER:
    #     raise PrivacyError('The Experiment could not run with the input provided because there are insufficient data.')
    return dataFrame, CategoricalVariables
##########################################################################################################################
def cart_init_1_global():
    """Initialise the global model state: no tree built, no paths explored yet."""
    return None, None
##########################################################################################################################
def compute_local_thresholds(X, args_X, CategoricalVariables):
    """For every categorical predictor, enumerate the candidate binary
    partitions of its levels; numeric predictors get an empty list here and
    are filled in globally (see cart_step_1_global)."""
    thresholdsJ = dict()
    for varx in args_X:
        thresholdsJ[varx] = []
        if varx in CategoricalVariables:  # For categorical only variables
            L = []
            # Every subset of the levels, paired with its complement, yields
            # one candidate left/right split (empty sides are discarded).
            for combs in (itertools.combinations(CategoricalVariables[varx], r) for r in range(len(CategoricalVariables[varx])+1)):
                for comb in combs:
                    diff = list(set(CategoricalVariables[varx][:]) - set(comb))
                    # print diff, list(comb)
                    if diff != [] and list(comb) != []:
                        L.append({"left": diff, "right": list(comb)})
            # Each partition appears twice (mirrored); keep only the first half.
            thresholdsJ[varx] = L[0:int(len(L)/2)]
            # thresholdsJ[varx] = set(thresholdsJ[varx]) #keep unique values
            # thresholdsJ[varx] = list(thresholdsJ[varx])
            # thresholdsJ[varx].sort()
    return thresholdsJ
def compute_local_domain(X, args_X, CategoricalVariables):
    """Local min/max of every numeric variable; None bounds when no rows remain."""
    domain = dict()
    for varx in args_X:
        if varx in CategoricalVariables:
            continue  # only numeric variables have a min/max domain
        column = X[varx]
        if len(column) == 0:
            domain[varx] = {"min": None, "max": None}
        else:
            domain[varx] = {"min": min(column), "max": max(column)}
    return domain
def cart_step_1_local(X, args_X, args_Y, CategoricalVariables, activePaths):
    """Local part of iteration step 1.

    On the first call (activePaths is None) a single root path covering the
    whole dataset is created; afterwards, every active path is re-evaluated on
    the subset of X matching its filter.  For each path this computes local
    candidate thresholds, numeric domains, the sample count, and either
    per-class counts (classification) or sum/count of the target (regression).
    """
    # Compute local thresholds for each activePath and variable,
    # Compute samples per class (for classification), ss and nn (for regression)
    if activePaths == None:
        # First iteration: start with one root path and an empty filter.
        activePaths = dict()
        activePaths[0] = dict()
        activePaths[0]["filter"] = []
        activePaths[0]["thresholdsJ"] = compute_local_thresholds(X, args_X, CategoricalVariables)
        activePaths[0]["domainJ"] = compute_local_domain(X, args_X, CategoricalVariables)
        activePaths[0]["samples"] = X.shape[0]
        if args_Y[0] not in CategoricalVariables:  # Regression Algorithm
            activePaths[0]["statisticsJ"] = dict()
            activePaths[0]["statisticsJ"]["parentNode"] = {"ss_argsY": np.sum(X[args_Y[0]]), "nn_argsY": X.shape[0]}
        elif args_Y[0] in CategoricalVariables:  # Classification Algorithm
            activePaths[0]["classNumbersJ"] = dict()
            # json round-trip yields a plain {class-label-string: count} dict.
            activePaths[0]["classNumbersJ"]["parentNode"] = {"counts": json.loads(X.groupby(args_Y[0])[args_Y[0]].count().to_json())}
            # ["samplesPerClass"] = json.loads(X.groupby(args_Y[0])[args_Y[0]].count().to_json())
    else:
        for key in activePaths:
            dX = X
            # For each unfinished path, find the subset of dataFrame (df)
            for i in xrange(len(activePaths[key]['filter'])):
                dX = DataFrameFilter(dX, activePaths[key]['filter'][i]["variable"],
                                     activePaths[key]['filter'][i]["operator"],
                                     activePaths[key]['filter'][i]["value"])
            activePaths[key]["thresholdsJ"] = compute_local_thresholds(dX, args_X, CategoricalVariables)
            activePaths[key]["domainJ"] = compute_local_domain(dX, args_X, CategoricalVariables)
            activePaths[key]["samples"] = dX.shape[0]
            if args_Y[0] not in CategoricalVariables:  # Regression Algorithm
                activePaths[key]["statisticsJ"] = dict()
                activePaths[key]["statisticsJ"]["parentNode"] = {"ss_argsY": np.sum(dX[args_Y[0]]), "nn_argsY": dX.shape[0]}
            elif args_Y[0] in CategoricalVariables:  # Classification Algorithm
                activePaths[key]["classNumbersJ"] = dict()
                activePaths[key]["classNumbersJ"]["parentNode"] = {"counts": json.loads(dX.groupby(args_Y[0])[args_Y[0]].count().to_json())}
            else:
                raise ValueError("ERROR2", activePaths)
    return activePaths
##########################################################################################################################
def cart_step_1_global(args_X, args_Y, CategoricalVariables, activePaths, no_split_points):
    """Replace sparse numeric threshold lists with evenly spaced candidate
    split points over the merged [min, max] domain of each variable."""
    for path in activePaths.values():
        for varname in path['thresholdsJ']:
            if varname in CategoricalVariables:
                continue  # categorical partitions are already enumerated locally
            if len(path['thresholdsJ'][varname]) >= no_split_points:
                continue
            lo = path["domainJ"][varname]["min"]
            hi = path["domainJ"][varname]["max"]
            path['thresholdsJ'][varname] = []
            # A degenerate (constant) variable yields no candidate splits.
            if hi != lo:
                step = (hi - lo) / no_split_points
                for point in np.arange(lo, hi, step):
                    path['thresholdsJ'][varname].append(point)
    return activePaths
##########################################################################################################################
def node_computations(dataFrame, colNames, activePath, className, CategoricalVariables, flag):
    """Per-variable, per-threshold aggregates for one tree node.

    flag == "classNumbers": class counts on each side of every candidate
    threshold (classification).  flag == "statistics": sum and count of the
    target on each side (regression).  Categorical thresholds are
    {'left': [...levels], 'right': [...levels]} partitions; numeric ones are
    plain cut points.
    """
    if flag == "classNumbers":
        classNumbersJ = dict()
    elif flag == "statistics":
        statisticsJ = dict()
    for colName in colNames:
        # Only the split variable and the target are needed.
        df = dataFrame[[colName, className]]
        thresholds = activePath["thresholdsJ"][colName]
        if flag == "classNumbers":
            countsLeft = [None]*len(thresholds)
            countsRight = [None]*len(thresholds)
        elif flag == "statistics":
            ssLeft = [None]*len(thresholds)
            ssRight = [None]*len(thresholds)
            nnLeft = [None]*len(thresholds)
            nnRight = [None]*len(thresholds)
        for i in xrange(len(thresholds)):
            # Split the rows into the two children of this candidate split.
            if colName not in CategoricalVariables:
                dfLeft = df.loc[df[colName] <= thresholds[i]]
                dfRight = df.loc[df[colName] > thresholds[i]]
            elif colName in CategoricalVariables:
                dfLeft = df.loc[df[colName].isin(thresholds[i]['left'])]
                dfRight = df.loc[df[colName].isin(thresholds[i]['right'])]
            if flag == "classNumbers":
                # json round-trip yields plain {class-label: count} dicts.
                countsLeft[i] = json.loads(dfLeft.groupby(className)[className].count().to_json())
                countsRight[i] = json.loads(dfRight.groupby(className)[className].count().to_json())
            elif flag == "statistics":
                ssLeft[i] = np.sum(dfLeft[className])
                ssRight[i] = np.sum(dfRight[className])
                nnLeft[i] = len(dfLeft[className])
                nnRight[i] = len(dfRight[className])
        if flag == "classNumbers":
            classNumbersJ[colName] = {"countsLeft": countsLeft, "countsRight": countsRight}
        elif flag == "statistics":
            statisticsJ[colName] = {"ssLeft": ssLeft, "ssRight": ssRight, "nnLeft": nnLeft, "nnRight": nnRight}
    if flag == "classNumbers":
        return classNumbersJ
    if flag == "statistics":
        return statisticsJ
def cart_step_2_local(dataFrame, CategoricalVariables, args_X, args_Y, activePaths):
    """Local part of iteration step 2: filter the data down each active path
    and collect per-threshold class counts (classification) or target
    sums/counts (regression) into the path's aggregate dict."""
    for key in activePaths:
        df = dataFrame
        # Reduce the frame to the rows matching every predicate on this path.
        for pred in activePaths[key]['filter']:
            df = DataFrameFilter(df, pred["variable"], pred["operator"], pred["value"])
        if args_Y[0] in CategoricalVariables:  # Classification Algorithm
            resultJ = node_computations(df, args_X, activePaths[key], args_Y[0], CategoricalVariables, "classNumbers")
            # dict.update replaces the Python-2-only dict(a.items() + b.items())
            # merge with identical semantics (resultJ keys win on collision).
            activePaths[key]["classNumbersJ"].update(resultJ)
        else:  # Regression Algorithm
            resultJ = node_computations(df, args_X, activePaths[key], args_Y[0], CategoricalVariables, "statistics")
            activePaths[key]["statisticsJ"].update(resultJ)
    return activePaths
##########################################################################################################################
def cart_step_2_global(args_X, args_Y, CategoricalVariables, activePaths):
    """Global part of iteration step 2 (regression only): turn the aggregated
    target sums/counts into means; an empty side gets mean None."""
    if args_Y[0] in CategoricalVariables:
        return activePaths  # classification needs no mean computation
    for path in activePaths.values():
        stats = path["statisticsJ"]
        parent = stats["parentNode"]
        parent["mean_argsY"] = parent["ss_argsY"] / parent["nn_argsY"]
        for colName in args_X:
            col = stats[colName]
            col["meanLeft"] = [s / n if n != 0 else None for s, n in zip(col["ssLeft"], col["nnLeft"])]
            col["meanRight"] = [s / n if n != 0 else None for s, n in zip(col["ssRight"], col["nnRight"])]
    return activePaths
##########################################################################################################################
def compute_statistics2_in_the_node(dataFrame, colNames, activePath, className, CategoricalVariables):
    """Squared deviations of the target from the globally agreed left/right
    means, for every variable and candidate threshold of one node
    (regression only)."""
    statisticsJ = dict()
    for colName in colNames:
        df = dataFrame[[colName, className]]
        thresholds = activePath["thresholdsJ"][colName]
        mseLeft = [None]*len(thresholds)
        mseRight = [None]*len(thresholds)
        for i in xrange(len(thresholds)):
            # Split the rows into the two children of this candidate split.
            if colName not in CategoricalVariables:
                dfLeft = df.loc[df[colName] <= thresholds[i]]
                dfRight = df.loc[df[colName] > thresholds[i]]
            elif colName in CategoricalVariables:  # TODO!!! : Check this (what happens if it is a string)!! (how I compute mseLeft and mseRight)
                dfLeft = df.loc[df[colName].isin(thresholds[i]['left'])]
                dfRight = df.loc[df[colName].isin(thresholds[i]['right'])]
            mseLeft[i] = None
            mseRight[i] = None
            # A side that is empty on every worker has mean None; skip it.
            if activePath["statisticsJ"][colName]["meanLeft"][i] is not None:
                mseLeft[i] = np.sum((dfLeft[className] - activePath["statisticsJ"][colName]["meanLeft"][i])**2)
            if activePath["statisticsJ"][colName]["meanRight"][i] is not None:
                mseRight[i] = np.sum((dfRight[className] - activePath["statisticsJ"][colName]["meanRight"][i])**2)
        statisticsJ[colName] = {"mseLeft": mseLeft, "mseRight": mseRight}
        # print (statisticsJ)
    return statisticsJ
def cart_step_3_local(dataFrame, args_X, args_Y, CategoricalVariables, activePaths):
    """Local part of iteration step 3 (regression only): compute local sums of
    squared deviations from the already-merged global means, both for the
    node itself and for each candidate split."""
    for key in activePaths:
        df = dataFrame
        # For each unfinished path, find the subset of dataFrame (df)
        for i in xrange(len(activePaths[key]['filter'])):
            df = DataFrameFilter(df, activePaths[key]['filter'][i]["variable"],
                                 activePaths[key]['filter'][i]["operator"],
                                 activePaths[key]['filter'][i]["value"])
        if args_Y[0] not in CategoricalVariables:  # Regression Algorithm
            # Local contribution to the node's total squared error.
            activePaths[key]["statisticsJ"]["parentNode"]["mse"] = np.sum((df[args_Y[0]] - activePaths[key]["statisticsJ"]["parentNode"]["mean_argsY"])**2)
            mystat = compute_statistics2_in_the_node(df, args_X, activePaths[key], args_Y[0], CategoricalVariables)
            for colName in args_X:
                activePaths[key]["statisticsJ"][colName]["mseLeft"] = mystat[colName]["mseLeft"]
                activePaths[key]["statisticsJ"][colName]["mseRight"] = mystat[colName]["mseRight"]
    return activePaths
##########################################################################################################################
def compute_gini(colName, thresholds, classNumbersJOfColname, nnNode, classDistVal):
    """Weighted Gini impurity of every candidate split of one variable.

    classNumbersJOfColname holds, per threshold index, the class counts of the
    left and right child ("countsLeft"/"countsRight"); nnNode is the number of
    samples in the parent node and classDistVal the list of class labels.
    Returns (colName, bestGini, bestThreshold) for the minimum weighted Gini.
    (Dead locals giniLeft/giniRight from the original were removed; xrange was
    replaced by range, which behaves identically here.)
    """
    giniTotal = [0.0] * len(thresholds)
    for i in range(len(thresholds)):
        left = classNumbersJOfColname["countsLeft"][i]
        right = classNumbersJOfColname["countsRight"][i]
        ccLeft = 0.0
        nnLeft = 0
        ccRight = 0.0
        nnRight = 0
        for val in classDistVal:
            if val in left:
                ccLeft += left[val] ** 2
                nnLeft += left[val]
            if val in right:
                ccRight += right[val] ** 2
                nnRight += right[val]
        # Weighted impurity: each child's Gini scaled by its share of the node.
        if nnLeft > 0:
            giniTotal[i] += (nnLeft / nnNode) * (1.0 - ccLeft / nnLeft ** 2)
        if nnRight > 0:
            giniTotal[i] += (nnRight / nnNode) * (1.0 - ccRight / nnRight ** 2)
    bestGini = min(giniTotal)
    bestThreshold = thresholds[giniTotal.index(bestGini)]
    return colName, bestGini, bestThreshold
def compute_mse(colName, thresholds, statisticJOfColname, gainNode, nnNode):
    """Mean squared error of every candidate split of one variable; returns
    (colName, bestGain, bestThreshold) for the minimum.

    NOTE(review): the `gainNode` parameter is unused here.
    """
    # Compute mse for the right and left node
    gain = [0.0]*len(thresholds)
    for i in xrange(len(thresholds)):
        gain[i] = None
        # A split where both sides are empty everywhere keeps gain None.
        if nnNode != 0 and (statisticJOfColname['mseLeft'][i] is not None or statisticJOfColname['mseRight'][i] is not None):
            gain[i] = add_vals(statisticJOfColname['mseLeft'][i], statisticJOfColname['mseRight'][i]) / nnNode
    # NOTE(review): entries may still be None; under Python 2, min() orders
    # None below any number, so a degenerate split could be selected (with a
    # None bestGain) — confirm this is the intended behaviour.
    bestGain = min(gain)
    bestThreshold = thresholds[gain.index(bestGain)]
    return colName, bestGain, bestThreshold
def best_splits(activePath, colNames, className, CategoricalVariables):
    """Pick the variable/threshold with the lowest impurity for one node.

    Returns (bestColName, gainNode, bestGain, bestThreshold), where gainNode
    is the impurity of the parent node itself (Gini for classification, MSE
    for regression).
    NOTE(review): if every variable has an empty threshold list, bestColName
    and bestThreshold are never assigned and a NameError would be raised —
    cart_step_3_global appears to guard against this before calling.
    """
    bestGain = None
    # Compute mse for the parent node
    if className in CategoricalVariables:  # Classification Algorithm
        # Gini of the parent node: 1 - sum over classes of p_c^2.
        gainNode = 0.0
        for key in activePath["classNumbersJ"]["parentNode"]["counts"]:
            gainNode += activePath["classNumbersJ"]["parentNode"]["counts"][key]**2
        gainNode = 1.0 - gainNode / activePath["samples"]**2
    elif className not in CategoricalVariables:  # Regression Algorithm
        gainNode = activePath["statisticsJ"]["parentNode"]["mse"] / activePath["statisticsJ"]["parentNode"]["nn_argsY"]
    for colName in colNames:
        if len(activePath["thresholdsJ"][colName]) > 0:
            if className in CategoricalVariables:  # Classification Algorithm
                colName, gain, threshold = compute_gini(colName, activePath["thresholdsJ"][colName], activePath["classNumbersJ"][colName], activePath["samples"], CategoricalVariables[className])
            elif className not in CategoricalVariables:  # Regression Algorithm
                colName, gain, threshold = compute_mse(colName, activePath["thresholdsJ"][colName], activePath["statisticsJ"][colName], gainNode, activePath["statisticsJ"]["parentNode"]["nn_argsY"])
            # Keep the split with the lowest impurity seen so far.
            if bestGain == None or bestGain > gain:
                bestColName = colName
                bestGain = gain
                bestThreshold = threshold
    return bestColName, gainNode, bestGain, bestThreshold
def cart_step_3_global(args_X, args_Y, CategoricalVariables, globalTree, activePaths):
    """Global part of iteration step 3: choose the best split of every active
    path, grow the global tree accordingly, and emit the two child paths
    ('<=' and '>') of every node that was actually split; impure-but-splittable
    nodes continue, pure nodes become leaves."""
    if args_Y[0] not in CategoricalVariables:  # Regression Algorithm
        criterion = "mse"
    elif args_Y[0] in CategoricalVariables:  # Classification Algorithm
        criterion = "gini"
    # logging.warning(["activePaths:", activePaths])
    activePathsNew = dict()
    no = 0
    for key in activePaths:
        # A path can only be split if it still has samples and at least one
        # candidate threshold on some variable.
        if activePaths[key]["samples"] > 1 and max([len(activePaths[key]['thresholdsJ'][i]) for i in args_X]) > 0:  # if activePaths[key]["samples"] > PRIVACY_MAGIC_NUMBER:
            bestColName, valueParentNode, bestValue, bestThreshold = best_splits(activePaths[key], args_X, args_Y[0], CategoricalVariables)
            if valueParentNode > 0:  # If the number of samples>PRIVACY_NYMBER then I have privacy issues or #If GiniNode = 0 then I am in a leaf wih pure class.
                # print ("MYKEY", key,activePaths[key]["samples"])
                # Two new active paths: left child (<=) and right child (>).
                activePathsNew[no] = dict()
                activePathsNew[no]["thresholdsJ"] = None
                activePathsNew[no]["filter"] = activePaths[key]['filter'][:]
                activePathsNew[no]["filter"].append({"variable": bestColName, "operator": "<=", "value": bestThreshold})
                activePathsNew[no+1] = dict()
                activePathsNew[no+1]["thresholdsJ"] = None
                activePathsNew[no+1]["filter"] = activePaths[key]['filter'][:]
                activePathsNew[no+1]["filter"].append({"variable": bestColName, "operator": ">", "value": bestThreshold})
                no = no + 2
                samplesPerClass = None
                if args_Y[0] in CategoricalVariables:  # Classification Algorithm
                    samplesPerClass = activePaths[key]["classNumbersJ"]["parentNode"]["counts"]
                classValue = None
                if args_Y[0] not in CategoricalVariables:  # Regression Algorithm
                    classValue = activePaths[key]["statisticsJ"]["parentNode"]["ss_argsY"] / activePaths[key]["statisticsJ"]["parentNode"]["nn_argsY"]
                if globalTree is None:
                    # First split ever: this node becomes the tree root.
                    globalTree = Node(criterion, bestColName, valueParentNode, bestThreshold, activePaths[key]["samples"], samplesPerClass, classValue)
                    # globalTreeJ = globalTree.tree_to_json()
                    # raise ValueError("AA", criterion, bestColName, valueParentNode, bestThreshold, activePaths[key]["samples"], samplesPerClass)
                else:
                    globalTree.grow_tree(activePaths[key]['filter'], criterion, bestColName, valueParentNode, bestThreshold, activePaths[key]["samples"], samplesPerClass, classValue)
            else:  # It is leaf -->TODO
                samplesPerClass = None
                if args_Y[0] in CategoricalVariables:  # Classification Algorithm
                    samplesPerClass = activePaths[key]["classNumbersJ"]["parentNode"]["counts"]
                classValue = None
                if args_Y[0] not in CategoricalVariables:  # Regression Algorithm
                    classValue = activePaths[key]["statisticsJ"]["parentNode"]["ss_argsY"] / activePaths[key]["statisticsJ"]["parentNode"]["nn_argsY"]
                # NOTE(review): if the very first node is already a pure leaf,
                # globalTree is still None here and grow_tree raises — confirm
                # upstream guarantees at least one split before this branch.
                globalTree.grow_tree(activePaths[key]['filter'], criterion, None, None, None, activePaths[key]["samples"], samplesPerClass, classValue)  # Perhaps more fields could be passed here, but due to privacy it is not needed
    # logging.warning(["activePathsNew:", activePathsNew])
    activePaths = activePathsNew
    # globalTreeJ = globalTree.tree_to_json()
    return globalTree, activePaths
#if global_state['stepsNo'] ==1:
# raise ValueError(globalTreeJ)
#print activePaths
#print globalTreeJ
| mit |
jabeerahmed/testrepo | term1/Lessons/LaneDetection/hough_transform.py | 1 | 6249 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
# 24-colour RGB palette sweeping the hue wheel (red -> green -> blue -> red);
# getColor indexes into it so that drawn line segments get distinct colours.
COLOR_SWATCH = [
    [255, 64, 0], [255, 128, 0], [255, 191, 0],
    [255, 255, 0], [191, 255, 0], [128, 255, 0], [64, 255, 0],
    [0, 255, 0], [0, 255, 64], [0, 255, 128],
    [0, 255, 191], [0, 255, 255], [0, 191, 255],
    [0, 128, 255], [0, 64, 255], [0, 0, 255],
    [64, 0, 255], [128, 0, 255], [191, 0, 255],
    [255, 0, 255], [255, 0, 191], [255, 0, 128],
    [255, 0, 64], [255, 0, 0]
]
def getColor(counter=0):
    """Pick a palette colour; stepping by 5 entries keeps consecutive counters
    visually distinct, wrapping around the swatch."""
    idx = (counter * 5) % len(COLOR_SWATCH)
    return COLOR_SWATCH[idx]
def image_size(img):
    """Return (height, width) of an image array, ignoring any channel axis."""
    height, width = img.shape[0], img.shape[1]
    return (height, width)
def edge_detect(gray, kernel_size=5, low_threshold=50, high_threshold=150):
    """Gaussian-blur a grayscale image, then run Canny edge detection.

    Returns (edges, blurred_gray).  BUG FIX: the original re-assigned
    kernel_size / low_threshold / high_threshold to their defaults inside the
    body, silently ignoring caller-supplied values; the parameters are now
    honoured (behaviour is unchanged for callers using the defaults).
    """
    blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
    edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
    return edges, blur_gray
def create_region_mask(imshape, vertices, ignore_mask_color=255):
    """Build a uint8 mask of size imshape with the given polygon filled in."""
    height, width = imshape[0], imshape[1]
    mask = np.zeros((height, width), dtype=np.uint8)
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    return mask
def create_quad_region_mask(imshape, top_width=0.1, top_height=0.55, ignore_mask_color=255):
    """Trapezoidal region-of-interest mask: full image width at the bottom,
    narrowing to a top_width fraction at top_height of the image height.
    Returns (vertices, mask)."""
    # This time we are defining a four sided polygon to mask
    h, w = imshape[0], imshape[1]
    top_left_x = ((1.0 - top_width) / 2) * w
    top_right_x = ((1.0 + top_width) / 2) * w
    top_y = h * top_height
    vertices = np.array([[(0, h), (top_left_x, top_y), (top_right_x, top_y), (w, h)]], dtype=np.int32)
    return vertices, create_region_mask(imshape, vertices)
def apply_mask(img, mask):
    """AND a single-channel mask onto an image; multi-channel images are
    masked channel by channel."""
    a = image_size(img)
    b = image_size(mask)
    assert a == b, "image(" + str(a) + ") and mask(" + str(b) + ") size mismatch"
    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) <= 2:
        # Single-channel image: mask directly.
        return cv2.bitwise_and(img, mask)
    masked_image = np.zeros_like(img)
    for channel in range(img.shape[2]):
        masked_image[:, :, channel] = cv2.bitwise_and(img[:, :, channel], mask)
    return masked_image
def hough_lines_P(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform and return the raw segment list."""
    return cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                           minLineLength=min_line_len, maxLineGap=max_line_gap)
def draw_lines(img, lines, multi_color=False, thickness=2):
    """Draw Hough segments onto `img` (in place) and onto a fresh overlay.

    Returns the overlay image containing only the drawn lines. When
    multi_color is set, the color index advances once per segment.
    """
    h, w = img.shape[0], img.shape[1]
    line_img = np.zeros((h, w, 3), dtype=np.uint8)
    color_idx = 0
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            color = getColor(color_idx)
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
            cv2.line(line_img, (x1, y1), (x2, y2), color, thickness)
            if multi_color:
                color_idx += 1
    return line_img
def process(image):
    """Run the full lane-detection pipeline on one RGB frame.

    Steps: grayscale -> Canny edges -> trapezoidal ROI mask -> probabilistic
    Hough transform -> draw detected segments. Returns a 3-channel grayscale
    copy of the frame with the detected lines drawn on it (draw_lines
    mutates `gray_image` in place before it is returned).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    ## Detect Edges
    ED_kernel_size = 5; ED_low_threshold=50; ED_high_threshold=150
    [edges, blur_gray] = edge_detect(gray, ED_kernel_size, ED_low_threshold, ED_high_threshold)
    ## Create Region Mask
    REG_top_width = 0.1; REG_top_height = 0.60
    verts, mask = create_quad_region_mask(gray.shape, REG_top_width, REG_top_height)
    masked_edges = apply_mask(edges, mask)
    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 2 # distance resolution in pixels of the Hough grid
    theta = np.pi/180 * 1 # angular resolution in radians of the Hough grid
    threshold = 60 # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 75 #minimum number of pixels making up a line
    max_line_gap = 50 # maximum gap in pixels between connectable line segments
    gray_image = np.dstack((gray, gray, gray))
    lines = hough_lines_P(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)
    # NOTE(review): draw_lines also draws onto gray_image in place; the
    # returned overlay (line_image) is only used for lines_edges below.
    line_image = draw_lines(gray_image, lines, thickness=3)
    # Create a "color" binary image to combine with line image
    color_edges = np.dstack((edges, edges, edges))
    # Draw the lines on the edge image
    # NOTE(review): lines_edges is computed but never returned or displayed
    # (the plotting below is commented out) -- dead unless re-enabled.
    lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
    # org_plt = plt.figure(1).add_subplot(111)
    # org_plt.imshow(image)
    #
    # roi_plt = plt.figure(2).add_subplot(111)
    # roi_plt.imshow(apply_mask(image, mask))
    #
    # lin_plt = plt.figure(3).add_subplot(111)
    # lin_plt.imshow(lines_edges)
    #
    # gry_plt = plt.figure(4).add_subplot(111)
    # gry_plt.imshow(gray_image)
    #
    # edg_plt = plt.figure(5).add_subplot(111)
    # edg_plt.imshow(edges, cmap='gray')
    #
    # plt.show()
    print("NumLines = " + str(len(lines)))
    return gray_image
#%% Run Test Images
# Run the pipeline over every still image in the test directory.
test_dir = "../project1/test_images/"
test_files = [os.path.abspath(os.path.join(test_dir, f)) for f in os.listdir(test_dir)]
all_img = range(len(test_files))
# NOTE(review): sub_rng is an unused alternative index range kept for
# quick manual switching with all_img.
sub_rng = [0, 1, 2, 3, 4, 5]
for image_index in all_img:
    image_name = test_files[image_index]
    ## Read in and grayscale the image
    image = mpimg.imread(image_name)
    process(image)
#%% Run White mp4
from moviepy.editor import VideoFileClip
from IPython.display import HTML
test_dir = "../project1/"
white_output = os.path.join(test_dir, 'white.mp4')
clip1 = VideoFileClip( os.path.join(test_dir, "solidWhiteRight.mp4"))
white_clip = clip1.fl_image(process) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
#%% Run Yellow mp4
from moviepy.editor import VideoFileClip
from IPython.display import HTML
test_dir = "../project1/"
yellow_output = os.path.join(test_dir, 'yellow.mp4')
clip2 = VideoFileClip( os.path.join(test_dir, "solidYellowLeft.mp4"))
yellow_clip = clip2.fl_image(process)
yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
| gpl-2.0 |
mikekestemont/ruzicka | code/05latin_testviz.py | 1 | 1217 | from __future__ import print_function
import os
import time
import json
import sys
import pickle
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sb
from scipy.spatial.distance import squareform, pdist
import numpy as np
from sklearn.preprocessing import LabelEncoder
from ruzicka.distance_metrics import pairwise_minmax
from ruzicka.visualization import clustermap, tree
from ruzicka.utilities import load_pan_dataset
from ruzicka.vectorization import Vectorizer
# --- Experiment configuration ---
ngram_type = 'word'
ngram_size = 1
# Keep every feature. Fix: sys.maxint only exists on Python 2; sys.maxsize
# is available (and equivalent for this purpose) on both Python 2 and 3.
mfi = sys.maxsize
vector_space = 'tf'

# Load the Latin test corpus: (label, document) pairs.
test_data, _ = load_pan_dataset('../data/latin/test/')
test_labels, test_documents = zip(*test_data)

# fit vectorizer on dev data:
vectorizer = Vectorizer(mfi=mfi,
                        min_df=2,
                        vector_space=vector_space,
                        ngram_type=ngram_type,
                        ngram_size=ngram_size)
X = vectorizer.fit_transform(test_documents).toarray()

# Pairwise minmax distances between all documents, as a square matrix.
dm = squareform(pdist(X, pairwise_minmax))
tree(dm, test_labels)

# scale distance matrix:
# Min-max normalize over the non-zero entries, then restore a zero diagonal.
nonzeroes = dm[dm.nonzero()]
max_ = nonzeroes.max()
min_ = nonzeroes.min()
dm = (dm-min_) / (max_ - min_)
np.fill_diagonal(dm, 0.0)
clustermap(dm, test_labels)
| mit |
benoitsteiner/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 44 | 8747 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
  """Tests for the tf.contrib.factorization Gaussian mixture model estimator."""
  def input_fn(self, batch_size=None, points=None):
    """Builds an input_fn over `points`, sub-sampling when batch_size < n."""
    batch_size = batch_size or self.batch_size
    points = points if points is not None else self.points
    num_points = points.shape[0]
    def _fn():
      x = constant_op.constant(points)
      if batch_size == num_points:
        # Full batch: feed every point, no random sampling needed.
        return x, None
      indices = random_ops.random_uniform(constant_op.constant([batch_size]),
                                          minval=0, maxval=num_points-1,
                                          dtype=dtypes.int32,
                                          seed=10)
      return array_ops.gather(x, indices), None
    return _fn
  def setUp(self):
    # Fix all seeds so data generation and training are deterministic.
    np.random.seed(3)
    random_seed_lib.set_random_seed(2)
    self.num_centers = 2
    self.num_dims = 2
    self.num_points = 4000
    self.batch_size = self.num_points
    self.true_centers = self.make_random_centers(self.num_centers,
                                                 self.num_dims)
    self.points, self.assignments, self.scores = self.make_random_points(
        self.true_centers, self.num_points)
    self.true_score = np.add.reduce(self.scores)
    # Use initial means from kmeans (just like scikit-learn does).
    clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
    clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
                  steps=30)
    self.initial_means = clusterer.clusters()
  @staticmethod
  def make_random_centers(num_centers, num_dims):
    """Returns `num_centers` random integer-valued centers in [0, 500)."""
    return np.round(
        np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
  @staticmethod
  def make_random_points(centers, num_points):
    """Samples points around `centers`; returns (points, assignments, scores).

    Each score is the Mahalanobis distance of the point to its cluster mean
    under that cluster's empirical covariance.
    """
    num_centers, num_dims = centers.shape
    assignments = np.random.choice(num_centers, num_points)
    offsets = np.round(
        np.random.randn(num_points, num_dims).astype(np.float32) * 20)
    points = centers[assignments] + offsets
    means = [
        np.mean(
            points[assignments == center], axis=0)
        for center in xrange(num_centers)
    ]
    covs = [
        np.cov(points[assignments == center].T)
        for center in xrange(num_centers)
    ]
    scores = []
    for r in xrange(num_points):
      scores.append(
          np.sqrt(
              np.dot(
                  np.dot(points[r, :] - means[assignments[r]],
                         np.linalg.inv(covs[assignments[r]])), points[r, :] -
                  means[assignments[r]])))
    return (points, assignments, scores)
  def test_weights(self):
    """Tests the shape of the weights."""
    gmm = gmm_lib.GMM(self.num_centers,
                      initial_clusters=self.initial_means,
                      random_seed=4,
                      config=run_config.RunConfig(tf_random_seed=2))
    gmm.fit(input_fn=self.input_fn(), steps=0)
    weights = gmm.weights()
    self.assertAllEqual(list(weights.shape), [self.num_centers])
  def test_clusters(self):
    """Tests the shape of the clusters."""
    gmm = gmm_lib.GMM(self.num_centers,
                      initial_clusters=self.initial_means,
                      random_seed=4,
                      config=run_config.RunConfig(tf_random_seed=2))
    gmm.fit(input_fn=self.input_fn(), steps=0)
    clusters = gmm.clusters()
    self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
  def test_fit(self):
    """Score should improve (decrease) with more training steps."""
    gmm = gmm_lib.GMM(self.num_centers,
                      initial_clusters='random',
                      random_seed=4,
                      config=run_config.RunConfig(tf_random_seed=2))
    gmm.fit(input_fn=self.input_fn(), steps=1)
    score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
                       steps=1)
    gmm.fit(input_fn=self.input_fn(), steps=10)
    score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
                       steps=1)
    self.assertGreater(score1, score2)
    self.assertNear(self.true_score, score2, self.true_score * 0.15)
  def test_infer(self):
    """Predicted assignments on fresh points should match the generators."""
    gmm = gmm_lib.GMM(self.num_centers,
                      initial_clusters=self.initial_means,
                      random_seed=4,
                      config=run_config.RunConfig(tf_random_seed=2))
    gmm.fit(input_fn=self.input_fn(), steps=60)
    clusters = gmm.clusters()
    # Make a small test set
    num_points = 40
    points, true_assignments, true_offsets = (
        self.make_random_points(clusters, num_points))
    assignments = []
    for item in gmm.predict_assignments(
        input_fn=self.input_fn(points=points, batch_size=num_points)):
      assignments.append(item)
    assignments = np.ravel(assignments)
    self.assertAllEqual(true_assignments, assignments)
    # Test score
    score = gmm.score(input_fn=self.input_fn(points=points,
                                             batch_size=num_points), steps=1)
    self.assertNear(score, np.sum(true_offsets), 4.05)
  def _compare_with_sklearn(self, cov_type):
    """Compares fitted parameters against precomputed sklearn results."""
    # sklearn version.
    iterations = 40
    np.random.seed(5)
    sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
    sklearn_means = np.asarray([[144.83417719, 254.20130341],
                                [274.38754816, 353.16074346]])
    sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
                                [-4.50389512, 408.27543989]],
                               [[385.17484203, -31.27834935],
                                [-31.27834935, 391.74249925]]])
    # skflow version.
    gmm = gmm_lib.GMM(self.num_centers,
                      initial_clusters=self.initial_means,
                      covariance_type=cov_type,
                      config=run_config.RunConfig(tf_random_seed=2))
    gmm.fit(input_fn=self.input_fn(), steps=iterations)
    points = self.points[:10, :]
    skflow_assignments = []
    for item in gmm.predict_assignments(
        input_fn=self.input_fn(points=points, batch_size=10)):
      skflow_assignments.append(item)
    self.assertAllClose(sklearn_assignments,
                        np.ravel(skflow_assignments).astype(int))
    self.assertAllClose(sklearn_means, gmm.clusters())
    if cov_type == 'full':
      self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
    else:
      # Diagonal covariance: compare only the diagonals per cluster.
      for d in [0, 1]:
        self.assertAllClose(
            np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
  def test_compare_full(self):
    self._compare_with_sklearn('full')
  def test_compare_diag(self):
    self._compare_with_sklearn('diag')
  def test_random_input_large(self):
    """Training on wide random input should not diverge to NaN clusters."""
    # sklearn version.
    iterations = 5  # that should be enough to know whether this diverges
    np.random.seed(5)
    num_classes = 20
    x = np.array([[np.random.random() for _ in range(100)]
                  for _ in range(num_classes)], dtype=np.float32)
    # skflow version.
    gmm = gmm_lib.GMM(num_classes,
                      covariance_type='full',
                      config=run_config.RunConfig(tf_random_seed=2))
    def get_input_fn(x):
      def input_fn():
        return constant_op.constant(x.astype(np.float32)), None
      return input_fn
    gmm.fit(input_fn=get_input_fn(x), steps=iterations)
    self.assertFalse(np.isnan(gmm.clusters()).any())
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
dancingdan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 21 | 54488 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
# Feature dimensionality of the Boston housing dataset.
_BOSTON_INPUT_DIM = 13
# Feature dimensionality of the iris dataset.
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
  """Input fn feeding the full Boston set, optionally limited to num_epochs."""
  dataset = base.load_boston()
  raw_features = array_ops.reshape(
      constant_op.constant(dataset.data), [-1, _BOSTON_INPUT_DIM])
  features = input_lib.limit_epochs(raw_features, num_epochs=num_epochs)
  labels = array_ops.reshape(constant_op.constant(dataset.target), [-1, 1])
  return features, labels
def iris_input_fn():
  """Input fn feeding the full iris dataset as (features, labels) tensors."""
  dataset = base.load_iris()
  x = constant_op.constant(dataset.data)
  y = constant_op.constant(dataset.target)
  return (array_ops.reshape(x, [-1, _IRIS_INPUT_DIM]),
          array_ops.reshape(y, [-1]))
def iris_input_fn_labels_dict():
  """Like iris_input_fn, but wraps the labels tensor in a dict."""
  dataset = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(dataset.data), [-1, _IRIS_INPUT_DIM])
  target = array_ops.reshape(constant_op.constant(dataset.target), [-1])
  return features, {'labels': target}
def boston_eval_fn():
  """Eval input fn returning the Boston dataset duplicated along axis 0."""
  dataset = base.load_boston()
  n_examples = len(dataset.target)
  x = array_ops.reshape(
      constant_op.constant(dataset.data), [n_examples, _BOSTON_INPUT_DIM])
  y = array_ops.reshape(
      constant_op.constant(dataset.target), [n_examples, 1])
  doubled_x = array_ops.concat([x, x], 0)
  doubled_y = array_ops.concat([y, y], 0)
  return doubled_x, doubled_y
def extract(data, key):
  """Return data[key] when data is a dict (key must exist), else data as-is."""
  if not isinstance(data, dict):
    return data
  assert key in data
  return data[key]
def linear_model_params_fn(features, labels, mode, params):
  """Linear regression model_fn whose learning rate comes from `params`."""
  x = extract(features, 'input')
  y = extract(labels, 'labels')
  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                 model_fn.ModeKeys.INFER)
  assert mode in valid_modes
  prediction, loss = models.linear_regression_zero_init(x, y)
  train_op = optimizers.optimize_loss(
      loss,
      training_util.get_global_step(),
      optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Linear regression model_fn with a fixed 0.1 Adagrad learning rate."""
  x = extract(features, 'input')
  y = extract(labels, 'labels')
  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                 model_fn.ModeKeys.INFER)
  assert mode in valid_modes
  if isinstance(x, dict):
    # Unwrap a single-entry feature dict down to its lone tensor.
    (_, x), = x.items()
  prediction, loss = models.linear_regression_zero_init(x, y)
  train_op = optimizers.optimize_loss(
      loss,
      training_util.get_global_step(),
      optimizer='Adagrad',
      learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                 model_fn.ModeKeys.INFER)
  assert mode in valid_modes
  prediction, loss = models.linear_regression_zero_init(features, labels)
  global_step = training_util.get_global_step()
  train_op = optimizers.optimize_loss(
      loss, global_step, optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """Three-class logistic regression model_fn that takes no mode argument."""
  x = extract(features, 'input')
  raw_labels = extract(labels, 'labels')
  one_hot_labels = array_ops.one_hot(raw_labels, 3, 1, 0)
  prediction, loss = models.logistic_regression_zero_init(x, one_hot_labels)
  train_op = optimizers.optimize_loss(
      loss,
      training_util.get_global_step(),
      optimizer='Adagrad',
      learning_rate=0.1)
  predictions = {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }
  return predictions, loss, train_op
# Fixture contents written to vocab/asset files in the export tests below.
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
  """Trains a small LinearRegressor on iris and builds a serving_input_fn.

  Returns (estimator, serving_input_fn); the serving fn also injects a
  vocab-file-backed lookup so asset export can be exercised.
  """
  def _input_fn():
    iris = base.load_iris()
    return {
        'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
    }, constant_op.constant(
        iris.target, shape=[150], dtype=dtypes.int32)
  feature_columns = [
      feature_column_lib.real_valued_column('feature', dimension=4)
  ]
  est = linear.LinearRegressor(feature_columns)
  est.fit(input_fn=_input_fn, steps=20)
  feature_spec = feature_column_lib.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
  # hack in an op that uses an asset, in order to test asset export.
  # this is not actually valid, of course.
  def serving_input_fn_with_asset():
    features, labels, inputs = serving_input_fn()
    # Write the vocab fixture into the caller-supplied temp dir.
    vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
    vocab_file = gfile.GFile(vocab_file_name, mode='w')
    vocab_file.write(VOCAB_FILE_CONTENT)
    vocab_file.close()
    hashtable = lookup.HashTable(
        lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
    features['bogus_lookup'] = hashtable.lookup(
        math_ops.to_int64(features['feature']))
    return input_fn_utils.InputFnOps(features, labels, inputs)
  return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
  """Builds an Estimator whose model_fn owns lookup-table resources.

  Returns (estimator, serving_input_fn) for exercising export of
  resource-backed ops (MutableHashTable) in SavedModel tests.
  """
  def _input_fn():
    iris = base.load_iris()
    return {
        'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
    }, constant_op.constant(
        iris.target, shape=[150], dtype=dtypes.int32)
  feature_columns = [
      feature_column_lib.real_valued_column('feature', dimension=4)
  ]
  def resource_constant_model_fn(unused_features, unused_labels, mode):
    """A model_fn that loads a constant from a resource and serves it."""
    assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                    model_fn.ModeKeys.INFER)
    const = constant_op.constant(-1, dtype=dtypes.int64)
    table = lookup.MutableHashTable(
        dtypes.string, dtypes.int64, const, name='LookupTableModel')
    update_global_step = training_util.get_global_step().assign_add(1)
    if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
      key = constant_op.constant(['key'])
      value = constant_op.constant([42], dtype=dtypes.int64)
      train_op_1 = table.insert(key, value)
      # Second table simulates training-only state that should not be served.
      training_state = lookup.MutableHashTable(
          dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
      training_op_2 = training_state.insert(key, value)
      return (const, const,
              control_flow_ops.group(train_op_1, training_op_2,
                                     update_global_step))
    if mode == model_fn.ModeKeys.INFER:
      key = constant_op.constant(['key'])
      prediction = table.lookup(key)
      return prediction, const, update_global_step
  est = estimator.Estimator(model_fn=resource_constant_model_fn)
  est.fit(input_fn=_input_fn, steps=1)
  feature_spec = feature_column_lib.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
  return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
  """Monitor asserting step_begin/step_end each fire exactly `expect_calls` times."""

  def __init__(self, expect_calls):
    super(CheckCallsMonitor, self).__init__()
    # Counters stay None until training begins.
    self.begin_calls = None
    self.end_calls = None
    self.expect_calls = expect_calls

  def begin(self, max_steps):
    self.begin_calls = self.end_calls = 0

  def step_begin(self, step):
    self.begin_calls += 1
    return {}

  def step_end(self, step, outputs):
    self.end_calls += 1
    return False

  def end(self):
    assert self.begin_calls == self.expect_calls
    assert self.end_calls == self.expect_calls
def _model_fn_ops(expected_features, expected_labels, actual_features,
                  actual_labels, mode):
  """Builds trivial ModelFnOps gated on expected-vs-actual equality asserts."""
  feature_checks = [
      check_ops.assert_equal(
          expected_features[k], actual_features[k], name='assert_%s' % k)
      for k in expected_features
  ]
  label_check = check_ops.assert_equal(
      expected_labels, actual_labels, name='assert_labels')
  assert_ops = tuple(feature_checks + [label_check])
  # The constant outputs only evaluate once every assertion has passed.
  with ops.control_dependencies(assert_ops):
    return model_fn.ModelFnOps(
        mode=mode,
        predictions=constant_op.constant(0.),
        loss=constant_op.constant(0.),
        train_op=training_util.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
  """Wraps in-memory features/labels into a constant-tensor input_fn."""

  def _input_fn():
    feature_tensors = {
        name: constant_op.constant(value)
        for name, value in six.iteritems(features)
    }
    return feature_tensors, constant_op.constant(labels)

  return _input_fn
class EstimatorModelFnTest(test.TestCase):
  """Tests of how Estimator invokes and validates user-supplied model_fns."""
  def testModelFnArgs(self):
    """model_fn receives features, labels, mode, params and config."""
    features = {'x': 42., 'y': 43.}
    labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]
    # `features` and `labels` are passed by position, `arg0` and `arg1` here.
    def _model_fn(arg0, arg1, mode, params, config):
      model_fn_call_count[0] += 1
      self.assertItemsEqual(features.keys(), arg0.keys())
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _model_fn_ops(features, labels, arg0, arg1, mode)
    est = estimator.Estimator(
        model_fn=_model_fn, params=expected_params, config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.fit(input_fn=_make_input_fn(features, labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])
  def testPartialModelFnArgs(self):
    """A functools.partial-wrapped model_fn gets its bound args plus the rest."""
    features = {'x': 42., 'y': 43.}
    labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    expected_foo = 45.
    expected_bar = 46.
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]
    # `features` and `labels` are passed by position, `arg0` and `arg1` here.
    def _model_fn(arg0, arg1, foo, mode, params, config, bar):
      model_fn_call_count[0] += 1
      self.assertEqual(expected_foo, foo)
      self.assertEqual(expected_bar, bar)
      self.assertItemsEqual(features.keys(), arg0.keys())
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _model_fn_ops(features, labels, arg0, arg1, mode)
    partial_model_fn = functools.partial(
        _model_fn, foo=expected_foo, bar=expected_bar)
    est = estimator.Estimator(
        model_fn=partial_model_fn,
        params=expected_params,
        config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.fit(input_fn=_make_input_fn(features, labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])
  def testModelFnWithModelDir(self):
    """model_dir is forwarded to model_fns that declare a model_dir arg."""
    expected_param = {'some_param': 'some_value'}
    expected_model_dir = tempfile.mkdtemp()
    def _argument_checker(features,
                          labels,
                          mode,
                          params,
                          config=None,
                          model_dir=None):
      _, _, _ = features, labels, config
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_param, params)
      self.assertEqual(model_dir, expected_model_dir)
      return (constant_op.constant(0.), constant_op.constant(0.),
              training_util.get_global_step().assign_add(1))
    est = estimator.Estimator(
        model_fn=_argument_checker,
        params=expected_param,
        model_dir=expected_model_dir)
    est.fit(input_fn=boston_input_fn, steps=1)
  def testInvalidModelFn_no_train_op(self):
    """Fitting fails if the model_fn returns no train_op."""
    def _invalid_model_fn(features, labels):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        loss = 100.0 - w
      return None, loss, None
    est = estimator.Estimator(model_fn=_invalid_model_fn)
    with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
      est.fit(input_fn=boston_input_fn, steps=1)
  def testInvalidModelFn_no_loss(self):
    """Evaluation fails if the model_fn returns no loss in EVAL mode."""
    def _invalid_model_fn(features, labels, mode):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      loss = 100.0 - w
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        train_op = w.assign_add(loss / 100.0)
      predictions = loss
      if mode == model_fn.ModeKeys.EVAL:
        # Drop the loss only for eval so fit succeeds but evaluate fails.
        loss = None
      return predictions, loss, train_op
    est = estimator.Estimator(model_fn=_invalid_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing loss'):
      est.evaluate(input_fn=boston_eval_fn, steps=1)
  def testInvalidModelFn_no_prediction(self):
    """Eval and predict fail if the model_fn returns no predictions."""
    def _invalid_model_fn(features, labels):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      loss = 100.0 - w
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        train_op = w.assign_add(loss / 100.0)
      return None, loss, train_op
    est = estimator.Estimator(model_fn=_invalid_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.evaluate(input_fn=boston_eval_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.predict(input_fn=boston_input_fn)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.predict(
          input_fn=functools.partial(boston_input_fn, num_epochs=1),
          as_iterable=True)
  def testModelFnScaffoldInTraining(self):
    """A custom Scaffold init_fn supplied via ModelFnOps is invoked."""
    self.is_init_fn_called = False
    def _init_fn(scaffold, session):
      _, _ = scaffold, session
      self.is_init_fn_called = True
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      return model_fn.ModelFnOps(
          mode=mode,
          predictions=constant_op.constant(0.),
          loss=constant_op.constant(0.),
          train_op=training_util.get_global_step().assign_add(1),
          scaffold=monitored_session.Scaffold(init_fn=_init_fn))
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.fit(input_fn=boston_input_fn, steps=1)
    self.assertTrue(self.is_init_fn_called)
  def testModelFnScaffoldSaverUsage(self):
    """A custom Scaffold saver is used for save, restore and export."""
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      variables_lib.Variable(1., 'weight')
      real_saver = saver_lib.Saver()
      self.mock_saver = test.mock.Mock(
          wraps=real_saver, saver_def=real_saver.saver_def)
      return model_fn.ModelFnOps(
          mode=mode,
          predictions=constant_op.constant([[1.]]),
          loss=constant_op.constant(0.),
          train_op=training_util.get_global_step().assign_add(1),
          scaffold=monitored_session.Scaffold(saver=self.mock_saver))
    def input_fn():
      return {
          'x': constant_op.constant([[1.]]),
      }, constant_op.constant([[1.]])
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.fit(input_fn=input_fn, steps=1)
    self.assertTrue(self.mock_saver.save.called)
    est.evaluate(input_fn=input_fn, steps=1)
    self.assertTrue(self.mock_saver.restore.called)
    est.predict(input_fn=input_fn)
    self.assertTrue(self.mock_saver.restore.called)
    def serving_input_fn():
      serialized_tf_example = array_ops.placeholder(
          dtype=dtypes.string, shape=[None], name='input_example_tensor')
      features, labels = input_fn()
      return input_fn_utils.InputFnOps(features, labels, {
          'examples': serialized_tf_example
      })
    est.export_savedmodel(
        os.path.join(est.model_dir, 'export'), serving_input_fn)
    self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError, 'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(
model_fn=linear_model_fn, model_dir='test_dir', config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
    """SKCompat.fit validates x/y dtype and shape against the first fit call."""
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have to different objects to compare
    right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
    right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
    est.fit(right_features(), right_labels(), steps=1)
    # TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
    wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
    wrong_size_features = np.ones(shape=[7, 10])
    wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
    wrong_size_labels = np.ones(shape=[7, 11])
    # Refitting with compatible inputs succeeds.
    est.fit(x=right_features(), y=right_labels(), steps=1)
    # Any dtype or shape mismatch versus the first call must raise ValueError.
    with self.assertRaises(ValueError):
        est.fit(x=wrong_type_features, y=right_labels(), steps=1)
    with self.assertRaises(ValueError):
        est.fit(x=wrong_size_features, y=right_labels(), steps=1)
    with self.assertRaises(ValueError):
        est.fit(x=right_features(), y=wrong_type_labels, steps=1)
    with self.assertRaises(ValueError):
        est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
    """fit() rejects missing or mutually-exclusive combinations of x, y, input_fn."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    # Neither x nor input_fn supplied.
    self.assertRaisesRegexp(
        ValueError,
        'Either x or input_fn must be provided.',
        est.fit,
        x=None,
        input_fn=None,
        steps=1)
    # input_fn is mutually exclusive with x ...
    self.assertRaisesRegexp(
        ValueError,
        'Can not provide both input_fn and x or y',
        est.fit,
        x='X',
        input_fn=iris_input_fn,
        steps=1)
    # ... and with y.
    self.assertRaisesRegexp(
        ValueError,
        'Can not provide both input_fn and x or y',
        est.fit,
        y='Y',
        input_fn=iris_input_fn,
        steps=1)
    # batch_size only applies to array inputs, never to an input_fn.
    self.assertRaisesRegexp(
        ValueError,
        'Can not provide both input_fn and batch_size',
        est.fit,
        input_fn=iris_input_fn,
        batch_size=100,
        steps=1)
    # Tensors must be fed via an input_fn, not passed directly as x.
    self.assertRaisesRegexp(
        ValueError,
        'Inputs cannot be tensors. Please provide input_fn.',
        est.fit,
        x=constant_op.constant(1.),
        steps=1)
def testUntrained(self):
    """score/predict on a never-fitted estimator raise NotFittedError."""
    boston = base.load_boston()
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    with self.assertRaises(learn.NotFittedError):
        _ = est.score(x=boston.data, y=boston.target.astype(np.float64))
    with self.assertRaises(learn.NotFittedError):
        est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir, model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
# TODO(b/78461127): Please modify tests to not directly rely on names of
# checkpoints.
self.assertAllEqual(['model.ckpt-0', 'model.ckpt-5'],
ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1, model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2, model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
    """The params dict is forwarded to a model_fn that accepts it."""
    boston = base.load_boston()
    est = estimator.SKCompat(
        estimator.Estimator(
            model_fn=linear_model_params_fn, params={
                'learning_rate': 0.01
            }))
    est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
    """fit/evaluate must not mutate the caller-supplied monitors/hooks list."""
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass empty array and expect it to remain empty after calling
    # fit and evaluate. Requires inside to copy this array if any hooks were
    # added.
    my_array = []
    est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
    self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
    """fit(steps=N) adds N steps on top of the existing global_step."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=10)
    self.assertEqual(10, est.get_variable_value('global_step'))
    est.fit(input_fn=boston_input_fn, steps=15)
    # 10 (previous) + 15 (new) = 25 total steps.
    self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
    """fit(max_steps=N) trains until global_step reaches N, not N more steps."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, max_steps=10)
    self.assertEqual(10, est.get_variable_value('global_step'))
    est.fit(input_fn=boston_input_fn, max_steps=15)
    # Only 5 additional steps were run to reach the absolute cap of 15.
    self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
    """predict with batching yields one prediction per input row."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    output = list(est.predict(x=boston.data, batch_size=10))
    self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
    """Continuing training with an input_fn of a different schema fails."""

    def other_input_fn():
        # Feature key 'other' does not match what the model was built with.
        return {
            'other': constant_op.constant([0, 0, 0])
        }, constant_op.constant([0, 0, 0])

    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaises(ValueError):
        est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
    """A monitor passed to fit is invoked once per training step."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(
        input_fn=boston_input_fn,
        steps=21,
        monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
    """Hooks passed to evaluate() run once per evaluation step."""

    class CheckCallHook(session_run_hook.SessionRunHook):
        # Counts how many times after_run fires.

        def __init__(self):
            self.run_count = 0

        def after_run(self, run_context, run_values):
            self.run_count += 1

    est = learn.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    hook = CheckCallHook()
    est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
    # Three evaluation steps -> three after_run calls.
    self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
    """Training writes an OptimizeLoss/loss summary event to model_dir."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=200)
    est.evaluate(input_fn=boston_input_fn, steps=200)
    loss_summary = util_test.simple_values_from_events(
        util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
    self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
'MSE': _streaming_mean_squared_error_histogram
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testSummaryWritingWithTensor(self):
def _streaming_precition_mean_tensor(predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
return metric_ops.streaming_mean_tensor(
predictions,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
'PMT': _streaming_precition_mean_tensor
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('PMT' in output_values)
self.assertTrue(output_values['PMT'].HasField('tensor'))
def testLossInGraphCollection(self):
    """Training must register its loss in the GraphKeys.LOSSES collection."""

    class _LossCheckerHook(session_run_hook.SessionRunHook):
        # Captures the LOSSES collection when the session begins.

        def begin(self):
            self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)

    hook = _LossCheckerHook()
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
    # Non-empty collection proves the loss tensor was registered.
    self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
    """Estimator.export returns whatever the export module produced."""
    expected = '/path/to/some_dir'
    with test.mock.patch.object(estimator, 'export') as mock_export_module:
        mock_export_module._export_estimator.return_value = expected
        est = estimator.Estimator(model_fn=linear_model_fn)
        actual = est.export('/path/to')
        self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(['bogus_lookup', 'feature'], [
compat.as_str_any(x)
for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)
])
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base,
serving_input_fn,
assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])
])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None)
)
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
    # Run every test case in this module.
    test.main()
| apache-2.0 |
DevinCharles/igpt | examples/mapGenerator.py | 1 | 3325 | # coding: utf-8
import folium
import pandas as pd
from os.path import splitext
def mapGenerator(files, data_info='default', map_opts='default', serve_files=False):
    """Build the choropleth map and, optionally, serve it over HTTP.

    Args:
        files: (geo_file, data_file) pair forwarded to createMap.
        data_info: data column/center options, or 'default'.
        map_opts: color/legend options, or 'default'.
        serve_files: when True, start a local HTTP server after rendering.
    """
    createMap(files, data_info, map_opts)
    if not serve_files:
        return
    try:
        startServer()
    except Exception as err:
        # Best-effort: report the server failure but do not propagate it.
        print(str(err))
def getDefaults(empty_input):
    """Return fallback values for an option group.

    Args:
        empty_input: either 'map_opts' or 'data_info', naming the option
            group whose defaults are requested.

    Returns:
        For 'map_opts': (fill_color, legend_name).
        For 'data_info': (columns, shape_id, map_center).
        None for any other name.
    """
    # Bug fix: the original compared strings with `is`, which only works by
    # accident of CPython small-string interning; `==` tests value equality.
    if empty_input == 'map_opts':
        return 'YlOrBr', 'Data'
    elif empty_input == 'data_info':
        return ['GRID_ID', 'FREQ'], 'feature.properties.id', [40.0, -75.14]
    return None
def startServer(address="127.0.0.1", port=8765):
    """Serve the current directory over HTTP until interrupted.

    Args:
        address: interface to bind to.
        port: TCP port to bind to.
    """
    try:
        # Python 2.x
        from SimpleHTTPServer import SimpleHTTPRequestHandler
        import SocketServer as socketserver
    except ImportError:  # narrowed from a bare except: only import errors matter
        # Python 3.x
        from http.server import SimpleHTTPRequestHandler
        import socketserver
    Handler = SimpleHTTPRequestHandler
    httpd = socketserver.TCPServer((address, port), Handler)
    # Bug fix: report the address/port actually bound instead of a
    # hard-coded 'http://127.0.0.1:8765'.
    print('Serving on http://%s:%d' % (address, port))
    httpd.serve_forever()
def createMap(files, data_info='default', map_opts='default'):
    """Render a folium choropleth map of ``files`` to ``index.html``.

    Args:
        files: (geo_file, data_file) pair. geo_file is a GeoJSON path or a
            .shp shapefile (converted via igpt.shp2GeoJson); data_file is a
            csv path or a pandas DataFrame.
        data_info: (columns, shape_id, center) triple, or 'default'.
        map_opts: (fill_color, legend_name) pair, or 'default'.
    """
    # Input handling: a non-unpackable argument (including the sentinel
    # string 'default') falls back to getDefaults().
    try:
        geo_file, data_file = files
    except ValueError:
        print('You need to give shapefile name and a CSV data file name.')
        return
    try:
        color, legend = map_opts
    except ValueError:
        color, legend = getDefaults('map_opts')
    try:
        columns, shape_id, center = data_info
    except ValueError:
        # Bug fix: the original assigned these defaults to color/legend,
        # leaving columns/shape_id/center undefined (NameError below).
        columns, shape_id, center = getDefaults('data_info')
    # If the shapefile is a .shp, convert it to GeoJSON first.
    root, ext = splitext(geo_file)
    if ext == '.shp':
        from igpt import shp2GeoJson
        shp2GeoJson(geo_file)
        geo_file = root + '.json'
    # Accept either a DataFrame or a path ending in 'csv'.
    if isinstance(data_file, pd.core.frame.DataFrame):
        data = data_file
    elif data_file[-3:] == 'csv':
        data = pd.read_csv(data_file)
    else:
        print('data_file must be a Pandas Data Frame or the path to a csv file')
        return
    # Build the full GeoJSON feature id ('feature.' prefix is implied when
    # the caller passes a bare property name).
    id_parts = shape_id.split('.')
    if len(id_parts) == 1:
        full_id = 'feature.' + id_parts[0]
    else:
        full_id = shape_id
    # Create a simple folium map ('fmap' avoids shadowing the builtin `map`).
    fmap = folium.Map(location=center, zoom_start=11)
    fmap.geo_json(geo_path=geo_file, data=data,
                  columns=columns,
                  key_on=full_id,
                  fill_color=color, fill_opacity=0.7, line_opacity=0.2,
                  legend_name=legend,
                  reset=True)
    fmap.create_map(path='index.html')
if __name__ == "__main__":
    # Bug fix: the original called an undefined `main()` and crashed with
    # NameError. Provide a minimal CLI entry point instead.
    import sys
    if len(sys.argv) >= 3:
        mapGenerator((sys.argv[1], sys.argv[2]))
    else:
        print('Usage: mapGenerator.py <geo_file> <data_file.csv>')
| gpl-2.0 |
jmschrei/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# (estimator class, constructor kwargs) pairs exercised by every test below:
# both LabelPropagation and LabelSpreading, each with rbf and knn kernels.
ESTIMATORS = [
    (label_propagation.LabelPropagation, {'kernel': 'rbf'}),
    (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
    (label_propagation.LabelSpreading, {'kernel': 'rbf'}),
    (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
    """An unlabeled sample (-1) is transduced to the label of its neighborhood."""
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]  # -1 marks the unlabeled point
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        # The third (unlabeled) sample should pick up label 1 from sample 2.
        nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
    """An unlabeled point equidistant from two labels gets a ~uniform distribution.

    The knn kernel is skipped: changes in k-NN neighbor ordering make the
    predicted distribution unstable for this fixture.
    """
    samples = [[1., 0.], [0., 1.], [1., 1.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            continue  # unstable test; changes in k-NN ordering break it
        # Bug fix: the original left an unreachable predict_proba assert
        # after the `continue` (dead code) and a redundant `else:` branch;
        # both removed without changing what is actually tested.
        assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                  np.array([.5, .5]), 2)
def test_predict():
    """predict() assigns a new point the label of the nearby labeled cluster."""
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]  # -1 marks the unlabeled point
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
    """predict_proba() is ~uniform for a point equidistant from both labels."""
    samples = [[1., 0.], [0., 1.], [1., 2.5]]
    labels = [0, 1, -1]  # -1 marks the unlabeled point
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
                                  np.array([[0.5, 0.5]]))
| bsd-3-clause |
evgchz/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
# NOTE(review): `phi` (the pdf) is never referenced later in this script.
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8  # half-width of the square domain used for sampling and plotting
def g(x):
    """Target function to predict.

    Classification then consists of predicting whether g(x) <= 0 or not,
    i.e. whether a point lies below the parabola x2 = 5 - x1**2 / 2.
    """
    x1, x2 = x[:, 0], x[:, 1]
    return 5. - x2 - .5 * x1 ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
              [4.10469096, 5.32782448],
              [0.00000000, -0.50000000],
              [-6.17289014, -4.6984743],
              [1.3109306, -6.93271427],
              [-5.03823144, 3.10584743],
              [-2.87600388, 6.74310541],
              [5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instanciate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
                     np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
# 97.5% standard-normal quantile (bound of the 95% confidence interval).
# NOTE(review): `k` is never used below -- confirm whether it can be removed.
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
# Greyscale map of P[G(x) <= 0] derived from the predictive mean and MSE.
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
                extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
# Training points, coloured by the sign of the observed value of g.
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
# True zero level set of g (dash-dot), then the 2.5%, 50% and 97.5%
# iso-probability contours of the predicted classification.
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
                linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
                linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
                linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
ishanic/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
# coefs has one column per step of the LARS path.
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
# Normalise the l1 norm of the coefficients to [0, 1] for the x axis.
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
# Mark each breakpoint of the piecewise-linear path.
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ArtsiomCh/tensorflow | tensorflow/contrib/training/python/training/feeding_queue_runner_test.py | 76 | 5052 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
  """Return the rows of `array` selected by `row_indices`, stacked vertically."""
  return np.vstack([array[idx] for idx in row_indices])
class FeedingQueueRunnerTestCase(test.TestCase):
  """Tests for `FeedingQueueRunner`."""
  def testArrayFeeding(self):
    # Single-threaded feeding: dequeued batches must reproduce the source
    # array rows in order (with wrap-around) together with their indices.
    with ops.Graph().as_default():
      array = np.arange(32).reshape([16, 2])
      q = enqueue_data(array, capacity=100)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          # Expected row indices for batch i, wrapping at the array length.
          indices = [
              j % array.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_dq = get_rows(array, indices)
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(indices, dq[0])
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)
  def testArrayFeedingMultiThread(self):
    # Multi-threaded, shuffled feeding: the order is unspecified, so each
    # dequeued row is checked against the row its own index points at.
    with ops.Graph().as_default():
      array = np.arange(256).reshape([128, 2])
      q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_dq = get_rows(array, indices)
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)
  def testPandasFeeding(self):
    # Silently skipped when pandas is not installed.
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(32)
      array2 = np.arange(32, 64)
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
      q = enqueue_data(df, capacity=100)
      batch_size = 5
      dq_op = q.dequeue_many(5)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [
              j % array1.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_df_indices = df.index[indices]
          expected_rows = df.iloc[indices]
          dq = sess.run(dq_op)
          # First output is the DataFrame index, then one output per column.
          np.testing.assert_array_equal(expected_df_indices, dq[0])
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)
  def testPandasFeedingMultiThread(self):
    # Multi-threaded DataFrame feeding; row order is unspecified.
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(128, 256)
      array2 = 2 * array1
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
      q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
      batch_size = 5
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_rows = df.iloc[indices]
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)
# Run all test cases when this module is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
nmayorov/scikit-learn | sklearn/mixture/gmm.py | 19 | 30655 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the per-component Gaussian log density of the samples in X.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        Data points, one per row.
    means : array_like, shape (n_components, n_features)
        Mean vector of each Gaussian component, one per row.
    covars : array_like
        Covariance parameters; the expected shape depends on
        ``covariance_type``:
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'
    covariance_type : string
        One of 'spherical', 'tied', 'diag' (default) or 'full'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Log probability of every sample under every component.
    """
    # Dispatch to the implementation specialised for this parametrisation.
    density_funcs = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    return density_funcs[covariance_type](X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Draw random samples from a single Gaussian distribution.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like, optional
        Covariance of the distribution; a scalar for 'spherical',
        (n_features,) for 'diag', (n_features, n_features) for 'tied'/'full'.
    covariance_type : string, optional
        One of 'spherical', 'tied', 'diag' (default), 'full'.
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample.
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)
    # Start from standard-normal draws and colour them with the covariance.
    rand = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        rand.shape = (n_dim,)
    if covariance_type == 'spherical':
        rand *= np.sqrt(covar)
    elif covariance_type == 'diag':
        rand = np.dot(np.diag(np.sqrt(covar)), rand)
    else:
        # 'tied'/'full': scale along the covariance eigenbasis.
        eigvals, eigvecs = linalg.eigh(covar)
        eigvals.clip(0, out=eigvals)  # numerical noise can dip below zero
        np.sqrt(eigvals, out=eigvals)
        eigvecs *= eigvals
        rand = np.dot(eigvecs, rand)
    return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. the best results is kept
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, tol=1e-3, min_covar=1e-3,
                 n_iter=100, n_init=1, params='wmc', init_params='wmc',
                 verbose=0):
        # See the class docstring for the meaning of each parameter.
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.tol = tol
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.verbose = verbose
        # Fail fast on parameters that can never yield a valid model.
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)
        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')
        # Uniform mixing weights until fit() estimates them from data.
        self.weights_ = np.ones(self.n_components) / self.n_components
        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
    def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.
        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.
        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.
        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'means_')
        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            # Degenerate empty input: empty likelihoods and responsibilities.
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('The shape of X is not compatible with self')
        # Joint log density: per-component Gaussian density plus log weight.
        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type) +
               np.log(self.weights_))
        # Marginal log likelihood per sample (log-sum-exp over components).
        logprob = logsumexp(lpr, axis=1)
        # Normalised posteriors, computed in log space for stability.
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.
        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        check_is_fitted(self, 'means_')
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        # Cumulative weights define the inverse-CDF used to pick components.
        weight_cdf = np.cumsum(self.weights_)
        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    # Only the first (replicated) variance value is needed.
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
A initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
A initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """ Perform the Mstep of the EM algorithm and return the class weights
        """
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        # 10 * EPS guards against division by zero for starved components.
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
        # Only update the parameter families listed in `params`.
        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            # Dispatch to the update rule matching the covariance structure.
            covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
            self.covars_ = covar_mstep_func(
                self, X, responsibilities, weighted_X_sum, inverse_weights,
                min_covar)
        return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Compute Gaussian log-density at X for a spherical model"""
    # Promote the per-component scalar variances to full diagonals so the
    # diagonal implementation can be reused unchanged.
    cv = covars.copy()
    if cv.ndim == 1:
        cv = cv[:, np.newaxis]
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model"""
    # Replicate the single shared covariance once per component and defer
    # to the full-covariance implementation.
    n_components = means.shape[0]
    return _log_multivariate_normal_density_full(
        X, means, np.tile(covars, (n_components, 1, 1)))
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template"""
    if covariance_type == 'spherical':
        # One scalar (the template's mean) replicated across every feature
        # of every component.
        cv = np.full((n_components, tied_cv.shape[1]), tied_cv.mean())
    elif covariance_type == 'tied':
        cv = tied_cv
    elif covariance_type == 'diag':
        # Each component gets a copy of the template's diagonal.
        cv = np.repeat(np.diag(tied_cv)[np.newaxis, :], n_components, axis=0)
    elif covariance_type == 'full':
        cv = np.repeat(tied_cv[np.newaxis, :, :], n_components, axis=0)
    else:
        raise ValueError("covariance_type must be one of " +
                         "'spherical', 'tied', 'diag', 'full'")
    return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases"""
    # Average the diagonal variances to one scalar per component, then
    # broadcast that value back across all features.
    diag_cv = _covar_mstep_diag(*args)
    return np.tile(diag_cv.mean(axis=1)[:, np.newaxis], (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases"""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    regularization = min_covar * np.eye(n_features)
    for comp in range(gmm.n_components):
        post = responsibilities[:, comp]
        diff = X - gmm.means_[comp]
        with np.errstate(under='ignore'):
            # Underflow Errors in doing post * diff.T are not important
            avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
        cv[comp] = avg_cv + regularization
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
# Dispatch table: covariance_type string -> corresponding M-step routine.
_covar_mstep_funcs = {
    'spherical': _covar_mstep_spherical,
    'diag': _covar_mstep_diag,
    'tied': _covar_mstep_tied,
    'full': _covar_mstep_full,
}
| bsd-3-clause |
rahul-c1/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# Every (estimator class, constructor kwargs) configuration exercised by the
# tests below: both label-propagation variants with both supported kernels.
ESTIMATORS = [
    (label_propagation.LabelPropagation, {'kernel': 'rbf'}),
    (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
    (label_propagation.LabelSpreading, {'kernel': 'rbf'}),
    (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2}),
]
def test_fit_transduction():
    """The unlabelled third point must be transduced to class 1."""
    points = [[1., 0.], [0., 2.], [1., 3.]]
    targets = [0, 1, -1]  # -1 marks the unlabelled sample
    for estimator, parameters in ESTIMATORS:
        model = estimator(**parameters).fit(points, targets)
        nose.tools.assert_equal(model.transduction_[2], 1)
def test_distribution():
    """Check the learned label distribution of the unlabelled point.

    For the rbf-kernel estimators the unlabelled sample, equidistant from
    the two labelled ones, must end up with a ~[.5, .5] distribution.
    The knn-kernel configurations are skipped as unstable.
    """
    samples = [[1., 0.], [0., 1.], [1., 1.]]
    labels = [0, 1, -1]  # -1 marks the unlabelled sample
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            # Unstable: changes in k-NN ordering break it.  (An unreachable
            # duplicate assertion that followed this `continue` was removed.)
            continue
        else:
            assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                      np.array([.5, .5]), 2)
def test_predict():
    """A point close to the second labelled sample must predict class 1."""
    points = [[1., 0.], [0., 2.], [1., 3.]]
    targets = [0, 1, -1]  # -1 marks the unlabelled sample
    for estimator, parameters in ESTIMATORS:
        model = estimator(**parameters).fit(points, targets)
        assert_array_equal(model.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
    """A point equidistant from both labelled samples gets ~[.5, .5]."""
    points = [[1., 0.], [0., 1.], [1., 2.5]]
    targets = [0, 1, -1]  # -1 marks the unlabelled sample
    for estimator, parameters in ESTIMATORS:
        model = estimator(**parameters).fit(points, targets)
        assert_array_almost_equal(model.predict_proba([[1., 1.]]),
                                  np.array([[0.5, 0.5]]))
| bsd-3-clause |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================

A comparison of a several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.

Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.

The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)

# Code source: Gaël Varoquaux
#              Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA

h = .02  # step size of the evaluation mesh

# Display names and classifier instances, kept in lockstep by zip() below.
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
         "Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB(),
    LDA(),
    QDA()]

# Third dataset: an informative two-feature problem, jittered so it is only
# almost linearly separable.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable]

figure = plt.figure(figsize=(27, 9))
plot_index = 1

# One row of subplots per dataset: the raw data, then every classifier.
for dataset in datasets:
    # Preprocess the dataset and split it into training and test parts.
    X, y = dataset
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # First column: the dataset itself, with no decision surface.
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, plot_index)
    # Training points in solid colors ...
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # ... and testing points semi-transparent.
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    plot_index += 1

    # Remaining columns: one fitted classifier each.
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, plot_index)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)

        # Color every mesh point by the classifier's decision value, using
        # decision_function when available and class-1 probability otherwise.
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

        # Overlay the training and testing points on the decision surface.
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        plot_index += 1

figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
nonsk131/USRP2016 | generate_tests0000-0999.py | 1 | 3341 | from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.utils import addmags
import numpy as np
import pandas as pd
# Append-mode log recording the true simulation parameters of every test.
# NOTE(review): `file` shadows the Python 2 builtin of the same name.
file = open('/tigress/np5/true_params.txt','a')
def get_index(n):
    """Return *n* as a string left-padded with zeros to at least 4 digits.

    Replaces the previous hand-rolled if/elif padding chain with
    ``str.zfill``, which produces identical output for all non-negative
    inputs (the only values used by the loop below): 5 -> '0005',
    42 -> '0042', 1000 -> '1000', 12345 -> '12345'.
    """
    return str(n).zfill(4)
# Generate 1000 simulated binary-star observation tables, logging the true
# parameters of each draw to `file` and writing one CSV per test case.
for n in range(0, 1000, 1):
    index = get_index(n)
    file.write('test: ' + index + '\n')

    dar = Dartmouth_Isochrone()

    # Two masses in [0.5, 1.5), ordered so that M1 >= M2 (primary first).
    pair = np.random.rand(2) + 0.5
    if pair[0] > pair[1]:
        M1, M2 = pair[0], pair[1]
    else:
        M1, M2 = pair[1], pair[0]

    age1 = np.log10(1e8)
    age2 = np.log10(5e8)
    feh1 = 0.0

    # Two distances in [100, 1500), ordered so that distance1 >= distance2.
    pair = 1400 * np.random.rand(2) + 100
    if pair[0] > pair[1]:
        distance1, distance2 = pair[0], pair[1]
    else:
        distance1, distance2 = pair[1], pair[0]

    AV1 = 0.0
    feh2 = 0.2
    AV2 = 0.1

    params = (M1, M2, age1, age2, feh1, feh2, distance1, distance2, AV1, AV2)
    params = str(params)
    file.write('(M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2) = ' + params + '\n')
    file.write('\n')

    # Simulate the true magnitudes for both stars.
    unresolved_bands = ['J', 'H', 'K']
    resolved_bands = ['i', 'K']
    args1 = (age1, feh1, distance1, AV1)
    args2 = (age2, feh2, distance2, AV2)
    unresolved = {b: addmags(dar.mag[b](M1, *args1), dar.mag[b](M2, *args2))
                  for b in unresolved_bands}
    resolved_1 = {b: dar.mag[b](M1, *args1) for b in resolved_bands}
    resolved_2 = {b: dar.mag[b](M2, *args2) for b in resolved_bands}

    # Per-instrument observation properties.
    instruments = ['twomass', 'RAO']
    bands = {'twomass': ['J', 'H', 'K'],
             'RAO': ['i', 'K']}
    mag_unc = {'twomass': 0.02, 'RAO': 0.1}
    resolution = {'twomass': 4.0, 'RAO': 0.1}
    relative = {'twomass': False, 'RAO': True}
    separation = 0.5
    PA = 100.
    columns = ['name', 'band', 'resolution', 'relative', 'separation', 'pa', 'mag', 'e_mag']
    df = pd.DataFrame(columns=columns)

    i = 0
    # Unresolved observations: one combined-magnitude row per band.
    for inst in ['twomass']:
        for b in bands[inst]:
            row = {'name': inst,
                   'band': b,
                   'resolution': resolution[inst],
                   'relative': relative[inst],
                   'separation': 0.,
                   'pa': 0.,
                   'mag': unresolved[b],
                   'e_mag': mag_unc[inst]}
            df = df.append(pd.DataFrame(row, index=[i]))
            i += 1
    # Resolved observations: one row per star per band.
    for inst in ['RAO']:
        for b in bands[inst]:
            mags = [resolved_1[b], resolved_2[b]]
            pas = [0, PA]
            seps = [0., separation]
            for mag, sep, pa in zip(mags, seps, pas):
                row = {'name': inst,
                       'band': b,
                       'resolution': resolution[inst],
                       'relative': relative[inst],
                       'separation': sep,
                       'pa': pa,
                       'mag': mag,
                       'e_mag': mag_unc[inst]}
                df = df.append(pd.DataFrame(row, index=[i]))
                i += 1

    df.to_csv(path_or_buf='/tigress/np5/df_binary_test{}.csv'.format(index))

file.close()
| mit |
raysteam/zeppelin | python/src/main/resources/python/zeppelin_python.py | 1 | 9358 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Standard library (duplicate `import traceback` / `import warnings` lines
# present in the original were removed; every imported name is kept).
import os, sys, getopt, traceback, json, re
import ast
import signal
import warnings
from io import BytesIO

# Py4J bridge to the Zeppelin JVM process.
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError, Py4JNetworkError

# StringIO moved between Python 2 and 3.
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
# for back compatibility
class Logger(object):
    """File-like object used to replace sys.stdout.

    Every chunk written is forwarded straight to the Zeppelin interpreter
    (the module-level `intp` gateway object), so paragraph output streams
    to the front end as it is produced.
    """

    def __init__(self):
        pass

    def write(self, message):
        # Forward immediately; nothing is buffered on the Python side.
        intp.appendOutput(message)

    def reset(self):
        # No local buffer, so there is nothing to clear.
        pass

    def flush(self):
        # Writes are forwarded synchronously; flushing is a no-op.
        pass
class PyZeppelinContext(object):
    """A context impl that uses Py4j to communicate to JVM.

    Wraps the Java-side interpreter context (`z`) and exposes note GUI
    helpers (input/select/checkbox) plus display helpers for pandas
    DataFrames and matplotlib figures.  Relies on the module-level
    `gateway` Py4J object.
    """

    def __init__(self, z):
        self.z = z
        self.paramOption = gateway.jvm.org.apache.zeppelin.display.Input.ParamOption
        self.javaList = gateway.jvm.java.util.ArrayList
        self.max_result = 1000  # row cap applied by show_dataframe
        self._displayhook = lambda *args: None
        self._setup_matplotlib()

    def getInterpreterContext(self):
        """Return the interpreter context of the running paragraph."""
        return self.z.getCurrentInterpreterContext()

    def input(self, name, defaultValue=""):
        """Create/read a text-input form element in the note GUI."""
        return self.z.getGui().input(name, defaultValue)

    def select(self, name, options, defaultValue=""):
        """Create/read a single-choice select form element.

        ``options`` is an iterable of (value, label) pairs.
        """
        javaOptions = gateway.new_array(self.paramOption, len(options))
        for i, option in enumerate(options):
            javaOptions[i] = self.paramOption(option[0], option[1])
        return self.z.getGui().select(name, defaultValue, javaOptions)

    def checkbox(self, name, options, defaultChecked=[]):
        """Create/read a checkbox form element.

        ``options`` is an iterable of (value, label) pairs;
        ``defaultChecked`` lists the initially checked values.
        """
        javaOptions = gateway.new_array(self.paramOption, len(options))
        for i, option in enumerate(options):
            javaOptions[i] = self.paramOption(option[0], option[1])
        javaDefaultCheck = self.javaList()
        for check in defaultChecked:
            javaDefaultCheck.append(check)
        return self.z.getGui().checkbox(name, javaDefaultCheck, javaOptions)

    def show(self, p, **kwargs):
        """Render *p*: pyplot module, DataFrame, or a zero-arg callable."""
        if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
            self.show_matplotlib(p, **kwargs)
        elif type(p).__name__ == "DataFrame":  # does not play well with sub-classes
            # `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
            # and so a dependency on pandas
            self.show_dataframe(p, **kwargs)
        elif hasattr(p, '__call__'):
            p()  # error reporting

    def show_dataframe(self, df, show_index=False, **kwargs):
        """Pretty prints DF using Table Display System
        """
        limit = len(df) > self.max_result
        header_buf = StringIO("")
        if show_index:
            idx_name = str(df.index.name) if df.index.name is not None else ""
            header_buf.write(idx_name + "\t")
        header_buf.write(str(df.columns[0]))
        for col in df.columns[1:]:
            header_buf.write("\t")
            header_buf.write(str(col))
        header_buf.write("\n")

        body_buf = StringIO("")
        # Truncate to max_result rows when over the cap.
        rows = df.head(self.max_result).values if limit else df.values
        index = df.index.values
        for idx, row in zip(index, rows):
            if show_index:
                body_buf.write("%html <strong>{}</strong>".format(idx))
                body_buf.write("\t")
            body_buf.write(str(row[0]))
            for cell in row[1:]:
                body_buf.write("\t")
                body_buf.write(str(cell))
            body_buf.write("\n")
        body_buf.seek(0); header_buf.seek(0)
        # TODO(bzz): fix it, so it shows red notice, as in Spark
        print("%table " + header_buf.read() + body_buf.read())  # +
        # ("\n<font color=red>Results are limited by {}.</font>" \
        #     .format(self.max_result) if limit else "")
        # )
        body_buf.close(); header_buf.close()

    def show_matplotlib(self, p, fmt="png", width="auto", height="auto",
                        **kwargs):
        """Matplotlib show function: render *p* as inline %html output.

        ``fmt`` must be 'png' (base64 data URI) or 'svg' (inline markup).
        """
        if fmt == "png":
            # Fix: `base64` was used here but never imported anywhere in
            # this file, so the png path raised NameError.
            import base64
            img = BytesIO()
            p.savefig(img, format=fmt)
            img_str = b"data:image/png;base64,"
            img_str += base64.b64encode(img.getvalue().strip())
            # NOTE(review): `width={width}` looks like it was meant to be CSS
            # `width:{width}` — kept as-is to preserve emitted markup.
            img_tag = "<img src={img} style='width={width};height:{height}'>"
            # Decoding is necessary for Python 3 compability
            img_str = img_str.decode("ascii")
            img_str = img_tag.format(img=img_str, width=width, height=height)
        elif fmt == "svg":
            img = StringIO()
            p.savefig(img, format=fmt)
            img_str = img.getvalue()
        else:
            raise ValueError("fmt must be 'png' or 'svg'")
        # NOTE(review): trailing `<div>` is presumably a typo for `</div>` —
        # kept as-is to preserve emitted markup.
        html = "%html <div style='width:{width};height:{height}'>{img}<div>"
        print(html.format(width=width, height=height, img=img_str))
        img.close()

    def configure_mpl(self, **kwargs):
        """Forward matplotlib configuration to the custom inline backend."""
        import mpl_config
        mpl_config.configure(**kwargs)

    def _setup_matplotlib(self):
        # If we don't have matplotlib installed don't bother continuing
        try:
            import matplotlib
        except ImportError:
            return
        # Make sure custom backends are available in the PYTHONPATH
        rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
        mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
        if mpl_path not in sys.path:
            sys.path.append(mpl_path)
        # Finally check if backend exists, and if so configure as appropriate
        try:
            matplotlib.use('module://backend_zinline')
            import backend_zinline
            # Everything looks good so make config assuming that we are using
            # an inline backend
            self._displayhook = backend_zinline.displayhook
            self.configure_mpl(width=600, height=400, dpi=72,
                               fontsize=10, interactive=True, format='png')
        except ImportError:
            # Fall back to Agg if no custom backend installed
            matplotlib.use('Agg')
            warnings.warn("Unable to load inline matplotlib backend, "
                          "falling back to Agg")
def handler_stop_signals(sig, frame):
    """Signal handler: terminate the interpreter, reporting the signal number."""
    sys.exit("Got signal : " + str(sig))
signal.signal(signal.SIGINT, handler_stop_signals)

# Gateway endpoint comes from the launcher: argv[1] = port, argv[2] = host.
host = "127.0.0.1"
if len(sys.argv) >= 3:
    host = sys.argv[2]

# Shared namespace in which all user paragraph code is executed.
_zcUserQueryNameSpace = {}

client = GatewayClient(address=host, port=int(sys.argv[1]))
#gateway = JavaGateway(client, auto_convert = True)
gateway = JavaGateway(client)
intp = gateway.entry_point
intp.onPythonScriptInitialized(os.getpid())

java_import(gateway.jvm, "org.apache.zeppelin.display.Input")

# Expose the Zeppelin context to user code under both names.
z = __zeppelin__ = PyZeppelinContext(intp)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
_zcUserQueryNameSpace["z"] = z

# Route stdout through the interpreter so output reaches the front end.
output = Logger()
sys.stdout = output
#sys.stderr = output
# Main interpreter loop: block on the JVM for the next paragraph, execute it
# in the shared user namespace, and report success or the formatted error.
while True:
    req = intp.getStatements()
    if req == None:
        break
    try:
        lines = req.statements().split("\n")
        final_code = []

        # Look up the optional post-execute hooks (system-wide and per-user).
        try:
            global_hook = intp.getHook('post_exec_dev')
        except:
            global_hook = None
        try:
            user_hook = __zeppelin__.getHook('post_exec')
        except:
            user_hook = None
        nhooks = 0
        for hook in (global_hook, user_hook):
            if hook:
                nhooks += 1

        # Drop blank lines and comment-only lines before compiling.
        for line in lines:
            if line == None:
                continue
            stripped = line.strip()
            if len(stripped) == 0 or stripped.startswith("#"):
                continue
            final_code.append(line)

        if final_code:
            # Parse everything to an AST first; all statements except the
            # last run in 'exec' mode, while the last runs in 'single' mode
            # so that its value is echoed to stdout like in a REPL.
            code = compile('\n'.join(final_code), '<stdin>', 'exec',
                           ast.PyCF_ONLY_AST, 1)
            to_run_hooks = []
            if (nhooks > 0):
                to_run_hooks = code.body[-nhooks:]
            to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
                                          [code.body[-(nhooks + 1)]])
            try:
                for node in to_run_exec:
                    wrapper = ast.Module([node])
                    code = compile(wrapper, '<stdin>', 'exec')
                    exec(code, _zcUserQueryNameSpace)
                for node in to_run_single:
                    wrapper = ast.Interactive([node])
                    code = compile(wrapper, '<stdin>', 'single')
                    exec(code, _zcUserQueryNameSpace)
                for node in to_run_hooks:
                    wrapper = ast.Module([node])
                    code = compile(wrapper, '<stdin>', 'exec')
                    exec(code, _zcUserQueryNameSpace)
            except:
                raise Exception(traceback.format_exc())
        intp.setStatementsFinished("", False)
    except Py4JJavaError:
        # format_tb() does not return the inner exception
        excInnerError = traceback.format_exc()
        innerErrorStart = excInnerError.find("Py4JJavaError:")
        if innerErrorStart > -1:
            excInnerError = excInnerError[innerErrorStart:]
        intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
    except Py4JNetworkError:
        # lost connection from gateway server. exit
        sys.exit(1)
    except:
        intp.setStatementsFinished(traceback.format_exc(), True)
    output.reset()
| apache-2.0 |
fedhere/SESNCfAlib | getSpecOSNC.py | 1 | 1534 | import pandas as pd
import numpy as np
import json
import os
import glob
import inspect
import optparse
import time
import copy
import os
import pylab as pl
import numpy as np
import scipy
import json
import sys
import pickle as pkl
import scipy as sp
import numpy as np
from scipy import optimize
from scipy.interpolate import interp1d
from scipy import stats as spstats
from scipy import integrate
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline,splrep, splev
from scipy import interpolate
s = json.load( open(os.getenv ('PUI2015')+"/fbb_matplotlibrc.json") )
pl.rcParams.update(s)
cmd_folder = os.path.realpath(os.getenv("SESNCFAlib"))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import snclasses as snstuff
import templutils as templutils
import utils as snutils
import fitutils as fitutils
import myastrotools as myas
import matplotlib as mpl
# Export every spectrum of the supernova named on the command line from its
# sne.space JSON download into one plain-text file per epoch.
sn = sys.argv[1]
#'SN1993J'
#sn = 'SN2008bo'
print (sn)

record = pd.read_json("../sne.space_downloads/"+sn+".json")
snkey = record.columns
record = record[snkey[0]]
print (record)

# Bail out when the download carries no spectra at all.
if not 'spectra' in record.keys():
    print ("no spectra")
    sys.exit()
if len(record['spectra']) < 1:
    print ("no spectra")
    sys.exit()

for i, spec in enumerate(record['spectra']):
    epoch = spec['time']
    fout = open("%s_%s_spec.dat" % (sn, epoch), "w")
    # Header line documents the units of the two columns.
    fout.write("#%s %s \n" % (spec["u_time"], spec["u_wavelengths"]))
    for point in spec['data']:
        print (point)
        fout.write("%f %e \n" % (float(point[0]), float(point[1])))
    fout.close()
| mit |
chenyyx/scikit-learn-doc-zh | examples/zh/svm/plot_iris.py | 1 | 4011 | # -*- coding:UTF-8 -*-
"""
===========================================================
在鸢尾花卉数据集上绘制不同的 SVM 分类器
===========================================================
在鸢尾花卉数据集的 2D 投影上的不同线性 SVM 分类器的比较。我们只考虑这个数据集的前 2 个特征:
- 萼片长度
- 萼片宽度
此示例显示如何绘制具有不同 kernel 的四个 SVM 分类器的决策表面。
线性模型 ``LinearSVC()`` 和 ``SVC(kernel='linear')`` 产生稍微不同的决策边界。这可能是以下差异的结果:
- ``LinearSVC`` 可以最大限度地减少 squared hinge loss 而 ``SVC`` 最大限度地减少 regular hinge loss.
- ``LinearSVC`` 使用 One-vs-All (也被称作 One-vs-Rest) multiclass reduction ,而 ``SVC`` 则使用 One-vs-One multiclass reduction 。
两个线性模型具有线性决策边界(相交超平面),而非线性内核模型(多项式或 高斯 RBF)具有更灵活的非线性决策边界,其形状取决于内核的种类及其参数。
.. NOTE:: 在绘制玩具 2D 数据集分类器的决策函数的时候可以帮助您直观了解其各自的表现力,请注意,这些直觉并不总是推广到更加接近于现实的高维度的问题。
"""
print(__doc__)
# 加载 numpy, matplotlib, sklearn 等模块
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
    """Build a rectangular grid of points covering the data range.

    Parameters
    ----------
    x : array of values plotted on the x axis
    y : array of values plotted on the y axis
    h : step size of the grid, optional

    Returns
    -------
    xx, yy : ndarray coordinate matrices (as returned by ``np.meshgrid``),
        padded by one unit beyond the data extremes on every side.
    """
    x_lo, x_hi = x.min() - 1, x.max() + 1
    y_lo, y_hi = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_lo, x_hi, h),
                         np.arange(y_lo, y_hi, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision regions of a fitted classifier.

    Parameters
    ----------
    ax : matplotlib axes object
    clf : a fitted classifier exposing ``predict``
    xx, yy : meshgrid ndarrays covering the plotting area
    params : extra keyword arguments forwarded to ``contourf``
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    Z = clf.predict(grid_points).reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# Load the iris data and keep only the first two features (sepal length and
# sepal width) so decision surfaces can be drawn in 2-D.
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target

# Fit one SVM per kernel.  The data is deliberately left unscaled because we
# want to plot the support vectors in the original units.
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
          svm.LinearSVC(C=C),
          svm.SVC(kernel='rbf', gamma=0.7, C=C),
          svm.SVC(kernel='poly', degree=3, C=C))
models = (clf.fit(X, y) for clf in models)

# One subplot title per model, in the same order.
titles = ('SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')

# Lay out a 2x2 grid of axes.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)

X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)

for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)

plt.show()
| gpl-3.0 |
lbishal/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
# Fixed extent of the drawing canvas in data coordinates; points are created
# inside this box and the decision surface is evaluated over it.
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """The Model which holds the data.

    Implements the observable half of the observer pattern: registered
    observers are notified (via their ``update`` method) on every change
    event.
    """

    def __init__(self):
        self.observers = []      # registered observer objects
        self.surface = None      # (X1, X2, Z) decision surface, once fitted
        self.data = []           # list of (x, y, label) points
        self.cls = None
        self.surface_type = 0    # 0 = hyperplanes, 1 = filled surface

    def changed(self, event):
        """Notify every registered observer of *event*."""
        for observer in self.observers:
            observer.update(event, self)

    def add_observer(self, observer):
        """Register *observer* for future change notifications."""
        self.observers.append(observer)

    def set_surface(self, surface):
        self.surface = surface

    def dump_svmlight_file(self, file):
        """Write the collected points to *file* in svmlight format."""
        points = np.array(self.data)
        features = points[:, 0:2]
        labels = points[:, 2]
        dump_svmlight_file(features, labels, file)
class Controller(object):
    """Mediates between the Tk widgets and the Model: reads the GUI
    parameters, fits SVMs on demand and pushes results into the model."""

    def __init__(self, model):
        self.model = model
        self.kernel = Tk.IntVar()        # 0=linear, 1=rbf, 2=poly
        self.surface_type = Tk.IntVar()  # 0=hyperplanes, 1=filled surface
        # Whether or not a model has been fitted
        self.fitted = False

    def fit(self):
        """Fit an SVM on the current points and publish the surface."""
        print("fit the model")
        train = np.array(self.model.data)
        X = train[:, 0:2]
        y = train[:, 2]

        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        if len(np.unique(y)) == 1:
            # All examples share one class: fall back to a one-class SVM.
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
                          gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print("Accuracy:", clf.score(X, y) * 100)
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        self.model.changed("surface")

    def decision_surface(self, cls):
        """Evaluate the decision function of *cls* over the whole canvas."""
        delta = 1
        xs = np.arange(x_min, x_max + delta, delta)
        ys = np.arange(y_min, y_max + delta, delta)
        X1, X2 = np.meshgrid(xs, ys)
        Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
        Z = Z.reshape(X1.shape)
        return X1, X2, Z

    def clear_data(self):
        """Forget all points and any fitted model."""
        self.model.data = []
        self.fitted = False
        self.model.changed("clear")

    def add_example(self, x, y, label):
        """Record a new labelled point and refresh the fit if one exists."""
        self.model.data.append((x, y, label))
        self.model.changed("example_added")
        # update decision surface if already fitted.
        self.refit()

    def refit(self):
        """Refit the model if already fitted. """
        if self.fitted:
            self.fit()
class View(object):
    """Tk/matplotlib view: renders data points, support vectors and the
    fitted decision surface, and feeds mouse clicks to the controller."""

    def __init__(self, root, controller):
        fig = Figure()
        ax = fig.add_subplot(111)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
        canvas = FigureCanvasTkAgg(fig, master=root)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas.mpl_connect('button_press_event', self.onclick)
        toolbar = NavigationToolbar2TkAgg(canvas, root)
        toolbar.update()
        self.controllbar = ControllBar(root, controller)
        self.f = fig
        self.ax = ax
        self.canvas = canvas
        self.controller = controller
        self.contours = []      # artists belonging to the current surface
        self.c_labels = None
        self.plot_kernels()

    def plot_kernels(self):
        # Legend of the three kernel formulas along the bottom of the plot.
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")

    def onclick(self, event):
        # Left click adds a positive example, right click a negative one.
        if event.xdata and event.ydata:
            if event.button == 1:
                self.controller.add_example(event.xdata, event.ydata, 1)
            elif event.button == 3:
                self.controller.add_example(event.xdata, event.ydata, -1)

    def update_example(self, model, idx):
        """Draw the point at index *idx*: white for +1, black for -1."""
        x, y, l = model.data[idx]
        if l == 1:
            color = 'w'
        elif l == -1:
            color = 'k'
        self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)

    def update(self, event, model):
        """Observer callback: refresh the display for the given event."""
        if event == "examples_loaded":
            for i in xrange(len(model.data)):
                self.update_example(model, i)

        if event == "example_added":
            self.update_example(model, -1)

        if event == "clear":
            self.ax.clear()
            self.ax.set_xticks([])
            self.ax.set_yticks([])
            self.contours = []
            self.c_labels = None
            self.plot_kernels()

        if event == "surface":
            self.remove_surface()
            self.plot_support_vectors(model.clf.support_vectors_)
            self.plot_decision_surface(model.surface, model.surface_type)

        self.canvas.draw()

    def remove_surface(self):
        """Remove old decision surface."""
        if len(self.contours) > 0:
            for contour in self.contours:
                if isinstance(contour, ContourSet):
                    for lineset in contour.collections:
                        lineset.remove()
                else:
                    contour.remove()
            self.contours = []

    def plot_support_vectors(self, support_vectors):
        """Circle the support vectors and track the artist for removal."""
        circles = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                                  s=80, edgecolors="k", facecolors="none")
        self.contours.append(circles)

    def plot_decision_surface(self, surface, type):
        """Draw the surface either as margin hyperplanes (0) or filled (1)."""
        X1, X2, Z = surface
        if type == 0:
            levels = [-1.0, 0.0, 1.0]
            linestyles = ['dashed', 'solid', 'dashed']
            colors = 'k'
            self.contours.append(self.ax.contour(X1, X2, Z, levels,
                                                 colors=colors,
                                                 linestyles=linestyles))
        elif type == 1:
            self.contours.append(self.ax.contourf(X1, X2, Z, 10,
                                                  cmap=matplotlib.cm.bone,
                                                  origin='lower', alpha=0.85))
            self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
                                                 linestyles=['solid']))
        else:
            raise ValueError("surface type unknown")
class ControllBar(object):
    """Bottom control strip: kernel choice, numeric SVM parameters,
    surface style, and the Fit / Clear buttons."""

    def __init__(self, root, controller):
        fm = Tk.Frame(root)

        # Kernel selection radio buttons.
        kernel_group = Tk.Frame(fm)
        Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
                       value=0, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
                       value=1, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
                       value=2, command=controller.refit).pack(anchor=Tk.W)
        kernel_group.pack(side=Tk.LEFT)

        # Numeric parameter entries (C, gamma, degree, coef0).
        valbox = Tk.Frame(fm)
        controller.complexity = Tk.StringVar()
        controller.complexity.set("1.0")
        row = Tk.Frame(valbox)
        Tk.Label(row, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(row, width=6, textvariable=controller.complexity).pack(
            side=Tk.LEFT)
        row.pack()

        controller.gamma = Tk.StringVar()
        controller.gamma.set("0.01")
        row = Tk.Frame(valbox)
        Tk.Label(row, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(row, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
        row.pack()

        controller.degree = Tk.StringVar()
        controller.degree.set("3")
        row = Tk.Frame(valbox)
        Tk.Label(row, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(row, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
        row.pack()

        controller.coef0 = Tk.StringVar()
        controller.coef0.set("0")
        row = Tk.Frame(valbox)
        Tk.Label(row, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(row, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
        row.pack()
        valbox.pack(side=Tk.LEFT)

        # Surface rendering style radio buttons.
        cmap_group = Tk.Frame(fm)
        Tk.Radiobutton(cmap_group, text="Hyperplanes",
                       variable=controller.surface_type, value=0,
                       command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(cmap_group, text="Surface",
                       variable=controller.surface_type, value=1,
                       command=controller.refit).pack(anchor=Tk.W)
        cmap_group.pack(side=Tk.LEFT)

        train_button = Tk.Button(fm, text='Fit', width=5,
                                 command=controller.fit)
        train_button.pack()
        fm.pack(side=Tk.LEFT)
        Tk.Button(fm, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
    """Build the command line option parser for this script."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--output",
                      action="store", type="str", dest="output",
                      help="Path where to dump data.")
    return parser
def main(argv):
    """Run the GUI event loop; optionally dump collected data afterwards."""
    parser = get_parser()
    opts, args = parser.parse_args(argv[1:])

    # Wire up model, controller and view (the view registers Tk callbacks).
    root = Tk.Tk()
    model = Model()
    controller = Controller(model)
    root.wm_title("Scikit-learn Libsvm GUI")
    view = View(root, controller)
    model.add_observer(view)
    Tk.mainloop()

    # After the window is closed, persist the points if requested.
    if opts.output:
        model.dump_svmlight_file(opts.output)

if __name__ == "__main__":
    main(sys.argv)
| bsd-3-clause |
"""
======================================
Decision Tree Regression with AdaBoost
======================================

A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.

.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor

# Create a noisy 1-D sinusoidal dataset.
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])

# Fit a single tree and a 300-tree AdaBoost.R2 ensemble for comparison.
clf_1 = DecisionTreeRegressor(max_depth=4)
clf_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                          n_estimators=300, random_state=rng)
clf_1.fit(X, y)
clf_2.fit(X, y)

# Predict on the training grid.
y_1 = clf_1.predict(X)
y_2 = clf_2.predict(X)

# Plot both fits over the noisy samples.
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
PatrickChrist/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significative a technique
in repeating the classification procedure after randomizing, permuting,
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets


##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size

# Append 2200 uninformative noise features, uncorrelated with the labels,
# to make the classification task harder.
rng = np.random.RandomState(seed=0)
noise = rng.normal(size=(len(X), 2200))
X = np.c_[X, noise]

svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)

score, perm_scores, pvalue = permutation_test_score(
    svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)

print("Classification score %s (pvalue : %s)" % (score, pvalue))

###############################################################################
# View histogram of permutation scores
plt.hist(perm_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib,
# so the dashed marker lines are drawn with plt.plot instead.
plt.plot([score, score], ylim, '--g', linewidth=3,
         label='Classification Score'
               ' (pvalue %s)' % pvalue)
plt.plot([1. / n_classes] * 2, ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
tongwang01/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 19 | 2448 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import tensorflow as tf
# Surface INFO-level messages (training progress, loss reports, etc.).
tf.logging.set_verbosity(tf.logging.INFO)
# All columns present in the Housing CSV files, in file order.
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
           "dis", "tax", "ptratio", "medv"]
# Columns fed to the model as input features.
FEATURES = ["crim", "zn", "indus", "nox", "rm",
            "age", "dis", "tax", "ptratio"]
# Column used as the regression target.
LABEL = "medv"
def input_fn(data_set):
    """Turn a pandas DataFrame into the (features, labels) pair expected by
    tf.contrib.learn estimators.

    Args:
        data_set: DataFrame containing every column in FEATURES plus LABEL.

    Returns:
        A tuple of (dict mapping feature name -> constant tensor,
        constant label tensor).
    """
    features = {name: tf.constant(data_set[name].values) for name in FEATURES}
    labels = tf.constant(data_set[LABEL].values)
    return features, labels
def main(unused_argv):
    """Train, evaluate, and predict with a DNNRegressor on the Housing data."""
    # Load the training, test, and prediction datasets from CSV.
    train_df = pd.read_csv("boston_train.csv", skipinitialspace=True,
                           skiprows=1, names=COLUMNS)
    test_df = pd.read_csv("boston_test.csv", skipinitialspace=True,
                          skiprows=1, names=COLUMNS)
    # Set of 6 examples for which to predict median house values.
    predict_df = pd.read_csv("boston_predict.csv", skipinitialspace=True,
                             skiprows=1, names=COLUMNS)

    # One real-valued feature column per input feature.
    feature_cols = [tf.contrib.layers.real_valued_column(name)
                    for name in FEATURES]

    # Fully connected DNN with two hidden layers of 10 units each.
    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_cols, hidden_units=[10, 10])

    # Train.
    regressor.fit(input_fn=lambda: input_fn(train_df), steps=5000)

    # Evaluate on the held-out test set and report the loss.
    ev = regressor.evaluate(input_fn=lambda: input_fn(test_df), steps=1)
    loss_score = ev["loss"]
    print("Loss: {0:f}".format(loss_score))

    # Print out predictions.
    y = regressor.predict(input_fn=lambda: input_fn(predict_df))
    print("Predictions: {}".format(str(y)))


if __name__ == "__main__":
    tf.app.run()
| apache-2.0 |
stverhae/incubator-airflow | airflow/contrib/hooks/bigquery_hook.py | 9 | 38929 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import logging
import time
from apiclient.discovery import build, HttpError
from googleapiclient import errors
from builtins import range
from pandas.io.gbq import GbqConnector, \
_parse_data as gbq_parse_data, \
_check_google_client_version as gbq_check_google_client_version, \
_test_google_api_imports as gbq_test_google_api_imports
from pandas.tools.merge import concat
from past.builtins import basestring
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
# Emit messages from the "bigquery" logger at INFO level and above.
logging.getLogger("bigquery").setLevel(logging.INFO)
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
    """
    Interact with BigQuery. This hook uses the Google Cloud Platform
    connection.
    """
    conn_name_attr = 'bigquery_conn_id'

    def __init__(self,
                 bigquery_conn_id='bigquery_default',
                 delegate_to=None):
        super(BigQueryHook, self).__init__(
            conn_id=bigquery_conn_id,
            delegate_to=delegate_to)

    def get_conn(self):
        """
        Returns a BigQuery PEP 249 connection object.
        """
        service = self.get_service()
        project = self._get_field('project')
        return BigQueryConnection(service=service, project_id=project)

    def get_service(self):
        """
        Returns a BigQuery service object.
        """
        http_authorized = self._authorize()
        return build('bigquery', 'v2', http=http_authorized)

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
        """
        Insertion is currently unsupported. Theoretically, you could use
        BigQuery's streaming API to insert rows into a table, but this hasn't
        been implemented.
        """
        raise NotImplementedError()

    def get_pandas_df(self, bql, parameters=None):
        """
        Returns a Pandas DataFrame for the results produced by a BigQuery
        query. The DbApiHook method must be overridden because Pandas
        doesn't support PEP 249 connections, except for SQLite. See:

        https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
        https://github.com/pydata/pandas/issues/6900

        :param bql: The BigQuery SQL to execute.
        :type bql: string
        """
        service = self.get_service()
        project = self._get_field('project')
        connector = BigQueryPandasConnector(project, service)
        schema, pages = connector.run_query(bql)
        dataframe_list = []

        # Drain every page of results into its own DataFrame
        # (truthiness instead of the non-idiomatic len() > 0 checks).
        while pages:
            page = pages.pop()
            dataframe_list.append(gbq_parse_data(schema, page))

        if dataframe_list:
            return concat(dataframe_list, ignore_index=True)
        # No rows at all: still return an (empty) frame with the schema.
        return gbq_parse_data(schema, [])

    def table_exists(self, project_id, dataset_id, table_id):
        """
        Checks for the existence of a table in Google BigQuery.

        :param project_id: The Google cloud project in which to look for the
            table. The connection supplied to the hook must provide access to
            the specified project.
        :type project_id: string
        :param dataset_id: The name of the dataset in which to look for the
            table.
        :type dataset_id: string
        :param table_id: The name of the table to check the existence of.
        :type table_id: string
        """
        service = self.get_service()
        try:
            service.tables().get(
                projectId=project_id,
                datasetId=dataset_id,
                tableId=table_id
            ).execute()
            return True
        except errors.HttpError as e:
            # 404 simply means "no such table"; anything else is a real error.
            if e.resp['status'] == '404':
                return False
            raise
class BigQueryPandasConnector(GbqConnector):
    """
    A GbqConnector (from Pandas) variant that accepts an already-built
    service object instead of calling self.get_credentials(). This lets
    Airflow inject service account credentials into the binding and use
    BigQuery with Pandas without a three-legged OAuth flow.
    """
    def __init__(self, project_id, service, reauth=False, verbose=False):
        # Verify the installed pandas/google-api client versions first;
        # the base-class __init__ is deliberately not called, to skip its
        # credentials lookup.
        gbq_check_google_client_version()
        gbq_test_google_api_imports()
        self.project_id = project_id
        self.service = service
        self.reauth = reauth
        self.verbose = verbose
class BigQueryConnection(object):
    """
    BigQuery has no notion of a persistent connection, so this class is just
    a small stateless factory for cursors, which do all of the real work.
    """

    def __init__(self, *args, **kwargs):
        # Stash the constructor arguments; they are forwarded verbatim to
        # every cursor created from this connection.
        self._cursor_args = args
        self._cursor_kwargs = kwargs

    def close(self):
        """ BigQueryConnection does not have anything to close. """
        return None

    def commit(self):
        """ BigQueryConnection does not support transactions. """
        return None

    def cursor(self):
        """ Return a new :py:class:`Cursor` object using the connection. """
        return BigQueryCursor(*self._cursor_args, **self._cursor_kwargs)

    def rollback(self):
        raise NotImplementedError(
            "BigQueryConnection does not have transactions")
class BigQueryBaseCursor(object):
    """
    The BigQuery base cursor contains helper methods to execute queries against
    BigQuery. The methods can be used directly by operators, in cases where a
    PEP 249 cursor isn't needed.
    """

    def __init__(self, service, project_id):
        self.service = service
        self.project_id = project_id

    def run_query(
            self, bql, destination_dataset_table=False,
            write_disposition='WRITE_EMPTY',
            allow_large_results=False,
            udf_config=False,
            use_legacy_sql=True):
        """
        Executes a BigQuery SQL query. Optionally persists results in a BigQuery
        table. See here:

        https://cloud.google.com/bigquery/docs/reference/v2/jobs

        For more details about these parameters.

        :param bql: The BigQuery SQL to execute.
        :type bql: string
        :param destination_dataset_table: The dotted <dataset>.<table>
            BigQuery table to save the query results.
        :param write_disposition: What to do if the table already exists in
            BigQuery.
        :param allow_large_results: Whether to allow large results.
        :type allow_large_results: boolean
        :param udf_config: The User Defined Function configuration for the query.
            See https://cloud.google.com/bigquery/user-defined-functions for details.
        :type udf_config: list
        :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
        :type use_legacy_sql: boolean
        """
        configuration = {
            'query': {
                'query': bql,
                'useLegacySql': use_legacy_sql
            }
        }

        if destination_dataset_table:
            assert '.' in destination_dataset_table, (
                'Expected destination_dataset_table in the format of '
                '<dataset>.<table>. Got: {}').format(destination_dataset_table)
            destination_project, destination_dataset, destination_table = \
                _split_tablename(table_input=destination_dataset_table,
                                 default_project_id=self.project_id)
            configuration['query'].update({
                'allowLargeResults': allow_large_results,
                'writeDisposition': write_disposition,
                'destinationTable': {
                    'projectId': destination_project,
                    'datasetId': destination_dataset,
                    'tableId': destination_table,
                }
            })

        if udf_config:
            assert isinstance(udf_config, list)
            configuration['query'].update({
                'userDefinedFunctionResources': udf_config
            })

        return self.run_with_configuration(configuration)

    def run_extract(  # noqa
            self, source_project_dataset_table, destination_cloud_storage_uris,
            compression='NONE', export_format='CSV', field_delimiter=',',
            print_header=True):
        """
        Executes a BigQuery extract command to copy data from BigQuery to
        Google Cloud Storage. See here:

        https://cloud.google.com/bigquery/docs/reference/v2/jobs

        For more details about these parameters.

        :param source_project_dataset_table: The dotted <dataset>.<table>
            BigQuery table to use as the source data.
        :type source_project_dataset_table: string
        :param destination_cloud_storage_uris: The destination Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
            convention defined here:
            https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
        :type destination_cloud_storage_uris: list
        :param compression: Type of compression to use.
        :type compression: string
        :param export_format: File format to export.
        :type export_format: string
        :param field_delimiter: The delimiter to use when extracting to a CSV.
        :type field_delimiter: string
        :param print_header: Whether to print a header for a CSV file extract.
        :type print_header: boolean
        """
        source_project, source_dataset, source_table = \
            _split_tablename(table_input=source_project_dataset_table,
                             default_project_id=self.project_id,
                             var_name='source_project_dataset_table')
        configuration = {
            'extract': {
                'sourceTable': {
                    'projectId': source_project,
                    'datasetId': source_dataset,
                    'tableId': source_table,
                },
                'compression': compression,
                'destinationUris': destination_cloud_storage_uris,
                'destinationFormat': export_format,
            }
        }

        if export_format == 'CSV':
            # Only set fieldDelimiter and printHeader fields if using CSV.
            # Google does not like it if you set these fields for other export
            # formats.
            configuration['extract']['fieldDelimiter'] = field_delimiter
            configuration['extract']['printHeader'] = print_header

        return self.run_with_configuration(configuration)

    def run_copy(self,
                 source_project_dataset_tables,
                 destination_project_dataset_table,
                 write_disposition='WRITE_EMPTY',
                 create_disposition='CREATE_IF_NEEDED'):
        """
        Executes a BigQuery copy command to copy data from one BigQuery table
        to another. See here:

        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy

        For more details about these parameters.

        :param source_project_dataset_tables: One or more dotted
            (project:|project.)<dataset>.<table>
            BigQuery tables to use as the source data. Use a list if there are
            multiple source tables.
            If <project> is not included, project will be the project defined
            in the connection json.
        :type source_project_dataset_tables: list|string
        :param destination_project_dataset_table: The destination BigQuery
            table. Format is: (project:|project.)<dataset>.<table>
        :type destination_project_dataset_table: string
        :param write_disposition: The write disposition if the table already exists.
        :type write_disposition: string
        :param create_disposition: The create disposition if the table doesn't exist.
        :type create_disposition: string
        """
        # Normalize a single table spec into a one-element list.
        source_project_dataset_tables = (
            [source_project_dataset_tables]
            if not isinstance(source_project_dataset_tables, list)
            else source_project_dataset_tables)

        source_project_dataset_tables_fixup = []
        for source_project_dataset_table in source_project_dataset_tables:
            source_project, source_dataset, source_table = \
                _split_tablename(table_input=source_project_dataset_table,
                                 default_project_id=self.project_id,
                                 var_name='source_project_dataset_table')
            source_project_dataset_tables_fixup.append({
                'projectId': source_project,
                'datasetId': source_dataset,
                'tableId': source_table
            })

        destination_project, destination_dataset, destination_table = \
            _split_tablename(table_input=destination_project_dataset_table,
                             default_project_id=self.project_id)
        configuration = {
            'copy': {
                'createDisposition': create_disposition,
                'writeDisposition': write_disposition,
                'sourceTables': source_project_dataset_tables_fixup,
                'destinationTable': {
                    'projectId': destination_project,
                    'datasetId': destination_dataset,
                    'tableId': destination_table
                }
            }
        }

        return self.run_with_configuration(configuration)

    def run_load(self,
                 destination_project_dataset_table,
                 schema_fields, source_uris,
                 source_format='CSV',
                 create_disposition='CREATE_IF_NEEDED',
                 skip_leading_rows=0,
                 write_disposition='WRITE_EMPTY',
                 field_delimiter=',',
                 schema_update_options=()):
        """
        Executes a BigQuery load command to load data from Google Cloud Storage
        to BigQuery. See here:

        https://cloud.google.com/bigquery/docs/reference/v2/jobs

        For more details about these parameters.

        :param destination_project_dataset_table:
            The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
            data into. If <project> is not included, project will be the project defined
            in the connection json.
        :type destination_project_dataset_table: string
        :param schema_fields: The schema field list as defined here:
            https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
        :type schema_fields: list
        :param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild
            per-object name can be used.
        :type source_uris: list
        :param source_format: File format to export.
        :type source_format: string
        :param create_disposition: The create disposition if the table doesn't exist.
        :type create_disposition: string
        :param skip_leading_rows: Number of rows to skip when loading from a CSV.
        :type skip_leading_rows: int
        :param write_disposition: The write disposition if the table already exists.
        :type write_disposition: string
        :param field_delimiter: The delimiter to use when loading from a CSV.
        :type field_delimiter: string
        :param schema_update_options: Allows the schema of the desitination
            table to be updated as a side effect of the load job.
        :type schema_update_options: list
        """
        # bigquery only allows certain source formats
        # we check to make sure the passed source format is valid
        # if it's not, we raise a ValueError
        # Refer to this link for more details:
        #   https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
        source_format = source_format.upper()
        allowed_formats = ["CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS"]
        if source_format not in allowed_formats:
            raise ValueError("{0} is not a valid source format. "
                             "Please use one of the following types: {1}"
                             .format(source_format, allowed_formats))

        # bigquery also allows you to define how you want a table's schema to change
        # as a side effect of a load
        # for more details:
        #   https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
        allowed_schema_update_options = [
            'ALLOW_FIELD_ADDITION',
            "ALLOW_FIELD_RELAXATION"
        ]
        if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
            raise ValueError(
                "{0} contains invalid schema update options. "
                "Please only use one or more of the following options: {1}"
                .format(schema_update_options, allowed_schema_update_options)
            )

        destination_project, destination_dataset, destination_table = \
            _split_tablename(table_input=destination_project_dataset_table,
                             default_project_id=self.project_id,
                             var_name='destination_project_dataset_table')

        configuration = {
            'load': {
                'createDisposition': create_disposition,
                'destinationTable': {
                    'projectId': destination_project,
                    'datasetId': destination_dataset,
                    'tableId': destination_table,
                },
                'sourceFormat': source_format,
                'sourceUris': source_uris,
                'writeDisposition': write_disposition,
            }
        }

        if schema_fields:
            configuration['load']['schema'] = {
                'fields': schema_fields
            }

        if schema_update_options:
            if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
                raise ValueError(
                    "schema_update_options is only "
                    "allowed if write_disposition is "
                    "'WRITE_APPEND' or 'WRITE_TRUNCATE'."
                )
            else:
                logging.info(
                    "Adding experimental "
                    "'schemaUpdateOptions': {0}".format(schema_update_options)
                )
                configuration['load']['schemaUpdateOptions'] = schema_update_options

        if source_format == 'CSV':
            configuration['load']['skipLeadingRows'] = skip_leading_rows
            configuration['load']['fieldDelimiter'] = field_delimiter

        return self.run_with_configuration(configuration)

    def run_with_configuration(self, configuration):
        """
        Executes a BigQuery SQL query. See here:

        https://cloud.google.com/bigquery/docs/reference/v2/jobs

        For more details about the configuration parameter.

        :param configuration: The configuration parameter maps directly to
            BigQuery's configuration field in the job object. See
            https://cloud.google.com/bigquery/docs/reference/v2/jobs for
            details.
        """
        jobs = self.service.jobs()
        job_data = {
            'configuration': configuration
        }

        # Send query and wait for reply.
        query_reply = jobs \
            .insert(projectId=self.project_id, body=job_data) \
            .execute()
        job_id = query_reply['jobReference']['jobId']

        # Poll every 5 seconds until the job reaches the DONE state.
        keep_polling_job = True
        while keep_polling_job:
            try:
                job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
                if job['status']['state'] == 'DONE':
                    keep_polling_job = False
                    # Check if job had errors.
                    if 'errorResult' in job['status']:
                        raise Exception(
                            'BigQuery job failed. Final error was: {}. The job was: {}'.format(
                                job['status']['errorResult'], job
                            )
                        )
                else:
                    logging.info('Waiting for job to complete : %s, %s', self.project_id, job_id)
                    time.sleep(5)

            except HttpError as err:
                if err.resp.status in [500, 503]:
                    logging.info('%s: Retryable error, waiting for job to complete: %s', err.resp.status, job_id)
                    time.sleep(5)
                else:
                    # Format the HTTP status into the message; the original code
                    # passed it as a spurious second Exception argument, so it
                    # never appeared in the rendered message.
                    raise Exception(
                        'BigQuery job status check failed. Final error was: {}'
                        .format(err.resp.status))

        return job_id

    def get_schema(self, dataset_id, table_id):
        """
        Get the schema for a given datset.table.
        see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource

        :param dataset_id: the dataset ID of the requested table
        :param table_id: the table ID of the requested table
        :return: a table schema
        """
        tables_resource = self.service.tables() \
            .get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
            .execute()
        return tables_resource['schema']

    def get_tabledata(self, dataset_id, table_id,
                      max_results=None, page_token=None, start_index=None):
        """
        Get the data of a given dataset.table.
        see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list

        :param dataset_id: the dataset ID of the requested table.
        :param table_id: the table ID of the requested table.
        :param max_results: the maximum results to return.
        :param page_token: page token, returned from a previous call,
            identifying the result set.
        :param start_index: zero based index of the starting row to read.
        :return: map containing the requested rows.
        """
        optional_params = {}
        if max_results:
            optional_params['maxResults'] = max_results
        if page_token:
            optional_params['pageToken'] = page_token
        # Explicit None check so that an explicit start_index of 0 is honored.
        if start_index is not None:
            optional_params['startIndex'] = start_index
        return (
            self.service.tabledata()
            .list(
                projectId=self.project_id, datasetId=dataset_id,
                tableId=table_id, **optional_params)
            .execute()
        )

    def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False):
        """
        Delete an existing table from the dataset;
        If the table does not exist, return an error unless ignore_if_missing
        is set to True.

        :param deletion_dataset_table: A dotted
            (<project>.|<project>:)<dataset>.<table> that indicates which table
            will be deleted.
        :type deletion_dataset_table: str
        :param ignore_if_missing: if True, then return success even if the
            requested table does not exist.
        :type ignore_if_missing: boolean
        :return:
        """
        assert '.' in deletion_dataset_table, (
            'Expected deletion_dataset_table in the format of '
            '<dataset>.<table>. Got: {}').format(deletion_dataset_table)
        deletion_project, deletion_dataset, deletion_table = \
            _split_tablename(table_input=deletion_dataset_table,
                             default_project_id=self.project_id)

        try:
            # The delete response body is empty, so there is nothing to keep.
            self.service.tables() \
                .delete(projectId=deletion_project,
                        datasetId=deletion_dataset,
                        tableId=deletion_table) \
                .execute()
            logging.info('Deleted table %s:%s.%s.',
                         deletion_project, deletion_dataset, deletion_table)
        except HttpError:
            if not ignore_if_missing:
                raise Exception(
                    'Table deletion failed. Table does not exist.')
            else:
                logging.info('Table does not exist. Skipping.')

    def run_table_upsert(self, dataset_id, table_resource, project_id=None):
        """
        creates a new, empty table in the dataset;
        If the table already exists, update the existing table.
        Since BigQuery does not natively allow table upserts, this is not an
        atomic operation.

        :param dataset_id: the dataset to upsert the table into.
        :type dataset_id: str
        :param table_resource: a table resource. see
            https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
        :type table_resource: dict
        :param project_id: the project to upsert the table into.  If None,
            project will be self.project_id.
        :return:
        """
        # check to see if the table exists
        table_id = table_resource['tableReference']['tableId']
        project_id = project_id if project_id is not None else self.project_id
        tables_list_resp = self.service.tables().list(projectId=project_id,
                                                      datasetId=dataset_id).execute()
        while True:
            for table in tables_list_resp.get('tables', []):
                if table['tableReference']['tableId'] == table_id:
                    # found the table, do update
                    logging.info('table %s:%s.%s exists, updating.',
                                 project_id, dataset_id, table_id)
                    return self.service.tables().update(projectId=project_id,
                                                        datasetId=dataset_id,
                                                        tableId=table_id,
                                                        body=table_resource).execute()
            # If there is a next page, we need to check the next page.
            if 'nextPageToken' in tables_list_resp:
                tables_list_resp = self.service.tables()\
                    .list(projectId=project_id,
                          datasetId=dataset_id,
                          pageToken=tables_list_resp['nextPageToken'])\
                    .execute()
            # If there is no next page, then the table doesn't exist.
            else:
                # do insert
                logging.info('table %s:%s.%s does not exist. creating.',
                             project_id, dataset_id, table_id)
                return self.service.tables().insert(projectId=project_id,
                                                    datasetId=dataset_id,
                                                    body=table_resource).execute()

    def run_grant_dataset_view_access(self,
                                      source_dataset,
                                      view_dataset,
                                      view_table,
                                      source_project=None,
                                      view_project=None):
        """
        Grant authorized view access of a dataset to a view table.
        If this view has already been granted access to the dataset, do nothing.
        This method is not atomic.  Running it may clobber a simultaneous update.

        :param source_dataset: the source dataset
        :type source_dataset: str
        :param view_dataset: the dataset that the view is in
        :type view_dataset: str
        :param view_table: the table of the view
        :type view_table: str
        :param source_project: the project of the source dataset. If None,
            self.project_id will be used.
        :type source_project: str
        :param view_project: the project that the view is in. If None,
            self.project_id will be used.
        :type view_project: str
        :return: the datasets resource of the source dataset.
        """
        # Apply default values to projects
        source_project = source_project if source_project else self.project_id
        view_project = view_project if view_project else self.project_id

        # we don't want to clobber any existing accesses, so we have to get
        # info on the dataset before we can add view access
        source_dataset_resource = self.service.datasets().get(projectId=source_project,
                                                              datasetId=source_dataset).execute()
        access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
        view_access = {'view': {'projectId': view_project,
                                'datasetId': view_dataset,
                                'tableId': view_table}}
        # check to see if the view we want to add already exists.
        if view_access not in access:
            logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.',
                         view_project, view_dataset, view_table,
                         source_project, source_dataset)
            access.append(view_access)
            return self.service.datasets().patch(projectId=source_project,
                                                 datasetId=source_dataset,
                                                 body={'access': access}).execute()
        else:
            # if view is already in access, do nothing.
            logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.',
                         view_project, view_dataset, view_table,
                         source_project, source_dataset)
            return source_dataset_resource
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
    """
    Fetch the next batch of rows, as a list of row sequences. An empty
    list is returned when no more rows are available.

    ``size`` defaults to the cursor's ``arraysize``; fewer rows than
    requested may be returned when the result set runs out first.

    An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if
    the previous call to :py:meth:`execute` did not produce any result set
    or no call was issued yet.
    """
    limit = self.arraysize if size is None else size
    batch = []
    while len(batch) < limit:
        row = self.fetchone()
        if row is None:
            break
        batch.append(row)
    return batch
def fetchall(self):
    """
    Fetch all (remaining) rows of the query result, as a list of row
    sequences.
    """
    rows = []
    row = self.fetchone()
    while row is not None:
        rows.append(row)
        row = self.fetchone()
    return rows
def get_arraysize(self):
    """Number of rows ``.fetchmany()`` fetches at a time; defaults to 1.

    Fix: the original returned ``self._buffersize``, an attribute that is
    never written anywhere (``set_arraysize`` stores ``self.buffersize``),
    so any truthy buffer size raised AttributeError. Read the attribute
    the setter actually writes.
    """
    return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
    """Set the number of rows ``.fetchmany()`` fetches at a time."""
    self.buffersize = arraysize
# DB-API 2.0 ``arraysize`` attribute, backed by the getter/setter above
# (stored on the instance as ``buffersize``).
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
    """No-op: predefined input sizes are not used by this cursor."""
def setoutputsize(self, size, column=None):
    """No-op: output column buffer sizes are not used by this cursor."""
def _bind_parameters(operation, parameters):
    """ Helper method that binds parameters to a SQL query. """
    # inspired by MySQL Python Connector (conversion.py)
    # Render each value into its SQL literal form, then %-interpolate
    # the whole mapping into the query template in one pass.
    # NOTE: this file targets Python 2 (iteritems/basestring).
    rendered = {}
    for (name, value) in parameters.iteritems():
        if value is None:
            rendered[name] = 'NULL'
        elif isinstance(value, basestring):
            rendered[name] = "'" + _escape(value) + "'"
        else:
            rendered[name] = str(value)
    return operation % rendered
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
cmpt = table_input.split(':')
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, (
"{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
logging.info(
'project not included in {var}: '
'{input}; using project "{project}"'.format(
var=var_name, input=table_input, project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
| apache-2.0 |
antgonza/qiita | qiita_pet/handlers/api_proxy/tests/test_artifact.py | 1 | 15279 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from os.path import join, exists
from os import remove, close
from datetime import datetime
from tempfile import mkstemp
from functools import partial
import pandas as pd
import numpy.testing as npt
from qiita_core.util import qiita_test_checker
from qiita_core.testing import wait_for_prep_information_job
from qiita_core.qiita_settings import r_client
from qiita_db.artifact import Artifact
from qiita_db.metadata_template.prep_template import PrepTemplate
from qiita_db.study import Study
from qiita_db.util import get_mountpoint
from qiita_db.software import Parameters, DefaultParameters
from qiita_db.exceptions import QiitaDBWarning
from qiita_pet.handlers.api_proxy.artifact import (
artifact_get_req, artifact_status_put_req, artifact_graph_get_req,
artifact_types_get_req, artifact_post_req, artifact_get_prep_req,
artifact_get_info)
from qiita_db.logger import LogEntry
class TestArtifactAPIReadOnly(TestCase):
    """Read-only checks of the artifact API proxy against the test fixtures.

    These tests only inspect pre-existing database objects, so no
    setup/teardown is required.
    """

    def test_artifact_get_req_no_access(self):
        # A user outside the study must be rejected.
        obs = artifact_get_req('demo@microbio.me', 1)
        exp = {'status': 'error',
               'message': 'User does not have access to study'}
        self.assertEqual(obs, exp)

    def test_artifact_get_req(self):
        obs = artifact_get_req('test@foo.bar', 1)
        # Build absolute paths rooted at the raw_data mountpoint.
        path_builder = partial(join, get_mountpoint('raw_data')[0][1])
        exp = {'id': 1,
               'type': 'FASTQ',
               'study': 1,
               'data_type': '18S',
               'timestamp': datetime(2012, 10, 1, 9, 30, 27),
               'visibility': 'private',
               'can_submit_vamps': False,
               'can_submit_ebi': False,
               'processing_parameters': None,
               'ebi_run_accessions': None,
               'is_submitted_vamps': False,
               'parents': [],
               'filepaths': [
                   {'fp_id': 1,
                    'fp': path_builder("1_s_G1_L001_sequences.fastq.gz"),
                    'fp_type': "raw_forward_seqs",
                    'checksum': '2125826711',
                    'fp_size': 58},
                   {'fp_id': 2,
                    'fp': path_builder(
                        "1_s_G1_L001_sequences_barcodes.fastq.gz"),
                    'fp_type': "raw_barcodes",
                    'checksum': '2125826711',
                    'fp_size': 58}]
               }
        self.assertEqual(obs, exp)

    def test_artifact_graph_get_req_ancestors(self):
        # Artifact 1 is a root, so its ancestor graph is just itself.
        obs = artifact_graph_get_req(1, 'ancestors', 'test@foo.bar')
        exp = {'status': 'success',
               'message': '',
               'edge_list': [],
               'node_labels': [(1, 'Raw data 1 - FASTQ')]}
        self.assertEqual(obs, exp)

    def test_artifact_graph_get_req_descendants(self):
        obs = artifact_graph_get_req(1, 'descendants', 'test@foo.bar')
        exp = {'status': 'success',
               'message': '',
               'node_labels': [(1, 'Raw data 1 - FASTQ'),
                               (3, 'Demultiplexed 2 - Demultiplexed'),
                               (2, 'Demultiplexed 1 - Demultiplexed'),
                               (4, 'BIOM - BIOM'),
                               (5, 'BIOM - BIOM'),
                               (6, 'BIOM - BIOM')],
               'edge_list': [(1, 3), (1, 2), (2, 5), (2, 4), (2, 6)]}
        self.assertEqual(obs['message'], exp['message'])
        self.assertEqual(obs['status'], exp['status'])
        # Graph traversal order is not guaranteed; compare unordered.
        self.assertCountEqual(obs['node_labels'], exp['node_labels'])
        self.assertCountEqual(obs['edge_list'], exp['edge_list'])

    def test_artifact_graph_get_req_no_access(self):
        obs = artifact_graph_get_req(1, 'ancestors', 'demo@microbio.me')
        exp = {'status': 'error',
               'message': 'User does not have access to study'}
        self.assertEqual(obs, exp)

    def test_artifact_graph_get_req_bad_direction(self):
        obs = artifact_graph_get_req(1, 'WRONG', 'test@foo.bar')
        # NOTE(review): the "directon" typo is the API's own message text.
        exp = {'status': 'error', 'message': 'Unknown directon WRONG'}
        self.assertEqual(obs, exp)

    def test_artifact_types_get_req(self):
        obs = artifact_types_get_req()
        # Each entry appears to be [name, description, flag, flag, flag];
        # NOTE(review): confirm flag meanings against
        # artifact_types_get_req's implementation.
        exp = {'message': '',
               'status': 'success',
               'types': [['BIOM', 'BIOM table', False, False, True],
                         ['Demultiplexed', 'Demultiplexed and QC sequences',
                          True, True, False],
                         ['FASTA', None, False, False, False],
                         ['FASTA_Sanger', None, False, False, False],
                         ['FASTQ', None, False, False, True],
                         ['SFF', None, False, False, False],
                         ['beta_div_plots', 'Qiime 1 beta diversity results',
                          False, False, False],
                         ['per_sample_FASTQ', None, True, False, True],
                         ['rarefaction_curves', 'Rarefaction curves', False,
                          False, False],
                         ['taxa_summary', 'Taxa summary plots', False, False,
                          False]]}
        self.assertEqual(obs['message'], exp['message'])
        self.assertEqual(obs['status'], exp['status'])
        self.assertCountEqual(obs['types'], exp['types'])
@qiita_test_checker()
class TestArtifactAPI(TestCase):
    """Artifact API proxy tests that modify the database.

    ``qiita_test_checker`` wraps the class so it runs against a reset
    test database; each test starts from a fresh artifact built in
    ``setUp``.
    """

    def setUp(self):
        uploads_path = get_mountpoint('uploads')[0][1]
        # Create prep test file to point at
        self.update_fp = join(uploads_path, '1', 'update.txt')
        with open(self.update_fp, 'w') as f:
            f.write("""sample_name\tnew_col\n1.SKD6.640190\tnew_value\n""")

        self._files_to_remove = [self.update_fp]
        # NOTE(review): the next line overwrites the list above, so
        # update_fp is never scheduled for removal — confirm intended.
        self._files_to_remove = []

        # creating temporal files and artifact
        # NOTE: we don't need to remove the artifact created cause it's
        # used to test the delete functionality
        fd, fp = mkstemp(suffix='_seqs.fna')
        close(fd)
        with open(fp, 'w') as f:
            f.write(">1.sid_r4_0 M02034:17:000000000-A5U18:1:1101:15370:1394 "
                    "1:N:0:1 orig_bc=CATGAGCT new_bc=CATGAGCT bc_diffs=0\n"
                    "GTGTGCCAGCAGCCGCGGTAATACGTAGGG\n")
        # 4 Demultiplexed
        filepaths_processed = [(fp, 4)]
        # 1 for default parameters and input data
        exp_params = Parameters.from_default_params(DefaultParameters(1),
                                                    {'input_data': 1})
        self.artifact = Artifact.create(filepaths_processed, "Demultiplexed",
                                        parents=[Artifact(1)],
                                        processing_parameters=exp_params)

    def tearDown(self):
        for fp in self._files_to_remove:
            if exists(fp):
                remove(fp)

        # Replace file if removed as part of function testing
        uploads_path = get_mountpoint('uploads')[0][1]
        fp = join(uploads_path, '1', 'uploaded_file.txt')
        if not exists(fp):
            with open(fp, 'w') as f:
                f.write('')
        # Drop any redis state left behind by processing jobs.
        r_client.flushdb()

    def test_artifact_get_prep_req(self):
        obs = artifact_get_prep_req('test@foo.bar', [4])
        exp = {'status': 'success', 'msg': '', 'data': {
            4: ['1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
                '1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
                '1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200',
                '1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
                '1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190',
                '1.SKD7.640191', '1.SKD8.640184', '1.SKD9.640182',
                '1.SKM1.640183', '1.SKM2.640199', '1.SKM3.640197',
                '1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',
                '1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192']}}
        self.assertEqual(obs, exp)

        # The same request without study access must fail.
        obs = artifact_get_prep_req('demo@microbio.me', [4])
        exp = {'status': 'error',
               'message': 'User does not have access to study'}
        self.assertEqual(obs, exp)

    def test_artifact_get_info(self):
        obs = artifact_get_info('test@foo.bar', [5, 6, 7])
        data = [
            {'files': ['1_study_1001_closed_reference_otu_table_Silva.biom'],
             'artifact_id': 6, 'data_type': '16S',
             'timestamp': '2012-10-02 17:30:00', 'active': True,
             'target_gene': '16S rRNA', 'name': 'BIOM',
             'target_subfragment': ['V4'], 'parameters': {
                 'reference': '2', 'similarity': '0.97',
                 'sortmerna_e_value': '1', 'sortmerna_max_pos': '10000',
                 'threads': '1', 'sortmerna_coverage': '0.97'},
             'algorithm': 'Pick closed-reference OTUs | Split libraries FASTQ',
             'deprecated': False, 'platform': 'Illumina',
             'algorithm_az': 'd480799a0a7a2fbe0e9022bc9c602018',
             'prep_samples': 27},
            {'files': ['1_study_1001_closed_reference_otu_table.biom'],
             'artifact_id': 5, 'data_type': '18S',
             'timestamp': '2012-10-02 17:30:00', 'active': True,
             'target_gene': '16S rRNA', 'name': 'BIOM',
             'target_subfragment': ['V4'], 'parameters': {
                 'reference': '1', 'similarity': '0.97',
                 'sortmerna_e_value': '1', 'sortmerna_max_pos': '10000',
                 'threads': '1', 'sortmerna_coverage': '0.97'},
             'algorithm': 'Pick closed-reference OTUs | Split libraries FASTQ',
             'deprecated': False, 'platform': 'Illumina',
             'algorithm_az': 'd480799a0a7a2fbe0e9022bc9c602018',
             'prep_samples': 27},
            {'files': ['biom_table.biom'], 'artifact_id': 7,
             'data_type': '16S',
             'timestamp': '2012-10-02 17:30:00', 'active': True,
             'target_gene': '16S rRNA', 'name': 'BIOM',
             'target_subfragment': ['V4'], 'parameters': {}, 'algorithm': '',
             'deprecated': False, 'platform': 'Illumina', 'algorithm_az': '',
             'prep_samples': 27}]
        exp = {'status': 'success', 'msg': '', 'data': data}
        self.assertCountEqual(list(obs.keys()), exp.keys())
        self.assertEqual(obs['status'], exp['status'])
        self.assertEqual(obs['msg'], exp['msg'])
        self.assertCountEqual(obs['data'], exp['data'])

    def test_artifact_post_req(self):
        # Create new prep template to attach artifact to
        pt = npt.assert_warns(
            QiitaDBWarning, PrepTemplate.create,
            pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}), Study(1), '16S')
        self._files_to_remove.extend([fp for _, fp in pt.get_filepaths()])
        filepaths = {'raw_forward_seqs': 'uploaded_file.txt',
                     'raw_barcodes': 'update.txt'}
        obs = artifact_post_req(
            'test@foo.bar', filepaths, 'FASTQ', 'New Test Artifact', pt.id)
        exp = {'status': 'success',
               'message': ''}
        self.assertEqual(obs, exp)

        # Creation is asynchronous; block until the job finishes.
        wait_for_prep_information_job(pt.id)

        # Test importing an artifact
        # Create new prep template to attach artifact to
        pt = npt.assert_warns(
            QiitaDBWarning, PrepTemplate.create,
            pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}), Study(1), '16S')
        self._files_to_remove.extend([fp for _, fp in pt.get_filepaths()])
        obs = artifact_post_req(
            'test@foo.bar', {}, 'Demultiplexed', 'New Test Artifact 2',
            pt.id, 3)
        exp = {'status': 'success',
               'message': ''}
        self.assertEqual(obs, exp)

        wait_for_prep_information_job(pt.id)
        # Instantiate the artifact to make sure it was made and
        # to clean the environment
        a = Artifact(pt.artifact.id)
        self._files_to_remove.extend([x['fp'] for x in a.filepaths])

    def test_artifact_post_req_error(self):
        # Create a new prep template to attach the artifact to
        pt = npt.assert_warns(
            QiitaDBWarning, PrepTemplate.create,
            pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}), Study(1), '16S')
        self._files_to_remove.extend([fp for _, fp in pt.get_filepaths()])

        user_id = 'test@foo.bar'
        filepaths = {'raw_barcodes': 'uploaded_file.txt',
                     'raw_forward_seqs': 'update.txt'}
        artifact_type = "FASTQ"
        name = "TestArtifact"

        # The user doesn't have access to the study
        obs = artifact_post_req("demo@microbio.me", filepaths, artifact_type,
                                name, pt.id)
        exp = {'status': 'error',
               'message': 'User does not have access to study'}
        self.assertEqual(obs, exp)

        # A file does not exist
        missing_fps = {'raw_barcodes': 'NOTEXISTS'}
        obs = artifact_post_req(user_id, missing_fps, artifact_type,
                                name, pt.id)
        exp = {'status': 'error',
               'message': 'File does not exist: NOTEXISTS'}
        self.assertEqual(obs, exp)

        # Cleaned filepaths is empty
        empty_fps = {'raw_barcodes': '', 'raw_forward_seqs': ''}
        obs = artifact_post_req(user_id, empty_fps, artifact_type, name, pt.id)
        exp = {'status': 'error',
               'message': "Can't create artifact, no files provided."}
        self.assertEqual(obs, exp)

    def test_artifact_status_put_req(self):
        obs = artifact_status_put_req(1, 'test@foo.bar', 'sandbox')
        exp = {'status': 'success',
               'message': 'Artifact visibility changed to sandbox'}
        self.assertEqual(obs, exp)

    def test_artifact_status_put_req_private(self):
        # Only admins may move an artifact to private.
        obs = artifact_status_put_req(1, 'admin@foo.bar', 'private')
        exp = {'status': 'success',
               'message': 'Artifact visibility changed to private'}
        self.assertEqual(obs, exp)

        # testing that the log message is generated
        self.assertEqual(
            LogEntry.newest_records(1)[0].msg,
            'admin@foo.bar changed artifact 1 (study 1) to private')

    def test_artifact_status_put_req_private_bad_permissions(self):
        obs = artifact_status_put_req(1, 'test@foo.bar', 'private')
        exp = {'status': 'error',
               'message': 'User does not have permissions to approve change'}
        self.assertEqual(obs, exp)

    def test_artifact_status_put_req_no_access(self):
        obs = artifact_status_put_req(1, 'demo@microbio.me', 'sandbox')
        exp = {'status': 'error',
               'message': 'User does not have access to study'}
        self.assertEqual(obs, exp)

    def test_artifact_status_put_req_unknown_status(self):
        obs = artifact_status_put_req(1, 'test@foo.bar', 'BADSTAT')
        exp = {'status': 'error',
               'message': 'Unknown visibility value: BADSTAT'}
        self.assertEqual(obs, exp)
# Run the test suite when this module is executed directly
# (``main`` is unittest.main, imported at the top of the file).
if __name__ == "__main__":
    main()
| bsd-3-clause |
kyleabeauchamp/EnsemblePaper | code/figures/plot_1D_example.py | 1 | 2904 | import scipy.stats
import ALA3
import numpy as np
import matplotlib.pyplot as plt
from fitensemble import belt
import matplotlib
# Use a larger default font size for every figure in this script.
matplotlib.rcParams.update({'font.size': 18})

# Sample a bimodal 1-D toy observable: z ~ N(0, 1) and rho is a random
# sign, so x = 0.5*(z + 2)*rho has two mirrored modes near +/-1.
num_frames = 10000000
z = np.random.normal(size=(num_frames, 1))
rho = np.random.random_integers(0, 1, size=(num_frames, 1)) * 2 - 1
x = 0.5 * (z + 2) * rho

# Uniform prior population over all frames.
prior_pops = np.ones(num_frames) / float(num_frames)

num_bins = 50
use_log = False
scale = 1.0  # magnitude used for the biased values of alpha_1
ymin = 1E-5  # shared y-limits for the histogram panels
ymax = 0.12
num_grid = 100
grid = np.linspace(-7, 7, num_grid)

# --- Population histogram, unbiased model (alpha_1 = 0) ---
alpha = np.array([0.0])
p = belt.get_populations_from_alpha(alpha, x, prior_pops)
plt.hist(x, weights=p, bins=num_bins, color="b", log=use_log)
y = grid * alpha[0]
#plt.plot(grid, y, 'k')
plt.title(r"$\alpha_1 = 0$")
plt.xlabel("Observable: $f_1(x)$")
plt.ylabel("Population")
plt.ylim([ymin, ymax])
plt.savefig(ALA3.outdir + "/model_hist0.pdf", bbox_inches='tight')

# --- Population histogram, negatively biased model (alpha_1 = -scale) ---
alpha = np.array([-scale])
p = belt.get_populations_from_alpha(alpha, x, prior_pops)
plt.figure()
plt.hist(x, weights=p, bins=num_bins, color="g", log=use_log)
y = grid * alpha[0]
#plt.plot(grid, y, 'k')
plt.title(r"$\alpha_1 = %d$" % -scale)
plt.xlabel("Observable: $f_1(x)$")
plt.ylabel("Population")
plt.ylim([ymin, ymax])
plt.savefig(ALA3.outdir + "/model_hist%d.pdf" % alpha[0], bbox_inches='tight')

# --- Population histogram, positively biased model (alpha_1 = +scale) ---
alpha = np.array([scale])
p = belt.get_populations_from_alpha(alpha, x, prior_pops)
plt.figure()
plt.hist(x, weights=p, bins=num_bins, color="r", log=use_log)
y = grid * alpha[0]
#plt.plot(grid, y, 'k')
plt.title(r"$\alpha_1 = %d$" % scale)
plt.xlabel("Observable: $f_1(x)$")
plt.ylabel("Population")
plt.ylim([ymin, ymax])
plt.savefig(ALA3.outdir + "/model_hist%d.pdf" % alpha[0], bbox_inches='tight')

# --- Free-energy profiles F(x) = -kT*log(p(x)), using a KDE of the sample ---
FE_ymin = 2
FE_ymax = 6
num_grid = 300
kt = 0.593  # kT; presumably kcal/mol at ~298 K — confirm units
kde = scipy.stats.kde.gaussian_kde(x.T)
grid = np.linspace(-4, 4, num_grid)
p0 = kde.evaluate(np.array([grid]))  # prior density evaluated on the grid

alpha = np.array([0.0])
p = belt.get_populations_from_alpha(alpha, np.array([grid]).T, p0)
FE = -kt * np.log(p)
plt.figure()
plt.plot(grid, FE)
plt.title(r"$\alpha_1 = 0$")
plt.xlabel("Observable: $f_1(x)$")
plt.ylabel("Free Energy")
plt.ylim([FE_ymin, FE_ymax])
plt.savefig(ALA3.outdir + "/model_landscape%d.pdf" % alpha[0], bbox_inches='tight')

alpha = np.array([scale])
p = belt.get_populations_from_alpha(alpha, np.array([grid]).T, p0)
FE = -kt * np.log(p)
plt.figure()
plt.plot(grid, FE)
plt.title(r"$\alpha_1 = %d$" % scale)
plt.xlabel("Observable: $f_1(x)$")
plt.ylabel("Free Energy")
plt.ylim([FE_ymin, FE_ymax])
plt.savefig(ALA3.outdir + "/model_landscape%d.pdf" % alpha[0], bbox_inches='tight')

alpha = np.array([-scale])
p = belt.get_populations_from_alpha(alpha, np.array([grid]).T, p0)
FE = -kt * np.log(p)
plt.figure()
plt.plot(grid, FE)
plt.title(r"$\alpha_1 = %d$" % -scale)
plt.xlabel("Observable: $f_1(x)$")
plt.ylabel("Free Energy")
plt.ylim([FE_ymin, FE_ymax])
plt.savefig(ALA3.outdir + "/model_landscape%d.pdf" % alpha[0], bbox_inches='tight')
| gpl-3.0 |
lazywei/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)  # fixed seed for reproducibility
indices = np.arange(len(digits.data))
rng.shuffle(indices)

# Work with a 330-point shuffled subset of the digits data.
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]

n_total_samples = len(y)
n_labeled_points = 30

indices = np.arange(n_total_samples)

unlabeled_set = indices[n_labeled_points:]

# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1  # -1 marks a point as unlabeled for the model

###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]

cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)

print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
      (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))

print(classification_report(true_labels, predicted_labels))

print("Confusion matrix")
print(cm)

# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)

# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]

###############################################################################
# plot the 10 most uncertain predictions with their true labels
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
    image = images[image_index]

    sub = f.add_subplot(2, 5, index + 1)
    sub.imshow(image, cmap=plt.cm.gray_r)
    plt.xticks([])
    plt.yticks([])
    sub.set_title('predict: %i\ntrue: %i' % (
        lp_model.transduction_[image_index], y[image_index]))

f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
kwentz10/Photosynthesis_Optimization_Modeling | Trait_Based_Photo_Model_Meadow_Plant_Traits_Plot_Each_Meadow.py | 1 | 19462 | # -*- coding: utf-8 -*-
"""
Photosynthesis and Stomatal Conductance Model
Created 9/27/2016
Katherine Wentz
This is a program that runs photosynthesis and
stomatal conductance given changes in leaf-
level traits. I derive photosynthesis from a
stomatal conductance model. That way I am
able to void the ci term. I am breaking up the code
into 2 different models. The first model
pretends that there is no intercept term in
the Ball-Berry stomatal conductance model. The
second model contains the intercept term.
The end product is graphs of NUE vs. WUE.
Update: I am going to run the model for plants with
traits that are distinctive of the meadow moisture
gradient in the alpine tundra.
"""
#Chlorophyll, temp of leaf, m, s,nm change the relationship between wue and nue.
#but nm and sla are dependent variables, so they either have to both be constant or both be varying
#---------------Import Modules---------------#
import itertools as it
import numpy as np
from matplotlib import pyplot as plt
#The line of code below is for if I want to input all combinations of changed parameters into my model:
from leaf_parameter_inputs import leaf_params
#The line of code below imports the temperature functions used in this model
from photo_functions import arr_temp, bol_temp, pa_con_atmfrac
#---------------Photosynthesis + Stomatal Conductance Model---------------#
##---Intercept of Carboxylation Rate vs. Light Absorption Rate---##
j_b=20.9 #intercept coefficient
##---Maximum Slope of Carboxylation Rate vs. Light Absorption Rate---##
j_m_max=2.9 #slope coefficient
##---Maximum Chlorophyll---##
chl_max=1000 #Chlorophyll Content of leaves (umol/m2)
##---Convert Moles to Mass of Chlorophyll---##
chl_mass=0.89351 #1 umol of Chlorophyll= 0.89351 mg Chlorophyll
##---Amount of Ribulose Bisphosphate Produced per Chlorophyll---##
rc=120 #nmol RuBP/ mg Chlorophyll
##---Conversion Coefficient to Convert Chlorophyll Content to Ribulose Bisphosphate Content---##
crc=chl_mass*rc
##---Rubisco Maximum Content---##
rub_max=(chl_max*crc)/1000 #(umol RuBP/m2)
##---Air Temperature---##
t=20 #degrees C
##---Constant Parameter Arrays for Model---##
#I have commented out parameters that I am assuming are variable (for the time being)
s_c=np.linspace(0.019-0.0005,0.019+0.0005,2)#specific leaf area (m2 C/g C)
ra=np.zeros(shape=2)+20.7 #specific rubisco activity (umol CO2/g Rub s)
nm_c=((s_c*(100.0**2))*0.077+20.25)/1000.0#leaf nitrogen (g N/ g C)
flnr=np.zeros(shape=2)+0.65 #fraction of leaf nitrogen in rubisco (g N Rub/g N leaf)
frnr=np.zeros(shape=2)+6.25 #weight fraction of nitrogen in rubisco molecule (g Rub/g N Rub)
ea_str=pa_con_atmfrac(611*np.exp(17.27*t/(t+273.3))) #saturation vapor pressure of air (Pa-->umol h20.mol air)
rh=np.zeros(shape=2)+0.55 #relative humidity (kPa/kPa)
ea=rh*ea_str #vapor pressure deficit (umol h2O/mol air)
ca=np.zeros(shape=2)+410 #ambient carbon dioxide (umol CO2/mol air)
tau25=np.zeros(shape=2)+2904.12 #specifity coefficient of tau at 25 C (unitless)
ko25=np.zeros(shape=2)+296100 #Michaelis-Menten kinetic coefficient for oxygen at 25 C(umol/mol)
kc25=np.zeros(shape=2)+ 296 #Michaelis-Menten kinetic coefficient for carbon dioxide at 25 C (umol/mol)
o=np.zeros(shape=2)+210000 #concentration of ambient oxygen (umol/mol)
#lamb=np.zeros(shape=3)+0.0074 #marginal WUE (umol CO2/umol H2O)
b=np.zeros(shape=2)+0.0 #Ball-Berry stomatal conductance intercept parameter
a=np.zeros(shape=2)+1.6 #Conversion Coefficient between stomatal conductance to water and carbon dioxide
chl_c=np.zeros(shape=2)+400 #Chlorophyll Content of the Leaf (umol chl/m2)
tl_c=np.zeros(shape=2)+(31+273.15) #Temperature of the Leaf (K)
vwc_c=np.zeros(shape=2)+0.15 #Soil Volumetric Water Content (cm3/cm3)
vwc_min=0.08 #minimum soil water content for photosynthesis to occur (permanent wilting point) (cm3/cm3)
vwc_max=0.3 #maximum soil water content where increases in soil water do not affect photosynthesis (field capacity?) (cm3/cm3)
q=0.2 #parameter for soil water affect on photosynthesis (unitless)
ij_c=np.zeros(shape=2)+0.96 #leaf area & angle index--downregulates jmax
m_c=np.zeros(shape=2)+15.0 #ball-berry parameter (unitless)
##--What Variable Parameters are Constant?
dict_params=[]
for xx in it.combinations(['nm','chl','tl','m','ij','vwc'],0):
dict_params+=[xx]
if dict_params==[()]:
dict_params=['nan']
for ii in range(len(dict_params)):
#---------------Initialize Plot---------------#
##---Figure With Subplots Blueprint---##
#fb1=plt.figure(1,figsize=(12,2))
#axA = fb1.add_subplot(121)
#axB = fb1.add_subplot(122)
##---Figure Without Subplots Blueprint---##
#put in correct ax value (e.g. axA, axB)
fig,axA = plt.subplots(figsize=(10,5))
##---Define Plot Parameters Based on Graph Interests---##
axA.set_xlabel('NUE (umol CO2/g N s)',fontsize=23, fontname='Times New Roman')
axA.set_ylabel('WUE (umol CO2/mmol H2O)',fontsize=23, fontname='Times New Roman')
axA.set_xlim([0,20])
axA.set_ylim([0,10])
axA.set_title('Growth Response Across Four Plant Trait Assemblages: all params vary', fontname='Times New Roman',fontsize=23,fontweight='bold')
# axA.set_title('Growth Response: constant %s, %s, %s, %s, %s' % (dict_params[ii][0],dict_params[ii][1],dict_params[ii][2],dict_params[ii][3],dict_params[ii][4]), fontname='Times New Roman',fontsize=23,fontweight='bold')
##---Line Type for Each Plant---##
n=16 #number of variable parameter combinations for each meadow type
color=['k']*n+['r']*n+['y']*n+['g']*n+['b']*n
marker=['d']*n+['o']*n+['v']*n+['*']*n+['^']*n
##---Initialize Arrays for Each Meadow---##
wue_f=[]
nue_f=[]
wue_d=[]
nue_d=[]
wue_m=[]
nue_m=[]
wue_w=[]
nue_w=[]
wue_s=[]
nue_s=[]
##---Variable Parameter Arrays for Model---##
for i in range(len(leaf_params)):
for key,val in leaf_params[i].items():
exec(key + '=val')
#set variables constant
if 'nm' in dict_params[ii]:
nm=nm_c
s=s_c #nm and s are dependent variables
if 'm' in dict_params[ii]:
m=m_c
if 'chl' in dict_params[ii]:
chl=chl_c
if 'tl' in dict_params[ii]:
tl=tl_c
if 'vwc' in dict_params[ii]:
vwc=vwc_c
if 'ij' in dict_params[ii]:
ij=ij_c
#
# print dict_params[ii]
# print 's:',s
# print 'nm:',nm
# print 'm:',m
# print 'chl:',chl
# print 'tl:',tl
# print 'vwc:',vwc
# print 'ij:',ij
##---Calculated Parameter Arrays for Model(Constant+Variable Plant Trait(s))---##
es_str=pa_con_atmfrac(611*np.exp(17.27*(tl-273.15)/((tl-273.15)+273.3))) #calculate saturation vapor pressure of surface (Pa)
d=es_str-ea #calculate vapor pressure deficit (umol H2O/mol air)
l=1/s #leaf mass per unit area (g C/m2 C)
na=nm*l #leaf nitrogen (g N/ m2 C)
#below is commented out because I am no longer using a variable lambda parameter
# m=ca/(rh*d*lamb) ##Ball-Berry stomatal conductance slope parameter (unitless)
rub=(chl*crc)/1000 # find ribulose bisphosphate content (umol RuBP/m2)
if all(rub<rub_max):
j_m=j_m_max*(rub/rub_max)*ij #find j_m slope based on ribulose bisphosphate content & leaf area/angle index
else:
j_m=j_m_max*ij
vopt=frnr*flnr*ra*na #optimal carboxylation rate, limited by CO2 (umol CO2/m2s)
jopt=vopt*j_m+j_b #optimal carboxylation rate, limited by RuBP (umol CO2/m2s)
##---Temperature Effects on Parameters---##
#parameters
tk_25=298.16; #absolute temperature at 25 C
ekc=80500.0 #Activation energy for K of CO2 (J mol-1)
eko=14500.0 #Activation energy for K of O2 (J mol-1)
etau=-29000.0 #Activation energy for tau (???) (J mol-1)
ev=55000.0 #Activation energy for carboxylation (J mol-1)
ej=55000.0 #Activation energy for electron transport (J mol-1)
toptv=298.0 #Optimum temperature for maximum carboxylation (K)
toptj=298.0 #Optimum temperature for maximum electron transport (K)
#calculated parameters due to temperature
kc=arr_temp(kc25,ekc,tk_25,tl) #Michaelis-Menten kinetic coefficient for carbon dioxide at leaf temperature (umol/mol)
ko=arr_temp(ko25,eko,tk_25,tl) #Michaelis-Menten kinetic coefficient for oxygen at leaf temperature (umol/mol)
tau=arr_temp(tau25,etau,tk_25,tl) #specifity coefficient of tau at leaf temperature (unitless)
gamma=o/(2*tau) #carbon dioxide compensation point (umol/mol)
vmax1=bol_temp(vopt,ev,toptv,tl) #carboxylation rate at leaf temperature, limited by CO2 (umol CO2/m2s)
jmax1=bol_temp(jopt,ej,toptj,tl) #carboxylation rate at leaf temperature, limited by RuBP (umol CO2/m2s)
##---Soil Moisture Effect on Parameters---##
if all(vwc>=vwc_max):
Wfac=1
elif all(vwc<vwc_max):
Wfac=((vwc-vwc_min)/(vwc_max-vwc_min))**q
vmax=Wfac*vmax1
jmax=Wfac*jmax1
##---Define a1 and a2 depending on whether plant is rubisco limited or light limited---##
#rubisco limited
a1_r=vmax
a2_r=kc*(1+(o/ko))
#light limited
a1_l=jmax/4
a2_l=2*gamma
##---(1)Photosynthesis and Stomatal Conductance Models (b is not taken into account)---##
if any(b==0.0):
#In order to generate this model I combined the following equations:
#A=gsc*(ca-ci)
#gsc=gsw/a
#gsw=mArh/ca
#solve for ci
#plug into A=a1(ci-gamma)/ci+a2
#Rubisco Limiting: a1=vcmax; a2=kc(1+o/ko)
#Light Limiting: a1=2.2*vcmax/4; a2=2*gamma
#Solve for Assimilation
ci=ca-((a*ca)/(m*rh)) #internal carbon dioxide (umol/mol)
##---Rubisco-Limited Assimilation---##
A_r=(a1_r*(ci-gamma))/(ci+a2_r) #rubisco limited assimilation rate (umol CO2/m2s)
##---Light-Limited Assimilation---##
A_l=(a1_l*(ci-gamma))/(ci+a2_l) #light limited assimilation rate (umol CO2/m2s)
##---Determine Rate-Limiting Assimilation---##
A=[]
for xx in range(len(A_r)):
if A_r[xx]<A_l[xx]:
A+=[A_r[xx]] #rubisco limited
elif A_l[xx]<A_r[xx]:
A+=[A_l[xx]] #light limited
else:
A+=[A_l[xx]] #both light and rubisco limited
##---Solve for Stomatal Conductance to Water---##
gsw=m*A*rh/ca #stomatal conductance to water (mol air/m2s)
##---Solve for Evapotranspiration---##
E=gsw*d #(umol H2O/m2s)
##---(2)Photosynthesis and Stomatal Conductance Models (with b)---##
elif any(b>0.0):
#In order to generate this model I combined the following equations:
#A=gsc*(ca-ci)
#gsc=gsw/a
#gsw=mArh/ca+b
#solve for ci
#plug into A=a1(ci-gamma)/ci+a2
#Rubisco Limiting: a1=vcmax; a2=kc(1+o/ko)
#Light Limiting: a1=2.2*vcmax/4; a2=2*gamma
#Solve for Assimilation Using Quadratic Equation
##---Rubisco-Limited Assimilation---##
aa_r=m*rh*ca-a*ca+m*rh*a2_r
bb_r=b*(ca**2)+b*ca*a2_r-a1_r*m*rh*ca+a*ca*a1_r+a1_r*m*rh*gamma
cc_r=a1_r*b*(ca**2)+gamma*b*ca*a1_r
A1_r=(-bb_r+np.sqrt(bb_r**2-4*aa_r*cc_r))/(2*aa_r)
A2_r=(-bb_r-np.sqrt(bb_r**2-4*aa_r*cc_r))/(2*aa_r)
#Choose Highest Values for Assimilation and Conductance
A_r=[]
for j in range(len(A1_r)):
if A1_r[j]>A2_r[j]:
A_r+=[A1_r[j]]
elif A2_r[j]>A1_r[j]:
A_r+=[A2_r[j]]
else:
A_r+=[A1_r[j]]
##---Light-Limited Assimilation---##
aa_l=m*rh*ca-a*ca+m*rh*a2_l
bb_l=b*(ca**2)+b*ca*a2_l-a1_l*m*rh*ca+a*ca*a1_l+a1_l*m*rh*gamma
cc_l=a1_l*b*(ca**2)+gamma*b*ca*a1_l
A1_l=(-bb_l+np.sqrt(bb_l**2-4*aa_l*cc_l))/(2*aa_l)
A2_l=(-bb_l-np.sqrt(bb_l**2-4*aa_l*cc_l))/(2*aa_l)
#Choose Highest Values for Assimilation and Conductance
A_l=[]
for j in range(len(A1_l)):
if A1_l[j]>A2_l[j]:
A_l+=[A1_l[j]]
elif A2_l[j]>A1_l[j]:
A_l+=[A2_l[j]]
else:
A_l+=[A1_l[j]]
##---Determine Rate-Limiting Assimilation---##
A=[]
for xx in range(len(A_r)):
if A_r[xx]<A_l[xx]:
A+=[A_r[xx]] #rubisco limited
elif A_l[xx]<A_r[xx]:
A+=[A_l[xx]] #light limited
else:
A+=[A_l[xx]] #both light and rubisco limited
##---Solve for Stomatal Conductance to Water---##
gsw=m*A*rh/ca #stomatal conductance to water (mol H2O/m2s) #make array from list
##---Solve for Evapotranspiration---##
E=gsw*d #(umol H2O/m2s)
#---------------Test for Nan or Negative Values---------------#
for xxx in range(len(A)):
if np.isnan(A[xxx]):
print "A array contains nan values"
break
if A[xxx]<0.0:
print "A array contains negative values"
break
if np.isnan(gsw[xxx]):
print "gsw array contains nan values"
break
if gsw[xxx]<0.0:
print "gsw array contains negative values"
break
if np.isnan(E[xxx]):
print "E array contains nan values"
break
if E[xxx]<0.0:
print "E array contains negative values"
break
#---------------WUE vs. NUE---------------#
wue=np.diff(A)/np.diff(E)*1000.0 #multiply by 1000 to get from umol CO2/umol H20 to umol CO2/mmol H20
nue=np.diff(A)/np.diff(na)
#---------------Test for Low NUE Values---------------#
# if any(nue<15):
# break
#---------------Make Array of Values for Each Meadow---------------#
if i+1<17:
wue_f+=[wue]
nue_f+=[nue]
elif i+1>=17 and i+1<33:
wue_d+=[wue]
nue_d+=[nue]
elif i+1>=33 and i+1<49:
wue_m+=[wue]
nue_m+=[nue]
elif i+1>=49 and i+1<65:
wue_w+=[wue]
nue_w+=[nue]
elif i+1>=65 and i+1<81:
wue_s+=[wue]
nue_s+=[nue]
#---------------Plot NUE vs. WUE---------------#
#I am plotting NUE vs. WUE for each plant trait
##---Separate Plots Into Different Figures: Set Up---##
#fb=plt.figure(i+1,figsize=(6,6)) #figure blueprint
#fig,ax1 = plt.subplots()
#ax1.set_xlabel('NUE (umol CO2/g N s)',fontsize=12)
#ax1.set_ylabel('WUE (umol CO2/umol H20)',fontsize=12)
#ax1.set_title('NUE vs. WUE',fontsize=14)
##---Plot---##
#will need to change ax value if plotting separate graphs for each iteration, e.g. ax1
# axA.plot(nue,wue,label='%s' %trait[i], color='%s' %color[i],marker='%s' %marker[i],linestyle='%s' %style[i])
axA.plot([max(nue_f),max(nue_f),min(nue_f),min(nue_f),max(nue_f)],[max(wue_f),min(wue_f),min(wue_f),max(wue_f),max(wue_f)],color='red', linestyle='-', linewidth=8.5, label='fellfield')
# axA.scatter([max(nue_f),max(nue_f),min(nue_f),min(nue_f),max(nue_f)],[max(wue_f),min(wue_f),min(wue_f),max(wue_f),max(wue_f)],color='k', marker='^', label='fellfield')
axA.fill_between([np.min(nue_f),np.max(nue_f)],np.min(wue_f),np.max(wue_f),color='red',alpha=0.5)
axA.plot([max(nue_d),max(nue_d),min(nue_d),min(nue_d),max(nue_d)],[max(wue_d),min(wue_d),min(wue_d),max(wue_d),max(wue_d)],color='brown', linestyle='-',linewidth=6.5, label='dry meadow')
# axA.scatter([max(nue_d),max(nue_d),min(nue_d),min(nue_d),max(nue_d)],[max(wue_d),min(wue_d),min(wue_d),max(wue_d),max(wue_d)],color='r', marker='d',label='dry meadow')
axA.fill_between([np.min(nue_d),np.max(nue_d)],np.min(wue_d),np.max(wue_d),color='brown',alpha=0.5)
axA.plot([max(nue_m),max(nue_m),min(nue_m),min(nue_m),max(nue_m)],[max(wue_m),min(wue_m),min(wue_m),max(wue_m),max(wue_m)],color='orange', linestyle='-',linewidth=4.5, label='moist meadow')
# axA.scatter([max(nue_m),max(nue_m),min(nue_m),min(nue_m),max(nue_m)],[max(wue_m),min(wue_m),min(wue_m),max(wue_m),max(wue_m)],color='y', marker='o',s=40,label='moist meadow')
axA.fill_between([np.min(nue_m),np.max(nue_m)],np.min(wue_m),np.max(wue_m),color='orange',alpha=0.5)
axA.plot([max(nue_w),max(nue_w),min(nue_w),min(nue_w),max(nue_w)],[max(wue_w),min(wue_w),min(wue_w),max(wue_w),max(wue_w)],color='g', linestyle='-',linewidth=2.0,label='wet meadow')
# axA.scatter([max(nue_w),max(nue_w),min(nue_w),min(nue_w),max(nue_w)],[max(wue_w),min(wue_w),min(wue_w),max(wue_w),max(wue_w)],facecolor='none',edgecolor='g', marker='o',s=120,label='wet meadow')
axA.fill_between([np.min(nue_w),np.max(nue_w)],np.min(wue_w),np.max(wue_w),color='green',alpha=0.5)
axA.plot([max(nue_s),max(nue_s),min(nue_s),min(nue_s),max(nue_s)],[max(wue_s),min(wue_s),min(wue_s),max(wue_s),max(wue_s)],color='c', linestyle='-',linewidth=1.0, label='snowbed')
# axA.scatter([max(nue_s),max(nue_s),min(nue_s),min(nue_s),max(nue_s)],[max(wue_s),min(wue_s),min(wue_s),max(wue_s),max(wue_s)],color='b', marker='*', label='snowbed')
axA.fill_between([np.min(nue_s),np.max(nue_s)],np.min(wue_s),np.max(wue_s),color='cyan',alpha=0.5)
##---Separate Plots Into Different Figures: Legend---##
#ax1.legend(loc=4)
##---------------Nutrient Use Efficiency and Water Use Efficiency--------------- #
##I am plotting the change in plant trait vs. two y axes: WUE and NUE
#fb3=plt.figure(3,figsize=(6,6)) #figure blueprint
#fig, ax1 = plt.subplots()
# axB=axA.twinx() #second y axis
# axA.set_xlabel('m (umol CO2/m2s)',fontsize=12)
# axA.set_ylabel('NUE (umol CO2/g N s)',fontsize=12)
# axB.set_ylabel('WUE (umol CO2/mol H20)',fontsize=12)
# axA.scatter(m[0:2],nue,color=color[i],label='NUE')
# axB.scatter(m[0:2],wue,color=color[i],label='WUE')
# ax1.set_title('NUE vs. WUE with Increasing Photosynthesis',fontsize=14)
## ax1.legend(loc=2)
# ax2.legend(loc=1)
#---------------Make Plot Interactive---------------#
#
# plt.pause(0.0001)
# plt.ion()
#end of sensitivity analysis iterations
#---------------Finalize Figure---------------#
#axA refers to first figure in subplot; axB refers to second figure in subplot
#if only one axis is run then the figure is just one plot
##---Legend---##
axA.legend(bbox_to_anchor=(1, 1), loc='left', prop={'size':11})
##---Save Figure--##
plt.savefig('NUE_vs_WUE_variable_test.png')
| mit |
CforED/Machine-Learning | sklearn/_build_utils/__init__.py | 21 | 1125 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'sklearn'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
from numpy.distutils.system_info import get_info
def get_blas_info():
def atlas_not_found(blas_info_):
def_macros = blas_info.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
| bsd-3-clause |
joernhees/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 23 | 3957 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_estimate_bandwidth_with_sparse_matrix():
# Test estimate_bandwidth with sparse matrix
X = sparse.lil_matrix((1000, 1000))
msg = "A sparse matrix was passed, but dense data is required."
assert_raise_message(TypeError, msg, estimate_bandwidth, X, 200)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
DBernardes/ProjetoECC | Eficiência_Quântica/Codigo/CCDinfo.py | 1 | 1100 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Criado em 18 de Outubro de 2016
Descricao: este modulo tem como entrada o cabecalho de uma imagen fits e a quantidade de imagens da serie obtidam retornado uma string com as principais informacoes do CCD.
@author: Denis Varise Bernardes & Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import matplotlib.pyplot as plt
def CCDinfo(header,nImages):
date = header['date'].split('T')
plt.xticks(())
plt.yticks(())
text = 'Camera: ' + header['head'] +'\n' + 'Data do experimento: %s %s ' %(date[0], date[1]) +'\n' + 'Quantidade de imagens: %i ' %(nImages) + '\n' + 'Modo de Leitura: %s' %(header['ACQMODE']) + '\n' + 'Taxa de leitura: %.2f MHz'%(1/(header['readtime']*1000000)) + '\n' + 'Pre-amplificacao: %i' %(header['preamp']) + '\n' + 'VShift Speed: %.3f e-6' %(header['vshift']*1000000)
return text
| mit |
ShawnMurd/MetPy | tests/calc/test_basic.py | 1 | 32604 | # Copyright (c) 2008,2015,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `basic` module."""
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from metpy.calc import (add_height_to_pressure, add_pressure_to_height,
altimeter_to_sea_level_pressure, altimeter_to_station_pressure,
apparent_temperature, coriolis_parameter, geopotential_to_height,
heat_index, height_to_geopotential, height_to_pressure_std,
pressure_to_height_std, sigma_to_pressure, smooth_circular,
smooth_gaussian, smooth_n_point, smooth_rectangular, smooth_window,
wind_components, wind_direction, wind_speed, windchill)
from metpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_array_equal)
from metpy.units import units
def test_wind_comps_basic():
"""Test the basic wind component calculation."""
speed = np.array([4, 4, 4, 4, 25, 25, 25, 25, 10.]) * units.mph
dirs = np.array([0, 45, 90, 135, 180, 225, 270, 315, 360]) * units.deg
s2 = np.sqrt(2.)
u, v = wind_components(speed, dirs)
true_u = np.array([0, -4 / s2, -4, -4 / s2, 0, 25 / s2, 25, 25 / s2, 0]) * units.mph
true_v = np.array([-4, -4 / s2, 0, 4 / s2, 25, 25 / s2, 0, -25 / s2, -10]) * units.mph
assert_array_almost_equal(true_u, u, 4)
assert_array_almost_equal(true_v, v, 4)
def test_wind_comps_with_north_and_calm():
"""Test that the wind component calculation handles northerly and calm winds."""
speed = np.array([0, 5, 5]) * units.mph
dirs = np.array([0, 360, 0]) * units.deg
u, v = wind_components(speed, dirs)
true_u = np.array([0, 0, 0]) * units.mph
true_v = np.array([0, -5, -5]) * units.mph
assert_array_almost_equal(true_u, u, 4)
assert_array_almost_equal(true_v, v, 4)
def test_wind_comps_scalar():
"""Test wind components calculation with scalars."""
u, v = wind_components(8 * units('m/s'), 150 * units.deg)
assert_almost_equal(u, -4 * units('m/s'), 3)
assert_almost_equal(v, 6.9282 * units('m/s'), 3)
def test_speed():
"""Test calculating wind speed."""
u = np.array([4., 2., 0., 0.]) * units('m/s')
v = np.array([0., 2., 4., 0.]) * units('m/s')
speed = wind_speed(u, v)
s2 = np.sqrt(2.)
true_speed = np.array([4., 2 * s2, 4., 0.]) * units('m/s')
assert_array_almost_equal(true_speed, speed, 4)
def test_direction():
"""Test calculating wind direction."""
u = np.array([4., 2., 0., 0.]) * units('m/s')
v = np.array([0., 2., 4., 0.]) * units('m/s')
direc = wind_direction(u, v)
true_dir = np.array([270., 225., 180., 0.]) * units.deg
assert_array_almost_equal(true_dir, direc, 4)
def test_direction_masked():
"""Test calculating wind direction from masked wind components."""
mask = np.array([True, False, True, False])
u = np.array([4., 2., 0., 0.])
v = np.array([0., 2., 4., 0.])
u_masked = units.Quantity(np.ma.array(u, mask=mask), units('m/s'))
v_masked = units.Quantity(np.ma.array(v, mask=mask), units('m/s'))
direc = wind_direction(u_masked, v_masked)
true_dir = np.array([270., 225., 180., 0.])
true_dir_masked = units.Quantity(np.ma.array(true_dir, mask=mask), units.deg)
assert_array_almost_equal(true_dir_masked, direc, 4)
def test_direction_with_north_and_calm():
"""Test how wind direction handles northerly and calm winds."""
u = np.array([0., -0., 0.]) * units('m/s')
v = np.array([0., 0., -5.]) * units('m/s')
direc = wind_direction(u, v)
true_dir = np.array([0., 0., 360.]) * units.deg
assert_array_almost_equal(true_dir, direc, 4)
def test_direction_dimensions():
"""Verify wind_direction returns degrees."""
d = wind_direction(3. * units('m/s'), 4. * units('m/s'))
assert str(d.units) == 'degree'
def test_oceanographic_direction():
"""Test oceanographic direction (to) convention."""
d = wind_direction(5 * units('m/s'), -5 * units('m/s'), convention='to')
true_dir = 135 * units.deg
assert_almost_equal(d, true_dir, 4)
def test_invalid_direction_convention():
"""Test the error that is returned if the convention kwarg is not valid."""
with pytest.raises(ValueError):
wind_direction(1 * units('m/s'), 5 * units('m/s'), convention='test')
def test_speed_direction_roundtrip():
"""Test round-tripping between speed/direction and components."""
# Test each quadrant of the whole circle
wspd = np.array([15., 5., 2., 10.]) * units.meters / units.seconds
wdir = np.array([160., 30., 225., 350.]) * units.degrees
u, v = wind_components(wspd, wdir)
wdir_out = wind_direction(u, v)
wspd_out = wind_speed(u, v)
assert_array_almost_equal(wspd, wspd_out, 4)
assert_array_almost_equal(wdir, wdir_out, 4)
def test_scalar_speed():
"""Test wind speed with scalars."""
s = wind_speed(-3. * units('m/s'), -4. * units('m/s'))
assert_almost_equal(s, 5. * units('m/s'), 3)
def test_scalar_direction():
"""Test wind direction with scalars."""
d = wind_direction(3. * units('m/s'), 4. * units('m/s'))
assert_almost_equal(d, 216.870 * units.deg, 3)
def test_windchill_scalar():
"""Test wind chill with scalars."""
wc = windchill(-5 * units.degC, 35 * units('m/s'))
assert_almost_equal(wc, -18.9357 * units.degC, 0)
def test_windchill_basic():
"""Test the basic wind chill calculation."""
temp = np.array([40, -10, -45, 20]) * units.degF
speed = np.array([5, 55, 25, 15]) * units.mph
wc = windchill(temp, speed)
values = np.array([36, -46, -84, 6]) * units.degF
assert_array_almost_equal(wc, values, 0)
def test_windchill_kelvin():
"""Test wind chill when given Kelvin temperatures."""
wc = windchill(268.15 * units.kelvin, 35 * units('m/s'))
assert_almost_equal(wc, -18.9357 * units.degC, 0)
def test_windchill_invalid():
"""Test windchill for values that should be masked."""
temp = np.array([10, 51, 49, 60, 80, 81]) * units.degF
speed = np.array([4, 4, 3, 1, 10, 39]) * units.mph
wc = windchill(temp, speed)
# We don't care about the masked values
truth = units.Quantity(np.ma.array([2.6230789, np.nan, np.nan, np.nan, np.nan, np.nan],
mask=[False, True, True, True, True, True]), units.degF)
assert_array_almost_equal(truth, wc)
def test_windchill_undefined_flag():
"""Test whether masking values for windchill can be disabled."""
temp = units.Quantity(np.ma.array([49, 50, 49, 60, 80, 81]), units.degF)
speed = units.Quantity(([4, 4, 3, 1, 10, 39]), units.mph)
wc = windchill(temp, speed, mask_undefined=False)
mask = np.array([False] * 6)
assert_array_equal(wc.mask, mask)
def test_windchill_face_level():
"""Test windchill using the face_level flag."""
temp = np.array([20, 0, -20, -40]) * units.degF
speed = np.array([15, 30, 45, 60]) * units.mph
wc = windchill(temp, speed, face_level_winds=True)
values = np.array([3, -30, -64, -98]) * units.degF
assert_array_almost_equal(wc, values, 0)
def test_heat_index_basic():
"""Test the basic heat index calculation."""
temp = np.array([80, 88, 92, 110, 86]) * units.degF
rh = np.array([40, 100, 70, 40, 88]) * units.percent
hi = heat_index(temp, rh)
values = np.array([80, 121, 112, 136, 104]) * units.degF
assert_array_almost_equal(hi, values, 0)
def test_heat_index_scalar():
"""Test heat index using scalars."""
hi = heat_index(96 * units.degF, 65 * units.percent)
assert_almost_equal(hi, 121 * units.degF, 0)
def test_heat_index_invalid():
"""Test heat index for values that should be masked."""
temp = np.array([80, 88, 92, 79, 30, 81]) * units.degF
rh = np.array([40, 39, 2, 70, 50, 39]) * units.percent
hi = heat_index(temp, rh)
mask = np.array([False, False, False, True, True, False])
assert_array_equal(hi.mask, mask)
def test_heat_index_undefined_flag():
"""Test whether masking values can be disabled for heat index."""
temp = units.Quantity(np.ma.array([80, 88, 92, 79, 30, 81]), units.degF)
rh = units.Quantity(np.ma.array([40, 39, 2, 70, 50, 39]), units.percent)
hi = heat_index(temp, rh, mask_undefined=False)
mask = np.array([False] * 6)
assert_array_equal(hi.mask, mask)
def test_heat_index_units():
"""Test units coming out of heat index."""
temp = units.Quantity([35., 20.], units.degC)
rh = 70 * units.percent
hi = heat_index(temp, rh)
assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)
def test_heat_index_ratio():
"""Test giving humidity as number [0, 1] to heat index."""
temp = units.Quantity([35., 20.], units.degC)
rh = 0.7
hi = heat_index(temp, rh)
assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)
def test_heat_index_vs_nws():
"""Test heat_index against online calculated HI from NWS Website."""
# https://www.wpc.ncep.noaa.gov/html/heatindex.shtml, visited 2019-Jul-17
temp = units.Quantity(np.array([86, 111, 40, 96]), units.degF)
rh = np.ma.array([45, 27, 99, 60]) * units.percent
hi = heat_index(temp, rh)
truth = units.Quantity(np.ma.array([87, 121, 40, 116], mask=[False, False, True, False]),
units.degF)
assert_array_almost_equal(hi, truth, 0)
def test_heat_index_kelvin():
"""Test heat_index when given Kelvin temperatures."""
temp = 308.15 * units.degK
rh = 0.7
hi = heat_index(temp, rh)
# NB rounded up test value here vs the above two tests
assert_almost_equal(hi.to('degC'), 50.3406 * units.degC, 4)
def test_height_to_geopotential():
"""Test conversion from height to geopotential."""
height = units.Quantity([0, 1000, 2000, 3000], units.m)
geopot = height_to_geopotential(height)
assert_array_almost_equal(geopot, units.Quantity([0., 9805, 19607,
29406], units('m**2 / second**2')), 0)
# See #1075 regarding previous destructive cancellation in floating point
def test_height_to_geopotential_32bit():
"""Test conversion to geopotential with 32-bit values."""
heights = np.linspace(20597, 20598, 11, dtype=np.float32) * units.m
truth = np.array([201336.67, 201337.66, 201338.62, 201339.61, 201340.58, 201341.56,
201342.53, 201343.52, 201344.48, 201345.44, 201346.42],
dtype=np.float32) * units('J/kg')
assert_almost_equal(height_to_geopotential(heights), truth, 2)
def test_geopotential_to_height():
"""Test conversion from geopotential to height."""
geopotential = units.Quantity([0., 9805.11102602, 19607.14506998, 29406.10358006],
units('m**2 / second**2'))
height = geopotential_to_height(geopotential)
assert_array_almost_equal(height, units.Quantity([0, 1000, 2000, 3000], units.m), 0)
# See #1075 regarding previous destructive cancellation in floating point
def test_geopotential_to_height_32bit():
"""Test conversion from geopotential to height with 32-bit values."""
geopot = np.arange(201590, 201600, dtype=np.float32) * units('J/kg')
truth = np.array([20623.000, 20623.102, 20623.203, 20623.307, 20623.408,
20623.512, 20623.615, 20623.717, 20623.820, 20623.924],
dtype=np.float32) * units.m
assert_almost_equal(geopotential_to_height(geopot), truth, 2)
# class TestIrrad(object):
# def test_basic(self):
# 'Test the basic solar irradiance calculation.'
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hours = np.linspace(6,18,10)
# s = solar_irradiance(lat, d, hours)
# values = np.array([0., 344.1, 682.6, 933.9, 1067.6, 1067.6, 933.9,
# 682.6, 344.1, 0.])
# assert_array_almost_equal(s, values, 1)
# def test_scalar(self):
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hour = 9.5
# s = solar_irradiance(lat, d, hour)
# assert_almost_equal(s, 852.1, 1)
# def test_invalid(self):
# 'Test for values that should be masked.'
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hours = np.linspace(0,22,12)
# s = solar_irradiance(lat, d, hours)
# mask = np.array([ True, True, True, True, False, False, False,
# False, False, True, True, True])
# assert_array_equal(s.mask, mask)
def test_pressure_to_heights_basic():
"""Test basic pressure to height calculation for standard atmosphere."""
pressures = np.array([975.2, 987.5, 956., 943.]) * units.mbar
heights = pressure_to_height_std(pressures)
values = np.array([321.5, 216.5, 487.6, 601.7]) * units.meter
assert_almost_equal(heights, values, 1)
def test_heights_to_pressure_basic():
"""Test basic height to pressure calculation for standard atmosphere."""
heights = np.array([321.5, 216.5, 487.6, 601.7]) * units.meter
pressures = height_to_pressure_std(heights)
values = np.array([975.2, 987.5, 956., 943.]) * units.mbar
assert_almost_equal(pressures, values, 1)
def test_pressure_to_heights_units():
"""Test that passing non-mbar units works."""
assert_almost_equal(pressure_to_height_std(29 * units.inHg), 262.859 * units.meter, 3)
def test_coriolis_force():
"""Test basic coriolis force calculation."""
lat = np.array([-90., -30., 0., 30., 90.]) * units.degrees
cor = coriolis_parameter(lat)
values = np.array([-1.4584232E-4, -.72921159E-4, 0, .72921159E-4,
1.4584232E-4]) * units('s^-1')
assert_almost_equal(cor, values, 7)
def test_add_height_to_pressure():
"""Test the pressure at height above pressure calculation."""
pressure = add_height_to_pressure(1000 * units.hPa, 877.17421094 * units.meter)
assert_almost_equal(pressure, 900 * units.hPa, 5)
def test_add_pressure_to_height():
"""Test the height at pressure above height calculation."""
height = add_pressure_to_height(110.8286757 * units.m, 100 * units.hPa)
assert_almost_equal(height, 988.0028867 * units.meter, 3)
def test_sigma_to_pressure():
"""Test sigma_to_pressure."""
surface_pressure = 1000. * units.hPa
model_top_pressure = 0. * units.hPa
sigma = np.arange(0., 1.1, 0.1)
expected = np.arange(0., 1100., 100.) * units.hPa
pressure = sigma_to_pressure(sigma, surface_pressure, model_top_pressure)
assert_array_almost_equal(pressure, expected, 5)
def test_warning_dir():
"""Test that warning is raised wind direction > 2Pi."""
with pytest.warns(UserWarning):
wind_components(3. * units('m/s'), 270)
def test_coriolis_warning():
"""Test that warning is raise when latitude larger than pi radians."""
with pytest.warns(UserWarning):
coriolis_parameter(50)
with pytest.warns(UserWarning):
coriolis_parameter(-50)
def test_coriolis_units():
"""Test that coriolis returns units of 1/second."""
f = coriolis_parameter(50 * units.degrees)
assert f.units == units('1/second')
def test_apparent_temperature():
"""Test the apparent temperature calculation."""
temperature = np.array([[90, 90, 70],
[20, 20, 60]]) * units.degF
rel_humidity = np.array([[60, 20, 60],
[10, 10, 10]]) * units.percent
wind = np.array([[5, 3, 3],
[10, 1, 10]]) * units.mph
truth = units.Quantity(np.ma.array([[99.6777178, 86.3357671, 70], [8.8140662, 20, 60]],
mask=[[False, False, True], [False, True, True]]),
units.degF)
res = apparent_temperature(temperature, rel_humidity, wind)
assert_array_almost_equal(res, truth, 6)
def test_apparent_temperature_scalar():
"""Test the apparent temperature calculation with a scalar."""
temperature = 90 * units.degF
rel_humidity = 60 * units.percent
wind = 5 * units.mph
truth = 99.6777178 * units.degF
res = apparent_temperature(temperature, rel_humidity, wind)
assert_almost_equal(res, truth, 6)
def test_apparent_temperature_scalar_no_modification():
"""Test the apparent temperature calculation with a scalar that is NOOP."""
temperature = 70 * units.degF
rel_humidity = 60 * units.percent
wind = 5 * units.mph
truth = 70 * units.degF
res = apparent_temperature(temperature, rel_humidity, wind, mask_undefined=False)
assert_almost_equal(res, truth, 6)
def test_apparent_temperature_windchill():
"""Test that apparent temperature works when a windchill is calculated."""
temperature = -5. * units.degC
rel_humidity = 50. * units.percent
wind = 35. * units('m/s')
truth = -18.9357 * units.degC
res = apparent_temperature(temperature, rel_humidity, wind)
assert_almost_equal(res, truth, 0)
def test_apparent_temperature_mask_undefined_false():
"""Test that apparent temperature works when mask_undefined is False."""
temp = np.array([80, 55, 10]) * units.degF
rh = np.array([40, 50, 25]) * units.percent
wind = np.array([5, 4, 10]) * units('m/s')
app_temperature = apparent_temperature(temp, rh, wind, mask_undefined=False)
assert not hasattr(app_temperature, 'mask')
def test_apparent_temperature_mask_undefined_true():
"""Test that apparent temperature works when mask_undefined is True."""
temp = np.array([80, 55, 10]) * units.degF
rh = np.array([40, 50, 25]) * units.percent
wind = np.array([5, 4, 10]) * units('m/s')
app_temperature = apparent_temperature(temp, rh, wind, mask_undefined=True)
mask = [False, True, False]
assert_array_equal(app_temperature.mask, mask)
def test_smooth_gaussian():
"""Test the smooth_gaussian function with a larger n."""
m = 10
s = np.zeros((m, m))
for i in np.ndindex(s.shape):
s[i] = i[0] + i[1]**2
s = smooth_gaussian(s, 4)
s_true = np.array([[0.40077472, 1.59215426, 4.59665817, 9.59665817, 16.59665817,
25.59665817, 36.59665817, 49.59665817, 64.51108392, 77.87487258],
[1.20939518, 2.40077472, 5.40527863, 10.40527863, 17.40527863,
26.40527863, 37.40527863, 50.40527863, 65.31970438, 78.68349304],
[2.20489127, 3.39627081, 6.40077472, 11.40077472, 18.40077472,
27.40077472, 38.40077472, 51.40077472, 66.31520047, 79.67898913],
[3.20489127, 4.39627081, 7.40077472, 12.40077472, 19.40077472,
28.40077472, 39.40077472, 52.40077472, 67.31520047, 80.67898913],
[4.20489127, 5.39627081, 8.40077472, 13.40077472, 20.40077472,
29.40077472, 40.40077472, 53.40077472, 68.31520047, 81.67898913],
[5.20489127, 6.39627081, 9.40077472, 14.40077472, 21.40077472,
30.40077472, 41.40077472, 54.40077472, 69.31520047, 82.67898913],
[6.20489127, 7.39627081, 10.40077472, 15.40077472, 22.40077472,
31.40077472, 42.40077472, 55.40077472, 70.31520047, 83.67898913],
[7.20489127, 8.39627081, 11.40077472, 16.40077472, 23.40077472,
32.40077472, 43.40077472, 56.40077472, 71.31520047, 84.67898913],
[8.20038736, 9.3917669, 12.39627081, 17.39627081, 24.39627081,
33.39627081, 44.39627081, 57.39627081, 72.31069656, 85.67448522],
[9.00900782, 10.20038736, 13.20489127, 18.20489127, 25.20489127,
34.20489127, 45.20489127, 58.20489127, 73.11931702, 86.48310568]])
assert_array_almost_equal(s, s_true)
def test_smooth_gaussian_small_n():
"""Test the smooth_gaussian function with a smaller n."""
m = 5
s = np.zeros((m, m))
for i in np.ndindex(s.shape):
s[i] = i[0] + i[1]**2
s = smooth_gaussian(s, 1)
s_true = [[0.0141798077, 1.02126971, 4.02126971, 9.02126971, 15.9574606],
[1.00708990, 2.01417981, 5.01417981, 10.0141798, 16.9503707],
[2.00708990, 3.01417981, 6.01417981, 11.0141798, 17.9503707],
[3.00708990, 4.01417981, 7.01417981, 12.0141798, 18.9503707],
[4.00000000, 5.00708990, 8.00708990, 13.0070899, 19.9432808]]
assert_array_almost_equal(s, s_true)
def test_smooth_gaussian_3d_units():
"""Test the smooth_gaussian function with units and a 3D array."""
m = 5
s = np.zeros((3, m, m))
for i in np.ndindex(s.shape):
s[i] = i[1] + i[2]**2
s[0::2, :, :] = 10 * s[0::2, :, :]
s = s * units('m')
s = smooth_gaussian(s, 1)
s_true = ([[0.0141798077, 1.02126971, 4.02126971, 9.02126971, 15.9574606],
[1.00708990, 2.01417981, 5.01417981, 10.0141798, 16.9503707],
[2.00708990, 3.01417981, 6.01417981, 11.0141798, 17.9503707],
[3.00708990, 4.01417981, 7.01417981, 12.0141798, 18.9503707],
[4.00000000, 5.00708990, 8.00708990, 13.0070899, 19.9432808]]) * units('m')
assert_array_almost_equal(s[1, :, :], s_true)
def test_smooth_n_pt_5():
"""Test the smooth_n_pt function using 5 points."""
hght = np.array([[5640., 5640., 5640., 5640., 5640.],
[5684., 5676., 5666., 5659., 5651.],
[5728., 5712., 5692., 5678., 5662.],
[5772., 5748., 5718., 5697., 5673.],
[5816., 5784., 5744., 5716., 5684.]])
shght = smooth_n_point(hght, 5, 1)
s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
[5684., 5675.75, 5666.375, 5658.875, 5651.],
[5728., 5711.5, 5692.75, 5677.75, 5662.],
[5772., 5747.25, 5719.125, 5696.625, 5673.],
[5816., 5784., 5744., 5716., 5684.]])
assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_5_units():
    """Test the smooth_n_pt function using 5 points with units."""
    # Same field as test_smooth_n_pt_5, but carrying meter units; the result
    # must keep the units attached.
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    shght = smooth_n_point(hght, 5, 1)
    s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
                       [5684., 5675.75, 5666.375, 5658.875, 5651.],
                       [5728., 5711.5, 5692.75, 5677.75, 5662.],
                       [5772., 5747.25, 5719.125, 5696.625, 5673.],
                       [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_9_units():
    """Test the smooth_n_pt function using 9 points with units."""
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    shght = smooth_n_point(hght, 9, 1)
    # Reference values for a single pass of the 9-point filter.
    s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
                       [5684., 5675.5, 5666.75, 5658.75, 5651.],
                       [5728., 5711., 5693.5, 5677.5, 5662.],
                       [5772., 5746.5, 5720.25, 5696.25, 5673.],
                       [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_9_repeat():
    """Test the smooth_n_pt function using 9 points with two passes."""
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]])
    # Third argument = 2 applies the smoother twice.
    shght = smooth_n_point(hght, 9, 2)
    s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
                       [5684., 5675.4375, 5666.9375, 5658.8125, 5651.],
                       [5728., 5710.875, 5693.875, 5677.625, 5662.],
                       [5772., 5746.375, 5720.625, 5696.375, 5673.],
                       [5816., 5784., 5744., 5716., 5684.]])
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_wrong_number():
    """Test the smooth_n_pt function using wrong number of points."""
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]])
    # 7 is not a supported point count, so a ValueError must be raised.
    with pytest.raises(ValueError):
        smooth_n_point(hght, 7)
def test_smooth_n_pt_3d_units():
    """Test the smooth_n_point function with a 3D array with units."""
    # Two stacked 5x5 height slices; both must be smoothed independently.
    hght = [[[5640.0, 5640.0, 5640.0, 5640.0, 5640.0],
             [5684.0, 5676.0, 5666.0, 5659.0, 5651.0],
             [5728.0, 5712.0, 5692.0, 5678.0, 5662.0],
             [5772.0, 5748.0, 5718.0, 5697.0, 5673.0],
             [5816.0, 5784.0, 5744.0, 5716.0, 5684.0]],
            [[6768.0, 6768.0, 6768.0, 6768.0, 6768.0],
             [6820.8, 6811.2, 6799.2, 6790.8, 6781.2],
             [6873.6, 6854.4, 6830.4, 6813.6, 6794.4],
             [6926.4, 6897.6, 6861.6, 6836.4, 6807.6],
             [6979.2, 6940.8, 6892.8, 6859.2, 6820.8]]] * units.m
    shght = smooth_n_point(hght, 9, 2)
    s_true = [[[5640., 5640., 5640., 5640., 5640.],
               [5684., 5675.4375, 5666.9375, 5658.8125, 5651.],
               [5728., 5710.875, 5693.875, 5677.625, 5662.],
               [5772., 5746.375, 5720.625, 5696.375, 5673.],
               [5816., 5784., 5744., 5716., 5684.]],
              [[6768., 6768., 6768., 6768., 6768.],
               [6820.8, 6810.525, 6800.325, 6790.575, 6781.2],
               [6873.6, 6853.05, 6832.65, 6813.15, 6794.4],
               [6926.4, 6895.65, 6864.75, 6835.65, 6807.6],
               [6979.2, 6940.8, 6892.8, 6859.2, 6820.8]]] * units.m
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_temperature():
    """Test the smooth_n_pt function with temperature units."""
    # degC exercises an offset (non-multiplicative) unit through the smoother.
    t = np.array([[2.73, 3.43, 6.53, 7.13, 4.83],
                  [3.73, 4.93, 6.13, 6.63, 8.23],
                  [3.03, 4.83, 6.03, 7.23, 7.63],
                  [3.33, 4.63, 7.23, 6.73, 6.23],
                  [3.93, 3.03, 7.43, 9.23, 9.23]]) * units.degC
    smooth_t = smooth_n_point(t, 9, 1)
    smooth_t_true = np.array([[2.73, 3.43, 6.53, 7.13, 4.83],
                              [3.73, 4.6425, 5.96125, 6.81124, 8.23],
                              [3.03, 4.81125, 6.1175, 6.92375, 7.63],
                              [3.33, 4.73625, 6.43, 7.3175, 6.23],
                              [3.93, 3.03, 7.43, 9.23, 9.23]]) * units.degC
    # Compare to 4 decimal places only.
    assert_array_almost_equal(smooth_t, smooth_t_true, 4)
def test_smooth_gaussian_temperature():
    """Test the smooth_gaussian function with temperature units."""
    t = np.array([[2.73, 3.43, 6.53, 7.13, 4.83],
                  [3.73, 4.93, 6.13, 6.63, 8.23],
                  [3.03, 4.83, 6.03, 7.23, 7.63],
                  [3.33, 4.63, 7.23, 6.73, 6.23],
                  [3.93, 3.03, 7.43, 9.23, 9.23]]) * units.degC
    smooth_t = smooth_gaussian(t, 3)
    # Reference values for n=3; compared to 4 decimal places.
    smooth_t_true = np.array([[2.8892, 3.7657, 6.2805, 6.8532, 5.3174],
                              [3.6852, 4.799, 6.0844, 6.7816, 7.7617],
                              [3.2762, 4.787, 6.117, 7.0792, 7.5181],
                              [3.4618, 4.6384, 6.886, 6.982, 6.6653],
                              [3.8115, 3.626, 7.1705, 8.8528, 8.9605]]) * units.degC
    assert_array_almost_equal(smooth_t, smooth_t_true, 4)
def test_smooth_window():
    """Test smooth_window with default configuration."""
    hght = [[5640., 5640., 5640., 5640., 5640.],
            [5684., 5676., 5666., 5659., 5651.],
            [5728., 5712., 5692., 5678., 5662.],
            [5772., 5748., 5718., 5697., 5673.],
            [5816., 5784., 5744., 5716., 5684.]] * units.m
    # Window picks only the four corner neighbours of each point.
    smoothed = smooth_window(hght, np.array([[1, 0, 1], [0, 0, 0], [1, 0, 1]]))
    truth = [[5640., 5640., 5640., 5640., 5640.],
             [5684., 5675., 5667.5, 5658.5, 5651.],
             [5728., 5710., 5695., 5677., 5662.],
             [5772., 5745., 5722.5, 5695.5, 5673.],
             [5816., 5784., 5744., 5716., 5684.]] * units.m
    assert_array_almost_equal(smoothed, truth)
def test_smooth_window_1d_dataarray():
    """Test smooth_window on 1D DataArray."""
    temperature = xr.DataArray(
        [37., 32., 34., 29., 28., 24., 26., 24., 27., 30.],
        dims=('time',),
        coords={'time': pd.date_range('2020-01-01', periods=10, freq='H')},
        attrs={'units': 'degF'})
    # normalize_weights=False keeps the explicit 1/3 weights as given.
    smoothed = smooth_window(temperature, window=np.ones(3) / 3, normalize_weights=False)
    truth = xr.DataArray(
        [37., 34.33333333, 31.66666667, 30.33333333, 27., 26., 24.66666667,
         25.66666667, 27., 30.] * units.degF,
        dims=('time',),
        coords={'time': pd.date_range('2020-01-01', periods=10, freq='H')}
    )
    xr.testing.assert_allclose(smoothed, truth)
def test_smooth_rectangular():
    """Test smooth_rectangular with default configuration."""
    hght = [[5640., 5640., 5640., 5640., 5640.],
            [5684., 5676., 5666., 5659., 5651.],
            [5728., 5712., 5692., 5678., 5662.],
            [5772., 5748., 5718., 5697., 5673.],
            [5816., 5784., 5744., 5716., 5684.]] * units.m
    # A 5x3 window: only the central row has a full footprint, so only
    # that row changes in the reference.
    smoothed = smooth_rectangular(hght, (5, 3))
    truth = [[5640., 5640., 5640., 5640., 5640.],
             [5684., 5676., 5666., 5659., 5651.],
             [5728., 5710.66667, 5694., 5677.33333, 5662.],
             [5772., 5748., 5718., 5697., 5673.],
             [5816., 5784., 5744., 5716., 5684.]] * units.m
    assert_array_almost_equal(smoothed, truth, 4)
def test_smooth_circular():
    """Test smooth_circular with default configuration."""
    hght = [[5640., 5640., 5640., 5640., 5640.],
            [5684., 5676., 5666., 5659., 5651.],
            [5728., 5712., 5692., 5678., 5662.],
            [5772., 5748., 5718., 5697., 5673.],
            [5816., 5784., 5744., 5716., 5684.]] * units.m
    # Radius 2 on a 5x5 grid: only the exact center point gets a full
    # circular footprint, so only it changes in the reference.
    smoothed = smooth_circular(hght, 2, 2)
    truth = [[5640., 5640., 5640., 5640., 5640.],
             [5684., 5676., 5666., 5659., 5651.],
             [5728., 5712., 5693.98817, 5678., 5662.],
             [5772., 5748., 5718., 5697., 5673.],
             [5816., 5784., 5744., 5716., 5684.]] * units.m
    assert_array_almost_equal(smoothed, truth, 4)
def test_smooth_window_with_bad_window():
    """Test smooth_window with a bad window size."""
    temperature = [37, 32, 34, 29, 28, 24, 26, 24, 27, 30] * units.degF
    # Even-sized windows are rejected with a descriptive error message.
    with pytest.raises(ValueError) as exc:
        smooth_window(temperature, np.ones(4))
    assert 'must be odd in all dimensions' in str(exc)
def test_altimeter_to_station_pressure_inhg():
    """Test the altimeter to station pressure function with inches of mercury."""
    altim = 29.8 * units.inHg
    elev = 500 * units.m
    res = altimeter_to_station_pressure(altim, elev)
    # Result is expected in hectopascals regardless of the inHg input.
    truth = 950.967 * units.hectopascal
    assert_almost_equal(res, truth, 3)
def test_altimeter_to_station_pressure_hpa():
    """Test the altimeter to station pressure function with hectopascals."""
    altim = 1013 * units.hectopascal
    elev = 500 * units.m
    res = altimeter_to_station_pressure(altim, elev)
    truth = 954.641 * units.hectopascal
    assert_almost_equal(res, truth, 3)
# NOTE(review): function name has a typo ("altimiter"); renaming would change
# the pytest test ID, so it is left as-is here.
def test_altimiter_to_sea_level_pressure_inhg():
    """Test the altimeter to sea level pressure function with inches of mercury."""
    altim = 29.8 * units.inHg
    elev = 500 * units.m
    temp = 30 * units.degC
    res = altimeter_to_sea_level_pressure(altim, elev, temp)
    truth = 1006.089 * units.hectopascal
    assert_almost_equal(res, truth, 3)
def test_altimeter_to_sea_level_pressure_hpa():
    """Test the altimeter to sea level pressure function with hectopascals."""
    altim = 1013 * units.hectopascal
    elev = 500 * units.m
    temp = 0 * units.degC
    res = altimeter_to_sea_level_pressure(altim, elev, temp)
    truth = 1016.246 * units.hectopascal
    assert_almost_equal(res, truth, 3)
| bsd-3-clause |
vshtanko/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"  # one color per iris class
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration

# Load data
iris = load_iris()

plot_idx = 1

# One template classifier per figure column; each is (re)trained for every
# feature pair in the loop below.
models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=n_estimators),
          ExtraTreesClassifier(n_estimators=n_estimators),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
    for model in models:
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target

        # Shuffle
        idx = np.arange(X.shape[0])
        np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
        X = X[idx]
        y = y[idx]

        # Standardize
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        X = (X - mean) / std

        # Train. Fit a clone so the template estimators in `models` are never
        # fitted in place. (Bug fix: the original created a clone but then
        # called `model.fit(X, y)`, discarding the clone and mutating the
        # shared template between iterations.)
        clf = clone(model)
        clf.fit(X, y)

        scores = clf.score(X, y)
        # Create a title for each column and the console by using str() and
        # slicing away useless parts of the string
        model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
        model_details = model_title
        if hasattr(clf, "estimators_"):
            model_details += " with {} estimators".format(len(clf.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)

        plt.subplot(3, 4, plot_idx)
        if plot_idx <= len(models):
            # Add a title at the top of each column
            plt.title(model_title)

        # Now plot the decision boundary using a fine mesh as input to a
        # filled contour plot
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))

        # Plot either a single DecisionTreeClassifier or alpha blend the
        # decision surfaces of the ensemble of classifiers
        if isinstance(clf, DecisionTreeClassifier):
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            cs = plt.contourf(xx, yy, Z, cmap=cmap)
        else:
            # Choose alpha blend level with respect to the number of estimators
            # that are in use (noting that AdaBoost can use fewer estimators
            # than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(clf.estimators_)
            for tree in clf.estimators_:
                Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)

        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a
        # black outline
        xx_coarser, yy_coarser = np.meshgrid(
            np.arange(x_min, x_max, plot_step_coarser),
            np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = clf.predict(
            np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser,
                                cmap=cmap, edgecolors="none")

        # Plot the training points, these are clustered together and have a
        # black outline
        for i, c in zip(xrange(n_classes), plot_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
                        cmap=cmap)

        plot_idx += 1  # move on to the next plot in sequence

plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")

plt.show()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_cairo.py | 10 | 19252 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
* varables lowerUpper
* functions underscore_separated
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys, warnings, gzip
import numpy as np
# Helper used in debug tracing: name of the calling function.
def _fn_name(): return sys._getframe(1).f_code.co_name

# Prefer cairocffi; fall back to the classic pycairo binding.
try:
    import cairocffi as cairo
except ImportError:
    try:
        import cairo
    except ImportError:
        raise ImportError("Cairo backend requires that cairocffi or pycairo is installed.")
    else:
        HAS_CAIRO_CFFI = False
else:
    HAS_CAIRO_CFFI = True

_version_required = (1,2,0)
if cairo.version_info < _version_required:
    raise ImportError ("Pycairo %d.%d.%d is installed\n"
                       "Pycairo %d.%d.%d or later is required"
                       % (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required

from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
     FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty

# Set to True to trace every renderer call on stdout.
_debug = False
#_debug = True

# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
    BYTE_FORMAT = 0 # BGRA
else:
    BYTE_FORMAT = 1 # ARGB
class ArrayWrapper:
    """Minimal ``array.array``-like facade over a numpy ndarray.

    cairocffi expects the buffer object it receives to provide the
    ``buffer_info()`` method and an ``itemsize`` attribute, exactly like
    the stdlib ``array.array`` type; this class supplies just that
    interface for an ndarray.
    """

    def __init__(self, myarray):
        # Hold a reference so the underlying buffer is kept alive for as
        # long as cairo may read from it.
        self._array = myarray
        # Raw address of the first element and total element count.
        self._data_ptr = myarray.ctypes.data
        self._n_items = myarray.size
        self.itemsize = myarray.itemsize

    def buffer_info(self):
        """Return ``(address, element_count)``, as array.array does."""
        return (self._data_ptr, self._n_items)
class RendererCairo(RendererBase):
    """Matplotlib renderer that draws through a cairo Context.

    Device y increases downward in cairo, so several methods compose an
    extra flip transform (scale(1, -1) + translate(0, height)).
    """

    # Map matplotlib's numeric and named font weights onto cairo's two
    # available weights (normal/bold).
    fontweights = {
        100          : cairo.FONT_WEIGHT_NORMAL,
        200          : cairo.FONT_WEIGHT_NORMAL,
        300          : cairo.FONT_WEIGHT_NORMAL,
        400          : cairo.FONT_WEIGHT_NORMAL,
        500          : cairo.FONT_WEIGHT_NORMAL,
        600          : cairo.FONT_WEIGHT_BOLD,
        700          : cairo.FONT_WEIGHT_BOLD,
        800          : cairo.FONT_WEIGHT_BOLD,
        900          : cairo.FONT_WEIGHT_BOLD,
        'ultralight' : cairo.FONT_WEIGHT_NORMAL,
        'light'      : cairo.FONT_WEIGHT_NORMAL,
        'normal'     : cairo.FONT_WEIGHT_NORMAL,
        'medium'     : cairo.FONT_WEIGHT_NORMAL,
        'semibold'   : cairo.FONT_WEIGHT_BOLD,
        'bold'       : cairo.FONT_WEIGHT_BOLD,
        'heavy'      : cairo.FONT_WEIGHT_BOLD,
        'ultrabold'  : cairo.FONT_WEIGHT_BOLD,
        'black'      : cairo.FONT_WEIGHT_BOLD,
    }
    # Map matplotlib font styles onto cairo slants.
    fontangles = {
        'italic'  : cairo.FONT_SLANT_ITALIC,
        'normal'  : cairo.FONT_SLANT_NORMAL,
        'oblique' : cairo.FONT_SLANT_OBLIQUE,
    }

    def __init__(self, dpi):
        """Create the renderer; `dpi` scales point sizes to pixels."""
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
        self.dpi = dpi
        self.gc = GraphicsContextCairo(renderer=self)
        # A throwaway 1x1 surface used only for text-extent measurement.
        self.text_ctx = cairo.Context(
            cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
        self.mathtext_parser = MathTextParser('Cairo')
        RendererBase.__init__(self)

    def set_ctx_from_surface(self, surface):
        # Rebind the shared graphics context to a new drawing surface.
        self.gc.ctx = cairo.Context(surface)

    def set_width_height(self, width, height):
        self.width = width
        self.height = height
        # Matrix that flips cairo's downward y-axis.
        self.matrix_flipy = cairo.Matrix(yy=-1, y0=self.height)
        # use matrix_flipy for ALL rendering?
        # - problem with text? - will need to switch matrix_flipy off, or do a
        #   font transform?

    def _fill_and_stroke(self, ctx, fill_c, alpha, alpha_overrides):
        """Fill the current path (if a fill color is given), then stroke it."""
        if fill_c is not None:
            ctx.save()
            # RGB fill, or forced alpha, uses the gc alpha; RGBA keeps its own.
            if len(fill_c) == 3 or alpha_overrides:
                ctx.set_source_rgba(fill_c[0], fill_c[1], fill_c[2], alpha)
            else:
                ctx.set_source_rgba(fill_c[0], fill_c[1], fill_c[2], fill_c[3])
            ctx.fill_preserve()
            ctx.restore()
        ctx.stroke()

    @staticmethod
    def convert_path(ctx, path, transform, clip=None):
        """Replay a matplotlib Path's segments as cairo path commands."""
        for points, code in path.iter_segments(transform, clip=clip):
            if code == Path.MOVETO:
                ctx.move_to(*points)
            elif code == Path.CLOSEPOLY:
                ctx.close_path()
            elif code == Path.LINETO:
                ctx.line_to(*points)
            elif code == Path.CURVE3:
                # Quadratic segment expressed as a degenerate cubic.
                ctx.curve_to(points[0], points[1],
                             points[0], points[1],
                             points[2], points[3])
            elif code == Path.CURVE4:
                ctx.curve_to(*points)

    def draw_path(self, gc, path, transform, rgbFace=None):
        ctx = gc.ctx

        # We'll clip the path to the actual rendering extents
        # if the path isn't filled.
        if rgbFace is None and gc.get_hatch() is None:
            clip = ctx.clip_extents()
        else:
            clip = None

        # Flip y to convert matplotlib coords into cairo device coords.
        transform = transform + \
            Affine2D().scale(1.0, -1.0).translate(0, self.height)

        ctx.new_path()
        self.convert_path(ctx, path, transform, clip)

        self._fill_and_stroke(ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())

    def draw_image(self, gc, x, y, im):
        # bbox - not currently used
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))

        # Reorder channels to cairo's native ARGB32 byte layout.
        if sys.byteorder == 'little':
            im = im[:, :, (2, 1, 0, 3)]
        else:
            im = im[:, :, (3, 0, 1, 2)]
        if HAS_CAIRO_CFFI:
            # cairocffi tries to use the buffer_info from array.array
            # that we replicate in ArrayWrapper and alternatively falls back
            # on ctypes to get a pointer to the numpy array. This works
            # correctly on a numpy array in python3 but not 2.7. We replicate
            # the array.array functionality here to get cross version support.
            imbuffer = ArrayWrapper(im.flatten())
        else:
            # py2cairo uses PyObject_AsWriteBuffer
            # to get a pointer to the numpy array this works correctly
            # on a regular numpy array but not on a memory view.
            # At the time of writing the latest release version of
            # py3cairo still does not support create_for_data
            imbuffer = im.flatten()
        surface = cairo.ImageSurface.create_for_data(imbuffer,
                                                     cairo.FORMAT_ARGB32,
                                                     im.shape[1],
                                                     im.shape[0],
                                                     im.shape[1]*4)
        ctx = gc.ctx
        # Convert the lower-left origin `y` into cairo's top-left origin.
        y = self.height - y - im.shape[0]

        ctx.save()
        ctx.set_source_surface(surface, float(x), float(y))
        if gc.get_alpha() != 1.0:
            ctx.paint_with_alpha(gc.get_alpha())
        else:
            ctx.paint()
        ctx.restore()

    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
        # Note: x,y are device/display coords, not user-coords, unlike other
        # draw_* methods
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))

        if ismath:
            self._draw_mathtext(gc, x, y, s, prop, angle)

        else:
            ctx = gc.ctx
            ctx.new_path()
            ctx.move_to(x, y)
            ctx.select_font_face(prop.get_name(),
                                 self.fontangles[prop.get_style()],
                                 self.fontweights[prop.get_weight()])

            size = prop.get_size_in_points() * self.dpi / 72.0

            ctx.save()
            if angle:
                ctx.rotate(-angle * np.pi / 180)
            ctx.set_font_size(size)

            # cairocffi wants unicode; py2cairo wants utf-8 bytes on py2.
            if HAS_CAIRO_CFFI:
                if not isinstance(s, six.text_type):
                    s = six.text_type(s)
            else:
                if not six.PY3 and isinstance(s, six.text_type):
                    s = s.encode("utf-8")

            ctx.show_text(s)
            ctx.restore()

    def _draw_mathtext(self, gc, x, y, s, prop, angle):
        """Render a mathtext string glyph-by-glyph plus its rectangles."""
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))

        ctx = gc.ctx
        width, height, descent, glyphs, rects = self.mathtext_parser.parse(
            s, self.dpi, prop)

        ctx.save()
        ctx.translate(x, y)
        if angle:
            ctx.rotate(-angle * np.pi / 180)

        for font, fontsize, s, ox, oy in glyphs:
            ctx.new_path()
            ctx.move_to(ox, oy)

            fontProp = ttfFontProperty(font)
            ctx.save()
            ctx.select_font_face(fontProp.name,
                                 self.fontangles[fontProp.style],
                                 self.fontweights[fontProp.weight])

            size = fontsize * self.dpi / 72.0
            ctx.set_font_size(size)
            if not six.PY3 and isinstance(s, six.text_type):
                s = s.encode("utf-8")
            ctx.show_text(s)
            ctx.restore()

        # Rectangles (e.g. fraction bars) are drawn in solid black.
        for ox, oy, w, h in rects:
            ctx.new_path()
            ctx.rectangle(ox, oy, w, h)
            ctx.set_source_rgb(0, 0, 0)
            ctx.fill_preserve()
        ctx.restore()

    def flipy(self):
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
        return True
        #return False # tried - all draw objects ok except text (and images?)
        # which comes out mirrored!

    def get_canvas_width_height(self):
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
        return self.width, self.height

    def get_text_width_height_descent(self, s, prop, ismath):
        """Measure `s` without drawing it, using the 1x1 scratch context."""
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
        if ismath:
            width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
                s, self.dpi, prop)
            return width, height, descent

        ctx = self.text_ctx
        ctx.save()
        ctx.select_font_face(prop.get_name(),
                             self.fontangles[prop.get_style()],
                             self.fontweights[prop.get_weight()])

        # Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
        # but if /96.0 is used the font is too small
        size = prop.get_size_in_points() * self.dpi / 72.0

        # problem - scale remembers last setting and font can become
        # enormous causing program to crash
        # save/restore prevents the problem
        ctx.set_font_size(size)

        y_bearing, w, h = ctx.text_extents(s)[1:4]
        ctx.restore()

        return w, h, h + y_bearing

    def new_gc(self):
        # Returns the single shared GraphicsContextCairo after pushing the
        # cairo state; GraphicsContextCairo.restore() pops it again.
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
        self.gc.ctx.save()
        self.gc._alpha = 1.0
        self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
        return self.gc

    def points_to_pixels(self, points):
        if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
        return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
    """GraphicsContext backed by a cairo Context (``self.ctx``).

    The single instance is shared by RendererCairo; RendererCairo.new_gc()
    saves the cairo state and :meth:`restore` pops it.
    """

    # matplotlib join/cap style names mapped onto cairo enums.
    _joind = {
        'bevel' : cairo.LINE_JOIN_BEVEL,
        'miter' : cairo.LINE_JOIN_MITER,
        'round' : cairo.LINE_JOIN_ROUND,
    }

    _capd = {
        'butt'       : cairo.LINE_CAP_BUTT,
        'projecting' : cairo.LINE_CAP_SQUARE,
        'round'      : cairo.LINE_CAP_ROUND,
    }

    def __init__(self, renderer):
        GraphicsContextBase.__init__(self)
        self.renderer = renderer

    def restore(self):
        """Pop the cairo state saved by RendererCairo.new_gc()."""
        self.ctx.restore()

    def set_alpha(self, alpha):
        GraphicsContextBase.set_alpha(self, alpha)
        _alpha = self.get_alpha()
        rgb = self._rgb
        # Forced alpha overrides any A channel carried by the color itself.
        if self.get_forced_alpha():
            self.ctx.set_source_rgba(rgb[0], rgb[1], rgb[2], _alpha)
        else:
            self.ctx.set_source_rgba(rgb[0], rgb[1], rgb[2], rgb[3])

    #def set_antialiased(self, b):
        # enable/disable anti-aliasing is not (yet) supported by Cairo

    def set_capstyle(self, cs):
        if cs in ('butt', 'round', 'projecting'):
            self._capstyle = cs
            self.ctx.set_line_cap(self._capd[cs])
        else:
            raise ValueError('Unrecognized cap style. Found %s' % cs)

    def set_clip_rectangle(self, rectangle):
        if not rectangle: return
        x,y,w,h = rectangle.bounds
        # pixel-aligned clip-regions are faster
        x,y,w,h = np.round(x), np.round(y), np.round(w), np.round(h)
        ctx = self.ctx
        ctx.new_path()
        # Flip y: cairo's origin is top-left, matplotlib's is bottom-left.
        ctx.rectangle(x, self.renderer.height - h - y, w, h)
        ctx.clip()

    def set_clip_path(self, path):
        if not path: return
        tpath, affine = path.get_transformed_path_and_affine()
        ctx = self.ctx
        ctx.new_path()
        affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
        RendererCairo.convert_path(ctx, tpath, affine)
        ctx.clip()

    def set_dashes(self, offset, dashes):
        self._dashes = offset, dashes
        # Bug fix: was `dashes == None`; identity comparison is the correct
        # (and safe, e.g. for array-valued `dashes`) way to test for None.
        if dashes is None:
            self.ctx.set_dash([], 0)  # switch dashes off
        else:
            self.ctx.set_dash(
                list(self.renderer.points_to_pixels(np.asarray(dashes))), offset)

    def set_foreground(self, fg, isRGBA=None):
        GraphicsContextBase.set_foreground(self, fg, isRGBA)
        if len(self._rgb) == 3:
            self.ctx.set_source_rgb(*self._rgb)
        else:
            self.ctx.set_source_rgba(*self._rgb)

    def set_graylevel(self, frac):
        GraphicsContextBase.set_graylevel(self, frac)
        if len(self._rgb) == 3:
            self.ctx.set_source_rgb(*self._rgb)
        else:
            self.ctx.set_source_rgba(*self._rgb)

    def get_rgb(self):
        """Return the current source color as an (r, g, b) tuple."""
        return self.ctx.get_source().get_rgba()[:3]

    def set_joinstyle(self, js):
        if js in ('miter', 'round', 'bevel'):
            self._joinstyle = js
            self.ctx.set_line_join(self._joind[js])
        else:
            raise ValueError('Unrecognized join style. Found %s' % js)

    def set_linewidth(self, w):
        self._linewidth = float(w)
        self.ctx.set_line_width(self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
    """
    Create a new figure manager instance
    """
    if _debug: print('%s()' % (_fn_name()))
    # An alternative Figure subclass may be supplied via the FigureClass kwarg.
    FigureClass = kwargs.pop('FigureClass', Figure)
    thisFig = FigureClass(*args, **kwargs)
    return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    canvas = FigureCanvasCairo(figure)
    manager = FigureManagerBase(canvas, num)
    return manager
class FigureCanvasCairo (FigureCanvasBase):
    """Canvas that renders a Figure to PNG/PDF/PS/SVG(Z) via cairo surfaces."""

    def print_png(self, fobj, *args, **kwargs):
        # PNG goes through an ImageSurface at the figure's own dpi.
        width, height = self.get_width_height()

        renderer = RendererCairo (self.figure.dpi)
        renderer.set_width_height (width, height)
        surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
        renderer.set_ctx_from_surface (surface)

        self.figure.draw (renderer)
        surface.write_to_png (fobj)

    def print_pdf(self, fobj, *args, **kwargs):
        return self._save(fobj, 'pdf', *args, **kwargs)

    def print_ps(self, fobj, *args, **kwargs):
        return self._save(fobj, 'ps', *args, **kwargs)

    def print_svg(self, fobj, *args, **kwargs):
        return self._save(fobj, 'svg', *args, **kwargs)

    def print_svgz(self, fobj, *args, **kwargs):
        return self._save(fobj, 'svgz', *args, **kwargs)

    def _save (self, fo, format, **kwargs):
        """Render the figure to a vector surface (PDF/PS/SVG/SVGZ).

        `fo` may be a filename or file-like object; `format` selects the
        cairo surface type. Vector output is always produced at 72 dpi.
        """
        # save PDF/PS/SVG
        orientation = kwargs.get('orientation', 'portrait')

        dpi = 72
        self.figure.dpi = dpi
        w_in, h_in = self.figure.get_size_inches()
        width_in_points, height_in_points = w_in * dpi, h_in * dpi

        if orientation == 'landscape':
            width_in_points, height_in_points = (height_in_points,
                                                 width_in_points)

        if format == 'ps':
            if not hasattr(cairo, 'PSSurface'):
                raise RuntimeError ('cairo has not been compiled with PS '
                                    'support enabled')
            surface = cairo.PSSurface (fo, width_in_points, height_in_points)
        elif format == 'pdf':
            if not hasattr(cairo, 'PDFSurface'):
                raise RuntimeError ('cairo has not been compiled with PDF '
                                    'support enabled')
            surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
        elif format in ('svg', 'svgz'):
            if not hasattr(cairo, 'SVGSurface'):
                raise RuntimeError ('cairo has not been compiled with SVG '
                                    'support enabled')
            if format == 'svgz':
                # Wrap the target in a gzip stream for compressed SVG.
                if is_string_like(fo):
                    fo = gzip.GzipFile(fo, 'wb')
                else:
                    fo = gzip.GzipFile(None, 'wb', fileobj=fo)
            surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
        else:
            warnings.warn ("unknown format: %s" % format)
            return

        # surface.set_dpi() can be used
        renderer = RendererCairo (self.figure.dpi)
        renderer.set_width_height (width_in_points, height_in_points)
        renderer.set_ctx_from_surface (surface)
        ctx = renderer.gc.ctx

        if orientation == 'landscape':
            # Rotate the whole page instead of emitting a landscape header.
            ctx.rotate (np.pi/2)
            ctx.translate (0, -height_in_points)
            # cairo/src/cairo_ps_surface.c
            # '%%Orientation: Portrait' is always written to the file header
            # '%%Orientation: Landscape' would possibly cause problems
            # since some printers would rotate again ?
            # TODO:
            # add portrait/landscape checkbox to FileChooser

        self.figure.draw (renderer)

        show_fig_border = False  # for testing figure orientation and scaling
        if show_fig_border:
            ctx.new_path()
            ctx.rectangle(0, 0, width_in_points, height_in_points)
            ctx.set_line_width(4.0)
            ctx.set_source_rgb(1,0,0)
            ctx.stroke()
            ctx.move_to(30,30)
            ctx.select_font_face ('sans-serif')
            ctx.set_font_size(20)
            ctx.show_text('Origin corner')

        ctx.show_page()
        surface.finish()
        if format == 'svgz':
            fo.close()


FigureCanvas = FigureCanvasCairo
| gpl-3.0 |
blackball/an-test6 | util/compare-runs.py | 1 | 3995 | #! /usr/bin/env python
import matplotlib
matplotlib.use('Agg')
from astrometry.util.fits import *
from astrometry.util.file import *
from pylab import *
from optparse import *
import os
from glob import glob
from numpy import *
def get_field(fieldname, m1, m2, nil, preproc=None):
    """Collect column `fieldname` from two match-table dicts, aligned by key.

    Returns two parallel lists (t1, t2) of column data from m1 and m2.
    A key present in only one dict contributes `nil` on the other side.
    `preproc`, if given, is applied to every column that is read.
    """
    t1, t2 = [], []
    # First pass: every key of m1, paired with m2's column when available.
    for key, table in m1.items():
        col1 = table.getcolumn(fieldname)
        if preproc is not None:
            col1 = preproc(col1)
        if key in m2:
            col2 = m2[key].getcolumn(fieldname)
            if preproc is not None:
                col2 = preproc(col2)
        else:
            col2 = nil
        t1.append(col1)
        t2.append(col2)
    # Second pass: keys that exist only in m2 get `nil` on the m1 side.
    for key, table in m2.items():
        if key in m1:
            continue
        col2 = table.getcolumn(fieldname)
        if preproc is not None:
            col2 = preproc(col2)
        t1.append(nil)
        t2.append(col2)
    return t1, t2
def get_scalar(fieldname, m1, m2, nil):
    """Like get_field, but pads missing columns with `nil` and returns the
    two sides hstacked into flat arrays of equal length."""
    cols1, cols2 = get_field(fieldname, m1, m2, None)
    padded1, padded2 = [], []
    for c1, c2 in zip(cols1, cols2):
        if c1 is None and c2 is None:
            # Neither side has data for this key: a single nil on each side.
            padded1.append(nil)
            padded2.append(nil)
            continue
        # A missing column is padded to the other side's length with nil.
        padded1.append(c1 if c1 is not None else [nil] * len(c2))
        padded2.append(c2 if c2 is not None else [nil] * len(c1))
    return hstack(padded1), hstack(padded2)
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements); run with python2.
    parser = OptionParser()
    opt,args = parser.parse_args()

    # Pass 1: cache each run directory's *.match tables into matches.pickle.
    for id1,d1 in enumerate(args):
        allm1 = os.path.join(d1, 'matches.pickle')
        if os.path.exists(allm1):
            continue
        print 'Caching', allm1
        matches = {}
        for fn in glob(d1 + '/*.match'):
            matches[fn.replace(d1+'/', '')] = fits_table(fn)
        pickle_to_file(matches, allm1)

    # Pass 2: compare every ordered pair of run directories (id2 > id1).
    for id1,d1 in enumerate(args):
        allm1 = os.path.join(d1, 'matches.pickle')
        m1 = unpickle_from_file(allm1)
        for id2,d2 in enumerate(args):
            if id2 <= id1:
                continue
            allm2 = os.path.join(d2, 'matches.pickle')
            m2 = unpickle_from_file(allm2)

            # CPU time.
            tz,tinf = 1e-3, 100.  # clip range for the log-log plot
            clf()
            t1,t2 = get_scalar('timeused', m1, m2, tinf)
            #plot(t1, t2, 'r.')
            loglog(t1, t2, 'r.')
            t1 = clip(t1, tz, tinf)
            t2 = clip(t2, tz, tinf)
            xlabel(d1 + ': CPU time (s)')
            ylabel(d2 + ': CPU time (s)')
            fn = 'cputime-%i-%i.png' % (id1, id2)
            print 'saving', fn
            plot([tz,tinf],[tz,tinf], 'k-')  # y = x reference line
            axis([tz,tinf,tz,tinf])
            savefig(fn)

            # N objs
            tinf = 1000.
            clf()
            def ppmax(x):
                return amax(x, axis=1)
            t1,t2 = get_field('fieldobjs', m1, m2, tinf, preproc=ppmax)
            t1 = array(list(flatten(t1)))
            t2 = array(list(flatten(t2)))
            #print 'nobjs: t1=', t1
            #print 't2=', t2
            dN = 50  # report fields whose object counts differ by more than dN
            I = logical_or((t1 - t2) > dN , (t1 - t2) < -dN)
            for i in flatnonzero(I):
                k1,v1 = (m1.items())[i]
                k2,v2 = (m2.items())[i]
                print 'Nmatch changed:'
                print ' %s: %i' % (k1, t1[i])
                print ' %s: %i' % (k2, t2[i])
            plot(t1, t2, 'r.')
            xlabel(d1 + ': N objects examined')
            ylabel(d2 + ': N objects examined')
            fn = 'nobjs-%i-%i.png' % (id1, id2)
            print 'saving', fn
            plot([0,100],[0,100], 'k-')
            axis([0,100,0,100])
            savefig(fn)

            # N matches
            tz,tinf = 0.,300.
            clf()
            t1,t2 = get_scalar('nmatch', m1, m2, tinf)
            print 'm1,m2,t1,t2', len(m1),len(m2), len(t1),len(t2)
            I = (t2 < 50)  # list fields with suspiciously few matches in run 2
            for i in flatnonzero(I):
                k,v = (m2.items())[i]
                print 't2 file %s, nmatches %i' % (k, t2[i])
            t1 = clip(t1, tz, tinf)
            t2 = clip(t2, tz, tinf)
            plot(t1, t2, 'r.')
            xlabel(d1 + ': N objects matched')
            ylabel(d2 + ': N objects matched')
            fn = 'nmatch-%i-%i.png' % (id1, id2)
            print 'saving', fn
            plot([tz,tinf],[tz,tinf], 'k-')
            axis([tz,tinf,tz,tinf])
            savefig(fn)

            # Index #
            tinf = 1000.
            clf()
            t1,t2 = get_scalar('indexid', m1, m2, tinf)
            plot(t1, t2, 'r.')
            xlabel(d1 + ': Index number')
            ylabel(d2 + ': Index number')
            print 't1: ', unique(t1)
            print 't2: ', unique(t2)
            axis([min(t1)-1, max(t1)+1, min(t2)-1, max(t2)+1])
            xticks(range(min(t1), max(t1)))
            yticks(range(min(t2), max(t2)))
            fn = 'indexid-%i-%i.png' % (id1, id2)
            print 'saving', fn
            savefig(fn)
noferini/AliceO2 | Analysis/Scripts/update_ccdb.py | 3 | 5974 | #!/usr/bin/env python3
# Copyright CERN and copyright holders of ALICE O2. This software is
# distributed under the terms of the GNU General Public License v3 (GPL
# Version 3), copied verbatim in the file "COPYING".
#
# See http://alice-o2.web.cern.ch/license for full licensing information.
#
# In applying this license CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""
Script to update the CCDB with timestamp non-overlapping objects.
If an object is found in the range specified, the object is split into two.
If the requested range was overlapping three objects are uploaded on CCDB:
1) latest object with requested timestamp validity
2) old object with validity [old_lower_validity-requested_lower_bound]
3) old object with validity [requested_upper_bound, old_upper_validity]
Author: Nicolo' Jacazio on 2020-06-22
TODO add support for 3 files update
"""
import subprocess
from datetime import datetime
import matplotlib.pyplot as plt
import argparse
def convert_timestamp(ts):
    """
    Convert a CCDB timestamp, expressed in milliseconds since the epoch,
    into a human readable UTC string such as '2020-06-22 12:34:56'.
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; build an
    # aware UTC datetime instead -- the formatted output is identical.
    from datetime import timezone
    return datetime.fromtimestamp(ts / 1000, tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
def get_ccdb_obj(path, timestamp, dest="/tmp/", verbose=0):
    """
    Download the CCDB object at 'path' valid for 'timestamp' into 'dest'
    via the o2-ccdb-downloadccdbfile command-line tool.
    """
    if verbose:
        print("Getting obj", path, "with timestamp",
              timestamp, convert_timestamp(timestamp))
    # Assemble the command from its pieces, then split into argv tokens.
    cmd = " ".join(["o2-ccdb-downloadccdbfile",
                    "--path", str(path),
                    "--dest", str(dest),
                    "--timestamp", str(timestamp)])
    subprocess.run(cmd.split())
def get_ccdb_obj_validity(path, dest="/tmp/", verbose=0):
    """
    Gets the timestamp validity for an object downloaded from CCDB.
    Returns a list with the initial and end timestamps.
    """
    # Inspect the snapshot previously downloaded by get_ccdb_obj().
    cmd = f"o2-ccdb-inspectccdbfile {dest}{path}/snapshot.root"
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    # stdout arrives as bytes; normalize to a list of text lines.
    # stderr is None here because it was not redirected to a PIPE.
    output = output.decode("utf-8").split("\n")
    error = error.decode("utf-8").split("\n") if error is not None else error
    if verbose:
        print("out:")
        print(*output, "\n")
        print("err:")
        print(error)
    # The inspector prints two lines starting with 'Valid-' whose last
    # whitespace-separated token is the timestamp in milliseconds.
    result = list(filter(lambda x: x.startswith('Valid-'), output))
    ValidFrom = result[0].split()
    ValidUntil = result[1].split()
    return [int(ValidFrom[-1]), int(ValidUntil[-1])]
def upload_ccdb_obj(path, timestamp_from, timestamp_until, dest="/tmp/", meta=""):
    """
    Uploads a new object to CCDB in the 'path' using the validity timestamp specified.

    path: CCDB path of the object; its last component is used as the key.
    timestamp_from/timestamp_until: validity range in milliseconds.
    dest: local directory holding the previously downloaded snapshot.
    meta: metadata string forwarded to o2-ccdb-upload's --meta option.
    """
    print("Uploading obj", path, "with timestamp", [timestamp_from, timestamp_until],
          convert_timestamp(timestamp_from), convert_timestamp(timestamp_until))
    key = path.split("/")[-1]
    # BUG FIX: the command was built as a single string and then .split();
    # that passed literal '"' characters around the meta value and broke
    # any meta string containing spaces (shell quoting is not honoured by
    # str.split). Build the argv list directly instead.
    cmd = ["o2-ccdb-upload",
           "-f", f"{dest}{path}/snapshot.root",
           "--key", key,
           "--path", path,
           "--starttimestamp", str(timestamp_from),
           "--endtimestamp", str(timestamp_until),
           "--meta", meta]
    subprocess.run(cmd)
def main(path, timestamp_from, timestamp_until, verbose=0, show=False):
    """
    Used to upload a new object to CCDB in 'path' valid from 'timestamp_from' to 'timestamp_until'
    Gets the object from CCDB specified in 'path' and for 'timestamp_from-1'
    Gets the object from CCDB specified in 'path' and for 'timestamp_until+1'
    If required plots the situation before and after the update
    """
    # Fetch the neighbouring objects just outside the requested window so
    # we can detect whether their validity overlaps the new range.
    get_ccdb_obj(path, timestamp_from-1)
    val_before = get_ccdb_obj_validity(path, verbose=verbose)
    get_ccdb_obj(path, timestamp_until+1)
    val_after = get_ccdb_obj_validity(path, verbose=verbose)
    overlap_before = val_before[1] > timestamp_from
    overlap_after = val_after[0] < timestamp_until
    if verbose:
        if overlap_before:
            print("Previous objects overalps")
        if overlap_after:
            print("Next objects overalps")
    # Trim neighbouring validities so they no longer overlap the new object
    # (see module docstring: overlapping objects are split on upload).
    trimmed_before = val_before if not overlap_before else [
        val_before[0], timestamp_from - 1]
    trimmed_after = val_after if not overlap_after else [
        timestamp_until+1, val_after[1]]
    if show:
        fig, ax = plt.subplots()
        fig
        def bef_af(v, y):
            # Build x/y coordinate lists drawing a validity band at height y.
            return [v[0] - 1] + v + [v[1] + 1], [0, y, y, 0]
        # Always show the raw before/after ranges; the trimmed ranges are
        # kept behind a disabled flag for debugging.
        if True:
            ax.plot(*bef_af(val_before, 0.95), label='before')
            ax.plot(*bef_af(val_after, 1.05), label='after')
        if False:
            ax.plot(*bef_af(trimmed_before, 0.9), label='trimmed before')
            ax.plot(*bef_af(trimmed_after, 1.1), label='trimmed after')
        ax.plot(*bef_af([timestamp_from, timestamp_until], 1), label='object')
        # Pad the x axis by a fixed margin (milliseconds) around the window.
        xlim = 10000000
        plt.xlim([timestamp_from-xlim, timestamp_until+xlim])
        plt.ylim(0, 2)
        plt.xlabel('Timestamp')
        plt.ylabel('Validity')
        plt.legend()
        plt.show()
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the CCDB update.
    parser = argparse.ArgumentParser(
        description="Uploads timestamp non overlapping objects to CCDB."
        "Basic example: `./update_ccdb.py qc/TOF/TOFTaskCompressed/hDiagnostic 1588956517161 1588986517161 --show --verbose`")
    parser.add_argument('path', metavar='path_to_object', type=str,
                        help='Path of the object in the CCDB repository')
    parser.add_argument('timestamp_from', metavar='from_timestamp', type=int,
                        help='Timestamp of start for the new object to use')
    parser.add_argument('timestamp_until', metavar='until_timestamp', type=int,
                        help='Timestamp of stop for the new object to use')
    # Counting flags: repeating -v/-s increases the stored value.
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('--show', '-s', action='count', default=0)
    args = parser.parse_args()
    main(path=args.path,
         timestamp_from=args.timestamp_from,
         timestamp_until=args.timestamp_until,
         verbose=args.verbose,
         show=args.show)
| gpl-3.0 |
KevinHock/rtdpyt | profiling/test_projects/flaskbb_lite_2/mprof.py | 3 | 17606 | #!/usr/bin/env python
import glob
import os
import os.path as osp
import sys
import re
import copy
import time
import math
from optparse import OptionParser, OptionValueError
import memory_profiler as mp
# Sub-commands accepted by the mprof command-line tool (see get_action()).
ALL_ACTIONS = ("run", "rm", "clean", "list", "plot")

# Help text printed by print_usage() after the usage line.
help_msg = """
Available commands:
run run a given command or python file
rm remove a given file generated by mprof
clean clean the current directory from files created by mprof
list display existing profiles, with indices
plot plot memory consumption generated by mprof run
Type mprof <command> --help for usage help on a specific command.
For example, mprof plot --help will list all plotting options.
"""
def print_usage():
    """Print the mprof usage banner followed by the command summary."""
    prog = osp.basename(sys.argv[0])
    print("Usage: %s <command> <options> <arguments>" % prog)
    print(help_msg)
def get_action():
    """Pop first argument, check it is a valid action."""
    # Bail out with the usage text unless a known sub-command was given.
    have_action = len(sys.argv) > 1 and sys.argv[1] in ALL_ACTIONS
    if not have_action:
        print_usage()
        sys.exit(1)
    return sys.argv.pop(1)
def get_profile_filenames(args):
    """Return list of profile filenames.

    Parameters
    ==========
    args (list or str)
        list of filename or integer, or the string "all". An integer is
        the index of the profile in the list of existing profiles. 0 is
        the oldest, -1 in the more recent.
        Non-existing files cause a ValueError exception to be thrown.

    Returns
    =======
    filenames (list)
        list of existing memory profile filenames. It is guaranteed
        that an given file name will not appear twice in this list.
    """
    profiles = glob.glob("mprofile_??????????????.dat")
    profiles.sort()

    # BUG FIX: was `args is "all"` -- identity comparison against a string
    # literal only works through CPython's interning accident and raises a
    # SyntaxWarning on Python 3.8+; equality is the intended test.
    if args == "all":
        filenames = copy.copy(profiles)
    else:
        filenames = []
        for arg in args:
            if arg == "--":  # workaround
                continue
            try:
                index = int(arg)
            except ValueError:
                index = None
            if index is not None:
                try:
                    filename = profiles[index]
                except IndexError:
                    raise ValueError("Invalid index (non-existing file): %s" % arg)
                if filename not in filenames:
                    filenames.append(filename)
            else:
                if osp.isfile(arg):
                    if arg not in filenames:
                        filenames.append(arg)
                elif osp.isdir(arg):
                    raise ValueError("Path %s is a directory" % arg)
                else:
                    raise ValueError("File %s not found" % arg)

    # Add timestamp files, if any
    for filename in reversed(filenames):
        parts = osp.splitext(filename)
        timestamp_file = parts[0] + "_ts" + parts[1]
        if osp.isfile(timestamp_file) and timestamp_file not in filenames:
            filenames.append(timestamp_file)

    return filenames
def list_action():
    """Display existing profiles, with indices."""
    parser = OptionParser(version=mp.__version__)
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args()
    if len(args) > 0:
        print("This command takes no argument.")
        sys.exit(1)

    filenames = get_profile_filenames("all")
    for n, filename in enumerate(filenames):
        # The timestamp is encoded in the filename: mprofile_<YYYYMMDDhhmmss>.dat
        ts = osp.splitext(filename)[0].split('_')[-1]
        # NOTE(review): the format string prints the literal "(unknown)"
        # where the filename presumably belongs; `filename` is passed to
        # .format() but never referenced -- confirm whether "{filename}"
        # was intended here.
        print("{index} (unknown) {hour}:{min}:{sec} {day}/{month}/{year}"
              .format(index=n, filename=filename,
                      year=ts[:4], month=ts[4:6], day=ts[6:8],
                      hour=ts[8:10], min=ts[10:12], sec=ts[12:14]))
def rm_action():
    """TODO: merge with clean_action (@pgervais)"""
    parser = OptionParser(version=mp.__version__)
    parser.disable_interspersed_args()
    parser.add_option("--dry-run", dest="dry_run", default=False,
                      action="store_true",
                      help="""Show what will be done, without actually doing it.""")
    (options, args) = parser.parse_args()

    if not args:
        print("A profile to remove must be provided (number or filename)")
        sys.exit(1)

    # Resolve indices/filenames into concrete profile files, then either
    # list them (dry run) or delete them.
    targets = get_profile_filenames(args)
    if options.dry_run:
        print("Files to be removed: ")
        for target in targets:
            print(target)
    else:
        for target in targets:
            os.remove(target)
def clean_action():
    """Remove every profile file in current directory."""
    parser = OptionParser(version=mp.__version__)
    parser.disable_interspersed_args()
    parser.add_option("--dry-run", dest="dry_run", default=False,
                      action="store_true",
                      help="""Show what will be done, without actually doing it.""")
    (options, args) = parser.parse_args()

    if args:
        print("This command takes no argument.")
        sys.exit(1)

    # Every profile in the working directory is a removal candidate.
    targets = get_profile_filenames("all")
    if options.dry_run:
        print("Files to be removed: ")
        for target in targets:
            print(target)
    else:
        for target in targets:
            os.remove(target)
def get_cmd_line(args):
    """Given a set or arguments, compute command-line.

    Tokens containing spaces or tabs are wrapped in single quotes before
    joining, so the result can be replayed in a shell.
    """
    quoted = []
    for token in args:
        if ' ' in token or '\t' in token:
            quoted.append("'" + token + "'")
        else:
            quoted.append(token)
    return ' '.join(quoted)
def run_action():
    """Entry point for `mprof run`: execute a program and sample its memory."""
    import time, subprocess
    parser = OptionParser(version=mp.__version__)
    parser.disable_interspersed_args()
    parser.add_option("--python", dest="python", default=False,
                      action="store_true",
                      help="""Activates extra features when the profiled executable is
                      a Python program (currently: function timestamping.)""")
    parser.add_option("--nopython", dest="nopython", default=False,
                      action="store_true",
                      help="""Disables extra features when the profiled executable is
                      a Python program (currently: function timestamping.)""")
    parser.add_option("--interval", "-T", dest="interval", default="0.1",
                      type="float", action="store",
                      help="Sampling period (in seconds), defaults to 0.1")
    parser.add_option("--include-children", "-C", dest="include_children",
                      default=False, action="store_true",
                      help="""Monitors forked processes as well (sum up all process memory)""")
    (options, args) = parser.parse_args()
    if len(args) == 0:
        print("A program to run must be provided. Use -h for help")
        sys.exit(1)

    print("{1}: Sampling memory every {0.interval}s".format(
        options, osp.basename(sys.argv[0])))

    ## Output results in a file called "mprofile_<YYYYMMDDhhmmss>.dat" (where
    ## <YYYYMMDDhhmmss> is the date-time of the program start) in the current
    ## directory. This file contains the process memory consumption, in Mb (one
    ## value per line). Memory is sampled twice each second."""
    suffix = time.strftime("%Y%m%d%H%M%S", time.localtime())
    mprofile_output = "mprofile_%s.dat" % suffix

    # .. TODO: more than one script as argument ? ..
    # A *.py target implies Python mode unless explicitly disabled.
    if args[0].endswith('.py') and not options.nopython:
        options.python = True
    if options.python:
        print("running as a Python program...")
        if not args[0].startswith("python"):
            args.insert(0, "python")
        cmd_line = get_cmd_line(args)
        # Inject memory_profiler's timestamping wrapper between the
        # interpreter and the profiled script.
        args[1:1] = ("-m", "memory_profiler", "--timestamp",
                     "-o", mprofile_output)
        p = subprocess.Popen(args)
    else:
        cmd_line = get_cmd_line(args)
        p = subprocess.Popen(args)

    # Record the command line, then sample the child's memory until it exits.
    with open(mprofile_output, "a") as f:
        f.write("CMDLINE {0}\n".format(cmd_line))
        mp.memory_usage(proc=p, interval=options.interval, timestamps=True,
                        include_children=options.include_children, stream=f)
def add_brackets(xloc, yloc, xshift=0, color="r", label=None, options=None):
    """Add two brackets on the memory line plot.

    This function uses the current figure.

    Parameters
    ==========
    xloc: tuple with 2 values
        brackets location (on horizontal axis).
    yloc: tuple with 2 values
        brackets location (on vertical axis)
    xshift: float
        value to subtract to xloc.
    color: str
        matplotlib color code used for both bracket lines.
    label: str
        legend label attached to the opening bracket.
    options:
        parsed plot options; only ``options.xlim`` is consulted here.
    """
    try:
        import pylab as pl
    except ImportError:
        print("matplotlib is needed for plotting.")
        sys.exit(1)
    # Bracket size scales with the current axis limits.
    height_ratio = 20.
    vsize = (pl.ylim()[1] - pl.ylim()[0]) / height_ratio
    hsize = (pl.xlim()[1] - pl.xlim()[0]) / (3.*height_ratio)

    bracket_x = pl.asarray([hsize, 0, 0, hsize])
    bracket_y = pl.asarray([vsize, vsize, -vsize, -vsize])

    # Matplotlib workaround: labels starting with _ aren't displayed
    if label[0] == '_':
        label = ' ' + label

    # Draw each bracket only when it falls inside the requested x-window.
    if options.xlim is None or options.xlim[0] <= (xloc[0] - xshift) <= options.xlim[1]:
        pl.plot(bracket_x + xloc[0] - xshift, bracket_y + yloc[0],
                "-" + color, linewidth=2, label=label)
    if options.xlim is None or options.xlim[0] <= (xloc[1] - xshift) <= options.xlim[1]:
        pl.plot(-bracket_x + xloc[1] - xshift, bracket_y + yloc[1],
                "-" + color, linewidth=2 )

    # TODO: use matplotlib.patches.Polygon to draw a colored background for
    # each function.

    # with maplotlib 1.2, use matplotlib.path.Path to create proper markers
    # see http://matplotlib.org/examples/pylab_examples/marker_path.html
    # This works with matplotlib 0.99.1
    ## pl.plot(xloc[0], yloc[0], "<"+color, markersize=7, label=label)
    ## pl.plot(xloc[1], yloc[1], ">"+color, markersize=7)
def read_mprofile_file(filename):
    """Read an mprofile file and return its content.

    Returns
    =======
    content: dict
        Keys:

        - "mem_usage": (list) memory usage values, in MiB
        - "timestamp": (list) time instant for each memory usage value, in
           second
        - "func_timestamp": (dict) for each function, timestamps and memory
           usage upon entering and exiting.
        - 'cmd_line': (str) command-line ran for this profile.
    """
    func_ts = {}
    mem_usage = []
    timestamp = []
    cmd_line = None
    # FIX: use a context manager so the file handle is released even when
    # parsing raises (the original leaked it on the ValueError below).
    with open(filename, "r") as f:
        for l in f:
            if l == '\n':
                raise ValueError('Sampling time was too short')
            field, value = l.split(' ', 1)
            if field == "MEM":
                # mem, timestamp
                values = value.split(' ')
                mem_usage.append(float(values[0]))
                timestamp.append(float(values[1]))
            elif field == "FUNC":
                # function name, memory/timestamp at entry and at exit
                values = value.split(' ')
                f_name, mem_start, start, mem_end, end = values[:5]
                ts = func_ts.get(f_name, [])
                ts.append([float(start), float(end),
                           float(mem_start), float(mem_end)])
                func_ts[f_name] = ts
            elif field == "CMDLINE":
                cmd_line = value
            else:
                pass  # silently skip unknown record types

    return {"mem_usage": mem_usage, "timestamp": timestamp,
            "func_timestamp": func_ts, 'filename': filename,
            'cmd_line': cmd_line}
def plot_file(filename, index=0, timestamps=True, options=None):
    """Plot the memory profile stored in `filename` on the current figure.

    Parameters
    ==========
    filename: str
        profile data file produced by `mprof run`.
    index: int
        index of this file among the plotted ones; selects the line color.
    timestamps: bool
        when True, draw per-function brackets and peak-memory guide lines.
    options:
        parsed plot options, forwarded to add_brackets().

    Returns
    =======
    mprofile: dict
        parsed profile as returned by read_mprofile_file().
    """
    try:
        import pylab as pl
    except ImportError:
        print("matplotlib is needed for plotting.")
        sys.exit(1)
    import numpy as np  # pylab requires numpy anyway
    mprofile = read_mprofile_file(filename)

    if len(mprofile['timestamp']) == 0:
        print('** No memory usage values have been found in the profile '
              'file.**\nFile path: {0}\n'
              'File may be empty or invalid.\n'
              'It can be deleted with "mprof rm {0}"'.format(
            mprofile['filename']))
        sys.exit(0)

    # Merge function timestamps and memory usage together
    ts = mprofile['func_timestamp']
    t = mprofile['timestamp']
    mem = mprofile['mem_usage']

    if len(ts) > 0:
        for values in ts.values():
            for v in values:
                # v = [t_enter, t_exit, mem_enter, mem_exit]
                t.extend(v[:2])
                mem.extend(v[2:4])

    mem = np.asarray(mem)
    t = np.asarray(t)
    # Re-sort chronologically after merging the function samples in.
    ind = t.argsort()
    mem = mem[ind]
    t = t[ind]

    # Plot curves
    global_start = float(t[0])
    t = t - global_start  # plot times relative to the run start

    max_mem = mem.max()
    max_mem_ind = mem.argmax()

    all_colors=("c", "y", "g", "r", "b")
    mem_line_colors=("k", "b", "r", "g", "c", "y", "m")

    # Legend label: wall-clock start time with millisecond precision.
    mem_line_label = time.strftime("%d / %m / %Y - start at %H:%M:%S",
                                   time.localtime(global_start)) \
                     + ".{0:03d}".format(int(round(math.modf(global_start)[0]*1000)))

    pl.plot(t, mem, "+-" + mem_line_colors[index % len(mem_line_colors)],
            label=mem_line_label)

    bottom, top = pl.ylim()
    bottom += 0.001
    top -= 0.001

    # plot timestamps, if any
    if len(ts) > 0 and timestamps:
        func_num = 0
        for f, exec_ts in ts.items():
            for execution in exec_ts:
                add_brackets(execution[:2], execution[2:], xshift=global_start,
                             color= all_colors[func_num % len(all_colors)],
                             label=f.split(".")[-1]
                             + " %.3fs" % (execution[1] - execution[0]), options=options)
            func_num += 1

    if timestamps:
        # Dashed red guides marking the global memory peak.
        pl.hlines(max_mem,
                  pl.xlim()[0] + 0.001, pl.xlim()[1] - 0.001,
                  colors="r", linestyles="--")
        pl.vlines(t[max_mem_ind], bottom, top,
                  colors="r", linestyles="--")
    return mprofile
def plot_action():
    """Entry point for `mprof plot`: render one or more recorded profiles."""
    def get_comma_separated_args(option, opt, value, parser):
        # optparse callback turning "--window a,b" into [float(a), float(b)].
        # NOTE(review): the bare `except:` also swallows KeyboardInterrupt;
        # narrowing to ValueError looks safe -- confirm before changing.
        try:
            newvalue = [float(x) for x in value.split(',')]
        except:
            raise OptionValueError("'%s' option must contain two numbers separated with a comma" % value)
        if len(newvalue) != 2:
            raise OptionValueError("'%s' option must contain two numbers separated with a comma" % value)
        setattr(parser.values, option.dest, newvalue)

    try:
        import pylab as pl
    except ImportError:
        print("matplotlib is needed for plotting.")
        sys.exit(1)

    parser = OptionParser(version=mp.__version__)
    parser.disable_interspersed_args()
    parser.add_option("--title", "-t", dest="title", default=None,
                      type="str", action="store",
                      help="String shown as plot title")
    parser.add_option("--no-function-ts", "-n", dest="no_timestamps",
                      default=False, action="store_true",
                      help="Do not display function timestamps on plot.")
    parser.add_option("--output", "-o",
                      help="Save plot to file instead of displaying it.")
    parser.add_option("--window", "-w", dest="xlim",
                      type="str", action="callback",
                      callback=get_comma_separated_args,
                      help="Plot a time-subset of the data. E.g. to plot between 0 and 20.5 seconds: --window 0,20.5")
    (options, args) = parser.parse_args()

    profiles = glob.glob("mprofile_??????????????.dat")
    profiles.sort()

    # With no argument, plot the most recent profile; otherwise resolve
    # each argument as an existing path or as an index into `profiles`.
    if len(args) == 0:
        if len(profiles) == 0:
            print("No input file found. \nThis program looks for "
                  "mprofile_*.dat files, generated by the "
                  "'mprof run' command.")
            sys.exit(-1)
        print("Using last profile data.")
        filenames = [profiles[-1]]
    else:
        filenames = []
        for arg in args:
            if osp.exists(arg):
                if not arg in filenames:
                    filenames.append(arg)
            else:
                try:
                    n = int(arg)
                    if not profiles[n] in filenames:
                        filenames.append(profiles[n])
                except ValueError:
                    print("Input file not found: " + arg)
    if not len(filenames):
        print("No files found from given input.")
        sys.exit(-1)

    fig = pl.figure(figsize=(14, 6), dpi=90)
    ax = fig.add_axes([0.1, 0.1, 0.6, 0.75])
    if options.xlim is not None:
        pl.xlim(options.xlim[0], options.xlim[1])

    # Function brackets are only legible when plotting a single profile.
    if len(filenames) > 1 or options.no_timestamps:
        timestamps = False
    else:
        timestamps = True
    for n, filename in enumerate(filenames):
        mprofile = plot_file(filename, index=n, timestamps=timestamps, options=options)
    pl.xlabel("time (in seconds)")
    pl.ylabel("memory used (in MiB)")

    if options.title is None and len(filenames) == 1:
        pl.title(mprofile['cmd_line'])
    else:
        if options.title is not None:
            pl.title(options.title)

    # place legend within the plot, make partially transparent in
    # case it obscures part of the lineplot
    leg = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    leg.get_frame().set_alpha(0.5)
    pl.grid()

    if options.output:
        pl.savefig(options.output)
    else:
        pl.show()
if __name__ == "__main__":
    # Workaround for optparse limitation: insert -- before first negative
    # number found.
    negint = re.compile("-[0-9]+")
    for n, arg in enumerate(sys.argv):
        if negint.match(arg):
            sys.argv.insert(n, "--")
            break

    # Dispatch table: each mprof sub-command maps to its handler function;
    # get_action() validates and pops the command from sys.argv.
    actions = {"rm": rm_action,
               "clean": clean_action,
               "list": list_action,
               "run": run_action,
               "plot": plot_action}
    actions[get_action()]()
| gpl-2.0 |
tswast/google-cloud-python | securitycenter/docs/conf.py | 2 | 11946 | # -*- coding: utf-8 -*-
#
# google-cloud-securitycenter documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGLEOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-securitycenter"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-securitycenter-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-securitycenter.tex",
u"google-cloud-securitycenter Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-securitycenter",
u"google-cloud-securitycenter Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One Texinfo output target: (source start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
    (
        master_doc,
        "google-cloud-securitycenter",
        u"google-cloud-securitycenter Documentation",
        author,
        "google-cloud-securitycenter",
        # NOTE(review): "{metadata.shortName}" looks like an unsubstituted
        # template placeholder from the GAPIC generator -- confirm and
        # replace with the actual service name.
        "GAPIC library for the {metadata.shortName} v1 service",
        "APIs",
    )
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/stable/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.