repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
biokit/biokit | biokit/viz/heatmap.py | 1 | 13458 | """Heatmap and dendograms"""
import matplotlib
import pylab
import scipy.cluster.hierarchy as hierarchy
import scipy.spatial.distance as distance
import numpy as np # get rid of this dependence
import easydev
import colormap
from biokit.viz.linkage import Linkage
__all__ = ['Heatmap']
def get_heatmap_df():
    """Return a small 4x4 example dataframe used in tests and demos."""
    import pandas as pd
    # Keep the literal values untouched so column dtypes stay the same
    # (A is integer, B/C/D are floats).
    values = {
        'A': [1, 0, 1, 1],
        'B': [.9, 0.1, .6, 1],
        'C': [.5, .2, 0, 1],
        'D': [.5, .2, 0, 1],
    }
    return pd.DataFrame(values)
#def heatmap(data, *args, **kargs):
# """alias to Heatmap class"""
# h = Heatmap(data, *args, **kargs)
# h.plot()
# return h
class Heatmap(Linkage):
    """Heatmap and dendograms of an input matrix

    A heat map is an image representation of a matrix with a
    dendrogram added to the left side and to the top. Typically,
    reordering of the rows and columns according to some set of values
    (row or column means) within the restrictions imposed by the
    dendrogram is carried out.

    .. plot::
        :include-source:
        :width: 50%

        from biokit.viz import heatmap
        df = heatmap.get_heatmap_df()
        h = heatmap.Heatmap(df)
        h.plot()

    .. warning:: in progress
    """
    def __init__(self, data=None, row_method='complete', column_method='complete',
                 row_metric='euclidean', column_metric='euclidean',
                 cmap='yellow_black_blue',
                 col_side_colors=None, row_side_colors=None,
                 verbose=True
                 ):
        """.. rubric:: constructor

        :param data: a dataframe or possibly a numpy matrix.
        :param row_method: linkage method for the row dendrogram.
        :param column_method: linkage method for the column dendrogram.
        :param row_metric: distance metric for the row dendrogram.
        :param column_metric: distance metric for the column dendrogram.
        :param cmap: colormap name understood by the :mod:`colormap` package.
        :param col_side_colors: colors for the optional column category bar.
        :param row_side_colors: colors for the optional row category bar.
        :param verbose: print a hint when no data is provided.

        .. todo:: if row_method id none, no ordering in the dendogram
        """
        # should be a copy since it may be reshuffled ?
        try:
            if data is None and verbose is True:
                print("No data provided, please fill the `df` attribute manually")
            else:
                # copy so that plot(), which reorders self._df in place,
                # does not mutate the caller's object
                self._df = data.copy()
        except AttributeError as err:
            # anything without a copy() method (i.e. neither a DataFrame
            # nor a numpy array/matrix) ends up here
            print("input must be a pandas data frame or numpy matrix")
            raise(err)
        self._row_method = row_method
        self._column_method = column_method
        self._column_metric = column_metric
        self._row_metric = row_metric
        # some default parameters
        # criterion handed to scipy.cluster.hierarchy.fcluster() in plot()
        self.cluster_criterion = 'distance'
        self.params = easydev.AttrDict()
        self.params.col_side_colors = ['r', 'g', 'b', 'y', 'w', 'k', 'm']
        self.params.row_side_colors = ['r', 'g', 'b', 'y', 'w', 'k', 'm']
        self.params.cmap = cmap
        # optional user-set mappings from column/row names to integer
        # categories, rendered as colored side bars by plot()
        self.category_row = None
        self.category_column = None
        if col_side_colors:
            self.params.col_side_colors = col_side_colors
        if row_side_colors:
            self.params.row_side_colors = row_side_colors

    def _get_df(self):
        return self._df
    def _set_df(self, data):
        # always keep our own copy: plot() shuffles rows/columns in place
        self._df = data.copy()
    # the input data (a defensive copy of what the user provided)
    df = property(_get_df, _set_df)
    # alias of `df`; plot() uses this name throughout
    frame = property(_get_df, _set_df)

    def _get_row_method(self):
        return self._row_method
    def _set_row_method(self, value):
        # check_method() is presumably provided by the Linkage base class
        # -- TODO confirm; it should raise on invalid linkage methods
        self.check_method(value)
        self._row_method = value
    row_method = property(_get_row_method, _set_row_method)

    def _get_col_method(self):
        return self._column_method
    def _set_col_method(self, value):
        self.check_method(value)
        self._column_method = value
    column_method = property(_get_col_method, _set_col_method)

    def _get_col_metric(self):
        return self._column_metric
    def _set_col_metric(self, value):
        self.check_metric(value)
        self._column_metric = value
    column_metric = property(_get_col_metric, _set_col_metric)

    def _get_row_metric(self):
        return self._row_metric
    def _set_row_metric(self, value):
        self.check_metric(value)
        self._row_metric = value
    row_metric = property(_get_row_metric, _set_row_metric)

    def plot(self, num=1, cmap=None, colorbar=True, vmin=None,
             vmax=None, colorbar_position='right', gradient_span='None',
             figsize=(12, 8),
             fontsize=None
             ):
        """Plot the heatmap together with its row/column dendrograms.

        :param num: matplotlib figure number.
        :param cmap: colormap name or object; defaults to the one given at
            construction.
        :param colorbar: whether to draw the color legend.
        :param vmin, vmax: color scale bounds (may be overridden by
            *gradient_span*).
        :param colorbar_position: 'right' or 'top left'.
        :param gradient_span: None is default in R
        :param figsize: figure size in inches.
        :param fontsize: if set, overrides the automatic font size.
        :return: dict of the created matplotlib artists (axes, colorbar).

        Using::

            df = pd.DataFrame({'A':[1,0,1,1],
                               'B':[.9,0.1,.6,1],
                               'C':[.5,.2,0,1],
                               'D':[.5,.2,0,1]})

        and ::

            h = Heatmap(df)
            h.plot(vmin=0, vmax=1.1)

        we seem to get the same as in R with ::

            df = data.frame(A=c(1,0,1,1), B=c(.9,.1,.6,1), C=c(.5,.2,0,1), D=c(.5,.2,0,1))
            heatmap((as.matrix(df)), scale='none')

        .. todo:: right now, the order of cols and rows is random somehow.
            could be ordered like in heatmap (r) by mean of the row and col
            or with a set of vector for col and rows.

            heatmap((as.matrix(df)), Rowv=c(3,2), Colv=c(1), scale='none')

            gives same as::

                df = get_heatmap_df()
                h = heatmap.Heatmap(df)
                h.plot(vmin=-0, vmax=1.1)
        """
        # save all parameters in a dict
        layout = {}
        if cmap is None:
            cmap = self.params.cmap
        # NOTE(review): bare except silently keeps a plain string if the
        # colormap package cannot build the colormap from the given name
        try:
            cmap = colormap.cmap_builder(cmap)
        except:
            pass
        # keep track of row and column names for later.
        row_header = self.frame.index
        column_header = self.frame.columns
        # FIXME something clever for the fontsize
        # NOTE(review): the first two `if` statements below are dead -- any
        # matrix larger than 100 also matches the >30 test, so font.size
        # always ends up 8 (or 12).  An `elif` cascade was likely intended.
        if len(row_header) > 100 or len(column_header) > 100:
            matplotlib.rcParams['font.size'] = 6
        if len(row_header) > 50 or len(column_header) > 50:
            matplotlib.rcParams['font.size'] = 7
        if len(row_header) > 30 or len(column_header) > 30:
            matplotlib.rcParams['font.size'] = 8
        else:
            matplotlib.rcParams['font.size'] = 12
        if fontsize:
            matplotlib.rcParams['font.size'] = fontsize
        # scaling min/max range
        # NOTE(review): the default is the *string* 'None', not None, so none
        # of the three rescaling branches below fires by default.
        self.gradient_span = gradient_span  # 'only_max'
        # min_to_max, min_to_max_centered, only_max, only_min
        if self.gradient_span == 'min_to_max_centered':
            vmax = max([vmax, abs(vmin)])
            vmin = vmax * -1
        if self.gradient_span == 'only_max':
            vmin = 0
            vmax = self.frame.max().max()
        if self.gradient_span == 'only_min':
            vmin = self.frame.min().min()
            vmax = 0
        norm = matplotlib.colors.Normalize(vmin, vmax)
        # Scale the figure window size #
        fig = pylab.figure(num=num, figsize=figsize)
        fig.clf()
        # LAYOUT --------------------------------------------------
        # All placements below are in figure-fraction coordinates
        # [x, y, width, height] handed to fig.add_axes().
        # ax1 (dendrogram 1) on the left of the heatmap
        [ax1_x, ax1_y, ax1_w, ax1_h] = [0.05, 0.22, 0.2, 0.6]
        width_between_ax1_axr = 0.004
        # distance between the top color bar axis and the matrix
        height_between_ax1_axc = 0.004
        # Sufficient size to show
        color_bar_w = 0.015
        # axr, placement of row side colorbar
        # second to last controls the width of the side color bar - 0.015 when showing
        [axr_x, axr_y, axr_w, axr_h] = [0.31, 0.1, color_bar_w, 0.6]
        axr_x = ax1_x + ax1_w + width_between_ax1_axr
        axr_y = ax1_y; axr_h = ax1_h
        width_between_axr_axm = 0.004
        # axc, placement of column side colorbar #
        # last one controls the hight of the top color bar - 0.015 when showing
        [axc_x, axc_y, axc_w, axc_h] = [0.4, 0.63, 0.5, color_bar_w]
        axc_x = axr_x + axr_w + width_between_axr_axm
        axc_y = ax1_y + ax1_h + height_between_ax1_axc
        height_between_axc_ax2 = 0.004
        # axm, placement of heatmap for the data matrix # why larger than 1?
        [axm_x, axm_y, axm_w, axm_h] = [0.4, 0.9, 2.5, 0.5]
        axm_x = axr_x + axr_w + width_between_axr_axm
        axm_y = ax1_y; axm_h = ax1_h
        axm_w = axc_w
        # ax2 (dendrogram 2), on the top of the heatmap #
        [ax2_x, ax2_y, ax2_w, ax2_h] = [0.3, 0.72, 0.6, 0.15]
        ax2_x = axr_x + axr_w + width_between_axr_axm
        ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
        ax2_w = axc_w
        # axcb - placement of the color legend #
        if colorbar_position == 'top left':
            [axcb_x, axcb_y, axcb_w, axcb_h] = [0.07, 0.88, 0.18, 0.09]
        elif colorbar_position == 'right':
            [axcb_x, axcb_y, axcb_w, axcb_h] = [0.85, 0.2, 0.08, 0.6]
        else:
            raise ValueError("'top left' or 'right' accepted for now")
        # COMPUTATION DENDOGRAM 1 (columns) ---------------------------
        if self.column_method:
            # self.linkage() comes from the Linkage base class and is assumed
            # to return a scipy linkage matrix -- TODO confirm
            Y = self.linkage(self.frame.transpose(), self.column_method,
                             self.column_metric)
            ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=True)
            Z = hierarchy.dendrogram(Y)
            # flat clusters cut at 70% of the maximum merge distance
            ind2 = hierarchy.fcluster(Y, 0.7 * max(Y[:, 2]), self.cluster_criterion)
            ax2.set_xticks([])
            ax2.set_yticks([])
            # apply the clustering for the array-dendrograms to the actual matrix data
            idx2 = Z['leaves']
            self.frame = self.frame.iloc[:, idx2]
            # reorder the flat cluster to match the order of the leaves the dendrogram
            ind2 = ind2[idx2]
            layout['dendogram2'] = ax2
        else:
            idx2 = range(self.frame.shape[1])
        # COMPUTATION DENDOGRAM 2 (rows) ------------------------------
        if self.row_method:
            Y = self.linkage(self.frame, self.row_method, self.row_metric)
            ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=True)
            Z = hierarchy.dendrogram(Y, orientation='right')
            ind1 = hierarchy.fcluster(Y, 0.7 * max(Y[:, 2]), self.cluster_criterion)
            ax1.set_xticks([])
            ax1.set_yticks([])
            # apply the clustering for the array-dendrograms to the actual matrix data
            idx1 = Z['leaves']
            self.frame = self.frame.iloc[idx1, :]
            # reorder the flat cluster to match the order of the leaves the dendrogram
            ind1 = ind1[idx1]
            layout['dendogram1'] = ax1
        else:
            idx1 = range(self.frame.shape[0])
        # HEATMAP itself
        axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h])
        # NOTE(review): interpolation='None' is the *string* 'None' (no
        # interpolation image filter), which matplotlib accepts; distinct
        # from interpolation=None (rcParams default).
        axm.imshow(self.frame, aspect='auto', origin='lower', interpolation='None',
                   cmap=cmap, norm=norm)
        axm.set_xticks([])
        axm.set_yticks([])
        layout['heatmap'] = axm
        # TEXT: row labels on the right, column labels below.
        # row_header/column_header were captured before the reshuffle, so
        # indexing them with idx1/idx2 yields the post-clustering order.
        new_row_header = []
        new_column_header = []
        for i in range(self.frame.shape[0]):
            axm.text(self.frame.shape[1]-0.5, i, ' ' + str(row_header[idx1[i]]),
                     verticalalignment="center")
            new_row_header.append(row_header[idx1[i]] if self.row_method else row_header[i])
        for i in range(self.frame.shape[1]):
            axm.text(i, -0.9, ' '+str(column_header[idx2[i]]),
                     rotation=90, verticalalignment="top",
                     horizontalalignment="center")
            new_column_header.append(column_header[idx2[i]] if self.column_method else column_header[i])
        # CATEGORY column ------------------------------
        # NOTE(review): this branch references ind2/len(ind2), which only
        # exists when column_method is set -- NameError otherwise; confirm
        # category_column is never used with column_method=None.
        if self.category_column:
            axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h])
            cmap_c = matplotlib.colors.ListedColormap(self.params.col_side_colors)
            category_col = [self.category_column[self.df.columns[i]] for i in idx2]
            dc = np.array(category_col, dtype=int)
            dc.shape = (1, len(ind2))
            axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
            axc.set_xticks([])
            axc.set_yticks([])
            layout['category_column'] = axc
        # CATEGORY row -------------------------------
        if self.category_row:
            axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h])
            # self.category_row must be a dictionary with names as found in the columns
            # of the dataframe.
            # NOTE(review): indexing self.df.columns with the *row* order idx1
            # looks suspicious -- self.df.index was probably intended; likewise
            # cmap_r below uses col_side_colors rather than row_side_colors.
            category_row = [self.category_row[self.df.columns[i]] for i in idx1]
            dr = np.array(category_row, dtype=int)
            dr.shape = (len(category_row), 1)
            cmap_r = matplotlib.colors.ListedColormap(self.params.col_side_colors)
            axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_r)
            axr.set_xticks([])
            axr.set_yticks([])
            layout['category_row'] = axr
        # COLORBAR ----------------------
        if colorbar == True:
            axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False)
            if colorbar_position == 'right':
                orientation = 'vertical'
            else:
                orientation = 'horizontal'
            cb = matplotlib.colorbar.ColorbarBase(axcb, cmap=cmap,
                                                  norm=norm, orientation=orientation)
            #axcb.set_title("whatever")
            #max_cb_ticks = 5
            #axcb.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(max_cb_ticks))
            layout['colorbar'] = cb
        # could be useful: keep the reordered frame and both orderings around
        self.d = {'ordered': self.frame.copy(), 'rorder': idx1, 'corder': idx2}
        return layout
| bsd-2-clause |
michaelaye/pyciss | pyciss/plotting.py | 1 | 9363 | import logging
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from ipywidgets import fixed, interact
from ._utils import which_epi_janus_resonance
from .meta import get_all_resonances
from .ringcube import RingCube
logger = logging.getLogger(__name__)
resonance_table = get_all_resonances()
interpolators = [
"none",
"nearest",
"bilinear",
"bicubic",
"spline16",
"spline36",
"hanning",
"hamming",
"hermite",
"kaiser",
"quadric",
"catrom",
"gaussian",
"bessel",
"mitchell",
"sinc",
"lanczos",
]
def lookup_rcparam(rcParams, pattern):
    """Return every key of *rcParams* whose name contains *pattern*.

    Just a little helper to get to the right MPL settings faster.
    """
    matches = []
    for key in rcParams.keys():
        if pattern in key:
            matches.append(key)
    return matches
def myimshow(img, vmin, vmax, i, cmap="gray"):
    """Show *img* clipped to [vmin, vmax] above a histogram of its values.

    *i* indexes the module-level ``interpolators`` list.
    """
    _, axes = plt.subplots(nrows=2, figsize=(10, 10))
    img_ax, hist_ax = axes
    img_ax.imshow(
        img,
        vmin=vmin,
        vmax=vmax,
        aspect="auto",
        interpolation=interpolators[i],
        cmap=cmap,
    )
    title = "vmin: {:.2f}, vmax: {:.2f}, interpolator:{}".format(
        vmin, vmax, interpolators[i]
    )
    img_ax.set_title(title)
    # histogram range clipped to the 0.5/99.5 percentiles of finite values
    finite = img[~np.isnan(img)]
    p1, p99 = np.percentile(finite, (0.5, 99.5))
    hist_ax.hist(img[~np.isnan(img)], 100, range=(p1, p99))
    plt.show()
def myinteract(img):
    """Launch ipywidgets sliders to tune vmin/vmax and the interpolator."""
    lo = round(np.nanmin(img), 4)
    hi = round(np.nanmax(img), 4)
    p30, p70 = np.percentile(img[~np.isnan(img)], (30, 70))
    # slider step: 1/50th of the span between the minimum and the 30th pct
    step = round((p30 - lo) / 50, 5)
    interact(
        myimshow,
        img=fixed(img),
        vmin=(lo, p30, step),
        vmax=(p70, hi, step),
        i=(0, len(interpolators) - 1),
    )
def imshowlowhigh(data, low=10, high=90):
    """Display *data* with the gray scale stretched between the *low* and
    *high* percentiles of its finite values; return the figure."""
    fig, ax = plt.subplots()
    bounds = np.percentile(data[~np.isnan(data)], (low, high))
    ax.imshow(data, vmin=bounds[0], vmax=bounds[1], cmap="gray",
              interpolation="sinc")
    return fig
def add_ticks_to_x(ax, newticks, newnames):
    """Append extra tick positions and labels to the x axis of *ax*.

    I use this for the right-hand plotting of resonance names in my plots.
    """
    positions = list(ax.get_xticks()) + list(newticks)
    ax.set_xticks(positions)
    labels = list(ax.get_xticklabels()) + list(newnames)
    ax.set_xticklabels(labels)
def get_res_radius_from_res_name(res_name, cube):
    """Look up the orbital radius of the resonance named *res_name*.

    :param res_name: string of the form "<moon> <resonance>" (split on the
        single space).
    :param cube: RingCube-like object; its ``imagetime`` decides which of
        the co-orbital moons (Janus/Epimetheus) occupies the orbit at that
        epoch.
    :return: the resonance radius as an astropy Quantity in kilometers.
    """
    moon, resonance = res_name.split()
    # resolve the Janus/Epimetheus orbit swap for the image epoch
    moon = which_epi_janus_resonance(moon, cube.imagetime)
    # @moon / @resonance reference the local variables above (pandas query)
    row = resonance_table.query("moon==@moon and reson==@resonance")
    # squeeze() assumes exactly one matching row -- TODO confirm uniqueness
    return row.squeeze()["radius"] * u.km
def soliton_plot(
    cube,
    solitons,
    ax=None,
    solitoncolor="red",
    resonances=None,
    draw_prediction=True,
    soliton_controls_radius=False,
    saveroot=None,
    ifmin=None,
    ifmax=None,
    rmin=None,
    rmax=None,
):
    """Plot a ring image with predicted soliton radii and a radial profile.

    :param cube: RingCube with the image data and metadata.
    :param solitons: mapping {resonance name: predicted radius (Quantity)}.
        Only the first entry is used for the resonance marker (see TODO).
    :param ax: optional pair of axes (image, profile); created if None.
    :param solitoncolor: color of the soliton prediction lines.
    :param resonances: list of moon names whose resonances to overlay;
        defaults to janus/prometheus/epimetheus.
    :param draw_prediction: draw the predicted soliton lines if True.
    :param soliton_controls_radius: zoom the radial axis around the
        resonance radius instead of the image extent.
    :param saveroot: directory to save the figure into (created if needed);
        when None the figure is still saved into the current directory.
    :param ifmin, ifmax: optional I/F limits for the profile plot.
    :param rmin, rmax: optional radius limits (in Mm) for both plots.
    """
    if ax is None:
        fig, ax = plt.subplots(nrows=2)
    else:
        fig = ax.get_figure()
    # set resonances to True to get all (warning: in A ring too many to be useful)
    if resonances is None:
        # setting some reasonable defaults here:
        resonances = ["janus", "prometheus", "epimetheus"]
    cube.imshow(show_resonances=resonances, ax=ax[0], fig=fig, set_extent=True)
    if draw_prediction:
        # horizontal prediction line(s) on the left quarter of the image
        for k, v in solitons.items():
            ax[0].axhline(
                y=v.to("Mm").value,
                alpha=1,
                color=solitoncolor,
                linestyle="dashdot",
                lw=3,
                xmin=0.0,
                xmax=0.25,
            )
    # soliton name and value, only using first found soliton
    # TODO: create function that deals with more than one soliton
    res_name, soliton_radius = next(iter(solitons.items()))
    res_radius = get_res_radius_from_res_name(res_name, cube)
    # resonance marker on the right quarter of the image
    ax[0].axhline(
        y=res_radius.to("Mm").value,
        alpha=0.5,
        color="cyan",
        linestyle="dotted",
        lw=3,
        xmin=0.75,
        xmax=1.0,
    )
    # Placeholder for a secondary (twinx) axis labelling the solitons; the
    # code that created it is currently disabled, so this stays None.
    soliton_ax = None
    if soliton_controls_radius:
        # zoom a 200 km window starting 20 km below the resonance radius
        radius_low = (res_radius - 20 * u.km).to(u.Mm)
        radius_high = radius_low + 200 * u.km
        for tempax in [ax[0], cube.resonance_axis]:
            tempax.set_ybound(radius_low.value, radius_high.value)
        if soliton_ax:
            soliton_ax.set_ybound(radius_low.value, radius_high.value)
    elif any([rmin is not None, rmax is not None]):
        for tempax in [ax[0], cube.resonance_axis]:
            tempax.set_ybound(rmin, rmax)
    else:
        # the min/max image radii otherwise control the plot in cube.imshow()
        # so set the soliton display axis to the same values.
        # Bug fix: soliton_ax is always None while its twinx creation is
        # disabled; the unguarded call used to raise AttributeError whenever
        # this branch was reached.
        if soliton_ax is not None:
            soliton_ax.set_ybound(cube.minrad.value, cube.maxrad.value)
    # radial profile: per-radius median over all longitudes
    ax[1].plot(
        np.linspace(*cube.extent[2:], cube.img.shape[0]),
        np.nanmedian(cube.img, axis=1),
        color="white",
        lw=1,
    )
    if any([ifmin is not None, ifmax is not None]):
        ax[1].set_ylim(ifmin, ifmax)
    ticks = []
    names = []
    if draw_prediction:
        for k, v in solitons.items():
            ax[1].axvline(
                x=v.to("Mm").value,
                alpha=1,
                color=solitoncolor,
                linestyle="dashdot",
                lw=4,
            )
            ticks.append(v.to("Mm").value)
            names.append(k)
    ax[1].axvline(
        x=res_radius.to("Mm").value, alpha=0.5, color="cyan", linestyle="dotted", lw=3
    )
    # Bug fix: set_axis_bgcolor was removed in matplotlib >= 2.2; use
    # set_facecolor, consistent with resonance_plot below.
    ax[1].set_facecolor("black")
    ax[1].set_title("Longitude-median profile over radius")
    ax[1].set_xlabel("Radius [Mm]")
    ax[1].set_ylabel("I/F")
    if soliton_controls_radius:
        ax[1].set_xlim(radius_low.value, radius_high.value)
    elif any([rmin is not None, rmax is not None]):
        ax[1].set_xlim(rmin, rmax)
    else:
        ax[1].set_xlim(cube.minrad.value, cube.maxrad.value)
    savepath = "{}_{}.png".format(cube.pm.img_id, "_".join(res_name.split()))
    if saveroot is not None:
        root = Path(saveroot)
        root.mkdir(exist_ok=True)
        savepath = root / savepath
    # NOTE: saves even when saveroot is None (into the current directory),
    # matching the historical behavior.
    fig.savefig(str(savepath), dpi=100)
def resonance_plot(
    img_id,
    ax=None,
    cube=None,
    saveroot=None,
    ifmin=None,
    ifmax=None,
    rmin=None,
    rmax=None,
):
    """Plot a ring image with the current Janus resonance and a radial profile.

    :param img_id: image identifier used to build a RingCube when *cube*
        is not given.
    :param ax: optional pair of axes (image, profile); created if None.
    :param cube: optional pre-built RingCube.
    :param saveroot: directory to save the figure into; no file is written
        when None.
    :param ifmin, ifmax: optional I/F limits for the profile plot.
    :param rmin, rmax: optional radius limits; when given, ``.value`` is
        accessed on them below, so they are assumed to be astropy
        Quantities -- TODO confirm against callers.
    :return: tuple (figure, cube.resonance_axis).
    """
    if cube is None:
        cube = RingCube(img_id)
    if ax is None:
        fig, ax = plt.subplots(nrows=2)
    else:
        fig = ax[0].get_figure()
        # drop any leftover axes (e.g. old resonance axes) not in *ax*
        for axes in fig.axes:
            if axes not in ax:
                axes.remove()
    cube.imshow(show_resonances=["janus"], ax=ax[0], set_extent=True)
    # soliton name and value, only using first found soliton
    # TODO: create function that deals with more than one soliton
    # select the resonance row(s) of whichever co-orbital moon currently
    # occupies the Janus orbit
    row_filter = cube.inside_resonances.moon == cube.janus_swap_phase
    if any(row_filter):
        cols = ["radius", "reson"]
        # squeeze() assumes exactly one matching row -- TODO confirm
        res_radius, res_name = cube.inside_resonances.loc[row_filter, cols].squeeze()
        res_radius *= u.km
        # resonance marker on the right quarter of the image
        ax[0].axhline(
            y=res_radius.to("Mm").value,
            alpha=0.5,
            color="cyan",
            linestyle="dotted",
            lw=3,
            xmin=0.75,
            xmax=1.0,
        )
        if any([rmin is not None, rmax is not None]):
            radius_low = rmin
            radius_high = rmax
        else:
            # default zoom: 200 km window starting 20 km below the resonance
            radius_low = (res_radius - 20 * u.km).to(u.Mm)
            radius_high = radius_low + 200 * u.km
        for tempax in [ax[0], cube.resonance_axis]:
            tempax.set_ybound(radius_low.value, radius_high.value)
    else:
        # no matching resonance: only used to build the save filename below
        res_name = "no_janus_res"
    # radial I/F profile; negatives and NaNs are clamped to zero
    ifs = np.nan_to_num(cube.median_profile)
    ifs[ifs < 0] = 0
    ax[1].plot(
        np.linspace(*cube.extent[2:], cube.img.shape[0]), ifs, color="white", lw=1
    )
    if any([ifmin is not None, ifmax is not None]):
        iflow = ifmin
        ifhigh = ifmax
    else:
        # NOTE(review): the NaN mask is redundant after nan_to_num above
        iflow, ifhigh = np.percentile(ifs[~np.isnan(ifs)], (0.5, 99.5))
    ax[1].set_ylim(iflow / 1.1, ifhigh * 1.1)
    if any(row_filter):
        ax[1].axvline(
            x=res_radius.to("Mm").value,
            alpha=0.5,
            color="cyan",
            linestyle="dotted",
            lw=3,
        )
        ax[1].set_xlim(radius_low.value, radius_high.value)
    ax[1].set_facecolor("black")
    ax[1].set_title("Longitude-median profile over radius")
    ax[1].set_xlabel("Radius [Mm]")
    ax[1].set_ylabel("I/F")
    if saveroot is not None:
        savepath = f"{cube.pm.img_id}_{res_name.replace(':', '_')}.png"
        root = Path(saveroot)
        root.mkdir(exist_ok=True)
        savepath = root / savepath
        print(savepath)
        logger.info("Saving file at %s", str(savepath))
        fig.savefig(str(savepath), dpi=200)
    return fig, cube.resonance_axis
| isc |
duttashi/Data-Analysis-Visualization | scripts/general/chiSquareTest.py | 1 | 3347 | __author__ = 'Ashoo'
# Chi-square test of breast-cancer rate vs. alcohol consumption (gapminder
# data), with post-hoc pairwise comparisons.
# NOTE(review): this is Python 2 code (print statements) using long-removed
# pandas APIs (convert_objects); it will not run under Python 3 / modern
# pandas without porting.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
import warnings
sns.set(color_codes=True)
# Reading the data where low_memory=False increases the program efficiency
data= pd.read_csv("gapminder.csv", low_memory=False)
# setting variables that you will be working with to numeric
# NOTE(review): Series.convert_objects() was deprecated in pandas 0.17 and
# later removed; pd.to_numeric(..., errors='coerce') is the replacement.
data['breastcancerper100th']= data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate']= data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption']= data['alcconsumption'].convert_objects(convert_numeric=True)
#print "Showing missing data coulmn-wise"
#print data.isnull().sum()
# Create a copy of the original dataset as sub5 by using the copy() method
sub5=data.copy()
# Since the data is all continuous variables therefore the use the mean() for missing value imputation
# NOTE(review): DataFrame.fillna with a scalar fills NaNs in *every* column,
# so this first call already replaces all missing values (in all columns)
# with the breast-cancer mean; the next two fillna calls are then no-ops.
# Per-column imputation, e.g. sub5['femaleemployrate'].fillna(...), was
# probably intended.
sub5.fillna(sub5['breastcancerper100th'].mean(), inplace=True)
sub5.fillna(sub5['femaleemployrate'].mean(), inplace=True)
sub5.fillna(sub5['alcconsumption'].mean(), inplace=True)
# Showing the count of null values after imputation
#print sub5.isnull().sum()
# categorize quantitative variable based on customized splits using the cut function
# (actually qcut: quantile-based discretization into equal-frequency bins)
sub5['alco']=pd.qcut(sub5.alcconsumption,6,labels=["0","1-4","5-9","10-14","15-19","20-24"])
sub5['brst']=pd.qcut(sub5.breastcancerper100th,5,labels=["1-20","21-40","41-60","61-80","81-90"])
# Converting response variable to categorical
sub5['brst']=sub5['brst'].astype('category')
# Cross tabulating the response variable with explantory variable
ct1=pd.crosstab(sub5['brst'],sub5['alco'])
#ct1=pd.crosstab(sub5['alco'],sub5['brst'])
print "Contigency Table"
print ct1
print "\n\n"
# the axis=0 statement tells python to sum all the values in each column in python
colsum=ct1.sum(axis=0)
colpct=ct1/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs1=scipy.stats.chi2_contingency(ct1)
print(cs1)
sub5['brst']=sub5['brst'].astype('category')
sub5['alco']=sub5['alco'].convert_objects(convert_numeric=True)
#sns.factorplot(x='alcconsumption', y='breastcancerper100th', data=sub5, kind="bar", ci=None)
sns.factorplot(x='alco', y='brst', data=sub5, kind="bar",ci=None)
plt.xlabel("Alcohol consumption in Liters")
plt.ylabel("Breast Cancer cases per 100th women")
# ====================================================
# POST HOC COMPARISON TEST
# NOTE(review): these dict keys are arithmetic expressions, not labels:
# 1-20 evaluates to -19 and 21-40 also evaluates to -19, so the dict
# collapses to {-19: 2}; mapping it over the string-labelled 'brst'
# column yields only NaN.  The string keys "1-20"/"21-40" (the qcut
# labels above) were almost certainly intended.
recode2={1-20:1,21-40:2}
sub5['COMP1v2']=sub5['brst'].map(recode2)
ct2=pd.crosstab(sub5['brst'],sub5['COMP1v2'])
print "Contigency Table -2\n"
print ct2
print "\n\n"
# the axis=0 statement tells python to sum all the values in each column in python
colsum=ct2.sum(axis=0)
colpct=ct2/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs2=scipy.stats.chi2_contingency(ct2)
print(cs2)
#######################################################
# NOTE(review): same arithmetic-key problem as recode2 (41-60 and 61-80
# both evaluate to -19); additionally these ranges are 'brst' labels but
# the map is applied to 'alco', whose labels are "0","1-4",..., so no key
# would match even with string keys -- confirm the intended comparison.
recode3={41-60:3,61-80:4}
sub5['COMP1v3']=sub5['alco'].map(recode3)
ct3=pd.crosstab(sub5['brst'],sub5['COMP1v3'])
print "Contigency Table - 3\n"
print ct3
print "\n\n"
# the axis=0 statement tells python to sum all the values in each column in python
colsum=ct3.sum(axis=0)
colpct=ct3/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs3=scipy.stats.chi2_contingency(ct3)
print(cs3)
| mit |
sgranitz/northwestern | predict420/grex2_Customer_Sales_Analysis.py | 2 | 7041 | # Stephan Granitz [ GrEx2 ]
# Import libraries
import pandas as pd
import numpy as np
import shelve
import sqlite3
# 1 Import each of the csv files you downloaded from the SSCC into a pandas DF
# Grab files
folder = "C:/Users/sgran/Desktop/GrEx2/"
io1 = folder + "seg6770cust.csv"
io2 = folder + "seg6770item.csv"
io3 = folder + "seg6770mail.csv"
# 1 a)
# Fill blanks with NaN for easier handling
customer = pd.read_csv(io1, low_memory=False).fillna(np.nan)
item = pd.read_csv(io2).fillna(np.nan)
mail = pd.read_csv(io3).fillna(np.nan)
# 1 b) print columns in item DF and first 4 records
list(item.columns.values)
item.head(4)
# 1 c) describe data types of cols in DFs
item.info(verbose=False)
mail.info(verbose=False)
customer.info(verbose=False)
# 2 Write each of you pandas DataFrames to a local SQLite DB named xyz.db.
# Include only data for active buyers in these tables
active_customer = customer[customer.buyer_status == 'ACTIVE']
# Filter 'item' and 'mail' tables to only include active buyers
active_item = item[item['acctno'].isin(active_customer['acctno'])]
active_mail = mail[mail['acctno'].isin(active_customer['acctno'])]
# Connect to xyz.db
db = sqlite3.connect('xyz.db')
# Put DFs into the DB
active_customer.to_sql(
'customer',
db,
if_exists='replace',
index=False
)
active_item.to_sql(
'item',
db,
if_exists='replace',
index=False
)
active_mail.to_sql(
'mail',
db,
if_exists='replace',
index=False
)
# Commit the DB write
db.commit()
# Verify that you have written the tables to your SQLite DB correctly
cursor = db.cursor()
query = 'select * from customer limit 1'
res = cursor.execute(query)
res.fetchall()[0][0:10]
cursor.executescript('drop table if exists custSum;')
db.commit()
# 3 Using the same data from 2 above, create a new table called custSum
cursor.execute('''
CREATE TABLE custSum(
acctno TEXT PRIMARY KEY, zip INTEGER, zip4 INTEGER, heavy_buyer TEXT,
has_amex TEXT, has_disc TEXT, has_visa TEXT, has_mc TEXT,
est_income INTEGER, adult1_g TEXT, adult2_g TEXT
)
''')
db.commit()
# Filter to the columns needed
cols = [
'acctno', 'zip', 'zip4', 'ytd_sales_2009', 'amex_prem', 'amex_reg',
'disc_prem', 'disc_reg', 'visa_prem', 'visa_reg', 'mc_prem', 'mc_reg',
'inc_scs_amt_v4', 'adult1_g', 'adult2_g'
]
custSum = active_customer[cols]
# Validate
custSum.head(3).transpose().head(6)
# 3 a) indicator of whether the customer is a 'heavy buyer,' where the definition
# of a 'heavy buyer' is a customer whose YTD purchasing in 2009 is greater than
# 90% of the 2009 YTD purchasing of all customers who are active buyers
heavy = custSum.ytd_sales_2009.dropna().quantile([0.9])[0.9]
custSum['heavy_buyer'] = 'N'
custSum.loc[custSum.ytd_sales_2009 > heavy, 'heavy_buyer'] = 'Y'
# 3 b) Add whether the customer has the following credit cards
# (AMEX, DISC, VISA, MC)
custSum['has_amex'] = 'N'
custSum.loc[custSum.amex_prem == 'Y', 'has_amex'] = 'Y'
custSum.loc[custSum.amex_reg == 'Y', 'has_amex'] = 'Y'
custSum['has_disc'] = 'N'
custSum.loc[custSum.disc_prem == 'Y', 'has_disc'] = 'Y'
custSum.loc[custSum.disc_reg == 'Y', 'has_disc'] = 'Y'
custSum['has_visa'] = 'N'
custSum.loc[custSum.visa_prem == 'Y', 'has_visa'] = 'Y'
custSum.loc[custSum.visa_reg == 'Y', 'has_visa'] = 'Y'
custSum['has_mc'] = 'N'
custSum.loc[custSum.mc_prem == 'Y', 'has_mc'] = 'Y'
custSum.loc[custSum.mc_reg == 'Y', 'has_mc'] = 'Y'
# Drop columns no longer needed
custSum.drop(
['ytd_sales_2009', 'amex_prem', 'amex_reg', 'disc_prem', 'disc_reg',
'visa_prem', 'visa_reg', 'mc_prem', 'mc_reg'], inplace=True, axis=1
)
# 3 c,d,e) Est income, zip, acctno
custSum.rename(columns={'inc_scs_amt_v4': 'est_income'}, inplace=True)
custSum.est_income = custSum.est_income.astype(float)
custSum = custSum[[
'acctno', 'zip', 'zip4', 'heavy_buyer', 'has_amex', 'has_disc',
'has_visa', 'has_mc', 'est_income', 'adult1_g', 'adult2_g'
]]
# Fill the table in the DB
query = '''
insert or replace into custSum
(acctno, zip, zip4, heavy_buyer, has_amex, has_disc,
has_visa, has_mc, est_income, adult1_g, adult2_g)
values (?,?,?,?,?,?,?,?,?,?,?)
'''
# 3 f) count of the number of records in each table
query = 'select count(*) from '
res1 = cursor.execute(query + 'custSum')
print('Rows in custSum', res1.fetchall())
res2 = cursor.execute(query + 'customer')
print('Rows in customer', res2.fetchall())
res3 = cursor.execute(query + 'item')
print('Rows in item', res3.fetchall())
res4 = cursor.execute(query + 'mail')
print('Rows in mail', res4.fetchall())
# 3 g) Verify table written to SQLite DB correctly
query = 'select * from custSum limit 5'
res = cursor.execute(query)
res.fetchall()
# Close the db connection
db.close()
# 4 a) Target maketing with active buyers or lapsed buyers
marketing = customer[
(customer.buyer_status == 'ACTIVE') |
(customer.buyer_status == 'LAPSED')
]
# 4 b) Find which categories each customer made purchases in
purchase = item.groupby(['acctno','deptdescr'], as_index=False)
purchase = purchase.aggregate(np.sum)
# 4 b) Indicator variable (1/0) for each product category customer made
# at least one purchase
purchase_cats = purchase.pivot(
index='acctno', columns='deptdescr', values='totamt'
)
# NaN means they didn't make any purchases
purchase_cats = pd.DataFrame(purchase_cats.to_records()).fillna(0)
def findSales(cat_list):
    # Turn each listed category column of the module-level `purchase_cats`
    # frame into a 1/0 purchased-indicator (any positive amount -> 1).
    for category in cat_list:
        indicator = purchase_cats[category].apply(
            lambda amount: 1 if amount > 0 else 0)
        purchase_cats[category] = indicator
# flag every category column (skip column 0, which is acctno)
findSales(list(purchase_cats.columns.values)[1::])
# 4 c) Include buyer status & total dollar amount of purchases
cols = ['acctno', 'buyer_status', 'ytd_sales_2009']
sales_info = marketing[cols].merge(purchase_cats)
sales_info.head(3).transpose()
# 4 d) Write your DataFrame to a csv file & store in a shelve database
path = folder + 'sales.csv'
sales_info.to_csv(path, header=True)
sales_shelf = shelve.open('sales_shelf.dbm')
sales_shelf['sales'] = sales_info
sales_shelf.sync()
sales_shelf.close()
# 4 e) Verify the shelve worked
sales_shelf = shelve.open('sales_shelf.dbm')
sales_shelf['sales'].head(3).transpose()
sales_shelf.close()
# 5 Report 6 most frequently purchased product cats by the gender of adult 1
# Add column to count number of adults in each category
purchase['count_adults'] = 1
cols = ['acctno', 'adult1_g']
purchase_gender = purchase.merge(marketing[cols]).groupby(
    ['adult1_g','deptdescr'], as_index=False)
purchase_gender = purchase_gender.aggregate(np.sum)
purchase_gender.drop('price', axis=1, inplace=True)
# List gender types
purchase_gender.adult1_g.unique()
# Print top 6 most purchased by gender
# NOTE(review): DataFrame.sort() was deprecated in pandas 0.17 and removed
# in 0.20; sort_values(['qty'], ascending=False) is the replacement.
purchase_gender[purchase_gender['adult1_g'] == 'B'].sort(
    ['qty'], ascending=False).head(6)
purchase_gender[purchase_gender['adult1_g'] == 'F'].sort(
    ['qty'], ascending=False).head(6)
purchase_gender[purchase_gender['adult1_g'] == 'M'].sort(
    ['qty'], ascending=False).head(6)
purchase_gender[purchase_gender['adult1_g'] == 'U'].sort(
    ['qty'], ascending=False).head(6)
| mit |
mehdidc/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
    """Return the duration represented by *delta* in (fractional) seconds.

    :param t_start: unused; kept for backward compatibility with callers.
    :param delta: a ``datetime.timedelta``.
    :return: the total duration in seconds as a float.
    """
    # Bug fix: the previous hand-rolled formula
    # ``delta.seconds + delta.microseconds / 1e6`` silently dropped the
    # ``days`` component of the timedelta; total_seconds() includes it.
    return delta.total_seconds()
def bench_sample(sampling, n_population, n_samples):
    """Time a single call of ``sampling(n_population, n_samples)``.

    Garbage collection runs first so a pending collection does not pollute
    the measurement; the elapsed wall-clock time is converted to seconds
    via :func:`compute_time`.
    """
    gc.collect()
    # start time
    started = datetime.now()
    sampling(n_population, n_samples)
    elapsed = datetime.now() - started
    # stop time
    return compute_time(started, elapsed)
if __name__ == "__main__":
    ###########################################################################
    # Option parser
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option("--n-times",
                  dest="n_times", default=5, type=int,
                  help="Benchmark results are average over n_times experiments")
    op.add_option("--n-population",
                  dest="n_population", default=100000, type=int,
                  help="Size of the population to sample from.")
    op.add_option("--n-step",
                  dest="n_steps", default=5, type=int,
                  help="Number of step interval between 0 and n_population.")
    default_algorithms = "custom-tracking-selection,custom-auto," \
                         "custom-reservoir-sampling,custom-pool,"\
                         "python-core-sample,numpy-permutation"
    op.add_option("--algorithm",
                  dest="selected_algorithm",
                  default=default_algorithms,
                  type=str,
                  help="Comma-separated list of transformer to benchmark. "
                       "Default: %default. \nAvailable: %default")
    # op.add_option("--random-seed",
    #               dest="random_seed", default=13, type=int,
    #               help="Seed used by the random number generators.")
    (opts, args) = op.parse_args()
    if len(args) > 0:
        op.error("this script takes no arguments.")
        sys.exit(1)
    # Validate the requested algorithm names against the known set.
    selected_algorithm = opts.selected_algorithm.split(',')
    for key in selected_algorithm:
        if key not in default_algorithms.split(','):
            raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
                             % (key, default_algorithms))
    ###########################################################################
    # List sampling algorithm
    ###########################################################################
    # We assume that sampling algorithm has the following signature:
    #   sample(n_population, n_sample)
    #
    sampling_algorithm = {}
    ###########################################################################
    # Set Python core input
    sampling_algorithm["python-core-sample"] = \
        lambda n_population, n_sample: \
        random.sample(xrange(n_population), n_sample)
    ###########################################################################
    # Set custom automatic method selection
    sampling_algorithm["custom-auto"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="auto",
                                   random_state=random_state)
    ###########################################################################
    # Set custom tracking based method
    sampling_algorithm["custom-tracking-selection"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="tracking_selection",
                                   random_state=random_state)
    ###########################################################################
    # Set custom reservoir based method
    sampling_algorithm["custom-reservoir-sampling"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="reservoir_sampling",
                                   random_state=random_state)
    ###########################################################################
    # Set custom reservoir based method
    sampling_algorithm["custom-pool"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="pool",
                                   random_state=random_state)
    ###########################################################################
    # Numpy permutation based
    sampling_algorithm["numpy-permutation"] = \
        lambda n_population, n_sample: \
        np.random.permutation(n_population)[:n_sample]
    ###########################################################################
    # Remove unspecified algorithm
    sampling_algorithm = dict((key, value)
                              for key, value in sampling_algorithm.items()
                              if key in selected_algorithm)
    ###########################################################################
    # Perform benchmark
    ###########################################################################
    # time[name] accumulates one row per step, one column per repetition.
    time = {}
    n_samples = np.linspace(start=0, stop=opts.n_population,
                            num=opts.n_steps).astype(np.int)
    ratio = n_samples / opts.n_population
    print('Benchmarks')
    print("===========================")
    for name in sorted(sampling_algorithm):
        print("Perform benchmarks for %s..." % name, end="")
        time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
        for step in xrange(opts.n_steps):
            for it in xrange(opts.n_times):
                time[name][step, it] = bench_sample(sampling_algorithm[name],
                                                    opts.n_population,
                                                    n_samples[step])
        print("done")
    # Reduce the repetitions to their mean per step.
    print("Averaging results...", end="")
    for name in sampling_algorithm:
        time[name] = np.mean(time[name], axis=1)
    print("done\n")
    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print("%s \t | %s " % ("Arguments".ljust(16),
                           "Value".center(12),))
    print(25 * "-" + ("|" + "-" * 14) * 1)
    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16),
                               str(value).strip().center(12)))
    print("")
    print("Sampling algorithm performance:")
    print("===============================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")
    # Plot mean time against the sampled fraction of the population.
    fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
    plt.title("n_population = %s, n_times = %s" %
              (opts.n_population, opts.n_times))
    ax = fig.add_subplot(111)
    for name in sampling_algorithm:
        ax.plot(ratio, time[name], label=name)
    ax.set_xlabel('ratio of n_sample / n_population')
    ax.set_ylabel('Time (s)')
    ax.legend()
    # Sort legend labels
    handles, labels = ax.get_legend_handles_labels()
    hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
    handles2, labels2 = zip(*hl)
    ax.legend(handles2, labels2, loc=0)
    plt.show()
| bsd-3-clause |
jwlockhart/concept-networks | nlp.py | 1 | 2220 | # utility functions for NLP-based similarity metrics
# version 1.0
# code modified from:
# https://stackoverflow.com/questions/8897593/similarity-between-two-text-documents
import nltk
import string
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
# Shared Porter stemmer used by stem_tokens()/normalize() below.
stemmer = nltk.stem.porter.PorterStemmer()
# Translation table mapping each punctuation character to None (i.e. delete).
remove_punctuation_map = dict((ord(char), None)
                              for char in string.punctuation)
def stem_tokens(tokens):
    '''word stems'''
    return [stemmer.stem(item) for item in tokens]
def normalize(text):
    '''remove punctuation, lowercase, stem'''
    clean = text.lower().translate(remove_punctuation_map)
    tokens = nltk.word_tokenize(clean)
    return stem_tokens(tokens)
# TF-IDF vectorizer wired to the custom normalize() tokenizer above; shared
# by the cosine_sim* helpers below.
vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words='english')
def cosine_sim(texts):
    '''return cosine similarity of an array of documents, such that
    any individual similarity is conditional on not just the two
    vectors being compared, but all other vectors present.
    '''
    tfidf = vectorizer.fit_transform(texts)
    # TfidfVectorizer L2-normalises each row, so X * X.T is directly the
    # cosine-similarity matrix; .A converts the sparse result to ndarray.
    return (tfidf * tfidf.T).A
def cosine_sim_2(text1, text2):
    '''Return the cosine similarity of two documents.

    Will be higher than the similarity between those documents in
    cosine_sim() because that includes more documents in the space.
    '''
    # BUG FIX: the original called cosine_sim_all(), a name not defined
    # anywhere in this module, so this function always raised NameError.
    # The intended helper is cosine_sim() above; its [0, 1] entry is the
    # similarity between the two input texts.
    return cosine_sim([text1, text2])[0, 1]
def cosine_sim_pd(docs, codes):
    '''Cosine-similarity matrix of `docs`, wrapped in a pandas DataFrame
    whose rows and columns are both labeled by `codes`.
    '''
    similarity_matrix = cosine_sim(docs)
    return pd.DataFrame(similarity_matrix, index=codes, columns=codes)
def make_docs(df, code_cols, text_col='Excerpt Copy'):
    '''Build one document per code in `code_cols`.

    For each code column, all rows of `df` where that column is True are
    selected and their `text_col` values are concatenated (separated by
    " .\\n") into a single document string. Returns the list of documents,
    in the order of `code_cols`.
    '''
    documents = []
    for code in code_cols:
        # Rows tagged with this code, then their excerpt text.
        excerpts = df[df[code] == True][text_col]
        documents.append(' .\n'.join(excerpts))
    return documents
QuantSoftware/QuantSoftwareToolkit | bin/investors_report.py | 5 | 6911 | #
# report.py
#
# Generates a html file containing a report based
# off a timeseries of funds from a pickle file.
#
# Drew Bratcher
#
from pylab import *
import numpy
from QSTK.qstkutil import DataAccess as da
from QSTK.qstkutil import qsdateutil as du
from QSTK.qstkutil import tsutil as tsu
from QSTK.quicksim import quickSim as qs
import converter
import datetime as dt
from pandas import *
import matplotlib.pyplot as plt
import cPickle
def readableDate(date):
    '''Format a date-like object as the US-style string "M/D/YYYY".'''
    return "{0}/{1}/{2}".format(date.month, date.day, date.year)
def getYearReturn(funds, year):
    '''Simple return of the `funds` series over calendar `year`:
    last-day value divided by first-day value, minus one.
    '''
    year_days = [d for d in funds.index if d.year == year]
    return funds[year_days[-1]] / funds[year_days[0]] - 1
def getYearMaxDrop(funds, year):
    '''Most negative day-over-day return of `funds` within calendar `year`.

    Matches the original implementation exactly: the comparison walks
    days[1:-1], so the final day of the year is never examined, and 0 is
    returned when no daily drop occurs.
    '''
    year_days = [d for d in funds.index if d.year == year]
    worst = 0
    prev = year_days[0]
    for day in year_days[1:-1]:
        step = funds[day] / funds[prev] - 1
        if step < worst:
            worst = step
        prev = day
    return worst
def getYearRatioUsingMonth(funds,year):
    '''Sharpe-like ratio (mean / sample std-dev) of the monthly returns of
    `funds` restricted to calendar `year`, computed via QSTK's tsu.monthly.
    '''
    days=[]
    for date in funds.index:
        if(date.year==year):
            days.append(date)
    # Restrict the series to the requested year before computing returns.
    funds=funds.reindex(index=days)
    m=tsu.monthly(funds)
    avg=float(sum(m))/len(m)
    std=0
    for a in m:
        std=std+float((float(a-avg))**2)
    # Sample standard deviation (n-1 denominator); sqrt comes from
    # `from pylab import *` at the top of this file.
    std=sqrt(float(std)/(len(m)-1))
    return (avg/std)
def getWinningDays(funds1,funds2,year):
    '''Fraction of matched days in `year` on which funds1's daily return
    exceeds funds2's (daily returns via QSTK's tsu.daily).
    '''
    days=[]
    i=0;
    win=0
    tot=0
    f1ret=tsu.daily(funds1)
    f2ret=tsu.daily(funds2)
    relf1=[]
    relf2=[]
    # Collect returns for dates present in both series during `year`;
    # `i` tracks the position of `date` within funds1's full index.
    for date in funds1.index:
        if(date.year==year):
            for date2 in funds2.index:
                if(date==date2):
                    relf1.append(f1ret[i])
                    relf2.append(f2ret[i])
        i+=1
    # NOTE(review): this loop compares f1ret/f2ret at positions
    # 0..len(relf1)-1 rather than the matched relf1/relf2 lists built
    # above -- that looks like a bug (relf1/relf2 go unused). Confirm the
    # intent before relying on this statistic.
    for i in range(0,len(relf1)):
        if(f1ret[i]>f2ret[i]):
            win+=1
        tot+=1
    return float(win)/tot
def runOther(funds,symbols):
    '''Simulate a constant-weight buy-and-hold portfolio of `symbols`
    (plus a cash slot) over the same date span as `funds`, using QSTK's
    Norgate data and quickSim. Returns the simulated fund time series.
    '''
    # Span of the comparison simulation = first/last dates of `funds`,
    # sampled at the NYSE 16:00 close.
    tsstart =dt.datetime(funds.index[0].year,funds.index[0].month,funds.index[0].day)
    tsend =dt.datetime(funds.index[-1].year,funds.index[-1].month,funds.index[-1].day)
    timeofday=dt.timedelta(hours=16)
    timestamps=du.getNYSEdays(tsstart,tsend,timeofday)
    dataobj=da.DataAccess('Norgate')
    historic=dataobj.get_data(timestamps,symbols,"close")
    # NOTE(review): the weights use 0.1/(len(symbols)+1), i.e. only 10% of
    # capital is allocated in total; presumably 1.0/(len+1) was intended
    # for a fully-invested equal-weight portfolio -- confirm.
    alloc_val=float(0.1/(float(len(symbols))+1))
    alloc_vals=alloc_val*ones(len(symbols))
    # Allocation matrix: same weights at the first and last timestamp.
    alloc=DataMatrix(index=[historic.index[0]],data=[alloc_vals], columns=symbols)
    alloc=alloc.append(DataMatrix(index=[historic.index[-1]], data=[alloc_vals], columns=symbols))
    alloc['_CASH']=alloc_val
    return qs.quickSim(alloc,historic,1000)
def reportFunctionality(funds, symbols,filename=sys.stdout):
    '''Write an HTML performance report for the `funds` time series.

    Python 2 code (uses ``print >>file``). If `symbols` is a non-empty
    list, an equal-weight comparison portfolio is simulated via runOther()
    and winning-day/correlation statistics are included. Output goes to
    `filename` (or stdout by default).
    '''
    if(len(symbols)!=0):
        funds2=runOther(funds,symbols)
        arg2=1
    else:
        arg2=0
    if(filename==sys.stdout):
        html_file=sys.stdout
    else:
        html_file = open(filename,"w")
    #top
    html_file.write("<HTML>\n")
    html_file.write("<HEAD>\n")
    html_file.write("<TITLE>QSTK Generated Report from "+readableDate(funds.index[0])+" to "+readableDate(funds.index[-1])+"</TITLE>\n")
    html_file.write("</HEAD>\n\n")
    html_file.write("<BODY><CENTER>\n\n")
    years=du.getYears(funds)
    html_file.write("<H2>Performance Summary for "+sys.argv[1]+"</H2>\n")
    html_file.write("For the dates "+readableDate(funds.index[0])+" to "+readableDate(funds.index[-1])+"\n")
    # Yearly metrics table: one column per year in the series.
    html_file.write("<H3>Yearly Performance Metrics</H3>\n")
    html_file.write("<TABLE CELLPADDING=10>\n")
    html_file.write("<TR><TH></TH>\n")
    for year in years:
        html_file.write("<TH>"+str(year)+"</TH>\n")
    html_file.write("</TR>\n")
    #yearly return
    html_file.write("<TR>\n")
    html_file.write("<TH>Annualized Return:</TH>\n")
    for year in years:
        retur=getYearReturn(funds,year)
        html_file.write("<TD>\n")
        print >>html_file, "%.2f\n" % (retur*100)
        html_file.write("%</TD>\n")
    html_file.write("</TR>\n")
    #yearly winning days
    html_file.write("<TR>\n")
    html_file.write("<TH>Winning Days:</TH>\n")
    for year in years:
        # change to compare to inputs - ratio=tsu.getYearRatio(funds,year)
        if(arg2!=0):
            win=getWinningDays(funds,funds2,year)
            html_file.write("<TD>\n")
            print >>html_file, "%.2f\n" % (win*100)
            html_file.write("%</TD>\n")
        else:
            html_file.write("<TD>No comparison.</TD>\n")
    html_file.write("</TR>\n")
    #max draw down
    html_file.write("<TR>\n")
    html_file.write("<TH>Max Draw Down:</TH>\n")
    for year in years:
        drop=getYearMaxDrop(funds,year)
        html_file.write("<TD>\n")
        print >>html_file, "%.2f" % (drop*100)
        html_file.write("%</TD>\n")
    html_file.write("</TR>\n")
    #yearly sharpe ratio using daily rets
    html_file.write("<TR>\n")
    html_file.write("<TH>Daily Sharpe Ratio:</TH>\n")
    for year in years:
        ratio=tsu.getYearRatio(funds,year)
        html_file.write("<TD>\n")
        print >>html_file, "%.2f\n" % ratio
        html_file.write("</TD>\n")
    html_file.write("</TR>\n")
    #yearly sharpe ratio using monthly rets
    html_file.write("<TR>\n")
    html_file.write("<TH>Monthly Sharpe Ratio:</TH>\n")
    for year in years:
        ratio=getYearRatioUsingMonth(funds,year)
        html_file.write("<TD>\n")
        print >>html_file, "%.2f\n" % ratio
        html_file.write("</TD>\n")
    html_file.write("</TR>\n")
    html_file.write("</TABLE>\n")
    html_file.write("<BR/>\n\n")
    # Correlation between the fund and the comparison portfolio.
    # NOTE(review): this section unconditionally uses funds2, which only
    # exists when `symbols` was non-empty -- calling with no symbols
    # reaches here and raises NameError. Confirm intended guard.
    vals=funds.values;
    vals2=np.append(vals,funds2.values,2)
    df=DataMatrix(index=funds.index,data=funds.values, columns=['fund'])
    df2=DataMatrix(index=funds2.index,data=funds2.values,columns=['other'])
    df['other']=df2['other']
    corrcoef=numpy.corrcoef(funds.values[0:-1],funds2.values)
    html_file.write("<H3>Correlation=")
    print >>html_file, "%.2f\n" % corrcoef[0][1]
    html_file.write("<H3>\n")
    html_file.write("<BR/>\n\n")
    #montly returns
    mrets=tsu.monthly(funds)
    html_file.write("<H2>Monthly Returns</H2>\n")
    html_file.write("<TABLE CELLPADDING=10>\n")
    html_file.write("<TR>\n")
    html_file.write("<TH></TH>\n")
    month_names=du.getMonthNames()
    for name in month_names:
        html_file.write("<TH>"+str(name)+"</TH>\n")
    html_file.write("</TR>\n")
    # `i` walks the flat monthly-returns array across years.
    i=0
    for year in years:
        html_file.write("<TR>\n")
        html_file.write("<TH>"+str(year)+"</TH>\n")
        months=du.getMonths(funds,year)
        for month in months:
            html_file.write("<TD>\n")
            print >>html_file, "%.2f\n" % (mrets[i]*100)
            html_file.write("%</TD>\n")
            i+=1
        html_file.write("</TR>\n")
    html_file.write("</TABLE>\n")
    html_file.write("<BR/>\n\n")
    #fund value graph
    fundlist=[];
    fundlist.append(funds)
    fundlist.append(funds2)
    converter.fundsToPNG(fundlist,'funds.png')
    html_file.write("<IMG SRC=\'./funds.png\'/>\n")
    html_file.write("<BR/>\n\n")
    #end
    html_file.write("</CENTER></BODY>\n\n")
    html_file.write("</HTML>")
if __name__ == '__main__':
    # argv[1]: pickle file holding the fund time series;
    # argv[2] (optional): comma-separated benchmark symbols.
    input=open(sys.argv[1],"r")
    funds=cPickle.load(input)
    if(len(sys.argv)>2):
        input2=sys.argv[2]
        symbols=sys.argv[2].split(',')
        reportFunctionality(funds,symbols,'investors_report.html')
    else:
        # NOTE(review): passing 0 makes len(symbols) raise TypeError inside
        # reportFunctionality; an empty list looks intended -- confirm.
        reportFunctionality(funds,0,'investors_report.html')
| bsd-3-clause |
nschloe/matplotlib2tikz | test/test_rotated_labels.py | 1 | 3361 | # -*- coding: utf-8 -*-
#
import os
import tempfile
import pytest
from matplotlib import pyplot as plt
import matplotlib2tikz
def __plot():
    """Build a small red-dot scatter figure with horizontal x tick labels;
    returns the (figure, axes) pair for the tests to mutate."""
    fig, ax = plt.subplots()
    xs = [1, 2, 3, 4]
    ys = [1, 4, 9, 6]
    plt.plot(xs, ys, "ro")
    plt.xticks(xs, rotation="horizontal")
    return fig, ax
@pytest.mark.parametrize(
    "x_alignment, y_alignment, x_tick_label_width, y_tick_label_width, rotation",
    [
        (None, None, "1rem", "3rem", 90),
        (None, "center", "1rem", "3rem", 90),
        ("center", None, "1rem", "3rem", 90),
        ("center", "center", None, "3rem", 90),
        ("left", "left", None, "3rem", 90),
        ("right", "right", None, "3rem", 90),
        ("center", "center", "1rem", None, 90),
        ("left", "left", "2rem", None, 90),
        ("right", "right", "3rem", None, 90),
        ("center", "center", "1rem", "3rem", 90),
        ("left", "left", "2rem", "3rem", 90),
        ("right", "right", "3rem", "3rem", 90),
        ("left", "right", "2rem", "3rem", 90),
        ("right", "left", "3rem", "3rem", 90),
    ],
)
def test_rotated_labels_parameters(
    x_alignment, y_alignment, x_tick_label_width, y_tick_label_width, rotation
):
    """Smoke test: rotated/aligned tick labels export to TikZ without error
    for every combination of alignment and tick-label width options."""
    fig, _ = __plot()
    if x_alignment:
        plt.xticks(ha=x_alignment, rotation=rotation)
    if y_alignment:
        plt.yticks(ha=y_alignment, rotation=rotation)
    # convert to tikz file
    _, tmp_base = tempfile.mkstemp()
    tikz_file = tmp_base + "_tikz.tex"
    # Only pass the pgfplots width keys that were requested.
    extra_dict = {}
    if x_tick_label_width:
        extra_dict["x tick label text width"] = x_tick_label_width
    if y_tick_label_width:
        extra_dict["y tick label text width"] = y_tick_label_width
    matplotlib2tikz.save(
        tikz_file, figurewidth="7.5cm", extra_axis_parameters=extra_dict
    )
    # close figure
    plt.close(fig)
    # delete file
    os.unlink(tikz_file)
    return
@pytest.mark.parametrize(
    "x_tick_label_width, y_tick_label_width",
    [(None, None), ("1rem", None), (None, "3rem"), ("2rem", "3rem")],
)
def test_rotated_labels_parameters_different_values(
    x_tick_label_width, y_tick_label_width
):
    """Smoke test: per-tick rotation/alignment overrides (first label of
    each axis differs from the rest) still export to TikZ."""
    fig, ax = __plot()
    plt.xticks(ha="left", rotation=90)
    plt.yticks(ha="left", rotation=90)
    # Give the first label of each axis a different rotation/alignment.
    ax.xaxis.get_majorticklabels()[0].set_rotation(20)
    ax.yaxis.get_majorticklabels()[0].set_horizontalalignment("right")
    # convert to tikz file
    _, tmp_base = tempfile.mkstemp()
    tikz_file = tmp_base + "_tikz.tex"
    extra_dict = {}
    if x_tick_label_width:
        extra_dict["x tick label text width"] = x_tick_label_width
    if y_tick_label_width:
        extra_dict["y tick label text width"] = y_tick_label_width
    matplotlib2tikz.save(
        tikz_file, figurewidth="7.5cm", extra_axis_parameters=extra_dict
    )
    # close figure
    plt.close(fig)
    # delete file
    os.unlink(tikz_file)
    return
def test_rotated_labels_parameters_no_ticks():
    """Smoke test: axes whose ticks have been removed export to TikZ."""
    fig, ax = __plot()
    ax.xaxis.set_ticks([])
    plt.tick_params(axis="x", which="both", bottom="off", top="off")
    plt.tick_params(axis="y", which="both", left="off", right="off")
    # convert to tikz file
    _, tmp_base = tempfile.mkstemp()
    tikz_file = tmp_base + "_tikz.tex"
    matplotlib2tikz.save(tikz_file, figurewidth="7.5cm")
    # close figure
    plt.close(fig)
    # delete file
    os.unlink(tikz_file)
    return
| mit |
alekz112/statsmodels | statsmodels/iolib/tests/test_summary.py | 31 | 1535 | '''examples to check summary, not converted to tests yet
'''
from __future__ import print_function
if __name__ == '__main__':
    # Manual check (not automated tests): print summary() output for one
    # fitted model of each family -- OLS, Probit, RLM, GLM -- and reuse
    # their internal regression test fixtures as ready-made results.
    from statsmodels.regression.tests.test_regression import TestOLS
    #def mytest():
    aregression = TestOLS()
    TestOLS.setupClass()
    results = aregression.res1
    r_summary = str(results.summary_old())
    print(r_summary)
    olsres = results
    print('\n\n')
    r_summary = str(results.summary())
    print(r_summary)
    print('\n\n')
    from statsmodels.discrete.tests.test_discrete import TestProbitNewton
    aregression = TestProbitNewton()
    TestProbitNewton.setupClass()
    results = aregression.res1
    r_summary = str(results.summary())
    print(r_summary)
    print('\n\n')
    probres = results
    from statsmodels.robust.tests.test_rlm import TestHampel
    aregression = TestHampel()
    #TestHampel.setupClass()
    results = aregression.res1
    r_summary = str(results.summary())
    print(r_summary)
    rlmres = results
    print('\n\n')
    from statsmodels.genmod.tests.test_glm import TestGlmBinomial
    aregression = TestGlmBinomial()
    #TestGlmBinomial.setupClass()
    results = aregression.res1
    r_summary = str(results.summary())
    print(r_summary)
    #print(results.summary2(return_fmt='latex'))
    #print(results.summary2(return_fmt='csv'))
    # Also exercise the CSV rendering of a Summary object.
    smry = olsres.summary()
    print(smry.as_csv())
    # import matplotlib.pyplot as plt
    # plt.plot(rlmres.model.endog,'o')
    # plt.plot(rlmres.fittedvalues,'-')
    #
    # plt.show()
fadawar/election_wordcloud | generate.py | 1 | 2431 | #!/usr/bin/env python2
"""
Generate wordclouds with specific colors.
colors - list of rgb colors
file_name - path to file with source text
"""
from os import path
import random
import functools
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# Per-party colour palettes (RGB tuples) and lemmatised programme files.
# Exactly one colors/file_name pair is active at a time; the rest stay
# commented out -- uncomment a different pair to render another party.
# Smer
# colors = [(195, 27, 51)]
# file_name = 'programy/smer-lemma-simple.txt'
# Siet
# colors = [(235, 90, 84), (36, 142, 186)]
# file_name = 'programy/siet-lemma-simple.txt'
# KDH
# colors = [(214, 12, 26), (0, 80, 140)]
# file_name = 'programy/kdh-lemma-simple.txt'
# OLaNO
colors = [(178, 200, 0), (72, 81, 89)]
file_name = 'programy/olano-lemma-simple.txt'
# Most-Hid
# colors = [(245, 128, 37), (35, 31, 32)]
# file_name = 'programy/most-hid-lemma-simple.txt'
# SaS
# colors = [(166, 206, 56), (0, 132, 203), (0, 70, 125)]
# file_name = 'programy/sas-lemma-simple.txt'
# Read the whole text.
d = path.dirname(__file__)
text = open(path.join(d, file_name)).read()
def crop_rgb(num):
    '''Clamp a colour component to the valid RGB range [0, 255].'''
    return max(0, min(255, num))
def random_similar_color(rgb_colors, *args, **keywords):
    '''Pick a random colour from `rgb_colors` and jitter its dominant
    (largest) channel by up to +/-60, clamped to [0, 255].

    Extra *args/**keywords are accepted (and ignored) so this can serve
    as a wordcloud `color_func` via functools.partial.
    '''
    jitter = 60
    r, g, b = random.choice(rgb_colors)
    brightest = max(r, g, b)
    # Consume exactly one randint so seeded behaviour matches the original.
    shifted = crop_rgb(brightest + random.randint(-jitter, jitter))
    if brightest == r:
        return shifted, g, b
    elif brightest == g:
        return r, shifted, b
    return r, g, shifted
# function returns random color but similar to colors from parameter
color_func = functools.partial(random_similar_color, colors)
# Ignore single characters, plus list of words provided in separate file
chars = [chr(i) for i in range(ord('a'), ord('z') + 1)]
words = []
with open('slovak-stopwords.txt', 'r') as f:
    for line in f:
        words.append(line.strip())
ignorewords = chars + words
# Generate a word cloud image
# take relative word frequencies into account, lower max_font_size
wordcloud = WordCloud(stopwords=ignorewords,
                      width=1280, height=1280,
                      background_color='white',
                      color_func=color_func,
                      # font_path='/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-R.ttf',
                      prefer_horizontal=0.9,).generate(text)
# Display the rendered cloud with matplotlib.
plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
# The pil way (if you don't have matplotlib)
# image = wordcloud.to_image()
# image.show()
brodoll/sms-tools | lectures/08-Sound-transformations/plots-code/FFT-filtering.py | 21 | 1723 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
# Load the input sound and cut one N-sample frame starting at t = 1 s.
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
N = 2048
# NOTE(review): start is a float (1.0*fs); float slice indices raise
# TypeError on modern numpy -- int(1.0*fs) would be needed there. Confirm
# the numpy version this was written against.
start = 1.0*fs
x1 = x[start:start+N]
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N)/float(fs), x1*np.hamming(N), 'b', lw=1.5)
plt.axis([0, N/float(fs), min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x (orchestra.wav)')
# Analyse one Hamming-windowed frame: mX = magnitude (dB), pX = phase.
mX, pX = DFT.dftAnal(x1, np.hamming(N), N)
# Band-pass filter: a Hann-shaped bump from -60 dB up to 0 dB, spanning
# 500 Hz to 4500 Hz; everything else attenuated by 60 dB.
startBin = int(N*500.0/fs)
nBins = int(N*4000.0/fs)
bandpass = (np.hanning(nBins) * 60.0) - 60
filt = np.zeros(mX.size)-60
filt[startBin:startBin+nBins] = bandpass
# Filtering in the log-magnitude domain: addition == spectral product.
mY = mX + filt
plt.subplot(323)
plt.plot(fs*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5, label = 'mX')
plt.plot(fs*np.arange(mX.size)/float(mX.size), filt+max(mX), 'k', lw=1.5, label='filter')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-90,max(mX)+2])
plt.title('mX + filter')
plt.subplot(325)
plt.plot(fs*np.arange(pX.size)/float(pX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX),8])
plt.title('pX')
# Resynthesise with the filtered magnitude and the original phase.
y = DFT.dftSynth(mY, pX, N)*sum(np.hamming(N))
mY1, pY = DFT.dftAnal(y, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(N)/float(fs), y, 'b')
plt.axis([0, float(N)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1, 'r', lw=1.5)
plt.axis([0,fs/4.0,-90,max(mY1)+2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY.size)/float(pY.size), pY, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY),8])
plt.title('pY')
plt.tight_layout()
plt.savefig('FFT-filtering.png')
plt.show()
| agpl-3.0 |
anurag313/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
    """Known hash values for an int key; `positive` reinterprets the
    32-bit result as unsigned but does not change the bit pattern."""
    assert_equal(murmurhash3_32(3), 847579505)
    assert_equal(murmurhash3_32(3, seed=0), 847579505)
    assert_equal(murmurhash3_32(3, seed=42), -1823081949)
    assert_equal(murmurhash3_32(3, positive=False), 847579505)
    assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
    assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
    assert_equal(murmurhash3_32(3, positive=True), 847579505)
    assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
    assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
    """Hashing an int array elementwise matches hashing each int scalar."""
    rng = np.random.RandomState(42)
    keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
    keys = keys.reshape((3, 2, 1))
    for seed in [0, 42]:
        expected = np.array([murmurhash3_32(int(k), seed)
                             for k in keys.flat])
        expected = expected.reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed), expected)
    for seed in [0, 42]:
        expected = np.array([murmurhash3_32(k, seed, positive=True)
                             for k in keys.flat])
        expected = expected.reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed, positive=True),
                           expected)
def test_mmhash3_bytes():
    """Known hash values for a bytes key."""
    assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
    assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
    assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
    assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
    """Unicode keys hash identically to their encoded bytes counterparts."""
    assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
    assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
    assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
    assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
    """Strings of 0..99 spaces must all hash to distinct values."""
    previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
def test_uniform_distribution():
    """Hash values modulo n_bins should be approximately uniform."""
    n_bins, n_samples = 10, 100000
    bins = np.zeros(n_bins, dtype=np.float)
    for i in range(n_samples):
        bins[murmurhash3_32(i, positive=True) % n_bins] += 1
    means = bins / n_samples
    expected = np.ones(n_bins) / n_bins
    assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
frank-tancf/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
Y Y N Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
# Plot colours indexed by the bitmask of present labels (1, 2, 4); index 0
# ('!') is unused because allow_unlabeled=False below.
COLORS = np.array(['!',
                   '#FF3333',  # red
                   '#0198E1',  # blue
                   '#BF5FFF',  # purple
                   '#FCD116',  # yellow
                   '#FF7216',  # orange
                   '#4DBD33',  # green
                   '#87421F'   # brown
                   ])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
    """Draw one random multilabel dataset on `ax`; stars mark each class's
    expected sample (size proportional to p_c**2). Returns (p_c, p_w_c)."""
    X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
                                   n_classes=n_classes, n_labels=n_labels,
                                   length=length, allow_unlabeled=False,
                                   return_distributions=True,
                                   random_state=RANDOM_SEED)
    # Colour each sample by the bitmask of its label vector.
    ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
                                                    ).sum(axis=1)),
               marker='.')
    ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
               marker='*', linewidth=.5, edgecolor='black',
               s=20 + 1500 * p_c ** 2,
               color=COLORS.take([1, 2, 4]))
    ax.set_xlabel('Feature 0 count')
    return p_c, p_w_c
# Two side-by-side panels differing only in n_labels.
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
    print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
anne-urai/serialDDM | graphicalModels/examples/classic.py | 7 | 1057 | """
The Quintessential PGM
======================
This is a demonstration of a very common structure found in graphical models.
It has been rendered using Daft's default settings for all the parameters
and it shows off how much beauty is baked in by default.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
# Instantiate the PGM.
pgm = daft.PGM([2.3, 2.05], origin=[0.3, 0.3])
# Hierarchical parameters.
pgm.add_node(daft.Node("alpha", r"$\alpha$", 0.5, 2, fixed=True))
pgm.add_node(daft.Node("beta", r"$\beta$", 1.5, 2))
# Latent variable.
pgm.add_node(daft.Node("w", r"$w_n$", 1, 1))
# Data.
pgm.add_node(daft.Node("x", r"$x_n$", 2, 1, observed=True))
# Add in the edges.
pgm.add_edge("alpha", "beta")
pgm.add_edge("beta", "w")
pgm.add_edge("w", "x")
pgm.add_edge("beta", "x")
# And a plate.
pgm.add_plate(daft.Plate([0.5, 0.5, 2, 1], label=r"$n = 1, \cdots, N$",
shift=-0.1))
# Render and save.
pgm.render()
pgm.figure.savefig("classic.pdf")
pgm.figure.savefig("classic.png", dpi=150)
| mit |
jfitzgerald79/gis-1 | sjoin.py | 2 | 4016 | import geopandas as gpd
from geopandas import tools
import numpy as np
import pandas as pd
import rtree
from shapely import prepared
# Load the two layers to join: 2014 parcel polygons and the 2013 Arizona
# census tracts. Paths are machine-specific (example/driver data).
r_df = gpd.GeoDataFrame.from_file('/home/akagi/GIS/2014_All_Parcel_Shapefiles/2014_Book400.shp')
l_df = gpd.GeoDataFrame.from_file('/home/akagi/GIS/census/cb_2013_04_tract_500k/cb_2013_04_tract_500k.shp')
def fast_sjoin(left_df, right_df, how='left', op='intersects', crs_convert=True, lsuffix='left', rsuffix='right', **kwargs):
    """Spatial join of two GeoDataFrames.

    left_df, right_df are GeoDataFrames
    how: type of join
        left -> use keys from left_df; retain only left_df geometry column
        right -> use keys from right_df; retain only right_df geometry column
        inner -> use intersection of keys from both dfs; retain only left_df geometry column
    op: binary predicate {'intersects', 'contains', 'within'}
        see http://toblerity.org/shapely/manual.html#binary-predicates
    use_sindex : Use the spatial index to speed up operation? Default is True
    kwargs: passed to op method
    """
    # CHECK VALIDITY OF JOIN TYPE
    allowed_hows = ['left', 'right', 'inner']
    if how not in allowed_hows:
        raise ValueError("`how` was \"%s\" but is expected to be in %s" % \
            (how, allowed_hows))
    # CHECK VALIDITY OF PREDICATE OPERATION
    allowed_ops = ['contains', 'within', 'intersects']
    if op not in allowed_ops:
        raise ValueError("`op` was \"%s\" but is expected to be in %s" % \
            (op, allowed_ops))
    # IF WITHIN, SWAP NAMES
    if op == "within":
        # within implemented as the inverse of contains; swap names
        left_df, right_df = right_df, left_df
    # CONVERT CRS IF NOT EQUAL
    # (reprojects the smaller frame, measured in bytes, onto the larger's CRS)
    if left_df.crs != right_df.crs:
        print 'Warning: CRS does not match!'
        if crs_convert == True:
            print 'Converting CRS...'
            if left_df.values.nbytes >= right_df.values.nbytes:
                right_df = right_df.to_crs(left_df.crs)
            elif left_df.values.nbytes < right_df.values.nbytes:
                left_df = left_df.to_crs(right_df.crs)
    # CONSTRUCT SPATIAL INDEX FOR RIGHT DATAFRAME
    tree_idx = rtree.index.Index()
    right_df_bounds = right_df['geometry'].apply(lambda x: x.bounds)
    for i in right_df_bounds.index:
        tree_idx.insert(i, right_df_bounds[i])
    # FIND INTERSECTION OF SPATIAL INDEX
    # idxmatch: for each left geometry, the list of right-index candidates
    # whose bounding boxes intersect its bounding box (empties dropped).
    idxmatch = left_df['geometry'].apply(lambda x: x.bounds).apply(lambda x: list(tree_idx.intersection(x)))
    idxmatch = idxmatch[idxmatch.str.len() > 0]
    # Flatten candidate pairs: r_idx[i] is matched against l_idx[i].
    r_idx = np.concatenate(idxmatch.values)
    l_idx = np.concatenate((idxmatch.str.len()*pd.Series([[i] for i in idxmatch.index], index=idxmatch.index)).values)
    # VECTORIZE PREDICATE OPERATIONS
    def find_intersects(a1, a2):
        return a1.intersects(a2)
    def find_contains(a1, a2):
        return a1.contains(a2)
    predicate_d = {'intersects': find_intersects, 'contains': find_contains, 'within': find_contains}
    check_predicates = np.vectorize(predicate_d[op])
    # CHECK PREDICATES
    # Prepared geometries speed up the repeated predicate evaluations.
    result = pd.DataFrame(np.column_stack([l_idx, r_idx, check_predicates(left_df['geometry'].apply(lambda x: prepared.prep(x)).values[l_idx], right_df['geometry'].values[r_idx])]))
    result.columns = ['index_%s' % lsuffix, 'index_%s' % rsuffix, 'match_bool']
    result = pd.DataFrame(result[result['match_bool']==1].set_index('index_%s' % lsuffix)['index_%s' % rsuffix])
    # IF 'WITHIN', SWAP NAMES AGAIN
    if op == "within":
        # within implemented as the inverse of contains; swap names
        left_df, right_df = right_df, left_df
        # The left/right roles of the pair indices must be swapped back too.
        result = result.reset_index().rename(columns={'index_%s' % (lsuffix): 'index_%s' % (rsuffix), 'index_%s' % (rsuffix): 'index_%s' % (lsuffix)}).set_index('index_left').sort_index()
    # APPLY JOIN
    return left_df.merge(result, left_index=True, right_index=True).merge(right_df, left_on='index_%s' % rsuffix, right_index=True, how=how, suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
| gpl-2.0 |
perimosocordiae/scipy | scipy/linalg/basic.py | 4 | 67094 | #
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from warnings import warn
import numpy as np
from numpy import atleast_1d, atleast_2d
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied, LinAlgWarning
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
# Public API of this module (re-exported through ``scipy.linalg``).
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
           'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
           'pinv', 'pinv2', 'pinvh', 'matrix_balance', 'matmul_toeplitz']
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warn('Ill-conditioned matrix (rcond={:.6g}): '
'result may not be accurate.'.format(rcond),
LinAlgWarning, stacklevel=3)
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
          overwrite_b=False, debug=None, check_finite=True, assume_a='gen',
          transposed=False):
    """
    Solves the linear equation set ``a * x = b`` for the unknown ``x``
    for square ``a`` matrix.

    If the data matrix is known to be a particular type then supplying the
    corresponding string to ``assume_a`` key chooses the dedicated solver.
    The available options are

    =================== ========
     generic matrix      'gen'
     symmetric           'sym'
     hermitian           'her'
     positive definite   'pos'
    =================== ========

    If omitted, ``'gen'`` is the default structure.

    The datatype of the arrays define which solver is called regardless
    of the values. In other words, even when the complex array entries have
    precisely zero imaginary parts, the complex solver will be called based
    on the data type of the array.

    Parameters
    ----------
    a : (N, N) array_like
        Square input data
    b : (N, NRHS) array_like
        Input data for the right hand side.
    sym_pos : bool, optional
        Assume `a` is symmetric and positive definite. This key is deprecated
        and assume_a = 'pos' keyword is recommended instead. The functionality
        is the same. It will be removed in the future.
    lower : bool, optional
        If True, only the data contained in the lower triangle of `a`. Default
        is to use upper triangle. (ignored for ``'gen'``)
    overwrite_a : bool, optional
        Allow overwriting data in `a` (may enhance performance).
        Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance).
        Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    assume_a : str, optional
        Valid entries are explained above.
    transposed: bool, optional
        If True, ``a^T x = b`` for real matrices, raises `NotImplementedError`
        for complex matrices (only for True).

    Returns
    -------
    x : (N, NRHS) ndarray
        The solution array.

    Raises
    ------
    ValueError
        If size mismatches detected or input a is not square.
    LinAlgError
        If the matrix is singular.
    LinAlgWarning
        If an ill-conditioned input a is detected.
    NotImplementedError
        If transposed is True and input a is a complex matrix.

    Examples
    --------
    Given `a` and `b`, solve for `x`:

    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
    >>> b = np.array([2, 4, -1])
    >>> from scipy import linalg
    >>> x = linalg.solve(a, b)
    >>> x
    array([ 2., -2.,  9.])
    >>> np.dot(a, x) == b
    array([ True,  True,  True], dtype=bool)

    Notes
    -----
    If the input b matrix is a 1-D array with N elements, when supplied
    together with an NxN input a, it is assumed as a valid column vector
    despite the apparent size mismatch. This is compatible with the
    numpy.dot() behavior and the returned result is still 1-D array.

    The generic, symmetric, Hermitian and positive definite solutions are
    obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
    LAPACK respectively.
    """
    # Flags for 1-D or N-D right-hand side
    b_is_1D = False

    # Sanitize inputs (optional finiteness check) and promote `a` to 2-D,
    # `b` to at least 1-D.
    a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
    b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
    n = a1.shape[0]

    # If validation already produced a fresh copy, it is safe to overwrite.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)

    if a1.shape[0] != a1.shape[1]:
        raise ValueError('Input a needs to be a square matrix.')

    if n != b1.shape[0]:
        # Last chance to catch 1x1 scalar a and 1-D b arrays
        if not (n == 1 and b1.size != 0):
            raise ValueError('Input b has to have same number of rows as '
                             'input a')

    # accommodate empty arrays
    if b1.size == 0:
        return np.asfortranarray(b1.copy())

    # regularize 1-D b arrays to 2D
    if b1.ndim == 1:
        if n == 1:
            b1 = b1[None, :]
        else:
            b1 = b1[:, None]
        b_is_1D = True

    # Backwards compatibility - old keyword.
    if sym_pos:
        assume_a = 'pos'

    if assume_a not in ('gen', 'sym', 'her', 'pos'):
        raise ValueError('{} is not a recognized matrix structure'
                         ''.format(assume_a))

    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)

    # Get the correct lamch function.
    # The LAMCH functions only exists for S and D
    # So for complex values we have to convert to real/double.
    if a1.dtype.char in 'fF':  # single precision
        lamch = get_lapack_funcs('lamch', dtype='f')
    else:
        lamch = get_lapack_funcs('lamch', dtype='d')

    # Currently we do not have the other forms of the norm calculators
    # lansy, lanpo, lanhe.
    # However, in any case they only reduce computations slightly...
    lange = get_lapack_funcs('lange', (a1,))

    # Since the I-norm and 1-norm are the same for symmetric matrices
    # we can collect them all in this one call
    # Note however, that when issuing 'gen' and form!='none', then
    # the I-norm should be used
    if transposed:
        trans = 1
        norm = 'I'
        if np.iscomplexobj(a1):
            raise NotImplementedError('scipy.linalg.solve can currently '
                                      'not solve a^T x = b or a^H x = b '
                                      'for complex matrices.')
    else:
        trans = 0
        norm = '1'

    # Matrix norm feeds the ?_CON condition-number estimators below.
    anorm = lange(norm, a1)

    # Generalized case 'gesv'
    if assume_a == 'gen':
        gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
                                               (a1, b1))
        # LU factorize, solve from the factors, then estimate rcond.
        lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
        _solve_check(n, info)
        x, info = getrs(lu, ipvt, b1,
                        trans=trans, overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = gecon(lu, anorm, norm=norm)
    # Hermitian case 'hesv'
    elif assume_a == 'her':
        hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
                                                 'hesv_lwork'), (a1, b1))
        lwork = _compute_lwork(hesv_lw, n, lower)
        lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = hecon(lu, ipvt, anorm)
    # Symmetric case 'sysv'
    elif assume_a == 'sym':
        sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
                                                 'sysv_lwork'), (a1, b1))
        lwork = _compute_lwork(sysv_lw, n, lower)
        lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = sycon(lu, ipvt, anorm)
    # Positive definite case 'posv'
    else:
        pocon, posv = get_lapack_funcs(('pocon', 'posv'),
                                       (a1, b1))
        lu, x, info = posv(a1, b1, lower=lower,
                           overwrite_a=overwrite_a,
                           overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = pocon(lu, anorm)

    # Final check also inspects rcond against machine precision and may
    # emit a LinAlgWarning for an ill-conditioned system.
    _solve_check(n, info, lamch, rcond)

    # Restore the 1-D shape if `b` came in as a vector.
    if b_is_1D:
        x = x.ravel()

    return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
                     overwrite_b=False, debug=None, check_finite=True):
    """
    Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.

    Parameters
    ----------
    a : (M, M) array_like
        A triangular matrix
    b : (M,) or (M, N) array_like
        Right-hand side matrix in `a x = b`
    lower : bool, optional
        Use only data contained in the lower triangle of `a`.
        Default is to use upper triangle.
    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
        Type of system to solve:

        ========  =========
        trans     system
        ========  =========
        0 or 'N'  a x  = b
        1 or 'T'  a^T x = b
        2 or 'C'  a^H x = b
        ========  =========
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1 and
        will not be referenced.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system `a x = b`. Shape of return matches `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular

    Notes
    -----
    .. versionadded:: 0.9.0

    Examples
    --------
    Solve the lower triangular system a x = b, where::

             [3  0  0  0]       [4]
        a =  [2  1  0  0]   b = [2]
             [1  0  1  0]       [4]
             [1  1  1  1]       [2]

    >>> from scipy.linalg import solve_triangular
    >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
    >>> b = np.array([4, 2, 4, 2])
    >>> x = solve_triangular(a, b, lower=True)
    >>> x
    array([ 1.33333333, -0.66666667,  2.66666667, -1.33333333])
    >>> a.dot(x)  # Check the result
    array([ 4.,  2.,  4.,  2.])
    """
    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in the future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)

    # Validate inputs: `a` must be square and conformable with `b`.
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    if a1.shape[0] != b1.shape[0]:
        raise ValueError('shapes of a {} and b {} are incompatible'
                         .format(a1.shape, b1.shape))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if debug:
        print('solve:overwrite_b=', overwrite_b)
    # Map the string forms 'N'/'T'/'C' onto the numeric codes LAPACK expects.
    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
    trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
    if a1.flags.f_contiguous or trans == 2:
        # Fortran-ordered data (or a conjugate-transpose solve) can be handed
        # to ?trtrs directly.
        x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
                        trans=trans, unitdiag=unit_diagonal)
    else:
        # transposed system is solved since trtrs expects Fortran ordering
        # (solving a.T with flipped `lower`/`trans` is equivalent and avoids
        # a copy of C-ordered input).
        x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=not lower,
                        trans=not trans, unitdiag=unit_diagonal)

    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
                          (info-1))
    raise ValueError('illegal value in %dth argument of internal trtrs' %
                     (-info))
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
                 debug=None, check_finite=True):
    """
    Solve the equation a x = b for x, assuming a is banded matrix.

    The matrix a is stored in `ab` using the matrix diagonal ordered form::

        ab[u + i - j, j] == a[i,j]

    Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::

        *    a01  a12  a23  a34  a45
        a00  a11  a22  a33  a44  a55
        a10  a21  a32  a43  a54   *
        a20  a31  a42  a53   *    *

    Parameters
    ----------
    (l, u) : (integer, integer)
        Number of non-zero lower and upper diagonals
    ab : (`l` + `u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Returned shape depends on the
        shape of `b`.

    Examples
    --------
    Solve the banded system a x = b, where::

            [5  2 -1  0  0]       [0]
            [1  4  2 -1  0]       [1]
        a = [0  1  3  2 -1]   b = [2]
            [0  0  1  2  2]       [2]
            [0  0  0  1  1]       [3]

    There is one nonzero diagonal below the main diagonal (l = 1), and
    two above (u = 2). The diagonal banded form of the matrix is::

             [*  * -1 -1 -1]
        ab = [*  2  2  2  2]
             [5  4  3  2  1]
             [1  1  1  1  *]

    >>> from scipy.linalg import solve_banded
    >>> ab = np.array([[0, 0, -1, -1, -1],
    ...                [0, 2, 2, 2, 2],
    ...                [5, 4, 3, 2, 1],
    ...                [1, 1, 1, 1, 0]])
    >>> b = np.array([0, 1, 2, 2, 3])
    >>> x = solve_banded((1, 2), ab, b)
    >>> x
    array([-2.37288136,  3.93220339, -4.        ,  4.3559322 , -1.3559322 ])
    """
    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in the future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)

    a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
    b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    (nlower, nupper) = l_and_u
    if nlower + nupper + 1 != a1.shape[0]:
        raise ValueError("invalid values for the number of lower and upper "
                         "diagonals: l+u+1 (%d) does not equal ab.shape[0] "
                         "(%d)" % (nlower + nupper + 1, ab.shape[0]))

    overwrite_b = overwrite_b or _datacopied(b1, b)
    if a1.shape[-1] == 1:
        # 1x1 system: reduces to dividing by the single main-diagonal entry,
        # which is stored at ab[1, 0] in diagonal-ordered form.
        b2 = np.array(b1, copy=(not overwrite_b))
        b2 /= a1[1, 0]
        return b2
    if nlower == nupper == 1:
        # Tridiagonal case: the dedicated ?gtsv driver is used instead of the
        # general banded solver.
        overwrite_ab = overwrite_ab or _datacopied(a1, ab)
        gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
        du = a1[0, 1:]
        d = a1[1, :]
        dl = a1[2, :-1]
        du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
                                   overwrite_ab, overwrite_b)
    else:
        # General banded case: ?gbsv needs `nlower` extra storage rows above
        # the band for LU fill-in, hence the zero-padded a2.
        gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
        a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
        a2[nlower:, :] = a1
        lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
                                overwrite_b=overwrite_b)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal '
                     'gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
                  check_finite=True):
    """
    Solve equation a x = b. a is Hermitian positive-definite banded matrix.

    The matrix a is stored in `ab` either in lower diagonal or upper
    diagonal ordered form:

        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)

    Example of `ab` (shape of a is (6, 6), `u` =2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    ab : (`u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Shape of return matches shape
        of `b`.

    Examples
    --------
    Solve the banded system A x = b, where::

            [ 4  2 -1  0  0  0]       [1]
            [ 2  5  2 -1  0  0]       [2]
        A = [-1  2  6  2 -1  0]   b = [2]
            [ 0 -1  2  7  2 -1]       [3]
            [ 0  0 -1  2  8  2]       [3]
            [ 0  0  0 -1  2  9]       [3]

    >>> from scipy.linalg import solveh_banded

    `ab` contains the main diagonal and the nonzero diagonals below the
    main diagonal. That is, we use the lower form:

    >>> ab = np.array([[ 4,  5,  6,  7, 8, 9],
    ...                [ 2,  2,  2,  2, 2, 0],
    ...                [-1, -1, -1, -1, 0, 0]])
    >>> b = np.array([1, 2, 2, 3, 3, 3])
    >>> x = solveh_banded(ab, b, lower=True)
    >>> x
    array([ 0.03431373,  0.45938375,  0.05602241,  0.47759104,  0.17577031,
            0.34733894])

    Solve the Hermitian banded system H x = b, where::

            [ 8   2-1j   0     0  ]        [ 1  ]
        H = [2+1j  5     1j    0  ]    b = [1+1j]
            [ 0   -1j    9   -2-1j]        [1-2j]
            [ 0    0   -2+1j   6  ]        [ 0  ]

    In this example, we put the upper diagonals in the array `hb`:

    >>> hb = np.array([[0, 2-1j, 1j, -2-1j],
    ...                [8,  5,    9,   6  ]])
    >>> b = np.array([1, 1+1j, 1-2j, 0])
    >>> x = solveh_banded(hb, b)
    >>> x
    array([ 0.07318536-0.02939412j,  0.11877624+0.17696461j,
            0.10077984-0.23035393j, -0.00479904-0.09358128j])
    """
    a1 = _asarray_validated(ab, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")

    overwrite_b = overwrite_b or _datacopied(b1, b)
    overwrite_ab = overwrite_ab or _datacopied(a1, ab)

    if a1.shape[0] == 2:
        # Only one off-diagonal stored: use the specialized tridiagonal
        # positive-definite driver ?ptsv (diagonal d, off-diagonal e).
        ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
        if lower:
            d = a1[0, :].real
            e = a1[1, :-1]
        else:
            d = a1[1, :].real
            # Upper form stores the superdiagonal; conjugate to obtain the
            # subdiagonal expected by ptsv for the Hermitian case.
            e = a1[0, 1:].conj()
        d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
                              overwrite_b)
    else:
        # General banded positive-definite (Cholesky-based) driver.
        pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
        c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
                          overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("%dth leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal '
                         'pbsv' % -info)
    return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
    """Solve a Toeplitz system using Levinson Recursion

    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
        actual shape of ``c``, it will be converted to a 1-D array. If not
        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
        of ``r``, it will be converted to a 1-D array.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (result entirely NaNs) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``T x = b``. Shape of return matches shape
        of `b`.

    See Also
    --------
    toeplitz : Toeplitz matrix

    Notes
    -----
    The solution is computed using Levinson-Durbin recursion, which is faster
    than generic least-squares methods, but can be less numerically stable.

    Examples
    --------
    Solve the Toeplitz system T x = b, where::

            [ 1 -1 -2 -3]       [1]
        T = [ 3  1 -1 -2]   b = [2]
            [ 6  3  1 -1]       [2]
            [10  6  3  1]       [5]

    To specify the Toeplitz matrix, only the first column and the first
    row are needed.

    >>> c = np.array([1, 3, 6, 10])    # First column of T
    >>> r = np.array([1, -1, -2, -3])  # First row of T
    >>> b = np.array([1, 2, 2, 5])

    >>> from scipy.linalg import solve_toeplitz, toeplitz
    >>> x = solve_toeplitz((c, r), b)
    >>> x
    array([ 1.66666667, -1.        , -2.66666667,  2.33333333])

    Check the result by creating the full Toeplitz matrix and
    multiplying it by `x`. We should get `b`.

    >>> T = toeplitz(c, r)
    >>> T.dot(x)
    array([ 1.,  2.,  2.,  5.])
    """
    # If numerical stability of this algorithm is a problem, a future
    # developer might consider implementing other O(N^2) Toeplitz solvers,
    # such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.

    # Shared validation helper (also used by matmul_toeplitz); returns the
    # normalized first row/column and the original shape of b.
    r, c, b, dtype, b_shape = _validate_args_for_toeplitz_ops(
        c_or_cr, b, check_finite, keep_b_shape=True)

    # Form a 1-D array of values to be used in the matrix, containing a
    # reversed copy of r[1:], followed by c.
    vals = np.concatenate((r[-1:0:-1], c))
    if b is None:
        # The shared validator tolerates b=None for other callers; here a
        # right-hand side is mandatory.
        raise ValueError('illegal value, `b` is a required argument')

    if b.ndim == 1:
        x, _ = levinson(vals, np.ascontiguousarray(b))
    else:
        # Solve column-by-column; levinson handles one RHS vector at a time.
        x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0]
                             for i in range(b.shape[1])])
        x = x.reshape(*b_shape)

    return x
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
                    caxis=-1, baxis=0, outaxis=0):
    """Solve C x = b for x, where C is a circulant matrix.

    `C` is the circulant matrix associated with the vector `c`.

    The system is solved by doing division in Fourier space. The
    calculation is::

        x = ifft(fft(b) / fft(c))

    where `fft` and `ifft` are the fast Fourier transform and its inverse,
    respectively. For a large vector `c`, this is *much* faster than
    solving the system with the full circulant matrix.

    Parameters
    ----------
    c : array_like
        The coefficients of the circulant matrix.
    b : array_like
        Right-hand side matrix in ``a x = b``.
    singular : str, optional
        This argument controls how a near singular circulant matrix is
        handled.  If `singular` is "raise" and the circulant matrix is
        near singular, a `LinAlgError` is raised.  If `singular` is
        "lstsq", the least squares solution is returned.  Default is "raise".
    tol : float, optional
        If any eigenvalue of the circulant matrix has an absolute value
        that is less than or equal to `tol`, the matrix is considered to be
        near singular.  If not given, `tol` is set to::

            tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps

        where `abs_eigs` is the array of absolute values of the eigenvalues
        of the circulant matrix.
    caxis : int
        When `c` has dimension greater than 1, it is viewed as a collection
        of circulant vectors.  In this case, `caxis` is the axis of `c` that
        holds the vectors of circulant coefficients.
    baxis : int
        When `b` has dimension greater than 1, it is viewed as a collection
        of vectors.  In this case, `baxis` is the axis of `b` that holds the
        right-hand side vectors.
    outaxis : int
        When `c` or `b` are multidimensional, the value returned by
        `solve_circulant` is multidimensional.  In this case, `outaxis` is
        the axis of the result that holds the solution vectors.

    Returns
    -------
    x : ndarray
        Solution to the system ``C x = b``.

    Raises
    ------
    LinAlgError
        If the circulant matrix associated with `c` is near singular.

    See Also
    --------
    circulant : circulant matrix

    Notes
    -----
    For a 1-D vector `c` with length `m`, and an array `b`
    with shape ``(m, ...)``,

        solve_circulant(c, b)

    returns the same result as

        solve(circulant(c), b)

    where `solve` and `circulant` are from `scipy.linalg`.

    .. versionadded:: 0.16.0

    Examples
    --------
    >>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
    >>> c = np.array([2, 2, 4])
    >>> b = np.array([1, 2, 3])
    >>> solve_circulant(c, b)
    array([ 0.75, -0.25,  0.25])

    Compare that result to solving the system with `scipy.linalg.solve`:

    >>> solve(circulant(c), b)
    array([ 0.75, -0.25,  0.25])

    A singular example:

    >>> c = np.array([1, 1, 0, 0])
    >>> b = np.array([1, 2, 3, 4])

    Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`.  For the
    least square solution, use the option ``singular='lstsq'``:

    >>> solve_circulant(c, b, singular='lstsq')
    array([ 0.25,  1.25,  2.25,  1.25])

    Compare to `scipy.linalg.lstsq`:

    >>> x, resid, rnk, s = lstsq(circulant(c), b)
    >>> x
    array([ 0.25,  1.25,  2.25,  1.25])

    A broadcasting example:

    Suppose we have the vectors of two circulant matrices stored in an array
    with shape (2, 5), and three `b` vectors stored in an array with shape
    (3, 5).  For example,

    >>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
    >>> b = np.arange(15).reshape(-1, 5)

    We want to solve all combinations of circulant matrices and `b` vectors,
    with the result stored in an array with shape (2, 3, 5).  When we
    disregard the axes of `c` and `b` that hold the vectors of coefficients,
    the shapes of the collections are (2,) and (3,), respectively, which are
    not compatible for broadcasting.  To have a broadcast result with shape
    (2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
    shape (2, 1, 5).  The last dimension holds the coefficients of the
    circulant matrices, so when we call `solve_circulant`, we can use the
    default ``caxis=-1``.  The coefficients of the `b` vectors are in the last
    dimension of the array `b`, so we use ``baxis=-1``.  If we use the
    default `outaxis`, the result will have shape (5, 2, 3), so we'll use
    ``outaxis=-1`` to put the solution vectors in the last dimension.

    >>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
    >>> x.shape
    (2, 3, 5)
    >>> np.set_printoptions(precision=3)  # For compact output of numbers.
    >>> x
    array([[[-0.118,  0.22 ,  1.277, -0.142,  0.302],
            [ 0.651,  0.989,  2.046,  0.627,  1.072],
            [ 1.42 ,  1.758,  2.816,  1.396,  1.841]],
           [[ 0.401,  0.304,  0.694, -0.867,  0.377],
            [ 0.856,  0.758,  1.149, -0.412,  0.831],
            [ 1.31 ,  1.213,  1.603,  0.042,  1.286]]])

    Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):

    >>> solve_circulant(c[1], b[1, :])
    array([ 0.856,  0.758,  1.149, -0.412,  0.831])
    """
    c = np.atleast_1d(c)
    nc = _get_axis_len("c", c, caxis)
    b = np.atleast_1d(b)
    nb = _get_axis_len("b", b, baxis)
    if nc != nb:
        raise ValueError('Shapes of c {} and b {} are incompatible'
                         .format(c.shape, b.shape))

    # The eigenvalues of a circulant matrix are the DFT of its defining
    # vector; move the coefficient axis to the end before transforming.
    fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
    abs_fc = np.abs(fc)
    if tol is None:
        # This is the same tolerance as used in np.linalg.matrix_rank.
        tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
        if tol.shape != ():
            # Append a trailing length-1 axis so tol broadcasts against fc.
            tol.shape = tol.shape + (1,)
        else:
            tol = np.atleast_1d(tol)

    near_zeros = abs_fc <= tol
    is_near_singular = np.any(near_zeros)
    if is_near_singular:
        if singular == 'raise':
            raise LinAlgError("near singular circulant matrix.")
        else:
            # Replace the small values with 1 to avoid errors in the
            # division fb/fc below.
            fc[near_zeros] = 1

    fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)

    # Solve in Fourier space by element-wise division.
    q = fb / fc

    if is_near_singular:
        # `near_zeros` is a boolean array, same shape as `c`, that is
        # True where `fc` is (near) zero.  `q` is the broadcasted result
        # of fb / fc, so to set the values of `q` to 0 where `fc` is near
        # zero, we use a mask that is the broadcast result of an array
        # of True values shaped like `b` with `near_zeros`.
        mask = np.ones_like(b, dtype=bool) & near_zeros
        q[mask] = 0

    x = np.fft.ifft(q, axis=-1)
    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
        # Both inputs real -> the solution is real up to round-off.
        x = x.real
    if outaxis != -1:
        # Move the solution axis from the end to the requested position.
        x = np.rollaxis(x, -1, outaxis)
    return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
    """
    Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is singular.
    ValueError
        If `a` is not square, or not 2D.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    >>> np.dot(a, linalg.inv(a))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    # XXX: I found no advantage or disadvantage of using finv.
    # finv, = get_flinalg_funcs(('inv',),(a1,))
    # if finv is not None:
    #     a_inv,info = finv(a1,overwrite_a=overwrite_a)
    #     if info==0:
    #         return a_inv
    #     if info>0: raise LinAlgError, "singular matrix"
    #     if info<0: raise ValueError('illegal value in %d-th argument of '
    #                                 'internal inv.getrf|getri'%(-info))

    # Compute the inverse via LU factorization (?getrf) followed by
    # inversion from the factors (?getri).
    getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
                                                  'getri_lwork'),
                                                 (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info == 0:
        lwork = _compute_lwork(getri_lwork, a1.shape[0])

        # XXX: the following line fixes curious SEGFAULT when
        # benchmarking 500x500 matrix inverse. This seems to
        # be a bug in LAPACK ?getri routine because if lwork is
        # minimal (when using lwork[0] instead of lwork[1]) then
        # all tests pass. Further investigation is required if
        # more such SEGFAULTs occur.
        lwork = int(1.01 * lwork)
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
    """
    Compute the determinant of a matrix

    The determinant of a square matrix is a value derived arithmetically
    from the coefficients of the matrix.

    The determinant for a 3x3 matrix, for example, is computed as follows::

        a    b    c
        d    e    f = A
        g    h    i

        det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h

    Parameters
    ----------
    a : (M, M) array_like
        A square matrix.
    overwrite_a : bool, optional
        Allow overwriting data in a (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    det : float or complex
        Determinant of `a`.

    Notes
    -----
    The determinant is computed via LU factorization, LAPACK routine z/dgetrf.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    0.0
    >>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    3.0
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    # The Fortran wrapper computes the determinant from the LU factors
    # (product of U's diagonal, sign-adjusted by the pivoting).
    fdet, = get_flinalg_funcs(('det',), (a1,))
    a_det, info = fdet(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'det.getrf' % -info)
    return a_det
# Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
          check_finite=True, lapack_driver=None):
    """
    Compute least-squares solution to equation Ax = b.
    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
    Parameters
    ----------
    a : (M, N) array_like
        Left-hand side array
    b : (M,) or (M, K) array_like
        Right hand side array
    cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``rcond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    lapack_driver : str, optional
        Which LAPACK driver is used to solve the least-squares problem.
        Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
        (``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
        faster on many problems. ``'gelss'`` was used historically. It is
        generally slow but uses less memory.
        .. versionadded:: 0.17.0
    Returns
    -------
    x : (N,) or (N, K) ndarray
        Least-squares solution. Return shape matches shape of `b`.
    residues : (K,) ndarray or float
        Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
        ``ndim(A) == n`` (returns a scalar if b is 1-D). Otherwise a
        (0,)-shaped array is returned.
    rank : int
        Effective rank of `a`.
    s : (min(M, N),) ndarray or None
        Singular values of `a`. The condition number of a is
        ``abs(s[0] / s[-1])``.
    Raises
    ------
    LinAlgError
        If computation does not converge.
    ValueError
        When parameters are not compatible.
    See Also
    --------
    scipy.optimize.nnls : linear least squares with non-negativity constraint
    Notes
    -----
    When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
    array and `s` is always ``None``.
    Examples
    --------
    >>> from scipy.linalg import lstsq
    >>> import matplotlib.pyplot as plt
    Suppose we have the following data:
    >>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
    >>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])
    We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
    to this data. We first form the "design matrix" M, with a constant
    column of 1s and a column containing ``x**2``:
    >>> M = x[:, np.newaxis]**[0, 2]
    >>> M
    array([[  1.  ,   1.  ],
           [  1.  ,   6.25],
           [  1.  ,  12.25],
           [  1.  ,  16.  ],
           [  1.  ,  25.  ],
           [  1.  ,  49.  ],
           [  1.  ,  72.25]])
    We want to find the least-squares solution to ``M.dot(p) = y``,
    where ``p`` is a vector with length 2 that holds the parameters
    ``a`` and ``b``.
    >>> p, res, rnk, s = lstsq(M, y)
    >>> p
    array([ 0.20925829,  0.12013861])
    Plot the data and the fitted curve.
    >>> plt.plot(x, y, 'o', label='data')
    >>> xx = np.linspace(0, 9, 101)
    >>> yy = p[0] + p[1]*xx**2
    >>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend(framealpha=1, shadow=True)
    >>> plt.grid(alpha=0.25)
    >>> plt.show()
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2:
        raise ValueError('Input array a should be 2D')
    m, n = a1.shape
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('Shape mismatch: a and b should have the same number'
                         ' of rows ({} != {}).'.format(m, b1.shape[0]))
    if m == 0 or n == 0:  # Zero-sized problem, confuses LAPACK
        x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
        if n == 0:
            # Nothing to solve for: residue is just |b|^2 per column.
            residues = np.linalg.norm(b1, axis=0)**2
        else:
            residues = np.empty((0,))
        return x, residues, 0, np.empty((0,))
    # Resolve the driver once; `lapack_driver` stays as the caller gave it.
    driver = lapack_driver
    if driver is None:
        driver = lstsq.default_lapack_driver
    if driver not in ('gelsd', 'gelsy', 'gelss'):
        raise ValueError('LAPACK driver "%s" is not found' % driver)
    lapack_func, lapack_lwork = get_lapack_funcs((driver,
                                                  '%s_lwork' % driver),
                                                 (a1, b1))
    real_data = True if (lapack_func.dtype.kind == 'f') else False
    if m < n:
        # need to extend b matrix as it will be filled with
        # a larger solution matrix
        if len(b1.shape) == 2:
            b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
            b2[:m, :] = b1
        else:
            b2 = np.zeros(n, dtype=lapack_func.dtype)
            b2[:m] = b1
        b1 = b2
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if cond is None:
        cond = np.finfo(lapack_func.dtype).eps
    if driver in ('gelss', 'gelsd'):
        if driver == 'gelss':
            lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
            v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
                                                    overwrite_a=overwrite_a,
                                                    overwrite_b=overwrite_b)
        elif driver == 'gelsd':
            if real_data:
                lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork,
                                               iwork, cond, False, False)
            else:  # complex data
                lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
                                                     nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
                                               cond, False, False)
        if info > 0:
            raise LinAlgError("SVD did not converge in Linear Least Squares")
        if info < 0:
            # BUG FIX: report the resolved driver name.  The original code
            # formatted ``lapack_driver`` here, which is ``None`` whenever
            # the caller relied on the default driver.
            raise ValueError('illegal value in %d-th argument of internal %s'
                             % (-info, driver))
        resids = np.asarray([], dtype=x.dtype)
        if m > n:
            x1 = x[:n]
            # Residues are only meaningful for a full-rank overdetermined
            # system; the trailing rows of x hold the residual components.
            if rank == n:
                resids = np.sum(np.abs(x[n:])**2, axis=0)
            x = x1
        return x, resids, rank, s
    elif driver == 'gelsy':
        lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
        jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
        v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
                                          lwork, False, False)
        if info < 0:
            raise ValueError("illegal value in %d-th argument of internal "
                             "gelsy" % -info)
        if m > n:
            x1 = x[:n]
            x = x1
        # gelsy computes no singular values: empty residues, s is None.
        return x, np.array([], x.dtype), rank, None
lstsq.default_lapack_driver = 'gelsd'
def pinv(a, atol=None, rtol=None, return_rank=False, check_finite=True,
         cond=None, rcond=None):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
    Calculate a generalized inverse of a matrix using its
    singular-value decomposition ``U @ S @ V`` in the economy mode and picking
    up only the columns/rows that are associated with significant singular
    values.
    If ``s`` is the maximum singular value of ``a``, then the
    significance cut-off value is determined by ``atol + rtol * s``. Any
    singular value below this value is assumed insignificant.
    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    atol: float, optional
        Absolute threshold term, default value is 0.
        .. versionadded:: 1.7.0
    rtol: float, optional
        Relative threshold term, default value is ``max(M, N) * eps`` where
        ``eps`` is the machine precision value of the datatype of ``a``.
        .. versionadded:: 1.7.0
    return_rank : bool, optional
        If True, return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    cond, rcond : float, optional
        In older versions, these values were meant to be used as ``atol`` with
        ``rtol=0``. If both were given ``rcond`` overwrote ``cond`` and hence
        the code was not correct. Thus using these are strongly discouraged and
        the tolerances above are recommended instead. In fact, if provided,
        atol, rtol takes precedence over these keywords.
        .. versionchanged:: 1.7.0
            Deprecated in favor of ``rtol`` and ``atol`` parameters above and
            will be removed in future versions of SciPy.
        .. versionchanged:: 1.3.0
            Previously the default cutoff value was just ``eps*f`` where ``f``
            was ``1e3`` for single precision and ``1e6`` for double precision.
    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.
    Raises
    ------
    LinAlgError
        If SVD computation does not converge.
    Examples
    --------
    >>> from scipy import linalg
    >>> rng = np.random.default_rng()
    >>> a = rng.standard_normal((9, 6))
    >>> B = linalg.pinv(a)
    >>> np.allclose(a, a @ B @ a)
    True
    >>> np.allclose(B, B @ a @ B)
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    # Economy-mode SVD: u is (M, K), s is (K,), vh is (K, N), K = min(M, N).
    u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
    t = u.dtype.char.lower()  # real dtype char ('f'/'d') used for eps lookup
    # NOTE(review): np.max raises on a zero-sized ``s``; an empty input
    # matrix would fail on the next line -- confirm degenerate inputs are
    # not expected here.
    maxS = np.max(s)
    # NOTE: a zero cond/rcond is treated as "not given" by this truthiness
    # test, matching the legacy keyword handling below.
    if rcond or cond:
        warn('Use of the "cond" and "rcond" keywords are deprecated and '
             'will be removed in future versions of SciPy. Use "atol" and '
             '"rtol" keywords instead', DeprecationWarning, stacklevel=2)
    # backwards compatible only atol and rtol are both missing
    if (rcond or cond) and (atol is None) and (rtol is None):
        atol = rcond or cond
        rtol = 0.
    atol = 0. if atol is None else atol
    rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
    if (atol < 0.) or (rtol < 0.):
        raise ValueError("atol and rtol values must be positive.")
    # Significance cutoff: singular values <= atol + rtol*max(s) are dropped.
    val = atol + maxS * rtol
    rank = np.sum(s > val)
    # Keep only the significant singular triplets; scale the retained left
    # singular vectors columnwise by 1/sigma, then B = V S^+ U^H.
    u = u[:, :rank]
    u /= s[:rank]
    B = (u @ vh[:rank]).conj().T
    if return_rank:
        return B, rank
    else:
        return B
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    .. deprecated:: 1.7.0
        `scipy.linalg.pinv2` is deprecated since SciPy 1.7.0; use
        `scipy.linalg.pinv` instead for better tolerance control.

    A generalized inverse is obtained from the singular-value
    decomposition of `a`, keeping all 'large' singular values.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' singular values; singular values smaller than
        this value are considered as zero. If both are omitted, the default
        ``max(M,N)*largest_singular_value*eps`` is used, ``eps`` being the
        machine precision of the datatype of ``a``. If both are given,
        ``rcond`` takes precedence.
    return_rank : bool, optional
        If True, also return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.
    """
    # SciPy 1.7.0 2021-04-10
    warn('scipy.linalg.pinv2 is deprecated since SciPy 1.7.0, use '
         'scipy.linalg.pinv instead', DeprecationWarning, stacklevel=2)
    # rcond, when supplied, wins over cond; the chosen value is forwarded
    # to pinv as an absolute tolerance.
    tol = cond if rcond is None else rcond
    return pinv(a=a, atol=tol, rtol=None, return_rank=return_rank,
                check_finite=check_finite)
def pinvh(a, atol=None, rtol=None, lower=True, return_rank=False,
          check_finite=True, cond=None, rcond=None):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
    Calculate a generalized inverse of a complex Hermitian/real symmetric
    matrix using its eigenvalue decomposition and including all eigenvalues
    with 'large' absolute value.
    Parameters
    ----------
    a : (N, N) array_like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
    atol: float, optional
        Absolute threshold term, default value is 0.
        .. versionadded:: 1.7.0
    rtol: float, optional
        Relative threshold term, default value is ``N * eps`` where
        ``eps`` is the machine precision value of the datatype of ``a``.
        .. versionadded:: 1.7.0
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    return_rank : bool, optional
        If True, return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    cond, rcond : float, optional
        In older versions, these values were meant to be used as ``atol`` with
        ``rtol=0``. If both were given ``rcond`` overwrote ``cond`` and hence
        the code was not correct. Thus using these are strongly discouraged and
        the tolerances above are recommended instead. In fact, if provided,
        atol, rtol takes precedence over these keywords.
        .. versionchanged:: 1.7.0
            Deprecated in favor of ``rtol`` and ``atol`` parameters above and
            will be removed in future versions of SciPy.
        .. versionchanged:: 1.3.0
            Previously the default cutoff value was just ``eps*f`` where ``f``
            was ``1e3`` for single precision and ``1e6`` for double precision.
    Returns
    -------
    B : (N, N) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.
    Raises
    ------
    LinAlgError
        If eigenvalue algorithm does not converge.
    Examples
    --------
    >>> from scipy.linalg import pinvh
    >>> rng = np.random.default_rng()
    >>> a = rng.standard_normal((9, 6))
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, a @ B @ a)
    True
    >>> np.allclose(B, B @ a @ B)
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    # Hermitian eigendecomposition: a == u @ diag(s) @ u.conj().T
    s, u = decomp.eigh(a, lower=lower, check_finite=False)
    t = u.dtype.char.lower()  # real dtype char ('f'/'d') used for eps lookup
    # Eigenvalues may be negative, so the cutoff works on magnitudes.
    maxS = np.max(np.abs(s))
    # NOTE: a zero cond/rcond is treated as "not given" by this truthiness
    # test, matching the legacy keyword handling below.
    if rcond or cond:
        warn('Use of the "cond" and "rcond" keywords are deprecated and '
             'will be removed in future versions of SciPy. Use "atol" and '
             '"rtol" keywords instead', DeprecationWarning, stacklevel=2)
    # backwards compatible only atol and rtol are both missing
    if (rcond or cond) and (atol is None) and (rtol is None):
        atol = rcond or cond
        rtol = 0.
    atol = 0. if atol is None else atol
    rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
    if (atol < 0.) or (rtol < 0.):
        raise ValueError("atol and rtol values must be positive.")
    # Significance cutoff on |eigenvalue|; smaller eigenpairs are dropped.
    val = atol + maxS * rtol
    above_cutoff = (abs(s) > val)
    psigma_diag = 1.0 / s[above_cutoff]
    u = u[:, above_cutoff]
    # B = u @ diag(1/s) @ u^H restricted to the significant eigenpairs.
    B = (u * psigma_diag) @ u.conj().T
    if return_rank:
        return B, len(psigma_diag)
    else:
        return B
def matrix_balance(A, permute=True, scale=True, separate=False,
                   overwrite_a=False):
    """
    Compute a diagonal similarity transformation for row/column balancing.
    The balancing tries to equalize the row and column 1-norms by applying
    a similarity transformation such that the magnitude variation of the
    matrix entries is reflected to the scaling matrices.
    Moreover, if enabled, the matrix is first permuted to isolate the upper
    triangular parts of the matrix and, again if scaling is also enabled,
    only the remaining subblocks are subjected to scaling.
    The balanced matrix satisfies the following equality
    .. math::
                        B = T^{-1} A T
    The scaling coefficients are approximated to the nearest power of 2
    to avoid round-off errors.
    Parameters
    ----------
    A : (n, n) array_like
        Square data matrix for the balancing.
    permute : bool, optional
        The selector to define whether permutation of A is also performed
        prior to scaling.
    scale : bool, optional
        The selector to turn on and off the scaling. If False, the matrix
        will not be scaled.
    separate : bool, optional
        This switches from returning a full matrix of the transformation
        to a tuple of two separate 1-D permutation and scaling arrays.
    overwrite_a : bool, optional
        This is passed to xGEBAL directly. Essentially, overwrites the result
        to the data. It might increase the space efficiency. See LAPACK manual
        for details. This is False by default.
    Returns
    -------
    B : (n, n) ndarray
        Balanced matrix
    T : (n, n) ndarray
        A possibly permuted diagonal matrix whose nonzero entries are
        integer powers of 2 to avoid numerical truncation errors.
    scale, perm : (n,) ndarray
        If ``separate`` keyword is set to True then instead of the array
        ``T`` above, the scaling and the permutation vectors are given
        separately as a tuple without allocating the full array ``T``.
    Notes
    -----
    This algorithm is particularly useful for eigenvalue and matrix
    decompositions and in many cases it is already called by various
    LAPACK routines.
    The algorithm is based on the well-known technique of [1]_ and has
    been modified to account for special cases. See [2]_ for details
    which have been implemented since LAPACK v3.5.0. Before this version
    there are corner cases where balancing can actually worsen the
    conditioning. See [3]_ for such examples.
    The code is a wrapper around LAPACK's xGEBAL routine family for matrix
    balancing.
    .. versionadded:: 0.19.0
    Examples
    --------
    >>> from scipy import linalg
    >>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])
    >>> y, permscale = linalg.matrix_balance(x)
    >>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
    array([ 3.66666667,  0.4995005 ,  0.91312162])
    >>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
    array([ 1.2       ,  1.27041742,  0.92658316])  # may vary
    >>> permscale  # only powers of 2 (0.5 == 2^(-1))
    array([[  0.5,   0. ,  0. ],  # may vary
           [  0. ,   1. ,  0. ],
           [  0. ,   0. ,  1. ]])
    References
    ----------
    .. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
       Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
       Vol.13(4), 1969, :doi:`10.1007/BF02165404`
    .. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
       eigenvector computation", 2014, :arxiv:`1401.5766`
    .. [3] :  D.S. Watkins. A case where balancing is harmful.
       Electron. Trans. Numer. Anal, Vol.23, 2006.
    """
    A = np.atleast_2d(_asarray_validated(A, check_finite=True))
    if not np.equal(*A.shape):
        raise ValueError('The data matrix for balancing should be square.')
    # NOTE(review): ('gebal') is a plain string, not a 1-tuple; this relies
    # on get_lapack_funcs accepting a bare name and returning the function
    # directly -- confirm against the installed scipy version.
    gebal = get_lapack_funcs(('gebal'), (A,))
    # lo/hi delimit the scaled subblock; ps packs both the scalings and the
    # permutation indices as produced by xGEBAL.
    B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
                                overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('xGEBAL exited with the internal error '
                         '"illegal value in argument number {}.". See '
                         'LAPACK documentation for the xGEBAL error codes.'
                         ''.format(-info))
    # Separate the permutations from the scalings and then convert to int
    scaling = np.ones_like(ps, dtype=float)
    scaling[lo:hi+1] = ps[lo:hi+1]
    # gebal uses 1-indexing
    ps = ps.astype(int, copy=False) - 1
    n = A.shape[0]
    perm = np.arange(n)
    # LAPACK permutes with the ordering n --> hi, then 0--> lo
    # Replay the recorded swaps in that order to recover the permutation.
    if hi < n:
        for ind, x in enumerate(ps[hi+1:][::-1], 1):
            if n-ind == x:
                continue
            perm[[x, n-ind]] = perm[[n-ind, x]]
    if lo > 0:
        for ind, x in enumerate(ps[:lo]):
            if ind == x:
                continue
            perm[[x, ind]] = perm[[ind, x]]
    if separate:
        return B, (scaling, perm)
    # get the inverse permutation
    iperm = np.empty_like(perm)
    iperm[perm] = np.arange(n)
    # Build the full (permuted diagonal) transformation matrix T.
    return B, np.diag(scaling)[iperm, :]
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
                                    enforce_square=True):
    """Validate arguments and format inputs for toeplitz functions.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays ``(c, r)``. Both are
        flattened to 1-D. When only ``c`` is supplied,
        ``r = conjugate(c)`` is assumed; ``r[0]`` is always ignored since
        the first row of the Toeplitz matrix is ``[c[0], r[1:]]``.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (result entirely NaNs) if the inputs do contain
        infinities or NaNs.
    keep_b_shape : bool
        Whether a 1-D ``b`` is kept 1-D (True) or promoted to a (M, 1)
        column (False).
    enforce_square : bool, optional
        If True (default), verify that the Toeplitz matrix is square.

    Returns
    -------
    r : array
        1-D first row of the Toeplitz matrix.
    c : array
        1-D first column of the Toeplitz matrix.
    b : array
        (M,), (M, 1) or (M, K) array corresponding to ``b`` after
        validation.
    dtype : numpy datatype
        ``np.complex128`` when any of ``r``, ``c``, ``b`` is complex,
        otherwise ``np.double``; all three outputs are cast to it.
    b_shape : tuple
        Shape of ``b`` as originally passed in (post validation).
    """
    if isinstance(c_or_cr, tuple):
        first_col, first_row = c_or_cr
        c = _asarray_validated(first_col, check_finite=check_finite).ravel()
        r = _asarray_validated(first_row, check_finite=check_finite).ravel()
    else:
        c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
        r = c.conjugate()

    if b is None:
        raise ValueError('`b` must be an array, not None.')
    b = _asarray_validated(b, check_finite=check_finite)
    b_shape = b.shape

    # b must match the number of columns of T; squareness only if required.
    square = r.shape[0] == c.shape[0]
    if (enforce_square and not square) or b.shape[0] != r.shape[0]:
        raise ValueError('Incompatible dimensions.')

    if any(np.iscomplexobj(arr) for arr in (r, c, b)):
        dtype = np.complex128
    else:
        dtype = np.double
    r = np.asarray(r, dtype=dtype)
    c = np.asarray(c, dtype=dtype)
    b = np.asarray(b, dtype=dtype)

    if b.ndim == 1:
        if not keep_b_shape:
            b = b.reshape(-1, 1)
    else:
        # Collapse any trailing dimensions into a single RHS axis.
        b = b.reshape(b.shape[0], -1)
    return r, c, b, dtype, b_shape
def matmul_toeplitz(c_or_cr, x, check_finite=False, workers=None):
    """Efficient Toeplitz matrix-matrix multiplication using the FFT.

    Return the product of a Toeplitz matrix ``T`` and a dense matrix
    ``x``.  ``T`` has constant diagonals, with ``c`` as its first column
    and ``r`` as its first row; when ``r`` is not given,
    ``r == conjugate(c)`` is assumed.  ``T`` is never formed explicitly:
    it is embedded in a circulant matrix and the product is evaluated
    with FFTs, so this routine is suitable for very large operators.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays ``(c, r)``.  Both are
        flattened to 1-D; ``r[0]`` is ignored since the first row of the
        Toeplitz matrix is ``[c[0], r[1:]]``.
    x : (M,) or (M, K) array_like
        Matrix (or vector) with which to multiply.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers.  Disabling may give a performance gain, but may result
        in problems (result entirely NaNs) if the inputs do contain
        infinities or NaNs.
    workers : int, optional
        Passed to ``scipy.fft.fft``/``ifft`` as the maximum number of
        workers for parallel computation.  If negative, the value wraps
        around from ``os.cpu_count()``.

    Returns
    -------
    T @ x : (M,) or (M, K) ndarray
        The result of the matrix multiplication ``T @ x``; its shape
        matches the shape of `x`.

    See Also
    --------
    toeplitz : Toeplitz matrix
    solve_toeplitz : Solve a Toeplitz system using Levinson Recursion

    Notes
    -----
    Because the computation is based on the FFT, integer inputs will
    result in floating point outputs (unlike NumPy's `matmul`, which
    preserves the input dtype).  The circulant-embedding approach is
    described e.g. in the "Templates for the Solution of Algebraic
    Eigenvalue Problems" survey (Demmel, Koev, Li) and is partly based on
    the MIT-licensed GPyTorch implementation.

    .. versionadded:: 1.6.0

    Examples
    --------
    >>> c = np.array([1, 3, 6, 10])    # First column of T
    >>> r = np.array([1, -1, -2, -3])  # First row of T
    >>> x = np.array([[1, 10], [2, 11], [2, 11], [5, 19]])
    >>> from scipy.linalg import toeplitz, matmul_toeplitz
    >>> matmul_toeplitz((c, r), x)
    array([[-20., -80.],
           [ -7.,  -8.],
           [  9.,  85.],
           [ 33., 218.]])
    """
    from ..fft import fft, ifft, rfft, irfft

    r, c, x, dtype, x_shape = _validate_args_for_toeplitz_ops(
        c_or_cr, x, check_finite, keep_b_shape=False, enforce_square=False)
    _, n_rhs = x.shape
    n_rows = len(c)
    n_cols = len(r)
    # First column of the (p x p) circulant embedding of T.
    p = n_rows + n_cols - 1
    embedded_col = np.concatenate((c, r[-1:0:-1]))

    if np.iscomplexobj(embedded_col) or np.iscomplexobj(x):
        spectrum = fft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
        x_spectrum = fft(x, n=p, axis=0, workers=workers)
        product = ifft(spectrum * x_spectrum, axis=0,
                       workers=workers)[:n_rows, :]
    else:
        # Real inputs: the half-spectrum transforms are faster.
        spectrum = rfft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
        x_spectrum = rfft(x, n=p, axis=0, workers=workers)
        product = irfft(spectrum * x_spectrum, axis=0,
                        workers=workers, n=p)[:n_rows, :]

    # Mirror the dimensionality of the original x (vector in, vector out).
    out_shape = (n_rows,) if len(x_shape) == 1 else (n_rows, n_rhs)
    return product.reshape(*out_shape)
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/neighbors/unsupervised.py | 16 | 3198 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
                       RadiusNeighborsMixin, UnsupervisedMixin):
    """Unsupervised learner for implementing neighbor searches.
    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth`radius_neighbors`
        queries.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.
    metric : string or callable, optional (default = 'minkowski')
        Distance metric used for the neighbor queries; forwarded to
        ``_init_params`` together with ``p`` and ``metric_params``.
    p: integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params: dict, optional (default = None)
        additional keyword arguments for the metric function.
    Examples
    --------
      >>> from sklearn.neighbors import NearestNeighbors
      >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
      >>> neigh = NearestNeighbors(2, 0.4)
      >>> neigh.fit(samples)  #doctest: +ELLIPSIS
      NearestNeighbors(...)
      >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
      ... #doctest: +ELLIPSIS
      array([[2, 0]]...)
      >>> neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
      array([[2]])
    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    BallTree
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, radius=1.0,
                 algorithm='auto', leaf_size=30, metric='minkowski',
                 p=2, metric_params=None, **kwargs):
        # All construction is delegated to NeighborsBase._init_params;
        # this class adds no state of its own.
        self._init_params(n_neighbors=n_neighbors,
                          radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, **kwargs)
| bsd-3-clause |
phase4ground/DVB-receiver | modem/hdl/library/apsk_modulator/test/dut_control_apsk_modulator.py | 1 | 12998 | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import Timer
from cocotb.triggers import RisingEdge
from cocotb.result import TestFailure
from cocotb.drivers.amba import AXI4LiteMaster
from cocotb.drivers.amba import AXI4StreamMaster
from cocotb.drivers.amba import AXI4StreamSlave
import sys, json
sys.path.insert(0, '../../../../python/cocotb')
from dut_control import dut_control
sys.path.insert(0, '../../../../python/library')
import comms_filters
import generic_modem
import numpy as np
import matplotlib.pyplot as plt
class dut_control_apsk_modulator(dut_control):
"""
A class which contains variety of useful and reused functions and
capabilities to control a DUT
"""
    def __init__(self, dut):
        """
        Initialise the object.

        Performs the base-class setup, records the simulation/DUT
        parameters used throughout the tests, and loads the DVB-S2X
        constellation definitions from the shared JSON file.
        """
        # perform parent initialisation first
        dut_control.__init__(self, dut)
        # set simulation parameters
        self.DEBUG = False
        self.PLOT = False
        self.NUMBER_REPEAT_TESTS = 3
        # set system parameters (must match the HDL generics of the DUT)
        self.INPUT_WIDTH = 32
        self.NUMBER_TAPS = 40
        self.SAMPLES_PER_SYMBOL = 4
        self.DATA_WIDTH = 16
        self.COEFFS_WIDTH = 16
        self.MEMORY_DEPTH = 256
        # test parameters (pass/fail tolerances used by the checks)
        self.POWER_TOLERANCE = 0.5
        # one LSB-scaled tolerance derived from the output word width
        self.SAMPLE_TOLERANCE = 1000.0/(2**self.DATA_WIDTH)
        self.LENGTH_TOLERANCE = 4
        # read in the modulation definition file
        self.modulation_definition_filename = "../../../../python/library/DVB-S2X_constellations.json"
        with open(self.modulation_definition_filename) as json_file:
            self.modulation_definition = json.load(json_file)
    @cocotb.coroutine
    def wait(self, wait_period):
        """
        Wait for a given number of data-in clock periods.

        Delegates to the base-class ``clk_wait`` using the stored
        data-in rising-edge trigger.
        """
        yield self.clk_wait(self.data_in_clk_rising, wait_period)
    @cocotb.coroutine
    def reset(self):
        """
        Reset the DUT.

        All ``aresetn`` signals appear active-low: they are held at 0
        for two clock periods and then released to 1, after which the
        global set/reset is cleared.
        """
        # assert all active-low resets
        self.dut.data_in_aresetn = 0
        self.dut.data_out_aresetn = 0
        self.dut.coefficients_in_aresetn = 0
        self.dut.control_aresetn = 0
        yield self.wait(2)
        # release the resets
        self.dut.data_in_aresetn = 1
        self.dut.data_out_aresetn = 1
        self.dut.coefficients_in_aresetn = 1
        self.dut.control_aresetn = 1
        # de-assert the FPGA global set/reset (base-class helper)
        self.GSR_control(self.dut, 0)
        yield self.wait(2)
    @cocotb.coroutine
    def clock_start(self):
        """
        Startup the clock required for the DUT.

        No-op here: all clocks are forked per-interface in
        ``setup_interfaces``, so there is no independent clock to start.
        The zero-delay yield keeps this a valid cocotb coroutine.
        """
        self.dut._log.info("No independant clock used in design")
        yield Timer(0)
    @cocotb.coroutine
    def setup_interfaces(self):
        """
        Setup the DUT interfaces.

        Forks a clock generator for each AXI port and wraps the DUT
        signals in AXI4-Stream master/slave and AXI4-Lite master drivers.
        """
        # input data interface
        self.data_in_clk_gen = cocotb.fork(Clock(self.dut.data_in_aclk, self.CLK_PERIOD).start())
        self.axism_data_in = AXI4StreamMaster(self.dut, "data_in", self.dut.data_in_aclk)
        # output data interface
        self.data_out_clk_gen = cocotb.fork(Clock(self.dut.data_out_aclk, self.CLK_PERIOD).start())
        self.axiss_data_out = AXI4StreamSlave(self.dut, "data_out", self.dut.data_out_aclk)
        # constellation LUT load interface
        self.lut_data_load_clk_gen = cocotb.fork(Clock(self.dut.lut_data_load_aclk, self.CLK_PERIOD).start())
        self.axism_lut_data_load = AXI4StreamMaster(self.dut, "lut_data_load", self.dut.lut_data_load_aclk)
        # coefficients interface
        self.coefficients_in_clk_gen = cocotb.fork(Clock(self.dut.coefficients_in_aclk, self.CLK_PERIOD).start())
        self.axism_coeffs_in = AXI4StreamMaster(self.dut, "coefficients_in", self.dut.coefficients_in_aclk)
        # control data interface
        self.control_clk_gen = cocotb.fork(Clock(self.dut.control_aclk, self.CLK_PERIOD).start())
        self.axilm_control = AXI4LiteMaster(self.dut, "control", self.dut.control_aclk)
        # use the input data clock
        # NOTE(review): this stores whatever value is returned by yielding
        # the RisingEdge trigger; clk_wait() in the base class consumes it
        # later -- confirm it is the trigger object (not None) in this
        # cocotb version.
        self.data_in_clk_rising = yield RisingEdge(self.dut.data_in_aclk)
        yield Timer(0)
    @cocotb.coroutine
    def init_ports(self):
        """
        Set any port initial values.

        Drives every AXI-stream data/last/valid signal low so no bus starts
        mid-transaction before the first real transfer.
        """
        # initialse values
        self.dut.data_in_tdata = 0
        self.dut.data_in_tlast = 0
        self.dut.data_in_tvalid = 0
        # NOTE(review): data_out_* look like DUT outputs; driving them from
        # the testbench may be redundant -- confirm against the RTL port list
        self.dut.data_out_tdata = 0
        self.dut.data_out_tlast = 0
        self.dut.data_out_tvalid = 0
        self.dut.lut_data_load_tdata = 0
        self.dut.lut_data_load_tlast = 0
        self.dut.lut_data_load_tvalid = 0
        self.dut.coefficients_in_tdata = 0
        self.dut.coefficients_in_tlast = 0
        self.dut.coefficients_in_tvalid = 0
        # coroutine protocol requires at least one yield
        yield Timer(0)
    @cocotb.coroutine
    def set_modulation(self, modulation_name):
        """
        Setup the DUT with the selected modulation type.

        modulation_name -- two-element key into the modulation definition
        JSON: the first element selects the scheme group, the second the
        specific variant.
        """
        # store the modulation to internal variable
        self.modulation_name = modulation_name
        # pull out the modulation parameters and store internally
        modulation_dict = self.modulation_definition[self.modulation_name[0]][self.modulation_name[1]]
        self.bits_per_symbol = modulation_dict['bits_per_symbol']
        self.pulse_filter_type = modulation_dict['filter']
        self.constellation_map = modulation_dict['bit_map']
        self.relative_rate = modulation_dict['relative_rate']
        self.symbol_offset = modulation_dict['offset']
        # TODO for now this is hardcoded here - should change
        # should this be stored in the modulation JSON file?
        self.pulse_factor = 0.5
        # write in the pulse-shaping filter coefficients
        yield self.coefficients_write()
        # write the constellation map into the DUT LUT
        yield self.modulation_write()
        # write the control signals
        yield self.control_signals_write()
    @cocotb.coroutine
    def coefficients_write(self):
        """
        Write coefficients into the DUT.

        Generates the pulse-shaping filter taps for the configured filter
        type, scales them to the signed coefficient width, converts them to
        fixed point and streams them over the coefficients bus after a
        local reset.
        """
        # create the coefficients (root-raised-cosine is the only supported type)
        if self.pulse_filter_type == "RRC":
            coefficients = comms_filters.rrcosfilter(  N = self.NUMBER_TAPS,
                                alpha  = self.pulse_factor,
                                Ts = 1,
                                Fs = self.SAMPLES_PER_SYMBOL)[1]
        else:
            # NOTE(review): assert is stripped under `python -O`;
            # consider raising an exception for this validation instead
            assert False, "Unsupported filter type (%s) specified" % self.pulse_filter_type
        # scale the coefficients so the DC gain (sum of taps) fits the
        # signed COEFFS_WIDTH range
        coefficients_max = sum(coefficients)
        coefficients = [int(coefficient * ((2**(self.COEFFS_WIDTH-1)-1)/coefficients_max)) for coefficient in coefficients]
        # if requested plot the filter coefficients
        if self.PLOT:
            plt.plot(coefficients)
            plt.title("Filter Coefficients")
            plt.xlabel("Samples")
            plt.ylabel("Amplitude")
            plt.show()
        # convert negative numbers to twos complement and arrange for the polyphase structure
        coefficients = self.signed_to_fixedpoint(coefficients, self.COEFFS_WIDTH, multiplier=1.0)
        # reset only the coefficients interface before loading
        self.dut.coefficients_in_aresetn = 0
        yield self.wait(2)
        self.dut.coefficients_in_aresetn = 1
        yield self.wait(2)
        # write the coefficients through the bus
        yield self.axism_coeffs_in.write(coefficients)
        yield self.wait(1)
    @cocotb.coroutine
    def modulation_write(self):
        """
        Write modulation constellation the DUT memory.

        Splits the constellation map into I/Q components, converts each to
        fixed point, packs them into one word per constellation point and
        streams the words over the LUT load bus after a local reset.
        """
        # set an amplitude below one (full scale for a signed 16-bit value)
        amplitude = 2**14-1
        # amplitude = 1.0
        # split the constellation into in-phase and quadrature components
        i_constellation_map = [_[0] for _ in self.constellation_map]
        q_constellation_map = [_[1] for _ in self.constellation_map]
        # convert the signed floats to two's complement fixed point form
        i_data = self.signed_to_fixedpoint(i_constellation_map, self.DATA_WIDTH, normalised=False, multiplier=amplitude)
        q_data = self.signed_to_fixedpoint(q_constellation_map, self.DATA_WIDTH, normalised=False, multiplier=amplitude)
        # combine the two constellation maps into a 32 bit number: Q in the
        # upper half, I in the lower half
        # NOTE(review): the 2**16 shift assumes DATA_WIDTH == 16 -- confirm
        data = [int(q_data[i]*2**16) + int(i_data[i]) for i in range(len(i_data))]
        # reset only the LUT load interface before loading
        self.dut.lut_data_load_aresetn = 0
        yield self.wait(2)
        self.dut.lut_data_load_aresetn = 1
        yield self.wait(2)
        # write the constellation words through the bus
        yield self.axism_lut_data_load.write(data)
        yield self.wait(1)
    @cocotb.coroutine
    def control_signals_write(self):
        """
        Write the control signals to the DUT.

        Packs the symbol offset (bits 3 and up) and bits-per-symbol
        (bits 2:0) into the single control register at address 0x00.
        """
        # form the register as an integer: offset << 3 | bits_per_symbol
        register_value = self.symbol_offset*2**3 + self.bits_per_symbol
        # write the control signal over the AXI4-Lite bus
        yield self.axilm_control.write(address=0x00, value=register_value)
        yield self.wait(1)
    @cocotb.coroutine
    def data_out_read_enable(self):
        """
        Setup the handle to capture writes into the output data bus.

        Forks a background read on the output stream slave; the captured
        words are later parsed by data_out_read_parse().
        """
        self.axiss_read_handle = cocotb.fork(self.axiss_data_out.read())
        yield self.wait(1)
def data_out_read_parse(self):
"""
Parse the read data into real and imaginary parts.
"""
# split the data into real and imaginary parts
data_read_real = [float((2**self.DATA_WIDTH-1) & int(_)) for _ in self.axiss_data_out.data]
data_read_imag = [float(((2**self.DATA_WIDTH-1)*2**self.DATA_WIDTH) & int(_)) / 2**(self.DATA_WIDTH) for _ in self.axiss_data_out.data]
data_read_real = self.fixedpoint_to_signed(data_read_real, self.DATA_WIDTH, normalised=True)
data_read_imag = self.fixedpoint_to_signed(data_read_imag, self.DATA_WIDTH, normalised=True)
# combine the data read into complex numbers
multiplier = 5.67
self.data_read = [data_read_real[i]*multiplier + 1j*data_read_imag[i]*multiplier for i in range(len(data_read_real))]
    @cocotb.coroutine
    def data_in_write(self, data):
        """
        Write data to the modulator.

        data -- iterable of integer words streamed over the input data bus.
        """
        # write in the data
        yield self.axism_data_in.write(data)
def plot_constellation(self):
"""
Plot the received data on a constellation plot.
"""
# only display if the PLOT flag is enabled
if self.PLOT:
# split the output data into
real = [np.real(_) for _ in self.data_read]
imag = [np.imag(_) for _ in self.data_read]
# plot the constellation
plt.scatter( real, imag )
plt.title("Constellation")
plt.xlabel("I")
plt.ylabel("Q")
plt.show()
    def plot_time_domain(self, series):
        """
        Plot a time series.

        NOTE(review): the 'series' parameter is unused -- this method always
        plots self.data_read. Confirm whether 'series' was meant to be the
        data source.
        """
        # only display if the PLOT flag is enabled
        if self.PLOT:
            # complex samples are plotted as two traces (real and imaginary)
            if type(self.data_read[0]) == complex:
                # split the output data into components
                real = [np.real(_) for _ in self.data_read]
                imag = [np.imag(_) for _ in self.data_read]
                # plot both components
                plt.plot( real )
                plt.plot( imag )
            else:
                plt.plot( self.data_read )
            # label and render plot
            plt.title("Time Series")
            plt.xlabel("Samples")
            plt.ylabel("Amplitude")
            plt.show()
    def test_modulation(self, input_data, tolerance=0.001):
        """
        Test the received modulated data against the same data
        that was modulated using the Python implementation.

        input_data -- iterable of integers (INPUT_WIDTH bits each) that was
        driven into the DUT.
        tolerance -- NOTE(review): unused; the per-sample comparison uses
        self.SAMPLE_TOLERANCE instead -- confirm which is intended.

        Raises TestFailure when lengths differ by more than LENGTH_TOLERANCE
        or any sample difference exceeds SAMPLE_TOLERANCE.
        """
        # create the python modulator instance (golden reference model)
        python_modulator = generic_modem.generic_modem( modulation_type = self.modulation_name,
                                                        samples_per_symbol = self.SAMPLES_PER_SYMBOL,
                                                        pulse_factor = self.pulse_factor,
                                                        pulse_length = int(self.NUMBER_TAPS/self.SAMPLES_PER_SYMBOL),
                                                        filename = self.modulation_definition_filename)
        # covert the data to bits, least-significant bit first
        input_data_bits = []
        for number in input_data:
            for index in range(self.INPUT_WIDTH):
                input_data_bits.append( (number >> index) & 1 )
        # modulate the data with the python implemnentation
        expected = python_modulator.modulate(input_data_bits)
        received = self.data_read
        # calculate the number of samples delay to propagate through pipelined registers
        # NOTE(review): 'delay' is computed but never used below -- confirm
        delay = int((self.NUMBER_TAPS/self.SAMPLES_PER_SYMBOL-1)*self.SAMPLES_PER_SYMBOL)
        # if requested plot the raw expected vs received signals
        if self.PLOT:
            plt.subplot(211)
            plt.plot(np.real(expected))
            plt.plot(np.real(received))
            plt.subplot(212)
            plt.plot(np.imag(expected))
            plt.plot(np.imag(received))
            plt.legend(["Expected", "Received"])
            plt.xlabel("Samples")
            plt.ylabel("Amplitude")
            plt.show()
        # check the lengths match to within the allowed tolerance
        if abs(len(expected) - len(received)) > self.LENGTH_TOLERANCE:
            raise TestFailure("The expected (len = %d) and received (len = %d) signals are of different length!" % (len(expected), len(received)))
        length = min(len(expected), len(received))
        # find the energies of both signals
        expected_energy = 0
        received_energy = 0
        for n in range(length):
            expected_energy += abs(expected[n])**2
            received_energy += abs(received[n])**2
        # convert to average power
        expected_power = expected_energy / len(expected)
        received_power = received_energy / len(received)
        # normalise the received signal to the expected signal's power
        norm_factor = np.sqrt(expected_power/received_power)
        received = [_*norm_factor for _ in received]
        # if requested plot the signals again after normalisation
        if self.PLOT:
            plt.title("After Normalisation")
            plt.subplot(211)
            plt.plot(np.real(expected))
            plt.plot(np.real(received))
            plt.subplot(212)
            plt.plot(np.imag(expected))
            plt.plot(np.imag(received))
            plt.legend(["Expected", "Received"])
            plt.xlabel("Samples")
            plt.ylabel("Amplitude")
            plt.show()
        # compare the two signals sample by sample; plot and fail on the
        # first sample whose difference exceeds the tolerance
        for n in range(length):
            difference = abs(expected[n] - received[n])
            if difference > self.SAMPLE_TOLERANCE:
                plt.subplot(211)
                plt.plot(np.real(expected))
                plt.plot(np.real(received))
                plt.subplot(212)
                plt.plot(np.imag(expected))
                plt.plot(np.imag(received))
                plt.legend(["Expected", "Received"])
                plt.xlabel("Samples")
                plt.ylabel("Amplitude")
                plt.show()
raise TestFailure("The difference between the %d sample is %f which exceeds the tolerance of %f." % (n, difference, self.SAMPLE_TOLERANCE)) | gpl-3.0 |
pyurdme/pyurdme | examples/cylinder_demo/cylinder_demo3D.py | 5 | 3718 | #!/usr/bin/env python
""" pyURDME model file for the annihilation cylinder 3D example. """
import os
import pyurdme
import dolfin
import mshr
import matplotlib.pyplot as plt
import numpy
# Global Constants
MAX_X_DIM = 5.0   # x coordinate of one cylinder end cap
MIN_X_DIM = -5.0  # x coordinate of the opposite end cap
TOL = 1e-9        # NOTE(review): unused in the visible code -- confirm
class Edge1(dolfin.SubDomain):
    """Boundary subdomain marking the cylinder end cap at x = MAX_X_DIM."""
    def inside(self, x, on_boundary):
        # membership predicate used by dolfin when marking boundary facets
        return on_boundary and dolfin.near(x[0], MAX_X_DIM)
class Edge2(dolfin.SubDomain):
    """Boundary subdomain marking the cylinder end cap at x = MIN_X_DIM."""
    def inside(self, x, on_boundary):
        # membership predicate used by dolfin when marking boundary facets
        return on_boundary and dolfin.near(x[0], MIN_X_DIM)
class cylinderDemo3D(pyurdme.URDMEModel):
    """Annihilation demo: species A and B are created at opposite end caps
    of a 3-D cylinder, diffuse through it, and annihilate on contact."""
    def __init__(self, model_name="cylinder_demo3d"):
        pyurdme.URDMEModel.__init__(self, model_name)
        # System constants
        D_const = 0.1  # diffusion constant shared by both species
        # Define Species
        A = pyurdme.Species(name="A", diffusion_constant=D_const)
        B = pyurdme.Species(name="B", diffusion_constant=D_const)
        self.add_species([A, B])
        # Define Geometry: a cylinder of radius 1 along the x axis
        pt1 = dolfin.Point(MAX_X_DIM, 0, 0)
        pt2 = dolfin.Point(MIN_X_DIM, 0, 0)
        cylinder = mshr.Cylinder(pt1, pt2, 1.0, 1.0)
        self.mesh = pyurdme.URDMEMesh(mesh=mshr.generate_mesh(cylinder, 32))
        # Define Subdomains: mark the two end caps as subdomains 2 and 3
        self.add_subdomain(Edge1(), 2)
        self.add_subdomain(Edge2(), 3)
        data = self.get_solver_datastructure()
        vol = data['vol']
        sd = data['sd']
        # total voxel volume of each cap, used to normalise the creation rates
        left = numpy.sum(vol[sd == 2])
        right = numpy.sum(vol[sd == 3])
        k_react = pyurdme.Parameter(name="k_react", expression=1.0)
        # creation propensity 100 spread over each cap's volume
        k_creat1 = pyurdme.Parameter(name="k_creat1", expression=100/left)
        k_creat2 = pyurdme.Parameter(name="k_creat2", expression=100/right)
        self.add_parameter([k_react, k_creat1,k_creat2])
        # Define Reactions: creation restricted to the caps, annihilation everywhere
        R1 = pyurdme.Reaction(name="R1", reactants=None, products={A:1}, rate=k_creat1, restrict_to=2)
        R2 = pyurdme.Reaction(name="R2", reactants=None, products={B:1}, rate=k_creat2, restrict_to=3)
        R3 = pyurdme.Reaction(name="R3", reactants={A:1, B:1}, products=None, rate=k_react)
        self.add_reaction([R1, R2, R3])
        # Define simulation timespan: 200 unit-spaced output times
        self.timespan(range(200))
# Python 2 script entry point: run the simulation, export trajectories and
# plot the time-averaged spatial concentration profiles.
if __name__ == "__main__":
    model = cylinderDemo3D()
    result = pyurdme.urdme(model, report_level=1)
    # This line here dumps the state of A at all timepoints to Paraview comaptible output (VTK). The trajectory
    # is written to a folder "Aout", where each snapshot is stored in a separate file. To open the "movie",
    # just open Aout/trajectory.pvd, then you can animate etc.
    if not os.path.isdir('Aout'):
        print "Writing species 'A' to folder 'Aout' in VTK format"
        result.export_to_vtk(species='A',folder_name="Aout")
    if not os.path.isdir('Bout'):
        print "Writing species 'B' to folder 'Bout' in VTK format"
        result.export_to_vtk(species='B',folder_name="Bout")
    if not os.path.isdir('csv_out'):
        print "Writing trajectory data in CSV format"
        result.export_to_csv(folder_name="csv_out")
    # Plot of the time-average spatial concentration.
    x_vals = model.mesh.coordinates()[:, 0]
    # sum over time (axis 0) gives a time-aggregated concentration per voxel
    A_vals = numpy.sum(result.get_species("A", concentration=True), axis=0)
    B_vals = numpy.sum(result.get_species("B", concentration=True), axis=0)
    # sum over voxels (axis 1) gives total copy numbers per timepoint
    A_sum = numpy.sum(result.get_species("A"), axis=1)
    B_sum = numpy.sum(result.get_species("B"), axis=1)
    print A_sum
    print B_sum
    data = model.get_solver_datastructure()
    vol = data['vol']
    sd = data['sd']
    # report the end-cap volumes for reference
    print numpy.sum(vol[sd == 2])
    print numpy.sum(vol[sd == 3])
    plt.plot(x_vals,A_vals,'.r',x_vals,B_vals,'.b')
    plt.legend(['A', 'B'])
    plt.show()
| gpl-3.0 |
xwolf12/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
# keep only the first two features so the decision surface can be drawn in 2-D
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
    """
    Custom linear kernel that weights the first feature twice as much:

                  (2 0)
        k(X, Y) = X (   ) Y.T
                  (0 1)
    """
    weights = np.array([[2.0, 0.0], [0.0, 1.0]])
    weighted = np.dot(X, weights)
    return np.dot(weighted, Y.T)
h = .02  # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# evaluate the classifier on every grid point (flattened into rows of (x, y))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
benfrandsen/mPDFmodules_noDiffpy | fullfitmPDF_rhombo_nyquist_lmfit.py | 1 | 10627 | import scipy
from scipy import interpolate
from scipy.optimize.minpack import curve_fit
import numpy as np
import matplotlib.pyplot as plt
import sys
import random
from lmfit import Parameters, minimize, fit_report
from mcalculator_mod import calculateMPDF
from getmPDF import j0calc
def cv(x1, y1, x2, y2):
    """
    Compute the convolution of two sampled functions y1 and y2.

    Inputs: arrays y1, x1, y2, x2; the x-grids should share the same
    spacing to be safe.
    Returns: arrays xcv and ycv giving the convolution and its grid.
    """
    # grid spacing, assumed uniform and shared by both inputs
    step = x1[1] - x1[0]
    full_conv = np.convolve(y1, y2, 'full')
    ycv = step * full_conv
    # the output grid spans the sum of the two input ranges
    xcv = np.linspace(x1[0] + x2[0], x1[-1] + x2[-1], len(ycv))
    return xcv, ycv
def costransform(q, fq, rmin=0.0, rmax=50.0, rstep=0.1):  # does not require even q-grid
    """
    Compute the cosine Fourier transform of f(q) by direct trapezoidal
    integration (no FFT), so the q-grid need not be evenly spaced.

    Inputs: array q (>=0 only), array f(q) to be transformed, and optional
    rmin, rmax, rstep defining the output r-grid.
    Returns: arrays r and fr, where fr is the cosine transform of fq.
    """
    # index bounds of the output grid; the 1e-8 nudge guards against
    # floating-point rounding at the interval edges
    lo = int(np.ceil((rmin - 1e-8) / rstep))
    hi = int(np.floor((rmax + 1e-8) / rstep)) + 1
    r = rstep * np.arange(lo, hi)
    # cos(q*r) evaluated on the full (r, q) grid in one shot
    kernel = np.cos(np.outer(r, q))
    fr = np.sqrt(2.0 / np.pi) * np.trapz(fq * kernel, q)
    return r, fr
def fitfunc(x,lowerb,upperb,scale1,scale2,width,damp,rSr,Sr,para):
    '''
    Compute the model d(r) on [lowerb, upperb].

    x: meaningless array, can be simply np.array([0]), just needed to make the curve_fit module work
    lowerb, upperb: r-range bounds of the returned model
    scale1: scale factor of the correlated part of d(r) (coming from the ideal mPDF)
    scale2: scale factor of the "paramagnetic" part of d(r)
    width: smoothing factor when calculating the mPDF
    damp: full-width half max of overall gaussian envelope applied to mPDF.
    rSr, Sr: form-factor self-convolution grid and values
    para: paramagnetic term added to the low-r part of d(r)

    NOTE(review): relies on module-level globals (atoms, spins, uclist,
    rstep, rcalcmax) set by the surrounding script.
    '''
    [r,fr]=calculateMPDF(atoms,spins,uclist,rstep,rcalcmax,width) ### ideal f(r)
    rDr,Dr = cv(r,fr,rSr,Sr) ### correlated term in d(r)
    Dr = scale1*Dr
    Dr[:len(para)] += scale2*para ### adding paramagnetic term
    #th=(np.sin(qmax*r)-np.sin(qmin*r))/np.pi/r ### convolution to simulate effects of finite qmin and qmax
    #rDrcv, Drcv = cv(rDr,Dr,r,th)
    # finite-q convolution is disabled; pass the arrays straight through
    rDrcv,Drcv=rDr,Dr
    # overall gaussian damping envelope
    dampf=np.exp(-(rDrcv/2.0/damp)**2)
    Drcv=dampf*Drcv
    #rDrcv, Drcv = rDr,Dr
    # slice out the requested r-window (half-step offsets avoid edge rounding)
    return Drcv[np.logical_and(rDrcv>lowerb+0.5*rstep,rDrcv<=upperb+0.5*rstep)]
### Preparatory stuff
# q-grid and magnetic form factor (j0calc with tabulated Mn coefficients)
[dq,q1,q2] = [0.01,0.00,10.00]
q=np.arange(q1,q2,dq)
fMn=j0calc(q,[0.422,17.684,0.5948,6.005,0.0043,-0.609,-0.0219])
r1,r2,dr=-5.0266734,5.027141,0.0897662 #nyquist grid spacing
# real-space form factor and its self-convolution S(r)
rsr, sr=costransform(q,fMn,rmin=r1,rmax=r2,rstep=dr)
sr = np.sqrt(np.pi/2.0)*sr
rSr,Sr = cv(rsr,sr,rsr,sr)
para = -1.0*np.sqrt(2.0*np.pi)*np.gradient(Sr,rSr[1]-rSr[0]) ### paramagnetic term in d(r)
### Prepare details for mPDF calculation
uclist=np.arange(32)  # indices of the 32 spins in the magnetic unit cell
rstep=dr #nyquist grid spacing
rmin=0.09
rmax=20.0
rcalcmin = 0
rcalcmax=25.0
qmin=0.85 ### bounds for convolution to simulate effects of finite qmin and qmax
qmax=35.0
### Prepare structure
print 'Preparing initial structure...'
### Rhombohedral structure
### lattice parameters of magnetic unit cell, and some basis transformation stuff
e1=np.array([-0.5,0.5,0])
e1n=e1/np.linalg.norm(e1)
e2=np.array([0,-0.5,0.5])
e2n=e2/np.linalg.norm(e2)
e3=np.array([1,1,1])
e3n=e3/np.linalg.norm(e3)
Rbasis=np.array([e1,e2,e3])
IRbasis=np.linalg.inv(Rbasis)
Rbasisn=np.array([e1n,e2n,e3n])
#Tvals=['15']
# temperatures (K) with decimal points written as 'p', e.g. 112p5 = 112.5 K
Tvals=['15','40','70','90','100','110','112p5','115','117p5','120','125','130','200','300']
# refined rhombohedral lattice parameters per temperature (Nyquist refinements)
rhombo20avals={'15':3.150321,'40':3.1501,'70':3.14879,'90':3.14742,'100':3.14643,'110':3.14517,'112p5':3.1444,'115':3.14342,'117p5':3.14299,'120':3.1429284,'125':3.143242,'130':3.1434673,'200':3.146904,'300':3.1513434} ### Nyquist refinement values
rhombo20cvals={'15':7.5944326,'40':7.59639,'70':7.60716,'90':7.61846,'100':7.6268,'110':7.63692,'112p5':7.64238,'115':7.65051,'117p5':7.65622,'120':7.6574473,'125':7.657119,'130':7.6568855,'200':7.65727,'300':7.6611397}
#T='112p5'
# Main loop: for each temperature, build the spin structure, refine the mPDF
# scale factors against the experimental d(r), and save/plot the result.
for T in Tvals:
    a=rhombo20avals[T]
    c=rhombo20cvals[T]
    latpars=np.array([[a],[a],[c]])
    # distort the normalised rhombohedral basis by the lattice parameters,
    # then transform back to the (doubled) magnetic cell
    distorted=Rbasisn*latpars
    cell=2*np.dot(IRbasis,distorted)
    ### positions of atoms in magnetic unit cell
    basis=np.array([[0,0,0],[0.5,0,0],[0.25,0.25,0],[0,0.5,0],[0.25,0.75,0],[0.5,0.5,0],[0.75,0.25,0],[0.75,0.75,0],[0,0.25,0.25],[0.25,0,0.25],[0,0.75,0.25],[0.25,0.5,0.25],[0.5,0.25,0.25],[0.75,0,0.25],[0.5,0.75,0.25],[0.75,0.5,0.25],[0,0,0.5],[0,0.5,0.5],[0.25,0.25,0.5],[0.5,0,0.5],[0.25,0.75,0.5],[0.5,0.5,0.5],[0.75,0.25,0.5],[0.75,0.75,0.5],[0,0.25,0.75],[0.25,0,0.75],[0,0.75,0.75],[0.25,0.5,0.75],[0.5,0.25,0.75],[0.75,0,0.75],[0.5,0.75,0.75],[0.75,0.5,0.75]])
    atomcell=np.dot(basis,cell)
    ### spin orientations in same order as atomic positions.
    svec=np.array([1,-1,0])
    svec=svec/np.linalg.norm(svec)
    theta=np.pi/2
    phi=-np.pi/4
    width=0.2
    damp=100
    S=2.5
    spincell=S*np.array([svec,-svec,-svec,-svec,svec,svec,svec,-svec,
                         -svec,-svec,svec,svec,svec,svec,-svec,-svec,
                         -svec,svec,svec,svec,-svec,-svec,-svec,svec,
                         svec,svec,-svec,-svec,-svec,-svec,svec,svec])
    ### how big to make the box
    radius=40.
    dim1=np.round(radius/np.linalg.norm(cell[0]))
    dim2=np.round(radius/np.linalg.norm(cell[1]))
    dim3=np.round(radius/np.linalg.norm(cell[2]))
    ### generate the coordinates of each unit cell
    latos=np.dot(np.mgrid[-dim1:dim1+1,-dim2:dim2+1,-dim3:dim3+1].transpose().ravel().reshape((2*dim1+1)*(2*dim2+1)*(2*dim3+1),3),cell)
    ### select points within a desired radius from origin
    latos=latos[np.where(np.apply_along_axis(np.linalg.norm,1,latos)<=(rcalcmax+10.0))]
    ## rearrange latos array so that [0,0,0] is the first one (for convenience)
    latos[np.where(np.all(latos==[0,0,0],axis=1))]=latos[0]
    latos[0]=np.array([0,0,0])
    ### create list of all Mn atomic positions and spin directions
    atoms=np.empty([len(latos)*len(atomcell),3])
    spins=np.empty([len(latos)*len(spincell),3])
    index=0
    for i in range(len(latos)):
        for j in range(len(atomcell)):
            atoms[index]=latos[i]+atomcell[j]
            spins[index] = spincell[j]
            index+=1
    ### Record positions of up-spins and down-spins to quickly change spin orientation
    ups=np.where(np.all(spins==S*svec,axis=1))
    downs=np.where(np.all(spins==-1.0*S*svec,axis=1))
    ### Experimental data
    dataFile='Tdep/results_R3m_20A_nyquist/R3m_20A_Ny_'+T+'K.diff'
    #dataFile='Tdep/results_R3m_20A/correctedData_stdFF_'+T+'K.txt'
    #dataFile='Tdep/results_R3m_20A/scaleCorrected_R3m_20A_'+T+'K.diff'
    expr,expDr,err = np.loadtxt(dataFile,unpack=True)
    print 'Refining...'
    # lmfit residual: model minus data (or the bare model when data is None)
    def residual(pars, x, data=None):
        vals = pars.valuesdict()
        scalePara=vals['scalePara']
        scaleCorr=vals['scaleCorr']
        width=vals['width']
        damp=vals['damp']
        para = -1.0*np.sqrt(2.0*np.pi)*np.gradient(Sr,rSr[1]-rSr[0]) ### paramagnetic term in d(r)
        #paraExpGrid=np.interp(expr,rSr,para)
        model = fitfunc(x,rmin,rmax,scaleCorr,scalePara,width,damp,rSr,Sr,para)
        if data is None:
            return model
        return (model - data)
    # only the correlated scale is refined; the rest are held fixed
    fit_params = Parameters()
    fit_params.add('scalePara', value=3.77156,vary=False)
    fit_params.add('scaleCorr', value=np.random.uniform(5,25),min=0,max=30)
    fit_params.add('width', value=0.2,vary=False)
    fit_params.add('damp', value=100,vary=False)#1.5,min=1.0,max=100)
    x=np.array([0])
    data=expDr[np.logical_and(expr>rmin+0.5*rstep,expr<=rmax+0.5*rstep)]
    dataErr=err[np.logical_and(expr>rmin+0.5*rstep,expr<=rmax+0.5*rstep)]
    # NOTE(review): 'sigma'/'absolute_sigma' are curve_fit keywords, not
    # lmfit.minimize keywords -- confirm these are actually honoured here
    fit_kws={'sigma':err,'absolute_sigma':True}
    out = minimize(residual, fit_params, args=(x,), kws={'data':data},**fit_kws)
    # NOTE(review): evaluates with fit_params rather than out.params; this
    # assumes minimize() mutates fit_params in place (older lmfit behaviour)
    fit = residual(fit_params, x)
    print fit_report(fit_params)
    rcomp = expr[np.logical_and(expr>rmin+0.5*rstep,expr<=rmax+0.5*rstep)]
    diff = data-fit
    chisq=np.sum((diff)**2/len(diff))
    print chisq
    rfull=expr[np.logical_and(expr>rcalcmin+0.5*rstep,expr<=rcalcmax+0.5*rstep)]
    datafull=expDr[np.logical_and(expr>rcalcmin+0.5*rstep,expr<=rcalcmax+0.5*rstep)]
    # offset used to draw the difference curve below the data
    offset = 1.25*np.abs(np.min(data))
    fig=plt.figure()
    ax=fig.add_subplot(111)
    ax.plot(rfull,datafull,marker='o',mfc='none',mec='b',linestyle='none')
    ax.plot(rcomp,fit,'r-',lw=2)
    ax.plot(rcomp,np.zeros_like(rcomp)-offset,'k-',rcomp,diff-offset,'g-')
    ax.set_xlim(xmin=rcalcmin,xmax=rcalcmax)
    ax.set_xlabel('r ($\AA$)')
    ax.set_ylabel('d(r) ($\AA^{-2}$)')
    # plt.show()
    vals=fit_params.valuesdict()
    scalePara=vals['scalePara']
    scaleCorr=vals['scaleCorr']
    width=vals['width']
    damp=vals['damp']
    ###### provide options to save data
    fitstring=' Experimental data: '+dataFile+\
    '\n Chi-squared: '+str(chisq)+\
    '\n Correlated scale: '+str(scaleCorr)+\
    '\n Paramagnetic scale: '+str(scalePara)+\
    '\n Broadening factor: '+str(width)+\
    '\n Theta: '+str(theta)+\
    '\n Phi: '+str(phi)+\
    '\n Damp: '+str(damp)+\
    '\n '+\
    '\n Column format: r, Obs., Calc., Diff., Uncertainty'
    savefile='mPDFfit_R3m_nyquist_0-20_fixedParaScale_'+T+'.txt'
    #savefile='mPDFfit_correctedData_noDamp_R3m_0-20_'+T+'.txt'
    np.savetxt(savefile,np.column_stack((rcomp,data,fit,diff,dataErr)),header=fitstring)
#    if (raw_input('Save fitted function? (y/n) ')=='y'):
    ##### provide options to save data
#        fitstring=' Experimental data: '+dataFile+\
#        '\n Chi-squared: '+str(chisq)+\
#        '\n Correlated scale: '+str(scaleCorr)+\
#        '\n Paramagnetic scale: '+str(scalePara)+\
#        '\n Broadening factor: '+str(width)+\
#        '\n Theta: '+str(theta)+\
#        '\n Phi: '+str(phi)+\
#        '\n Damp: '+str(damp)+\
#        '\n '+\
#        '\n Column format: r, Obs., Calc., Diff.'
#        savefile=raw_input('File name: ')
#        np.savetxt(savefile,np.column_stack((rcomp,data,fit,diff)),header=fitstring)
| gpl-3.0 |
bssrdf/sklearn-theano | examples/plot_mnist_generator.py | 9 | 1493 | """
=======================================================
Generative networks for random MNIST digits
=======================================================
This demo of an MNIST generator is based on the work of
I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair,
A. Courville, Y.Bengio. *Generative Adversarial Networks*, June 2014.
The generators trained as part of the published experiment have been wrapped in
sklearn-theano, and can easily be used to fetch an arbitrary number of plausible
MNIST digits.
Additionally, this example also shows how to make an automatically updating plot
with the 'TkAgg' backend to matplotlib.
"""
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import time
from sklearn_theano.datasets import fetch_mnist_generated
# fetch 1600 pre-generated digits from the wrapped adversarial generator
X = fetch_mnist_generated(n_samples=1600, random_state=1999)
# plotting based on
# http://stackoverflow.com/questions/4098131/matplotlib-update-a-plot
# 16 digits are shown per frame, so there are len(X)/16 animation frames
num_updates = len(X) // 16
f, axarr = plt.subplots(4, 4)
objarr = np.empty_like(axarr)
for n, ax in enumerate(axarr.flat):
    # seed each of the 4x4 tiles with an image; keep the AxesImage handles
    # so the pixel data can be swapped in place on every frame
    objarr.flat[n] = ax.imshow(X[n], cmap='gray', interpolation='nearest')
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
plt.show(block=False)
for i in range(num_updates):
    # advance every tile to the next batch of 16 digits
    for n, obj in enumerate(objarr.flat):
        obj.set_data(X[i * len(objarr.flat) + n])
    plt.draw()
    time.sleep(.08)
    if (i % 20) == 0:
        print("Iteration %i" % i)
plt.show()
| bsd-3-clause |
JosmanPS/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering without any connectivity constraint
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    # BUG FIX: use the builtin float instead of np.float -- the np.float
    # alias was deprecated and removed (NumPy 1.24), and the structured
    # plot below already used the builtin
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering again, restricted to the k-NN connectivity graph
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
                               linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
# Iris measurements give a small, well-behaved real data set for the tests
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Arguments of launch_mcd_on_dataset are:
    # (n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support)
    # Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
    # Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
    # Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
    # 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on gaussian data with n_outliers planted outliers and
    check the location/covariance errors stay below the tolerances and the
    estimated support keeps at least tol_support points."""
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers: shift random rows by +/-5 per feature
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    # the Mahalanobis distances of the training data must match dist_
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Check that the code does not break with X.shape = (3, 1)
    # (i.e. n_support = n_samples)
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(3, 1))
    mcd = MinCovDet()
    # regression test: must not raise
    mcd.fit(X)
def test_outlier_detection():
    """EllipticEnvelope: unfitted use must raise; after fitting, decision
    values, Mahalanobis distances and accuracy must be self-consistent."""
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # predicting or scoring before fit must raise NotFittedError
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    # raw decision values are the Mahalanobis distances
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # accuracy equals the fraction of samples not flagged as outliers
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # transformed decision < 0 exactly for predicted outliers
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
OpenDataDayBilbao/teseo2014 | data/analyzer.py | 2 | 25355 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 22 08:55:14 2014
@author: aitor
"""
import mysql.connector
import networkx as nx
from networkx.generators.random_graphs import barabasi_albert_graph
import json
import os.path
import numpy as np
import pandas as pd
from pandas import Series
from pandas import DataFrame
import matplotlib.pyplot as plt
# MySQL connection settings for the cleaned Teseo thesis database.
# NOTE(review/security): credentials are hard-coded in source control --
# move them to an environment variable or an untracked config file.
config = {
    'user': 'aitor',
    'password': 'pelicano',
    'host': 'thor.deusto.es',
    'database': 'teseo_clean',
}
# module-level caches, populated from the JSON cache files further below
persons_university = []
persons_id = []
# two-digit first-level subject codes mapped to English topic names
# (presumably the UNESCO nomenclature used by Teseo -- confirm)
first_level_topic_list = {
    11: 'Logic',
    12: 'Mathematics',
    21: 'Astronomy, Astrophysics',
    22: 'Physics',
    23: 'Chemistry',
    24: 'Life Sciences',
    25: 'Earth and space science',
    31: 'Agricultural Sciences',
    32: 'Medical Sciences',
    33: 'Technological Sciences',
    51: 'Anthropology',
    52: 'Demography',
    53: 'Economic Sciences',
    54: 'Geography',
    55: 'History',
    56: 'Juridical Science and Law',
    57: 'Linguistics',
    58: 'Pedagogy',
    59: 'Political Science',
    61: 'Psychology',
    62: 'Sciences of Arts and Letters',
    63: 'Sociology',
    71: 'Ethics',
    72: 'Philosophy',
}
# Execute it once
def get_persons_university():
    """Build ./cache/persons_university.json mapping thesis author id ->
    {"university": {id, name, location}, "author": {name}} from the DB."""
    p_u = {}
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    query = "SELECT thesis.author_id, thesis.university_id, university.name, university.location, person.name FROM thesis, university, person WHERE thesis.university_id = university.id AND thesis.author_id = person.id"
    cursor.execute(query)
    for thesis in cursor:
        # keyed by author id; an author with several theses keeps the last row
        p_u[thesis[0]] = {
            "university" : {"id" : thesis[1], "name" : thesis[2], "location" : thesis[3]},
            "author" : {"name" : thesis[4]}
        }
    cursor.close()
    cnx.close()
    # NOTE(review): the file handle returned by open() is never closed
    # explicitly; consider a `with` block
    json.dump(p_u, open("./cache/persons_university.json", "w"), indent=2)
def load_persons_university():
    """Return the author-id -> {university, author} mapping, building the
    JSON cache first if it does not exist yet.

    Note: JSON object keys are strings, so callers must index with
    str(person_id).
    """
    print "Loading the persons_university cache..."
    if not os.path.isfile("./cache/persons_university.json"):
        print " - Building the persons_university cache..."
        get_persons_university()
    p_u = json.load(open("./cache/persons_university.json", "r"))
    print "done"
    return p_u
def get_persons_id():
    """Dump the person-id -> name mapping for every person in the
    database to ./cache/persons_id.json."""
    p_i = {}
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    query = "SELECT person.id, person.name FROM person"
    cursor.execute(query)
    for person in cursor:
        p_i[person[0]] = person[1]
    cursor.close()
    cnx.close()
    json.dump(p_i, open("./cache/persons_id.json", "w"), indent = 2)
def load_persons_id():
print "Loading the persons_id cache..."
if not os.path.isfile("./cache/persons_id.json"):
print " - Building the persons_id cache..."
get_persons_university()
p_u = json.load(open("./cache/persons_id.json", "r"))
print "done"
return p_u
persons_university = load_persons_university()
persons_id = load_persons_id()
def build_thesis_genealogy():
    """Build the advisor -> student directed graph from the database.

    Each edge points from an advisor to the author of a thesis he/she
    advised.  Node attributes name/university/location come from the
    module-level JSON caches.  The graph is written to
    ./networks/genealogy.gexf and returned.
    """
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    query = "SELECT thesis.author_id, advisor.person_id FROM thesis, advisor WHERE thesis.id = advisor.thesis_id"
    cursor.execute(query)
    G = nx.DiGraph()
    for thesis in cursor:
        # edge direction: advisor -> student
        G.add_edge(thesis[1], thesis[0])
    i = 0
    for n in G.nodes():
        try:
            # the JSON caches use string keys, hence str()
            node = str(n)
            G.node[n]["name"] = persons_id[node]
            try:
                G.node[n]["university"] = persons_university[node]["university"]["name"]
                G.node[n]["location"] = persons_university[node]["university"]["location"]
                i += 1
            except:
                # person with no known university/location
                G.node[n]["university"] = "none"
                G.node[n]["location"] = "none"
        except:
            # NOTE(review): bare except hides unexpected errors; persons
            # missing from the name cache are merely printed and skipped.
            print n
    print "Total persons with a location:", i
    cursor.close()
    cnx.close()
    nx.write_gexf(G, "./networks/genealogy.gexf")
    return G
def build_panel_network(with_weigh = True):
    """Build the co-membership graph of thesis panel members.

    Two persons are connected if they sat together on at least one
    panel.  With with_weigh True the edge weight counts the number of
    shared panels.  The graph is written to ./networks/panels.gexf and
    returned.
    """
    cnx = mysql.connector.connect(**config)
    print "Recovering thesis ids"
    cursor = cnx.cursor()
    query = "SELECT id FROM thesis"
    cursor.execute(query)
    thesis_ids = []
    for thesis in cursor:
        thesis_ids.append(thesis[0])
    cursor.close()
    print "Creating panel network"
    cursor = cnx.cursor()
    G = nx.Graph()
    for c, thesis_id in enumerate(thesis_ids):
        if c % 1000 == 0:
            # progress indicator
            print c, "of", len(thesis_ids)
        cursor.execute("SELECT person_id FROM panel_member WHERE thesis_id = " + str(thesis_id))
        members = []
        for member in cursor:
            members.append(member[0])
        # connect every unordered pair of members of this panel
        for i, m1 in enumerate(members):
            for m2 in members[i+1:]:
                if with_weigh:
                    if not G.has_edge(m1, m2):
                        G.add_edge(m1,m2, weight = 1)
                    else:
                        G.edge[m1][m2]['weight'] += 1
                else:
                    G.add_edge(m1,m2)
    cursor.close()
    cnx.close()
    nx.write_gexf(G, "./networks/panels.gexf")
    return G
def get_first_level_descriptors():
    """Fetch the root (first-level) UNESCO descriptors.

    Returns:
        dict mapping descriptor code -> {"id": db id, "text": name}.
    """
    cnx = mysql.connector.connect(**config)
    print "Recovering first level descriptors"
    cursor = cnx.cursor()
    query = "select id, text, code from descriptor where parent_code IS NULL"
    cursor.execute(query)
    descriptors = {}
    for d in cursor:
        descriptors[d[2]] = {"id" : d[0], "text" : d[1]}
    cursor.close()
    cnx.close()
    return descriptors
def build_panel_network_by_descriptor(unesco_code):
    """Build the weighted panel co-membership graph restricted to theses
    tagged with the given first-level UNESCO code.

    descriptor.code DIV 10000 extracts the first-level code from the
    six-digit descriptor code.  The graph is written to
    ./networks/panels-<code>.gexf and returned.
    """
    cnx = mysql.connector.connect(**config)
    print "Recovering thesis ids"
    cursor = cnx.cursor()
    query = """SELECT thesis_id
            FROM association_thesis_description, descriptor
            WHERE association_thesis_description.descriptor_id = descriptor.id
            AND descriptor.code DIV 10000 = """ + str(unesco_code)
    cursor.execute(query)
    thesis_ids = []
    for thesis in cursor:
        thesis_ids.append(thesis[0])
    cursor.close()
    print "Creating panel network"
    cursor = cnx.cursor()
    G = nx.Graph()
    for c, thesis_id in enumerate(thesis_ids):
        if c % 1000 == 0:
            # progress indicator
            print c, "of", len(thesis_ids)
        cursor.execute("SELECT person_id FROM panel_member WHERE thesis_id = " + str(thesis_id))
        members = []
        for member in cursor:
            members.append(member[0])
        # connect every unordered pair; weight counts shared panels
        for i, m1 in enumerate(members):
            for m2 in members[i+1:]:
                if not G.has_edge(m1, m2):
                    G.add_edge(m1,m2, weight = 1)
                else:
                    G.edge[m1][m2]['weight'] += 1
    cursor.close()
    cnx.close()
    nx.write_gexf(G, "./networks/panels-" + str(unesco_code) + ".gexf")
    return G
def generate_random_graph(n, m):
    """Return a Barabasi-Albert random graph (n nodes, m edges attached
    per new node, seed 10); used as a null model for the panel network."""
    print "Building random graph"
    G = barabasi_albert_graph(n, m, 10)
    return G
def analize_cliques(G):
print "Calculating cliques..."
cliques = nx.find_cliques(G)
print "Analysing the results..."
tot_cliques = 0
tot_size = 0
max_size = 0
min_size = 10000
high_5 = 0
hist_clic = {}
for c in cliques:
tot_cliques += 1
tot_size += len(c)
if len(c) > 5: #5 is the panel size in Spain
high_5 += 1
if len(c) > max_size :
max_size = len(c)
if len(c) < min_size:
min_size = len(c)
if hist_clic.has_key(len(c)):
hist_clic[len(c)] += 1
else:
hist_clic[len(c)] = 1
print "CLIQUES:"
print " - Total cliques:", tot_cliques
print " - Avg cliques size:", tot_size * 1.0 / tot_cliques
print " - Max clique:", max_size
print " - Min clique:", min_size
print " - Cliques with a size higher than 5:", high_5
print " - histogram:", hist_clic
results = {}
results['clique_tot'] = tot_cliques
results['clique_avg'] = tot_size * 1.0 / tot_cliques
results['clique_max'] = max_size
results['clique_min'] = min_size
results['clique_greater_5'] = high_5
results['clique_greater_5_norm'] = high_5 * 1.0 / tot_cliques
#results['clique_histogram'] = hist_clic
return results
def analize_degrees(G):
    """Print degree statistics of G and return max/min/avg in a dict.

    NOTE(review): relies on nx.degree() returning a dict (networkx 1.x
    API) -- confirm the installed networkx version.
    """
    print "Calculating degrees..."
    degrees = nx.degree(G)
    hist = nx.degree_histogram(G)
    print "DEGREES:"
    print " - Max degree:", max(degrees.values())
    print " - Min degree:", min(degrees.values())
    print " - Avg. degree:", sum(degrees.values()) * 1.0 / len(degrees)
    print " - histogram:", hist
    results = {}
    results['degree_avg'] = sum(degrees.values()) * 1.0 / len(degrees)
    results['degree_max'] = max(degrees.values())
    results['degree_min'] = min(degrees.values())
    #results['degree_histogram'] = hist
    return results
def analize_edges(G):
    """Print edge-weight statistics of G and return max/min/avg weights.

    Assumes every edge carries a 'weight' attribute (as built by the
    build_panel_network* functions).
    """
    print "Analizing edges..."
    min_weight = 10000
    max_weight = 0
    acum_weight = 0
    hist_weight = {}
    for e in G.edges(data=True):
        acum_weight += e[2]['weight']
        if max_weight < e[2]['weight']:
            max_weight = e[2]['weight']
        if min_weight > e[2]['weight']:
            min_weight = e[2]['weight']
        if hist_weight.has_key(e[2]['weight']):
            hist_weight[e[2]['weight']] += 1
        else:
            hist_weight[e[2]['weight']] = 1
    print "EDGES:"
    print " - Max weight:", max_weight
    print " - Min weight:", min_weight
    print " - Avg weight:", acum_weight * 1.0 / len(G.edges())
    print " - histogram:", hist_weight
    results = {}
    results['weight_avg'] = acum_weight * 1.0 / len(G.edges())
    results['weight_max'] = max_weight
    results['weight_min'] = min_weight
    #results['weight_histogram'] = hist_weight
    return results
def analyze_rdn_graph():
    """Analyze a Barabasi-Albert null model sized like the real panel
    network (188979 nodes, ~7 edges per node) for comparison."""
    G = generate_random_graph(188979, 7) #nodes and nodes/edges
    nx.write_gexf(G, "./networks/barabasi_panel.gexf")
    print "Nodes:", G.number_of_nodes()
    print "Edges:", G.number_of_edges()
    analize_cliques(G)
    analize_degrees(G)
def analyze_first_level_panels():
    """Run the full panel analysis (cliques, degrees, edge weights,
    clustering) per first-level UNESCO topic and persist the results as
    JSON and CSV under ./networks/."""
    results = {}
    for d in first_level_topic_list:
        print "\n*********DESCRIPTOR: " + first_level_topic_list[d] + "(" + str(d) + ")"
        G = build_panel_network_by_descriptor(d)
        print "\nDESCRIPTOR: " + first_level_topic_list[d] + "(" + str(d) + ")"
        print "Nodes:", G.number_of_nodes()
        print "Edges:", G.number_of_edges()
        res_clique = analize_cliques(G)
        res_degree = analize_degrees(G)
        res_weight = analize_edges(G)
        # merge the three partial result dicts into one record
        d_final = dict(res_clique)
        d_final.update(res_degree)
        d_final.update(res_weight)
        d_final['id'] = d
        d_final['avg_clustering'] = nx.average_clustering(G)
        results[first_level_topic_list[d]] = d_final
    print "Writing json..."
    json.dump(results, open('./networks/first_level_panels_analysis.json','w'), indent = 2)
    print "Writing csvs..."
    # write both orientations (topics as columns and as rows)
    df = DataFrame(results)
    df.to_csv('./networks/first_level_panels_analysis.csv')
    dfinv = df.transpose()
    dfinv.to_csv('./networks/first_level_panels_analysis_inv.csv')
def from_json_to_dataframe():
    """Load a previously computed per-topic analysis JSON and dump it as
    CSV in both orientations; returns the DataFrame.

    NOTE(review): this reads './networks/first_level_analysis.json'
    while analyze_first_level_panels() writes
    './networks/first_level_panels_analysis.json' -- confirm which file
    name is intended, the two look inconsistent.
    """
    results = json.load(open('./networks/first_level_analysis.json','r'))
    df = DataFrame(results)
    df.to_csv("panels.csv")
    dft = df.transpose()
    dft.to_csv("panels_trans.csv")
    return df
#df = DataFrame(['id', 'name', 'clique_tot', 'clique_avg', 'clique_max', 'clique_min', 'clique_greater_5', 'degree_max', 'degree_min', 'degree_avg', 'weight_max', 'weight_min', 'weight_avg']);
def panel_repetition_per_advisor():
    """For every advisor, count how often the same people reappear on the
    panels of his/her theses.

    Writes the per-member breakdown to
    ./networks/repetitions_per_advisor.json and returns a dict
    advisor_id -> total repetitions.

    NOTE(review): a member's first appearance is stored as 0 and each
    further appearance adds 1, so the totals count *repetitions beyond
    the first appearance*, not appearances -- confirm this is intended.
    """
    cnx = mysql.connector.connect(**config)
    print "Recovering thesis ids for each advisor..."
    cursor = cnx.cursor()
    query = "SELECT person_id, thesis_id FROM advisor"
    cursor.execute(query)
    thesis_advisor = {}
    for thesis in cursor:
        adv_id = thesis[0]
        thesis_id = thesis[1]
        if thesis_advisor.has_key(adv_id):
            thesis_advisor[adv_id].append(thesis_id)
        else:
            thesis_advisor[adv_id] = [thesis_id]
    cursor.close()
    print "Counting repetitions..."
    cursor = cnx.cursor()
    results = {}
    for c, adv in enumerate(thesis_advisor):
        if c % 500 == 0:
            # progress indicator
            print c, "of", len(thesis_advisor)
        thesis_ids = thesis_advisor[adv]
        adv_id = adv
        for thesis_id in thesis_ids:
            cursor.execute("SELECT person_id FROM panel_member WHERE thesis_id = " + str(thesis_id))
            for member in cursor:
                if results.has_key(adv_id):
                    if results[adv_id].has_key(member[0]):
                        results[adv_id][member[0]] += 1
                    else:
                        results[adv_id][member[0]] = 0
                else:
                    results[adv_id] = {member[0] : 0}
    cursor.close()
    cnx.close()
    json.dump(results, open('./networks/repetitions_per_advisor.json', 'w'), indent=2)
    print "Procesing total repetitons"
    # sum the repetitions over all panel members of each advisor
    repetitions_per_advisor = {}
    for adv in results:
        total_rep = 0
        for rep in results[adv]:
            total_rep += results[adv][rep]
        repetitions_per_advisor[adv] = total_rep
    return repetitions_per_advisor
def thesis_per_year():
    """Count the theses defended in each year from 1977 to 2014.

    Returns:
        dict mapping year (int) -> number of theses with a defense_date
        in that year.
    """
    counts = {}
    connection = mysql.connector.connect(**config)
    cur = connection.cursor()
    for y in range(1977, 2015):
        cur.execute("SELECT count(defense_date) FROM thesis WHERE year(defense_date)=year('" + str(y) + "-01-01')")
        for row in cur:
            counts[y] = row[0]
    cur.close()
    connection.close()
    return counts
def thesis_per_location():
    """Count theses per university location.

    Returns:
        dict mapping location name -> number of theses defended there.

    Bug fix: the original query joined ``thesis`` and ``university``
    without a join condition, so every count was the cartesian product
    len(thesis) * (universities at that location) instead of the number
    of theses at the location.  The query now joins on
    thesis.university_id = university.id and uses a parameterized value
    for the location (avoids SQL quoting/injection problems).
    """
    results = {}
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    cursor.execute("select distinct(location) from university")
    locations = []
    for l in cursor:
        locations.append(l[0])
    results = {}
    for location in locations:
        query = ("SELECT count(thesis.id) FROM thesis, university "
                 "WHERE thesis.university_id = university.id "
                 "AND university.location = %s")
        cursor.execute(query, (location,))
        for r in cursor:
            results[location] = r[0]
    cursor.close()
    cnx.close()
    return results
def advisor_genders_by_topic():
    """Count advisor genders (male/female/unknown) per first-level
    UNESCO topic; writes advisor_gender_by_topic.json/.csv and returns
    the dict topic-name -> {male, female, unknown}.

    NOTE(review): issues three COUNT queries per thesis; a single
    GROUP BY gender query per topic would be far faster.
    """
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    results = {}
    for topic in first_level_topic_list:
        print "Topic:", topic
        print 'Getting thesis ids for topic...'
        thesis_ids = []
        cursor.execute("SELECT thesis_id FROM association_thesis_description, descriptor WHERE descriptor.id = association_thesis_description.descriptor_id AND descriptor.code DIV 10000 = " + str(topic))
        for t_id in cursor:
            thesis_ids.append(t_id)
        print 'Number of thesis:', len(thesis_ids)
        print 'Counting genders...'
        male = 0
        female = 0
        unknown = 0
        for thesis in thesis_ids:
            # one COUNT query per gender value for this thesis
            query = "SELECT COUNT(advisor.person_id) FROM advisor, person, thesis WHERE thesis.id = advisor.thesis_id AND person.id = advisor.person_id AND person.gender = 'male' AND thesis.id = " + str(thesis[0])
            cursor.execute(query)
            for r in cursor:
                male += r[0]
            query = "SELECT COUNT(advisor.person_id) FROM advisor, person, thesis WHERE thesis.id = advisor.thesis_id AND person.id = advisor.person_id AND person.gender = 'female' AND thesis.id = " + str(thesis[0])
            cursor.execute(query)
            for r in cursor:
                female += r[0]
            query = "SELECT COUNT(advisor.person_id) FROM advisor, person, thesis WHERE thesis.id = advisor.thesis_id AND person.id = advisor.person_id AND person.gender = 'none' AND thesis.id = " + str(thesis[0])
            cursor.execute(query)
            for r in cursor:
                unknown += r[0]
        # skip topics without any tagged thesis
        if len(thesis_ids) > 0:
            results[first_level_topic_list[topic]] = {'male' : male, 'female' : female, 'unknown' : unknown}
    cursor.close()
    cnx.close()
    print "Saving json"
    json.dump(results, open('advisor_gender_by_topic.json','w'))
    print "Saving csv"
    df = DataFrame(results)
    df.to_csv("advisor_gender_by_topic.csv")
    return results
def analyze_advisor_student_genders():
    """Count advisor/student gender pairs over all theses.

    Returns:
        dict with keys 'MM', 'MF', 'FM', 'FF' (advisor gender first).
        Pairs where either person has no gender entry are forced to
        'none' and therefore not counted in any bucket.
    """
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    print "Recovering advisor-student pairs..."
    cursor.execute("SELECT thesis.author_id, advisor.person_id FROM thesis, advisor WHERE thesis.id = advisor.thesis_id")
    adv_stu = []
    for advisor in cursor:
        # store as [advisor_id, student_id]
        adv_stu.append([advisor[1], advisor[0]])
    print "Recovering genders..."
    genders = {}
    cursor.execute("SELECT person.id, person.gender FROM person")
    for person in cursor:
        genders[person[0]] = person[1]
    cursor.close()
    cnx.close()
    print "Counting..."
    results = {}
    results["MM"] = 0
    results["FF"] = 0
    results["FM"] = 0
    results["MF"] = 0
    for pair in adv_stu:
        try:
            adv_gender = genders[pair[0]]
            stu_gender = genders[pair[1]]
        except:
            # missing person record: exclude the pair from every bucket
            adv_gender = 'none'
            stu_gender = 'none'
        if adv_gender == 'male':
            if stu_gender == 'male':
                results['MM'] += 1
            elif stu_gender == 'female':
                results['MF'] += 1
        elif adv_gender == 'female':
            if stu_gender == 'male':
                results['FM'] += 1
            elif stu_gender == 'female':
                results['FF'] += 1
    return results
def analyze_advisor_student_genders_by_topic():
    """Count advisor/student gender pairs per first-level UNESCO topic,
    normalized by the topic's advisor gender totals.

    Reads advisor_gender_by_topic.json (produced by
    advisor_genders_by_topic()), writes gender_pairs_by_topic.json/.csv
    and returns the dict topic-name -> pair counts.

    NOTE(review): the *_norm divisions assume every processed topic has
    at least one male and one female advisor in the JSON; a topic with a
    zero count would raise ZeroDivisionError -- confirm the data
    guarantees this.
    """
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    print "Recovering genders..."
    genders = {}
    cursor.execute("SELECT person.id, person.gender FROM person")
    for person in cursor:
        genders[person[0]] = person[1]
    topic_genders = json.load(open('advisor_gender_by_topic.json','r'))
    topic_gender_pairs = {}
    for topic in first_level_topic_list:
        print "Topic:", topic
        print "Recovering advisor-student pairs..."
        query = """ SELECT thesis.author_id, advisor.person_id
                FROM thesis, advisor, descriptor, association_thesis_description
                WHERE descriptor.id = association_thesis_description.descriptor_id
                AND thesis.id = advisor.thesis_id
                AND thesis.id = association_thesis_description.thesis_id
                AND descriptor.code DIV 10000 = """ + str(topic)
        cursor.execute(query)
        adv_stu = []
        for advisor in cursor:
            # store as [advisor_id, student_id]
            adv_stu.append([advisor[1], advisor[0]])
        if len(adv_stu) > 0:
            print "Counting..."
            results = {}
            results["MM"] = 0
            results["FF"] = 0
            results["FM"] = 0
            results["MF"] = 0
            for pair in adv_stu:
                try:
                    adv_gender = genders[pair[0]]
                    stu_gender = genders[pair[1]]
                except:
                    # missing person record: pair excluded from counting
                    adv_gender = 'none'
                    stu_gender = 'none'
                if adv_gender == 'male':
                    if stu_gender == 'male':
                        results['MM'] += 1
                    elif stu_gender == 'female':
                        results['MF'] += 1
                elif adv_gender == 'female':
                    if stu_gender == 'male':
                        results['FM'] += 1
                    elif stu_gender == 'female':
                        results['FF'] += 1
            # normalize by the topic's advisor gender totals
            results["MM_norm"] = results["MM"] * 1.0 / topic_genders[str(topic)]['male']
            results["FF_norm"] = results["FF"] * 1.0 / topic_genders[str(topic)]['female']
            results["FM_norm"] = results["FM"] * 1.0 / topic_genders[str(topic)]['female']
            results["MF_norm"] = results["MF"] * 1.0 / topic_genders[str(topic)]['male']
            topic_gender_pairs[first_level_topic_list[topic]] = results
    cursor.close()
    cnx.close()
    print "Saving json"
    json.dump(topic_gender_pairs, open('gender_pairs_by_topic.json','w'))
    print "Saving csv"
    df = DataFrame(topic_gender_pairs)
    df.to_csv("gender_pairs_by_topic.csv")
    return topic_gender_pairs
def count_persons_with_multiple_thesis():
    """Find persons who authored more than one thesis.

    Returns:
        (results, histogram) where results maps person_id -> thesis
        count (>1 only) and histogram maps thesis count -> number of
        persons with that count.  Both are also dumped to JSON files.
    """
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    persons_id = []
    cursor.execute("SELECT person.id FROM person")
    for person in cursor:
        persons_id.append(person[0])
    results = {}
    histogram = {}
    for i, p_id in enumerate(persons_id):
        if i % 2000 == 0:
            # progress indicator
            print i, 'of', len(persons_id)
        cursor.execute("SELECT COUNT(thesis.id) FROM thesis WHERE thesis.author_id = " + str(p_id))
        for r in cursor:
            if r[0] > 1:
                results[p_id] = r[0]
                if histogram.has_key(r[0]):
                    histogram[r[0]] += 1
                else:
                    histogram[r[0]] = 1
    cursor.close()
    cnx.close()
    print "Writing json..."
    json.dump(results, open('multiple_thesis.json','w'))
    json.dump(histogram, open('multiple_thesis_hist.json','w'))
    return results, histogram
def count_panel_members():
    """Histogram of panel sizes over all theses.

    Returns:
        dict mapping panel size -> number of theses with that size.
    """
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    print "Getting thesis ids..."
    cursor.execute("SELECT id FROM thesis")
    thesis_ids = []
    for r in cursor:
        thesis_ids.append(r[0])
    results = {}
    print "Counting panel members"
    for i, t_id in enumerate(thesis_ids):
        if i % 2000 == 0:
            # progress indicator
            print i, 'of', len(thesis_ids)
        cursor.execute("SELECT count(panel_member.person_id) FROM panel_member WHERE panel_member.thesis_id = " + str(t_id))
        for r in cursor:
            if results.has_key(r[0]):
                results[r[0]] += 1
            else:
                results[r[0]] = 1
    cursor.close()
    cnx.close()
    return results
def create_gender_pie():
    """Pie chart of the overall gender distribution (all persons).

    Counts are hard-coded from a previous run against the database.
    """
    counts = {'Male': 221579.0, 'Female': 80363.0, 'Unknown': 21428.0}
    total = counts['Male'] + counts['Female'] + counts['Unknown']
    labels = ['Male', 'Female', 'Unknown']
    sizes = [counts[label] / total * 100 for label in labels]
    plt.pie(sizes, labels=labels, colors=['lightblue', 'pink', 'gold'], autopct='%1.1f%%')
    plt.axis('equal')
    plt.show()
def create_advisor_gender_pie():
    """Pie chart of the gender distribution among thesis advisors.

    Counts are hard-coded from a previous run against the database.
    """
    counts = {'Male': 165506.0, 'Female': 37012.0, 'Unknown': 11229.0}
    total = counts['Male'] + counts['Female'] + counts['Unknown']
    labels = ['Male', 'Female', 'Unknown']
    sizes = [counts[label] / total * 100 for label in labels]
    plt.pie(sizes, labels=labels, colors=['lightblue', 'pink', 'gold'], autopct='%1.1f%%')
    plt.axis('equal')
    plt.show()
def create_student_gender_pie():
    """Pie chart of the gender distribution among thesis authors.

    Counts are hard-coded from a previous run against the database.
    """
    counts = {'Male': 115423.0, 'Female': 52184.0, 'Unknown': 9742.0}
    total = counts['Male'] + counts['Female'] + counts['Unknown']
    labels = ['Male', 'Female', 'Unknown']
    sizes = [counts[label] / total * 100 for label in labels]
    plt.pie(sizes, labels=labels, colors=['lightblue', 'pink', 'gold'], autopct='%1.1f%%')
    plt.axis('equal')
    plt.show()
def create_panel_gender_pie():
    """Pie chart of the gender distribution among panel members.

    Counts are hard-coded from a previous run against the database.
    """
    counts = {'Male': 674748.0, 'Female': 139170.0, 'Unknown': 44765.0}
    total = counts['Male'] + counts['Female'] + counts['Unknown']
    labels = ['Male', 'Female', 'Unknown']
    sizes = [counts[label] / total * 100 for label in labels]
    plt.pie(sizes, labels=labels, colors=['lightblue', 'pink', 'gold'], autopct='%1.1f%%')
    plt.axis('equal')
    plt.show()
def create_number_of_thesis_bar():
    """Bar chart: how many persons authored 2, 3, 4, 5 or 6 theses.

    Values are hard-coded from a previous run of
    count_persons_with_multiple_thesis().
    NOTE(review): the title says '> 2' but the first bar is for exactly
    2 theses -- presumably '>= 2' is meant; confirm.
    """
    values = [1552, 126, 33, 7, 2]
    fig, ax = plt.subplots()  # fig/ax unused; the pyplot state API is used below
    index = np.arange(len(values))
    width = 0.30
    plt.bar(index, values)
    plt.xlabel('Number of thesis')
    plt.ylabel('Total persons')
    plt.title('Number of thesis by person (> 2)')
    plt.xticks(index + width, ('2', '3', '4', '5', '6'))
    plt.legend()
    plt.tight_layout()
    plt.show()
if __name__=='__main__':
print "starting"
print create_number_of_thesis_bar()
print "fin" | apache-2.0 |
nushio3/cmaes | python/cma.py | 1 | 261903 | #!/usr/bin/env python2
"""Module cma implements the CMA-ES, Covariance Matrix Adaptation Evolution
Strategy, a stochastic optimizer for robust non-linear non-convex
derivative-free function minimization for Python versions 2.6 and 2.7
(for Python 2.5 class SolutionDict would need to be re-implemented, because
it depends on collections.MutableMapping, since version 0.91.01).
CMA-ES searches for a minimizer (a solution x in R**n) of an
objective function f (cost function), such that f(x) is
minimal. Regarding f, only function values for candidate solutions
need to be available, gradients are not necessary. Even less
restrictive, only a passably reliable ranking of the candidate
solutions in each iteration is necessary, the function values
itself do not matter. Some termination criteria however depend
on actual f-values.
Two interfaces are provided:
- function `fmin(func, x0, sigma0,...)`
runs a complete minimization
of the objective function func with CMA-ES.
- class `CMAEvolutionStrategy`
allows for minimization such that the
control of the iteration loop remains with the user.
Used packages:
- unavoidable: `numpy` (see `barecmaes2.py` if `numpy` is not
available),
- avoidable with small changes: `time`, `sys`
- optional: `matplotlib.pylab` (for `plot` etc., highly
recommended), `pprint` (pretty print), `pickle` (in class
`Sections`), `doctest`, `inspect`, `pygsl` (never by default)
Testing
-------
The code can be tested on a given system. Typing::
python cma.py --test --quiet
or in the Python shell ``ipython -pylab``::
run cma.py --test --quiet
runs ``doctest.testmod(cma)`` showing only exceptions (and not the
tests that fail due to small differences in the output) and should
run without complaints in about under two minutes. On some systems,
the pop up windows must be closed manually to continue and finish
the test.
Install
-------
The code can be installed by::
python cma.py --install
where the ``setup`` function from package ``distutils.core`` is used.
Example
-------
::
import cma
help(cma) # "this" help message, use cma? in ipython
help(cma.fmin)
help(cma.CMAEvolutionStrategy)
help(cma.Options)
cma.Options('tol') # display 'tolerance' termination options
cma.Options('verb') # display verbosity options
res = cma.fmin(cma.Fcts.tablet, 15 * [1], 1)
res[0] # best evaluated solution
res[5] # mean solution, presumably better with noise
:See: `fmin()`, `Options`, `CMAEvolutionStrategy`
:Author: Nikolaus Hansen, 2008-2012
:License: GPL 2 and 3
"""
from __future__ import division # future is >= 3.0, this code has been used with 2.6 & 2.7
from __future__ import with_statement # only necessary for python 2.5 and not in heavy use
# from __future__ import collections.MutableMapping # does not exist in future, otherwise 2.5 would work
# from __future__ import print_function # for cross-checking, available from python 2.6
__version__ = "0.91.02 $Revision: 3168 $"
# $Date: 2012-03-09 18:35:03 +0100 (Fri, 09 Mar 2012) $
# bash: svn propset svn:keywords 'Date Revision' cma.py
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 or 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# for testing:
# pyflakes cma.py # finds bugs by static analysis
# pychecker --limit 60 cma.py # also executes, gives 60 warnings (all checked)
# python cma.py -t -quiet # executes implemented tests based on doctest
# to create a html documentation file:
# pydoc -w cma # edit the header (remove local pointers)
# epydoc cma.py # comes close to javadoc but does not find the
# # links of function references etc
# doxygen needs @package cma as first line in the module docstring
# some things like class attributes are not interpreted correctly
# sphinx: doc style of doc.python.org, could not make it work
# TODO: make those options that are only used in fmin an error in init of CMA, but still Options() should
# work as input to CMA.
# TODO: add a default logger in CMAEvolutionStrategy, see fmin() and optimize() first
# tell() should probably not add data, but optimize() should handle even an after_iteration_handler.
# TODO: CMAEvolutionStrategy(ones(10), 1).optimize(cma.fcts.elli) # should work like fmin
# one problem: the data logger is not default and seemingly cannot be attached in one line
# TODO: check combination of boundary handling and transformation: penalty must be computed
# on gp.pheno(x_geno, bounds=None), but without bounds, check/remove usage of .geno everywhere
# TODO: check whether all new solutions are put into self.sent_solutions
# TODO: separate initialize==reset_state from __init__
# TODO: introduce Zpos == diffC which makes the code more consistent and the active update "exact"
# TODO: split tell into a variable transformation part and the "pure" functionality
# usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X])
# genotypic repair is not part of tell_geno
# TODO: read settable "options" from a (properties) file, see myproperties.py
#
# typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun, callback=None
# maxfev, diag (A sequency of N positive entries that serve as
# scale factors for the variables.)
# full_output -- non-zero to return all optional outputs.
# If xtol < 0.0, xtol is set to sqrt(machine_precision)
# 'infot -- a dictionary of optional outputs with the keys:
# 'nfev': the number of function calls...
#
# see eg fmin_powell
# typical returns
# x, f, dictionary d
# (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>)
#
# TODO: keep best ten solutions
# TODO: implement constraints handling
# TODO: option full_output -- non-zero to return all optional outputs.
# TODO: extend function unitdoctest, or use unittest?
# TODO: implement equal-fitness termination, covered by stagnation?
# TODO: apply style guide: no capitalizations!?
# TODO: check and test dispdata()
# TODO: eigh(): thorough testing would not hurt
#
# TODO (later): implement readSignals from a file like properties file (to be called after tell())
import sys, time # not really essential
import collections, numpy as np # arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh, sort, argsort, random, ones,...
from numpy import inf, array, dot, exp, log, sqrt, sum # to access the built-in sum fct: __builtins__.sum or del sum removes the imported sum and recovers the shadowed
try:
import matplotlib.pylab as pylab # also: use ipython -pylab
show = pylab.show
savefig = pylab.savefig # we would like to be able to use cma.savefig() etc
closefig = pylab.close
except:
pylab = None
print(' Could not import matplotlib.pylab, therefore ``cma.plot()`` etc. is not available')
def show():
pass
__docformat__ = "reStructuredText" # this hides some comments entirely?
sys.py3kwarning = True # TODO: out-comment from version 2.6
# why not package math?
# TODO: check scitools.easyviz and how big the adaptation would be
# changes:
# 12/07/21: convert value True for noisehandling into 1 making the output compatible
# 12/01/30: class Solution and more old stuff removed r3101
# 12/01/29: class Solution is depreciated, GenoPheno and SolutionDict do the job (v0.91.00, r3100)
# 12/01/06: CMA_eigenmethod option now takes a function (integer still works)
# 11/09/30: flat fitness termination checks also history length
# 11/09/30: elitist option (using method clip_or_fit_solutions)
# 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of
# injected or modified solutions and even reliable adaptive encoding
# 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1]
# 11/07/25: fixed: fmin wrote first and last line even with verb_log==0
# fixed: method settableOptionsList, also renamed to versatileOptions
# default seed depends on time now
# 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling
# fixed: output argument ordering in fmin, print now only used as function
# removed: parallel option in fmin
# 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:]
# 11/07/01: major clean-up and reworking of abstract base classes and of the documentation,
# also the return value of fmin changed and attribute stop is now a method.
# 11/04/22: bug-fix: option fixed_variables in combination with scaling
# 11/04/21: stopdict is not a copy anymore
# 11/04/15: option fixed_variables implemented
# 11/03/23: bug-fix boundary update was computed even without boundaries
# 11/03/12: bug-fix of variable annotation in plots
# 11/02/05: work around a memory leak in numpy
# 11/02/05: plotting routines improved
# 10/10/17: cleaning up, now version 0.9.30
# 10/10/17: bug-fix: return values of fmin now use phenotyp (relevant
# if input scaling_of_variables is given)
# 08/10/01: option evalparallel introduced,
# bug-fix for scaling being a vector
# 08/09/26: option CMAseparable becomes CMA_diagonal
# 08/10/18: some names change, test functions go into a class
# 08/10/24: more refactorizing
# 10/03/09: upper bound exp(min(1,...)) for step-size control
# TODO: this would define the visible interface
# __all__ = ['fmin', 'CMAEvolutionStrategy', 'plot', ...]
#
# emptysets = ('', (), [], {}) # array([]) does not work but also np.size(.) == 0
# "x in emptysets" cannot be well replaced by "not x"
# which is also True for array([]) and None, but also for 0 and False, and False for NaN
use_sent_solutions = True # 5-30% CPU slower, particularly for large lambda, will be mandatory soon
#____________________________________________________________
#____________________________________________________________
#
def unitdoctest():
    """is used to describe test cases and might in future become helpful
    as an experimental tutorial as well. The main testing feature at the
    moment is by doctest with ``cma._test()`` or conveniently by
    ``python cma.py --test``. Unfortunately, depending on the
    system, the results will slightly differ and many "failed" test cases
    might be reported. This is prevented with the --quiet option.
    A simple first overall test:
        >>> import cma
        >>> res = cma.fmin(cma.fcts.elli, 3*[1], 1, CMA_diagonal=2, seed=1, verb_time=0)
        (3_w,7)-CMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=1)
        Covariance matrix is diagonal for 2 iterations (1/ccov=7.0)
        Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
            1       7 1.453161670768570e+04 1.2e+00 1.08e+00  1e+00  1e+00
            2      14 3.281197961927601e+04 1.3e+00 1.22e+00  1e+00  2e+00
            3      21 1.082851071704020e+04 1.3e+00 1.24e+00  1e+00  2e+00
          100     700 8.544042012075362e+00 1.4e+02 3.18e-01  1e-03  2e-01
          200    1400 5.691152415221861e-12 1.0e+03 3.82e-05  1e-09  1e-06
          220    1540 3.890107746209078e-15 9.5e+02 4.56e-06  8e-11  7e-08
        termination on tolfun : 1e-11
        final/bestever f-value = 3.89010774621e-15 2.52273602735e-15
        mean solution:  [ -4.63614606e-08  -3.42761465e-10   1.59957987e-11]
        std deviation: [  6.96066282e-08   2.28704425e-09   7.63875911e-11]
    Test on the Rosenbrock function with 3 restarts. The first trial only
    finds the local optimum, which happens in about 20% of the cases.
        >>> import cma
        >>> res = cma.fmin(cma.fcts.rosen, 4*[-1],1, ftarget=1e-6, restarts=3, verb_time=0, verb_disp=500, seed=3)
        (4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=3)
        Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
            1       8 4.875315645656848e+01 1.0e+00 8.43e-01  8e-01  8e-01
            2      16 1.662319948123120e+02 1.1e+00 7.67e-01  7e-01  8e-01
            3      24 6.747063604799602e+01 1.2e+00 7.08e-01  6e-01  7e-01
          184    1472 3.701428610430019e+00 4.3e+01 9.41e-07  3e-08  5e-08
        termination on tolfun : 1e-11
        final/bestever f-value = 3.70142861043 3.70142861043
        mean solution:  [-0.77565922  0.61309336  0.38206284  0.14597202]
        std deviation: [  2.54211502e-08   3.88803698e-08   4.74481641e-08   3.64398108e-08]
        (8_w,16)-CMA-ES (mu_w=4.8,w_1=32%) in dimension 4 (seed=4)
        Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
            1    1489 2.011376859371495e+02 1.0e+00 8.90e-01  8e-01  9e-01
            2    1505 4.157106647905128e+01 1.1e+00 8.02e-01  7e-01  7e-01
            3    1521 3.548184889359060e+01 1.1e+00 1.02e+00  8e-01  1e+00
          111    3249 6.831867555502181e-07 5.1e+01 2.62e-02  2e-04  2e-03
        termination on ftarget : 1e-06
        final/bestever f-value = 6.8318675555e-07 1.18576673231e-07
        mean solution:  [ 0.99997004  0.99993938  0.99984868  0.99969505]
        std deviation: [ 0.00018973  0.00038006  0.00076479  0.00151402]
        >>> assert res[1] <= 1e-6
    Notice the different termination conditions. Termination on the target
    function value ftarget prevents further restarts.
    Test of scaling_of_variables option
        >>> import cma
        >>> opts = cma.Options()
        >>> opts['seed'] = 456
        >>> opts['verb_disp'] = 0
        >>> opts['CMA_active'] = 1
        >>> # rescaling of third variable: for searching in  roughly
        >>> #   x0 plus/minus 1e3*sigma0 (instead of plus/minus sigma0)
        >>> opts.scaling_of_variables = [1, 1, 1e3, 1]
        >>> res = cma.fmin(cma.fcts.rosen, 4 * [0.1], 0.1, **opts)
        termination on tolfun : 1e-11
        final/bestever f-value = 2.68096173031e-14 1.09714829146e-14
        mean solution:  [ 1.00000001  1.00000002  1.00000004  1.00000007]
        std deviation: [  3.00466854e-08   5.88400826e-08   1.18482371e-07   2.34837383e-07]
    The printed std deviations reflect the actual true value (not the one
    in the internal representation which would be different).
    :See: cma.main(), cma._test()
    """
    # intentionally empty: this function only carries doctests in its
    # docstring; the expected outputs above are system-dependent.
    pass
#____________________________________________________________
#____________________________________________________________
#
class BlancClass(object):
    """Empty container class: instances serve as ad-hoc bundles of
    attributes (comparable to ``types.SimpleNamespace``)."""
#_____________________________________________________________________
#_____________________________________________________________________
#
try:
    # Python >= 3.3: the ABCs live in collections.abc; the top-level
    # aliases (collections.MutableMapping) were removed in Python 3.10
    from collections.abc import MutableMapping as _MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping as _MutableMapping

class DerivedDictBase(_MutableMapping):
    """for conveniently adding features to a dictionary. The actual
    dictionary is in ``self.data``. Copy-paste
    and modify setitem, getitem, and delitem, if necessary.

    Deriving from ``MutableMapping`` provides ``get``, ``setdefault``,
    ``pop``, ``update`` etc. for free, based on the five methods below.
    """
    def __init__(self, *args, **kwargs):
        # collections.MutableMapping.__init__(self)
        super(DerivedDictBase, self).__init__()
        # super(SolutionDict, self).__init__() # the same
        self.data = dict(*args, **kwargs)  # the backing dictionary
    def __len__(self):
        return len(self.data)
    def __contains__(self, value):
        return value in self.data
    def __iter__(self):
        return iter(self.data)
    def __setitem__(self, key, value):
        """defines self[key] = value"""
        self.data[key] = value
    def __getitem__(self, key):
        """defines self[key]"""
        return self.data[key]
    def __delitem__(self, key):
        del self.data[key]
class SolutionDict(DerivedDictBase):
    """dictionary with computation of an hash key for the inserted solutions and
    a stack of previously inserted same solutions.
    Each entry is meant to store additional information related to the solution.
    >>> import cma, numpy as np
    >>> d = cma.SolutionDict()
    >>> x = np.array([1,2,4])
    >>> d[x] = {'x': x, 'iteration': 1}
    >>> d.get(x) == (d[x] if d.key(x) in d.keys() else None)
    The last line is always true.
    TODO: data_with_same_key behaves like a stack (see setitem and delitem), but rather should behave like a queue?!
    A queue is less consistent with the operation self[key] = ..., if self.data_with_same_key[key] is not empty.
    """
    def __init__(self, *args, **kwargs):
        DerivedDictBase.__init__(self, *args, **kwargs)
        self.data_with_same_key = {}  # key -> stack of shadowed entries
    def key(self, x):
        """compute the hash key of ``x``: a tuple for array-like input,
        ``x`` itself otherwise"""
        try:
            return tuple(x)
        except TypeError:
            return x
    def __setitem__(self, key, value):
        """defines self[key] = value; a pre-existing entry under the same
        key is pushed onto ``data_with_same_key`` instead of being lost"""
        key = self.key(key)
        if key in self.data_with_same_key:
            self.data_with_same_key[key] += [self.data[key]]
        elif key in self.data:
            self.data_with_same_key[key] = [self.data[key]]
        self.data[key] = value
    def __getitem__(self, key):
        """defines self[key]"""
        return self.data[self.key(key)]
    def __delitem__(self, key):
        """remove only most current key-entry, reviving the previously
        shadowed entry, if any"""
        key = self.key(key)
        if key in self.data_with_same_key:
            if len(self.data_with_same_key[key]) == 1:
                self.data[key] = self.data_with_same_key.pop(key)[0]
            else:
                self.data[key] = self.data_with_same_key[key].pop(-1)
        else:
            del self.data[key]
    def truncate(self, max_len, min_iter):
        """drop entries older than ``min_iter`` once the dict holds more
        than ``max_len`` entries"""
        if len(self) > max_len:
            # iterate over a snapshot of the keys: deleting while iterating
            # a live view raises RuntimeError on Python 3
            for k in list(self.keys()):
                if self[k]['iteration'] < min_iter:
                    del self[k]  # only deletes one item with k as key, should delete all?
class SolutionDictOld(dict):
    """depreciated, SolutionDict should do, to be removed after SolutionDict
    has been successfully applied.
    dictionary with computation of an hash key for the inserted solutions and
    stack of previously inserted same solutions.
    Each entry is meant to store additional information related to the solution.
    Methods ``pop`` and ``get`` are modified accordingly.
    d = SolutionDict()
    x = array([1,2,4])
    d.insert(x, {'x': x, 'iteration': 1})
    d.get(x) == d[d.key(x)] if d.key(x) in d.keys() else d.get(x) is None
    TODO: not yet tested
    TODO: behaves like a stack (see _pop_derived), but rather should behave like a queue?!
    A queue is less consistent with the operation self[key] = ..., if self.more[key] is not empty.
    """
    def __init__(self):
        self.more = {}  # key -> stack of shadowed (previously inserted) entries
        # swap in the derived implementations on the instance, keeping the
        # originals reachable via the *_base attributes
        self._pop_base = self.pop
        self.pop = self._pop_derived
        self._get_base = self.get
        self.get = self._get_derived
    def key(self, x):
        """compute the hash key of ``x``"""
        return tuple(x)
    def insert(self, x, datadict):
        """store ``datadict`` under the key of ``x``, shadowing any
        previous entry instead of discarding it"""
        k = self.key(x)
        if k in self.more:
            self.more[k].append(self[k])
        elif k in self:
            self.more[k] = [self[k]]
        self[k] = datadict
    def _get_derived(self, x, default=None):
        return self._get_base(self.key(x), default)
    def _pop_derived(self, x):
        """return the most recent entry for ``x`` and revive the
        previously shadowed one, if any"""
        k = self.key(x)
        res = self[k]
        stack = self.more.get(k)
        if stack is not None:
            if len(stack) == 1:
                self[k] = self.more.pop(k)[0]
            else:
                self[k] = stack.pop(-1)
        return res
class BestSolution(object):
    """container to keep track of the best solution seen"""
    def __init__(self, x=None, f=np.inf, evals=None):
        """initialize the best solution with `x`, `f`, and `evals`.
        Better solutions have smaller `f`-values.
        """
        self.x = x  # phenotype of the best solution seen
        self.x_geno = None  # genotype, if retrievable from an archive
        # treat None/nan as "no value yet"
        self.f = f if f is not None and f is not np.nan else np.inf
        self.evals = evals  # evaluation count when the best was found
        self.evalsall = evals  # total evaluations seen, also across merged instances
        self.last = BlancClass()  # most recent candidate, even if not the best
        self.last.x = x
        self.last.f = f
    def update(self, arx, xarchive=None, arf=None, evals=None):
        """checks for better solutions in list `arx`, based on the smallest
        corresponding value in `arf`, alternatively, `update` may be called
        with a `BestSolution` instance like ``update(another_best_solution)``
        in which case the better solution becomes the current best.
        `xarchive` is used to retrieve the genotype of a solution.
        """
        minidx = None
        if arf is not None:  # find failsafe minimum
            try:
                minidx = np.nanargmin(arf)
            except ValueError:  # modern numpy raises on an all-NaN input
                return
            if minidx is np.nan:  # kept for backward compatibility with old numpy
                return
            minarf = arf[minidx]
            # minarf = reduce(lambda x, y: y if y and y is not np.nan and y < x else x, arf, np.inf)
        if isinstance(arx, BestSolution):
            self.evalsall = max((self.evalsall, arx.evalsall))
            if arx.f is not None and arx.f < np.inf:
                self.update([arx.x], xarchive, [arx.f], arx.evals)
            return self
        elif arf is not None and minarf < np.inf and (self.f is None or minarf < self.f):
            # NOTE: the None-check must come first, ``minarf < None`` raises
            # TypeError on Python 3
            self.x, self.f = arx[minidx], arf[minidx]
            self.x_geno = xarchive[self.x]['geno'] if xarchive is not None else None
            self.evals = None if not evals else evals - len(arf) + minidx + 1
            self.evalsall = evals
        elif evals:
            self.evalsall = evals
        if arf is not None:
            # guard: minidx/minarf only exist when arf was given; the
            # previous code raised NameError here for arf=None
            self.last.x = arx[minidx]
            self.last.f = minarf
    def get(self):
        """return ``(x, f, evals, x_geno)``"""
        return self.x, self.f, self.evals, self.x_geno
#____________________________________________________________
#____________________________________________________________
#
class BoundPenalty(object):
    """Computes the boundary penalty. Must be updated each iteration,
    using the `update` method.
    Details
    -------
    The penalty computes like ``sum(w[i] * (x[i]-xfeas[i])**2)``,
    where `xfeas` is the closest feasible (in-bounds) solution from `x`.
    The weight `w[i]` should be updated during each iteration using
    the update method.
    This class uses `GenoPheno.into_bounds` in method `update` to access
    domain boundary values and repair. This inconsistency might be
    removed in future.
    """
    def __init__(self, bounds=None):
        """Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
        are lower and upper domain boundaries, each is either `None` or
        a scalar or a list or array of appropriate size.
        """
        ##
        # bounds attribute reminds the domain boundary values
        self.bounds = bounds
        self.gamma = 1  # a very crude assumption
        self.weights_initialized = False  # gamma becomes a vector after initialization
        self.hist = []  # delta-f history
    def has_bounds(self):
        """return True, if any variable is bounded"""
        bounds = self.bounds
        if bounds in (None, [None, None]):
            return False
        # each of bounds[0], bounds[1] may be None, a scalar, or a sequence;
        # the previous code iterated ``xrange(bounds[0])``, which raised
        # TypeError for any non-integer bound specification
        if bounds[0] is not None:
            for b in ([bounds[0]] if np.isscalar(bounds[0]) else bounds[0]):
                if b is not None and b > -np.inf:
                    return True
        if bounds[1] is not None:
            for b in ([bounds[1]] if np.isscalar(bounds[1]) else bounds[1]):
                if b is not None and b < np.inf:
                    return True
        return False
    def repair(self, x, bounds=None, copy=False, copy_always=False):
        """sets out-of-bounds components of ``x`` on the bounds.
        Arguments
        ---------
        `bounds`
            can be `None`, in which case the "default" bounds are used,
            or ``[lb, ub]``, where `lb` and `ub`
            represent lower and upper domain bounds respectively that
            can be `None` or a scalar or a list or array of length ``len(self)``
        code is more or less copy-paste from Solution.repair, but never tested
        """
        # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
        # TODO: test whether np.max([bounds[0], x], axis=0) etc is speed relevant
        if bounds is None:
            bounds = self.bounds
        # initialize the return value in all cases: the previous code left
        # x_out unbound (UnboundLocalError) when bounds were absent and
        # copy_always was False
        x_out = np.array(x, copy=True) if copy_always else x
        if bounds not in (None, [None, None], (None, None)): # solely for effiency
            if copy and not copy_always:
                x_out = np.array(x, copy=True)
            if bounds[0] is not None:
                if np.isscalar(bounds[0]):
                    for i in range(len(x)):
                        x_out[i] = max([bounds[0], x[i]])
                else:
                    for i in range(len(x)):
                        if bounds[0][i] is not None:
                            x_out[i] = max([bounds[0][i], x[i]])
            if bounds[1] is not None:
                if np.isscalar(bounds[1]):
                    for i in range(len(x)):
                        x_out[i] = min([bounds[1], x[i]])
                else:
                    for i in range(len(x)):
                        if bounds[1][i] is not None:
                            x_out[i] = min([bounds[1][i], x[i]])
        return x_out # convenience return
    #____________________________________________________________
    #
    def __call__(self, x, archive, gp):
        """returns the boundary violation penalty for `x`, where `x` is a
        single solution or a list or array of solutions.
        If `bounds` is not `None`, the values in `bounds` are used, see `__init__`"""
        if x in (None, (), []):
            return x
        if gp.bounds in (None, [None, None], (None, None)):
            return 0.0 if np.isscalar(x[0]) else [0.0] * len(x) # no penalty
        x_is_single_vector = np.isscalar(x[0])
        x = [x] if x_is_single_vector else x
        pen = []
        for xi in x:
            # CAVE: this does not work with already repaired values!!
            # CPU(N,lam,iter=20,200,100)?: 3s of 10s, array(xi): 1s (check again)
            # remark: one deep copy can be prevented by xold = xi first
            xpheno = gp.pheno(archive[xi]['geno'])
            xinbounds = gp.into_bounds(xpheno)
            fac = 1 # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
            pen.append(sum(self.gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
        return pen[0] if x_is_single_vector else pen
    #____________________________________________________________
    #
    def feasible_ratio(self, solutions):
        """counts for each coordinate the number of feasible values in
        ``solutions`` and returns an array of length ``len(solutions[0])``
        with the ratios.
        `solutions` is a list or array of repaired `Solution` instances
        """
        count = np.zeros(len(solutions[0]))
        for x in solutions:
            count += x.unrepaired == x
        return count / float(len(solutions))
    #____________________________________________________________
    #
    def update(self, function_values, es, bounds=None):
        """updates the weights for computing a boundary penalty.
        Arguments
        ---------
        `function_values`
            all function values of recent population of solutions
        `es`
            `CMAEvolutionStrategy` object instance, in particular the
            method `into_bounds` of the attribute `gp` of type `GenoPheno`
            is used.
        `bounds`
            not (yet) in use other than for ``bounds == [None, None]`` nothing
            is updated.
        Reference: Hansen et al 2009, A Method for Handling Uncertainty...
        IEEE TEC, with addendum at http://www.lri.fr/~hansen/TEC2009online.pdf
        """
        if bounds is None:
            bounds = self.bounds
        if bounds is None or (bounds[0] is None and bounds[1] is None): # no bounds ==> no penalty
            return self # len(function_values) * [0.0] # case without voilations
        N = es.N
        ### prepare
        # compute varis = sigma**2 * C_ii
        varis = es.sigma**2 * np.array(N * [es.C] if np.isscalar(es.C) else ( # scalar case
                                es.C if np.isscalar(es.C[0]) else # diagonal matrix case
                                [es.C[i][i] for i in range(N)])) # full matrix case
        # normalized distance of the mean to its in-bounds projection
        dmean = (es.mean - es.gp.into_bounds(es.mean)) / varis**0.5
        ### Store/update a history of delta fitness value
        fvals = sorted(function_values)
        l = 1 + len(fvals)
        val = fvals[3*l // 4] - fvals[l // 4] # exact interquartile range apart interpolation
        val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration
        # insert val in history
        if np.isfinite(val) and val > 0:
            self.hist.insert(0, val)
        elif val == np.inf and len(self.hist) > 1:
            self.hist.insert(0, max(self.hist))
        else:
            pass # ignore 0 or nan values
        if len(self.hist) > 20 + (3*N) / es.popsize:
            self.hist.pop()
        ### prepare
        dfit = np.median(self.hist) # median interquartile range
        damp = min(1, es.sp.mueff/10./N)
        ### set/update weights
        # Throw initialization error
        if len(self.hist) == 0:
            raise _Error('wrongful initialization, no feasible solution sampled. ' +
                'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +
                'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ')
        # initialize weights
        if (dmean.any() and (not self.weights_initialized or es.countiter == 2)): # TODO
            self.gamma = np.array(N * [2*dfit])
            self.weights_initialized = True
        # update weights gamma
        if self.weights_initialized:
            edist = np.array(abs(dmean) - 3 * max(1, N**0.5/es.sp.mueff))
            if 1 < 3: # this branch is enabled; the alternatives below are kept for reference
                # increase single weights possibly with a faster rate than they can decrease
                # value unit of edst is std dev, 3==random walk of 9 steps
                self.gamma *= np.exp((edist>0) * np.tanh(edist/3) / 2.)**damp
                # decrease all weights up to the same level to avoid single extremely small weights
                # use a constant factor for pseudo-keeping invariance
                self.gamma[self.gamma > 5 * dfit] *= np.exp(-1./3)**damp
                # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)
            elif 1 < 3 and (edist>0).any(): # previous method
                # CAVE: min was max in TEC 2009
                self.gamma[edist>0] *= 1.1**min(1, es.sp.mueff/10./N)
                # max fails on cigtab(N=12,bounds=[0.1,None]):
                # self.gamma[edist>0] *= 1.1**max(1, es.sp.mueff/10./N) # this was a bug!?
                # self.gamma *= exp((edist>0) * np.tanh(edist))**min(1, es.sp.mueff/10./N)
            else: # alternative version, but not better
                solutions = es.pop # this has not been checked
                r = self.feasible_ratio(solutions) # has to be the averaged over N iterations
                self.gamma *= np.exp(np.max([N*[0], 0.3 - r], axis=0))**min(1, es.sp.mueff/10/N)
        es.more_to_write = self.gamma if self.weights_initialized else np.ones(N)
        ### return penalty
        # es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]
        return self # bound penalty values
#____________________________________________________________
#____________________________________________________________
#
class GenoPhenoBase(object):
    """depreciated, abstract base class for genotyp-phenotype transformation,
    to be implemented.
    See (and rather use) option ``transformation`` of ``fmin`` or ``CMAEvolutionStrategy``.
    Example
    -------
    ::
        import cma
        class Mygpt(cma.GenoPhenoBase):
            def pheno(self, x):
                return x  # identity for the time being
        gpt = Mygpt()
        optim = cma.CMAEvolutionStrategy(...)
        while not optim.stop():
            X = optim.ask()
            f = [func(gpt.pheno(x)) for x in X]
            optim.tell(X, f)
    In case of a repair, we might pass the repaired solution into `tell()`
    (with check_points being True).
    TODO: check usecases in `CMAEvolutionStrategy` and implement option GenoPhenoBase
    """
    def pheno(self, x):
        """map genotype `x` to its phenotype; must be implemented by a subclass"""
        # the unreachable ``return x`` that followed the raise was removed
        raise NotImplementedError()
#____________________________________________________________
#____________________________________________________________
#
class GenoPheno(object):
    """Genotype-phenotype transformation.
    Method `pheno` provides the transformation from geno- to phenotype,
    that is from the internal representation to the representation used
    in the objective function. Method `geno` provides the "inverse" pheno-
    to genotype transformation. The geno-phenotype transformation comprises,
    in this order:
    - insert fixed variables (with the phenotypic and therefore quite
      possibly "wrong" values)
    - affine linear transformation (scaling and shift)
    - user-defined transformation
    - projection into feasible domain (boundaries)
    - assign fixed variables their original phenotypic value
    By default all transformations are the identity. The boundary
    transformation is only applied, if the boundaries are given as argument to
    the method `pheno` or `geno` respectively.
    ``geno`` is not really necessary and might disappear in future.
    """
    def __init__(self, dim, scaling=None, typical_x=None, bounds=None, fixed_values=None, tf=None):
        """return `GenoPheno` instance with fixed dimension `dim`.
        Keyword Arguments
        -----------------
        `scaling`
            the diagonal of a scaling transformation matrix, multipliers
            in the genotyp-phenotyp transformation, see `typical_x`
        `typical_x`
            ``pheno = scaling*geno + typical_x``
        `bounds` (obsolete, might disappear)
            list with two elements,
            lower and upper bounds both can be a scalar or a "vector"
            of length dim or `None`. Without effect, as `bounds` must
            be given as argument to `pheno()`.
        `fixed_values`
            a dictionary of variable indices and values, like ``{0:2.0, 2:1.1}``,
            that are not subject to change, negative indices are dropped
            (they act like uncommenting the index), values are phenotypic
            values.
        `tf`
            list of two user-defined transformation functions, or `None`.
            ``tf[0]`` is a function that transforms the internal representation
            as used by the optimizer into a solution as used by the
            objective function. ``tf[1]`` does the back-transformation.
            For example ::
                tf_0 = lambda x: [xi**2 for xi in x]
                tf_1 = lambda x: [abs(xi)**0.5 for xi in x]
            or "equivalently" without the `lambda` construct ::
                def tf_0(x):
                    return [xi**2 for xi in x]
                def tf_1(x):
                    return [abs(xi)**0.5 for xi in x]
            ``tf=[tf_0, tf_1]`` is a reasonable way to guarantee that only positive
            values are used in the objective function.
        Details
        -------
        If ``tf_1`` is omitted, the initial x-value must be given as genotype (as the
        phenotype-genotype transformation is unknown) and injection of solutions
        might lead to unexpected results.
        """
        self.N = dim  # fixed problem dimension (phenotypic)
        self.bounds = bounds
        self.fixed_values = fixed_values
        if tf is not None:
            self.tf_pheno = tf[0]
            self.tf_geno = tf[1] # TODO: should not necessarily be needed
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r < 1e-7)
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r > -1e-7)
            print("WARNING in class GenoPheno: user defined transformations have not been tested thoroughly")
        else:
            self.tf_geno = None
            self.tf_pheno = None
        if fixed_values:
            if type(fixed_values) is not dict:
                raise _Error("fixed_values must be a dictionary {index:value,...}")
            if max(fixed_values.keys()) >= dim:
                raise _Error("max(fixed_values.keys()) = " + str(max(fixed_values.keys())) +
                    " >= dim=N=" + str(dim) + " is not a feasible index")
            # convenience commenting functionality: drop negative keys
            # NOTE(review): mutating the dict while iterating its keys()
            # works on Python 2 (keys() returns a list) but raises
            # RuntimeError on Python 3 -- should iterate list(fixed_values.keys())
            for k in fixed_values.keys():
                if k < 0:
                    fixed_values.pop(k)
        if bounds:
            if len(bounds) != 2:
                raise _Error('len(bounds) must be 2 for lower and upper bounds')
            # expand scalar bounds into vectors of length dim
            for i in (0,1):
                if bounds[i] is not None:
                    bounds[i] = array(dim * [bounds[i]] if np.isscalar(bounds[i]) else
                                        [b for b in bounds[i]])
        def vec_is_default(vec, default_val=0):
            """return True iff ``vec`` represents the default value;
            None or [None] are also recognized as default"""
            try:
                if len(vec) == 1:
                    vec = vec[0]  # [None] becomes None and is always default
                else:
                    return False
            except TypeError:
                pass  # vec is a scalar
            if vec is None or vec == array(None) or vec == default_val:
                return True
            return False
        # NOTE(review): the sentinel checks below and in pheno()/geno()
        # (``self.scales is 1``, ``self.typical_x is 0``,
        # ``.shape is not ()``) rely on CPython's caching of small ints and
        # the empty tuple; they emit SyntaxWarning on Python >= 3.8.
        # They are load-bearing, so confirm behavior before changing them.
        self.scales = array(scaling)
        if vec_is_default(self.scales, 1):
            self.scales = 1  # CAVE: 1 is not array(1)
        elif self.scales.shape is not () and len(self.scales) != self.N:
            raise _Error('len(scales) == ' + str(len(self.scales)) +
                         ' does not match dimension N == ' + str(self.N))
        self.typical_x = array(typical_x)
        if vec_is_default(self.typical_x, 0):
            self.typical_x = 0
        elif self.typical_x.shape is not () and len(self.typical_x) != self.N:
            raise _Error('len(typical_x) == ' + str(len(self.typical_x)) +
                         ' does not match dimension N == ' + str(self.N))
        # isidentity allows pheno()/geno() to short-circuit entirely
        if (self.scales is 1 and
                self.typical_x is 0 and
                self.bounds in (None, [None, None]) and
                self.fixed_values is None and
                self.tf_pheno is None):
            self.isidentity = True
        else:
            self.isidentity = False
    def into_bounds(self, y, bounds=None, copy_never=False, copy_always=False):
        """Argument `y` is a phenotypic vector,
        return `y` put into boundaries, as a copy iff ``y != into_bounds(y)``.
        Note: this code is duplicated in `Solution.repair` and might
        disappear in future.
        """
        bounds = bounds if bounds is not None else self.bounds
        if bounds in (None, [None, None]):
            return y if not copy_always else array(y, copy=True)
        if bounds[0] is not None:
            # NOTE(review): assumes bounds[0] has a len(); __init__ expands
            # scalar bounds to vectors, so this holds for bounds set there
            if len(bounds[0]) not in (1, len(y)):
                raise ValueError('len(bounds[0]) = ' + str(len(bounds[0])) +
                                 ' and len of initial solution (' + str(len(y)) + ') disagree')
            if copy_never:  # is rather slower
                for i in xrange(len(y)):
                    y[i] = max(bounds[0][i], y[i])
            else:
                y = np.max([bounds[0], y], axis=0)
        if bounds[1] is not None:
            if len(bounds[1]) not in (1, len(y)):
                raise ValueError('len(bounds[1]) = ' + str(len(bounds[1])) +
                                 ' and initial solution (' + str(len(y)) + ') disagree')
            if copy_never:
                for i in xrange(len(y)):
                    y[i] = min(bounds[1][i], y[i])
            else:
                y = np.min([bounds[1], y], axis=0)
        return y
    def pheno(self, x, bounds=None, copy=True, copy_always=False):
        """maps the genotypic input argument into the phenotypic space,
        boundaries are only applied if argument ``bounds is not None``, see
        help for class `GenoPheno`
        """
        if copy_always and not copy:
            raise ValueError('arguments copy_always=' + str(copy_always) +
                             ' and copy=' + str(copy) + ' have inconsistent values')
        if self.isidentity and bounds in (None, [None, None], (None, None)):
            return x if not copy_always else array(x, copy=copy_always)
        if self.fixed_values is None:
            y = array(x, copy=copy)  # make a copy, in case
        else:  # expand with fixed values
            y = list(x)  # is a copy
            for i in sorted(self.fixed_values.keys()):
                y.insert(i, self.fixed_values[i])
            y = array(y, copy=False)
        if self.scales is not 1:  # just for efficiency
            y *= self.scales
        if self.typical_x is not 0:
            y += self.typical_x
        if self.tf_pheno is not None:
            y = array(self.tf_pheno(y), copy=False)
        if bounds is not None:
            y = self.into_bounds(y, bounds)
        # re-assign fixed variables their exact phenotypic value
        if self.fixed_values is not None:
            for i, k in self.fixed_values.items():
                y[i] = k
        return y
    def geno(self, y, bounds=None, copy=True, copy_always=False, archive=None):
        """maps the phenotypic input argument into the genotypic space.
        If `bounds` are given, first `y` is projected into the feasible
        domain. In this case ``copy==False`` leads to a copy.
        by default a copy is made only to prevent to modify ``y``
        method geno is only needed if external solutions are injected
        (geno(initial_solution) is depreciated and will disappear)
        TODO: arg copy=True should become copy_never=False
        """
        if archive is not None and bounds is not None:
            # NOTE(review): bare except silently swallows any archive
            # lookup failure, not only a missing key
            try:
                return archive[y]['geno']
            except:
                pass
        x = array(y, copy=(copy and not self.isidentity) or copy_always)
        # bounds = self.bounds if bounds is None else bounds
        if bounds is not None:  # map phenotyp into bounds first
            x = self.into_bounds(x, bounds)
        if self.isidentity:
            return x
        # user-defined transformation
        if self.tf_geno is not None:
            x = array(self.tf_geno(x), copy=False)
        # affine-linear transformation: shift and scaling
        if self.typical_x is not 0:
            x -= self.typical_x
        if self.scales is not 1:  # just for efficiency
            x /= self.scales
        # kick out fixed_values
        if self.fixed_values is not None:
            # keeping the transformed values does not help much
            # therefore it is omitted
            if 1 < 3:
                keys = sorted(self.fixed_values.keys())
                x = array([x[i] for i in range(len(x)) if i not in keys], copy=False)
            else:  # TODO: is this more efficient?
                x = list(x)
                for key in sorted(self.fixed_values.keys(), reverse=True):
                    # NOTE(review): list.remove removes by *value*, not by
                    # index -- this (dead) branch looks buggy; the branch
                    # above, which is always taken, removes by index
                    x.remove(key)
                x = array(x, copy=False)
        return x
#____________________________________________________________
#____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
class OOOptimizer(object):
    """"abstract" base class for an OO optimizer interface with methods
    `__init__`, `ask`, `tell`, `stop`, `result`, and `optimize`. Only
    `optimize` is fully implemented in this base class.
    Examples
    --------
    All examples minimize the function `elli`, the output is not shown.
    (A preferred environment to execute all examples is ``ipython -pylab``.)
    First we need ::
        from cma import CMAEvolutionStrategy, CMADataLogger # CMAEvolutionStrategy derives from the OOOptimizer class
        elli = lambda x: sum(1e3**((i-1.)/(len(x)-1.)*x[i])**2 for i in range(len(x)))
    The shortest example uses the inherited method `OOOptimizer.optimize()`::
        res = CMAEvolutionStrategy(8 * [0.1], 0.5).optimize(elli)
    The input parameters to `CMAEvolutionStrategy` are specific to this
    inherited class. The remaining functionality is based on interface
    defined by `OOOptimizer`. We might have a look at the result::
        print(res[0]) # best solution and
        print(res[1]) # its function value
    `res` is the return value from method
    `CMAEvolutionStrategy.result()` appended with `None` (no logger).
    In order to display more exciting output we rather do ::
        logger = CMADataLogger() # derives from the abstract BaseDataLogger class
        res = CMAEvolutionStrategy(9 * [0.5], 0.3).optimize(elli, logger)
        logger.plot() # if matplotlib is available, logger == res[-1]
    or even shorter ::
        res = CMAEvolutionStrategy(9 * [0.5], 0.3).optimize(elli, CMADataLogger())
        res[-1].plot() # if matplotlib is available
    Virtually the same example can be written with an explicit loop
    instead of using `optimize()`. This gives the necessary insight into
    the `OOOptimizer` class interface and gives entire control over the
    iteration loop::
        optim = CMAEvolutionStrategy(9 * [0.5], 0.3) # a new CMAEvolutionStrategy instance calling CMAEvolutionStrategy.__init__()
        logger = CMADataLogger(optim) # get a logger instance
        # this loop resembles optimize()
        while not optim.stop(): # iterate
            X = optim.ask() # get candidate solutions
            f = [elli(x) for x in X] # evaluate solutions
            # maybe do something else that needs to be done
            optim.tell(X, f) # do all the real work: prepare for next iteration
            optim.disp(20) # display info every 20th iteration
            logger.add() # log another "data line"
        # final output
        print('termination by', optim.stop())
        print('best f-value =', optim.result()[1])
        print('best solution =', optim.result()[0])
        logger.plot() # if matplotlib is available
        raw_input('press enter to continue') # prevents exiting and closing figures
    Details
    -------
    Most of the work is done in the method `tell(...)`. The method `result()` returns
    more useful output.
    """
    @staticmethod
    def abstract():
        """marks a method as abstract, ie to be implemented by a subclass"""
        import inspect
        caller = inspect.getouterframes(inspect.currentframe())[1][3]
        raise NotImplementedError('method ' + caller + '() must be implemented in subclass')
    def __init__(self, xstart, **more_args):
        """abstract method, ``xstart`` is a mandatory argument"""
        OOOptimizer.abstract()
    def initialize(self):
        """(re-)set to the initial state"""
        OOOptimizer.abstract()
    def ask(self):
        """abstract method, AKA "get", deliver new candidate solution(s), a list of "vectors"
        """
        OOOptimizer.abstract()
    def tell(self, solutions, function_values):
        """abstract method, AKA "update", prepare for next iteration"""
        OOOptimizer.abstract()
    def stop(self):
        """abstract method, return satisfied termination conditions in a dictionary like
        ``{'termination reason': value, ...}``, for example ``{'tolfun': 1e-12}``, or the empty
        dictionary ``{}``. The implementation of `stop()` should prevent an infinite loop.
        """
        OOOptimizer.abstract()
    def disp(self, modulo=None):
        """abstract method, display some iteration infos if ``self.iteration_counter % modulo == 0``"""
        OOOptimizer.abstract()
    def result(self):
        """abstract method, return ``(x, f(x), ...)``, that is, the minimizer, its function value, ..."""
        OOOptimizer.abstract()
    def optimize(self, objectivefct, logger=None, verb_disp=20, iterations=None):
        """find minimizer of `objectivefct` by iterating over `OOOptimizer` `self`
        with verbosity `verb_disp`, using `BaseDataLogger` `logger` with at
        most `iterations` iterations. ::
            return self.result() + (self.stop(), self, logger)
        Example
        -------
        >>> import cma
        >>> res = cma.CMAEvolutionStrategy(7 * [0.1], 0.5).optimize(cma.fcts.rosen, cma.CMADataLogger(), 100)
        """
        if logger is None:
            if hasattr(self, 'logger'):
                logger = self.logger
        citer = 0
        while not self.stop():
            if iterations is not None and citer >= iterations:
                # return the same extended tuple as the regular exit below,
                # so callers can rely on a single result shape
                return self.result() + (self.stop(), self, logger)
            citer += 1
            X = self.ask()  # deliver candidate solutions
            fitvals = [objectivefct(x) for x in X]
            self.tell(X, fitvals)  # all the work is done here
            self.disp(verb_disp)
            if logger:
                # log exactly one "data line" per iteration; the previous
                # code called logger.add twice, recording every iteration
                # twice
                logger.add(self, modulo=bool(logger.modulo))
        if verb_disp:
            self.disp(1)
        if verb_disp in (1, True):
            print('termination by', self.stop())
            print('best f-value =', self.result()[1])
            print('solution =', self.result()[0])
        return self.result() + (self.stop(), self, logger)
#____________________________________________________________
#____________________________________________________________
#
class CMAEvolutionStrategy(OOOptimizer):
"""CMA-ES stochastic optimizer class with ask-and-tell interface.
See `fmin` for the one-line-call functional interface.
Calling sequence
================
``optim = CMAEvolutionStrategy(x0, sigma0, opts)``
returns a class instance.
Arguments
---------
`x0`
initial solution, starting point.
`sigma0`
initial standard deviation. The problem
variables should have been scaled, such that a single
standard deviation on all variables is useful and the
optimum is expected to lie within about `x0` +- ``3*sigma0``.
See also options `scaling_of_variables`.
Often one wants to check for solutions close to the initial
point. This allows for an easier check for consistency of
the objective function and its interfacing with the optimizer.
In this case, a much smaller `sigma0` is advisable.
`opts`
options, a dictionary with optional settings,
see class `Options`.
Main interface / usage
======================
The ask-and-tell interface is inherited from the generic `OOOptimizer`
interface for iterative optimization algorithms (see there). With ::
optim = CMAEvolutionStrategy(8 * [0.5], 0.2)
an object instance is generated. In each iteration ::
solutions = optim.ask()
is used to ask for new candidate solutions (possibly several times) and ::
optim.tell(solutions, func_values)
passes the respective function values to `optim`. Instead of `ask()`,
the class `CMAEvolutionStrategy` also provides ::
(solutions, func_values) = optim.ask_and_eval(objective_func)
Therefore, after initialization, an entire optimization can be written
in two lines like ::
while not optim.stop():
optim.tell(*optim.ask_and_eval(objective_func))
Without the freedom of executing additional lines within the iteration,
the same reads in a single line as ::
optim.optimize(objective_func)
Besides for termination criteria, in CMA-ES only
the ranks of the `func_values` are relevant.
Attributes and Properties
=========================
- `inputargs` -- passed input arguments
- `inopts` -- passed options
- `opts` -- actually used options, some of them can be changed any
time, see class `Options`
- `popsize` -- population size lambda, number of candidate solutions
returned by `ask()`
Details
=======
The following two enhancements are turned off by default.
**Active CMA** is implemented with option ``CMA_active`` and conducts
an update of the covariance matrix with negative weights. The
exponential update is implemented, where from a mathematical
    viewpoint positive definiteness is guaranteed. The update is applied
after the default update and only before the covariance matrix is
decomposed, which limits the additional computational burden to be
at most a factor of three (typically smaller). A typical speed up
factor (number of f-evaluations) is between 1.1 and two.
References: Jastrebski and Arnold, CEC 2006, Glasmachers et al, GECCO 2010.
**Selective mirroring** is implemented with option ``CMA_mirrors`` in
the method ``get_mirror()``. Only the method `ask_and_eval()` will
then sample selectively mirrored vectors. In selective mirroring, only
the worst solutions are mirrored. With the default small number of mirrors,
*pairwise selection* (where at most one of the two mirrors contribute to the
    update of the distribution mean) is implicitly guaranteed under selective
mirroring and therefore not explicitly implemented.
References: Brockhoff et al, PPSN 2010, Auger et al, GECCO 2011.
Examples
========
Super-short example, with output shown:
>>> import cma
>>> # construct an object instance in 4-D, sigma0=1
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
(4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234)
>>>
>>> # iterate until termination
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.fcts.elli(x) for x in X])
... es.disp() # by default sparse, see option verb_disp
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 8 2.093015112685775e+04 1.0e+00 9.27e-01 9e-01 9e-01 0:0.0
2 16 4.964814235917688e+04 1.1e+00 9.54e-01 9e-01 1e+00 0:0.0
3 24 2.876682459926845e+05 1.2e+00 1.02e+00 9e-01 1e+00 0:0.0
100 800 6.809045875281943e-01 1.3e+02 1.41e-02 1e-04 1e-02 0:0.2
200 1600 2.473662150861846e-10 8.0e+02 3.08e-05 1e-08 8e-06 0:0.5
233 1864 2.766344961865341e-14 8.6e+02 7.99e-07 8e-11 7e-08 0:0.6
>>>
>>> cma.pprint(es.result())
(Solution([ -1.98546755e-09, -1.10214235e-09, 6.43822409e-11,
-1.68621326e-11]),
4.5119610261406537e-16,
1666,
1672,
209,
array([ -9.13545269e-09, -1.45520541e-09, -6.47755631e-11,
-1.00643523e-11]),
array([ 3.20258681e-08, 3.15614974e-09, 2.75282215e-10,
3.27482983e-11]))
>>>
>>> # help(es.result) shows
result(self) method of cma.CMAEvolutionStrategy instance
return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)``
Using the multiprocessing module, we can evaluate the function in parallel with a simple
modification of the example ::
import multiprocessing
# prepare es = ...
pool = multiprocessing.Pool(es.popsize)
while not es.stop():
X = es.ask()
es.tell(X, pool.map_async(cma.fcts.elli, X))
Example with a data logger, lower bounds (at zero) and handling infeasible solutions:
>>> import cma
>>> import numpy as np
>>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5, {'bounds': [0, np.inf]})
>>> logger = cma.CMADataLogger().register(es)
>>> while not es.stop():
... fit, X = [], []
... while len(X) < es.popsize:
... curr_fit = np.NaN
... while curr_fit is np.NaN:
... x = es.ask(1)[0]
... curr_fit = cma.fcts.somenan(x, cma.fcts.elli) # might return np.NaN
... X.append(x)
... fit.append(curr_fit)
... es.tell(X, fit)
... logger.add()
... es.disp()
<output omitted>
>>>
>>> assert es.result()[1] < 1e-9
>>> assert es.result()[2] < 9000 # by internal termination
>>> logger.plot() # plot data
>>> cma.show()
>>> print(' *** if execution stalls close the figure window to continue (and check out ipython --pylab) ***')
Example implementing restarts with increasing popsize (IPOP), output is not displayed:
>>> import cma, numpy as np
>>>
>>> # restart with increasing population size (IPOP)
>>> bestever = cma.BestSolution()
>>> for lam in 10 * 2**np.arange(7): # 10, 20, 40, 80, ..., 10 * 2**6
... es = cma.CMAEvolutionStrategy('6 - 8 * np.random.rand(9)', # 9-D
... 5, # initial std sigma0
... {'popsize': lam,
... 'verb_append': bestever.evalsall}) # pass options
... logger = cma.CMADataLogger().register(es, append=bestever.evalsall)
... while not es.stop():
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rastrigin(x) for x in X] # evaluate each solution
... es.tell(X, fit) # besides for termination only the ranking in fit is used
...
... # display some output
... logger.add() # add a "data point" to the log, writing in files
... es.disp() # uses option verb_disp with default 100
...
... print('termination:', es.stop())
... cma.pprint(es.best.__dict__)
...
... bestever.update(es.best)
...
... # show a plot
... logger.plot();
... if bestever.f < 1e-8: # global optimum was hit
... break
<output omitted>
>>> assert es.result()[1] < 1e-8
On the Rastrigin function, usually after five restarts the global optimum
is located.
The final example shows how to resume:
>>> import cma, pickle
>>>
>>> es = cma.CMAEvolutionStrategy(12 * [0.1], # a new instance, 12-D
... 0.5) # initial std sigma0
>>> logger = cma.CMADataLogger().register(es)
>>> es.optimize(cma.fcts.rosen, logger, iterations=100)
>>> logger.plot()
>>> pickle.dump(es, open('saved-cma-object.pkl', 'w'))
>>> print('saved')
>>> del es, logger # let's start fresh
>>>
>>> es = pickle.load(open('saved-cma-object.pkl'))
>>> print('resumed')
>>> logger = cma.CMADataLogger(es.opts['verb_filenameprefix'] # use same name
... ).register(es, True) # True: append to old log data
>>> es.optimize(cma.fcts.rosen, logger, verb_disp=200)
>>> assert es.result()[2] < 15000
>>> cma.pprint(es.result())
>>> logger.plot()
Missing Features
================
Option ``randn`` to pass a random number generator.
:See: `fmin()`, `Options`, `plot()`, `ask()`, `tell()`, `ask_and_eval()`
"""
# __all__ = () # TODO this would be the interface
#____________________________________________________________
@property # read only attribute decorator for a method
def popsize(self):
"""number of samples by default returned by` ask()`
"""
return self.sp.popsize
# this is not compatible with python2.5:
# @popsize.setter
# def popsize(self, p):
# """popsize cannot be set (this might change in future)
# """
# raise _Error("popsize cannot be changed (this might change in future)")
#____________________________________________________________
#____________________________________________________________
def stop(self, check=True):
# this doc string is available via help cma.CMAEvolutionStrategy.stop
"""return a dictionary with the termination status.
With ``check==False``, the termination conditions are not checked and
the status might not reflect the current situation.
"""
if (check and self.countiter > 0 and self.opts['termination_callback'] and
self.opts['termination_callback'] != str(self.opts['termination_callback'])):
self.callbackstop = self.opts['termination_callback'](self)
return self.stopdict(self if check else None) # update the stopdict and return a Dict
#____________________________________________________________
#____________________________________________________________
    def __init__(self, x0, sigma0, inopts = {}):
        """see class `CMAEvolutionStrategy`

        :param x0: initial solution, a 1-D sequence, or a string that
            ``eval``uates to one (e.g. ``'np.random.rand(9)'``)
        :param sigma0: initial standard deviation, a scalar, or a string
            that ``eval``uates to one (e.g. ``'1./N'``)
        :param inopts: options dictionary, see class `Options`.
            NOTE(review): mutable default argument -- appears to be only
            read here, but worth confirming no caller mutates it.
        """
        self.inputargs = dict(locals()) # for the record
        del self.inputargs['self'] # otherwise the instance self has a cyclic reference
        self.inopts = inopts
        opts = Options(inopts).complement() # Options() == fmin([],[]) == defaultOptions()
        # these two features are only supported through the fmin() interface
        if opts['noise_handling'] and eval(opts['noise_handling']):
            raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin')
        if opts['restarts'] and eval(opts['restarts']):
            raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin')
        # --- initial mean: accept a string expression, flatten 2-D input ---
        if x0 == str(x0):
            x0 = eval(x0)
        self.mean = array(x0) # should not have column or row, is just 1-D
        if self.mean.ndim == 2:
            print('WARNING: input x0 should be a list or 1-D array, trying to flatten ' +
                    str(self.mean.shape) + '-array')
            if self.mean.shape[0] == 1:
                self.mean = self.mean[0]
            elif self.mean.shape[1] == 1:
                self.mean = array([x[0] for x in self.mean])
        if self.mean.ndim != 1:
            raise _Error('x0 must be 1-D array')
        if len(self.mean) <= 1:
            raise _Error('optimization in 1-D is not supported (code was never tested)')
        self.N = self.mean.shape[0]
        N = self.N
        self.mean.resize(N) # 1-D array, not really necessary?!
        self.x0 = self.mean
        self.mean = self.x0.copy() # goes to initialize
        # --- initial step-size sigma: accept a string expression ---
        self.sigma0 = sigma0
        if isinstance(sigma0, str): # TODO: no real need here (do rather in fmin)
            self.sigma0 = eval(sigma0) # like '1./N' or 'np.random.rand(1)[0]+1e-2'
        if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
            raise _Error('input argument sigma0 must be (or evaluate to) a scalar')
        self.sigma = self.sigma0 # goes to inialize
        # extract/expand options
        opts.evalall(locals()) # using only N
        self.opts = opts
        self.randn = opts['randn']
        # genotype-phenotype transformation and boundary handling
        self.gp = GenoPheno(N, opts['scaling_of_variables'], opts['typical_x'],
            opts['bounds'], opts['fixed_variables'], opts['transformation'])
        self.boundPenalty = BoundPenalty(self.gp.bounds)
        s = self.gp.geno(self.mean)
        self.mean = self.gp.geno(self.mean, bounds=self.gp.bounds)
        # re-derive N: the geno mapping may change the dimension
        self.N = len(self.mean)
        N = self.N
        # warn when the bounded geno mapping moved the initial mean
        if (self.mean != s).any():
            print('WARNING: initial solution is out of the domain boundaries:')
            print(' x0 = ' + str(self.inputargs['x0']))
            print(' ldom = ' + str(self.gp.bounds[0]))
            print(' udom = ' + str(self.gp.bounds[1]))
        self.fmean = np.NaN # TODO name should change? prints nan (OK with matlab&octave)
        self.fmean_noise_free = 0. # for output only
        self.sp = CMAParameters(N, opts)
        self.sp0 = self.sp
        # initialization of state variables
        self.countiter = 0
        self.countevals = max((0, opts['verb_append'])) if type(opts['verb_append']) is not bool else 0
        self.ps = np.zeros(N)  # evolution path for step-size control
        self.pc = np.zeros(N)  # evolution path for the covariance matrix
        stds = np.ones(N)
        if np.all(self.opts['CMA_teststds']): # also 0 would not make sense
            stds = self.opts['CMA_teststds']
            if np.size(stds) != N:
                raise _Error('CMA_teststds option must have dimension = ' + str(N))
        if self.opts['CMA_diagonal']: # is True or > 0
            # linear time and space complexity
            self.B = array(1) # works fine with np.dot(self.B, anything) and self.B.T
            self.C = stds**2 # TODO: remove this!?
            self.dC = self.C
        else:
            self.B = np.eye(N) # identity(N), do not from matlib import *, as eye is a matrix there
            # prevent equal eigenvals, a hack for np.linalg:
            self.C = np.diag(stds**2 * exp(1e-6*(np.random.rand(N)-0.5)))
            self.dC = np.diag(self.C)
            self.Zneg = np.zeros((N, N))
        self.D = stds
        self.flgtelldone = True  # no un-told solutions pending, see ask_geno()/tell()
        self.itereigenupdated = self.countiter
        self.noiseS = 0 # noise "signal"
        self.hsiglist = []
        # --- seeding: derive a seed from the clock when none is given ---
        if not opts['seed']:
            np.random.seed()
            six_decimals = (time.time() - 1e6 * (time.time() // 1e6))
            opts['seed'] = 1e5 * np.random.rand() + six_decimals + 1e5 * (time.time() % 1)
        opts['seed'] = int(opts['seed'])
        np.random.seed(opts['seed'])
        # archive of solutions delivered by ask(), keyed by phenotype
        self.sent_solutions = SolutionDict()
        self.best = BestSolution()
        out = {} # TODO: obsolete, replaced by method results()?
        out['best'] = self.best
        # out['hsigcount'] = 0
        out['termination'] = {}
        self.out = out
        self.const = BlancClass()
        self.const.chiN = N**0.5*(1-1./(4.*N)+1./(21.*N**2)) # expectation of norm(randn(N,1))
        # attribute for stopping criteria in function stop
        self.stopdict = CMAStopDict()
        self.callbackstop = 0
        self.fit = BlancClass()
        self.fit.fit = [] # not really necessary
        self.fit.hist = [] # short history of best
        self.fit.histbest = [] # long history of best
        self.fit.histmedian = [] # long history of median
        self.more_to_write = [] #[1, 1, 1, 1] # N*[1] # needed when writing takes place before setting
        # say hello
        if opts['verb_disp'] > 0:
            sweighted = '_w' if self.sp.mu > 1 else ''
            smirr = 'mirr%d' % (self.sp.lam_mirr) if self.sp.lam_mirr else ''
            print('(%d' % (self.sp.mu) + sweighted + ',%d' % (self.sp.popsize) + smirr + ')-CMA-ES' +
                  ' (mu_w=%2.1f,w_1=%d%%)' % (self.sp.mueff, int(100*self.sp.weights[0])) +
                  ' in dimension %d (seed=%d, %s)' % (N, opts['seed'], time.asctime())) # + func.__name__
            if opts['CMA_diagonal'] and self.sp.CMA_on:
                s = ''
                if opts['CMA_diagonal'] is not True:
                    s = ' for '
                    if opts['CMA_diagonal'] < np.inf:
                        s += str(int(opts['CMA_diagonal']))
                    else:
                        s += str(np.floor(opts['CMA_diagonal']))
                    s += ' iterations'
                    s += ' (1/ccov=' + str(round(1./(self.sp.c1+self.sp.cmu))) + ')'
                print(' Covariance matrix is diagonal' + s)
#____________________________________________________________
#____________________________________________________________
def ask(self, number=None, xmean=None, sigma_fac=1):
"""get new candidate solutions, sampled from a multi-variate
normal distribution and transformed to f-representation
(phenotype) to be evaluated.
Arguments
---------
`number`
number of returned solutions, by default the
population size ``popsize`` (AKA ``lambda``).
`xmean`
distribution mean
`sigma`
multiplier for internal sample width (standard
deviation)
Return
------
A list of N-dimensional candidate solutions to be evaluated
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy([0,0,0,0], 0.3)
>>> while not es.stop() and es.best.f > 1e-6: # my_desired_target_f_value
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rosen(x) for x in X] # call function rosen with each solution
... es.tell(X, fit) # feed values
:See: `ask_and_eval`, `ask_geno`, `tell`
"""
pop_geno = self.ask_geno(number, xmean, sigma_fac)
# N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds!
# new data: 11.5s vs 9.5s == 20%
# TODO: check here, whether this is necessary?
# return [self.gp.pheno(x, copy=False, bounds=self.gp.bounds) for x in pop] # probably fine
# return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop] # here comes the memory leak, now solved
# pop_pheno = [Solution(self.gp.pheno(x, copy=False), copy=False).repair(self.gp.bounds) for x in pop_geno]
pop_pheno = [self.gp.pheno(x, copy=True, bounds=self.gp.bounds) for x in pop_geno]
if not self.gp.isidentity or use_sent_solutions: # costs 25% in CPU performance with N,lambda=20,200
# archive returned solutions, first clean up archive
if self.countiter % 30/self.popsize**0.5 < 1:
self.sent_solutions.truncate(0, self.countiter - 1 - 3 * self.N/self.popsize**0.5)
# insert solutions
for i in xrange(len(pop_geno)):
self.sent_solutions[pop_pheno[i]] = {'geno': pop_geno[i],
'pheno': pop_pheno[i],
'iteration': self.countiter}
return pop_pheno
#____________________________________________________________
#____________________________________________________________
    def ask_geno(self, number=None, xmean=None, sigma_fac=1):
        """get new candidate solutions in genotyp, sampled from a
        multi-variate normal distribution.

        Arguments are
            `number`
                number of returned solutions, by default the
                population size `popsize` (AKA lambda).
            `xmean`
                distribution mean
            `sigma_fac`
                multiplier for internal sample width (standard
                deviation)

        `ask_geno` returns a list of N-dimensional candidate solutions
        in genotyp representation and is called by `ask`.

        :See: `ask`, `ask_and_eval`
        """
        if number is None or number < 1:
            number = self.sp.popsize
        if xmean is None:
            xmean = self.mean
        if self.countiter == 0:
            # NOTE(review): time.clock() is removed in Python 3.8+ -- confirm
            # the supported interpreter versions
            self.tic = time.clock() # backward compatible
            self.elapsed_time = ElapsedTime()
        # alternative sampling path delegated entirely to the AII helper
        if self.opts['CMA_AII']:
            if self.countiter == 0:
                self.aii = AII(self.x0, self.sigma0)
            self.flgtelldone = False
            pop = self.aii.ask(number)
            return pop
        sigma = sigma_fac * self.sigma
        # update parameters for sampling the distribution
        #        fac  0      1      10
        # 150-D cigar:
        #           50749  50464   50787
        # 200-D elli:               == 6.9
        #                  99900   101160
        #                 100995   103275 == 2% loss
        # 100-D elli:               == 6.9
        #                 363052   369325  < 2% loss
        #                 365075   365755
        # update distribution (lazy eigendecomposition of C into B and D):
        # either at a rate tied to the learning speed 1/(c1+cmu), or after a
        # fixed waiting time when 'updatecovwait' is set
        if self.sp.CMA_on and (
                (self.opts['updatecovwait'] is None and
                  self.countiter >=
                      self.itereigenupdated + 1./(self.sp.c1+self.sp.cmu)/self.N/10
                  ) or
                (self.opts['updatecovwait'] is not None and
                  self.countiter > self.itereigenupdated + self.opts['updatecovwait']
                  )):
            self.updateBD()
        # sample distribution
        if self.flgtelldone: # could be done in tell()!?
            self.flgtelldone = False
            self.ary = []
        # each row is a solution
        arz = self.randn((number, self.N))
        # keep the raw normal sample only when a full population was drawn
        if number == self.sp.popsize:
            self.arz = arz
        else:
            pass
            # print 'damn'
        if 11 < 3:  # disabled experiment: normalize the average length to chiN
            for i in xrange(len(arz)):
                # arz[i] *= exp(self.randn(1)[0] / 8)
                ss = sum(arz[i]**2)**0.5
                arz[i] *= self.const.chiN / ss
            # arz *= 1 * self.const.chiN / np.mean([sum(z**2)**0.5 for z in arz])
            # fac = np.mean(sum(arz**2, 1)**0.5)
            # print fac
            # arz *= self.const.chiN / fac
        # transform the isotropic sample by B*D (principal axes and scales of C)
        self.ary = np.dot(self.B, (self.D * arz).T).T
        pop = xmean + sigma * self.ary
        self.evaluations_per_f_value = 1
        return pop
def get_mirror(self, x):
"""return ``pheno(self.mean - (geno(x) - self.mean))``.
TODO: this implementation is yet experimental.
Selectively mirrored sampling improves to a moderate extend but
overadditively with active CMA for quite understandable reasons.
Optimal number of mirrors are suprisingly small: 1,2,3 for maxlam=7,13,20
however note that 3,6,10 are the respective maximal possible mirrors that
must be clearly suboptimal.
"""
try:
# dx = x.geno - self.mean, repair or boundary handling is not taken into account
dx = self.sent_solutions[x]['geno'] - self.mean
except:
print 'WARNING: use of geno is depreciated'
dx = self.gp.geno(x, copy=True) - self.mean
dx *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobisNorm(dx)
x = self.mean - dx
y = self.gp.pheno(x, bounds=self.gp.bounds)
if not self.gp.isidentity or use_sent_solutions: # costs 25% in CPU performance with N,lambda=20,200
self.sent_solutions[y] = {'geno': x,
'pheno': y,
'iteration': self.countiter}
return y
def mirror_penalized(self, f_values, idx):
"""obsolete and subject to removal (TODO),
return modified f-values such that for each mirror one becomes worst.
This function is useless when selective mirroring is applied with no
more than (lambda-mu)/2 solutions.
Mirrors are leading and trailing values in ``f_values``.
"""
assert len(f_values) >= 2 * len(idx)
m = np.max(np.abs(f_values))
for i in len(idx):
if f_values[idx[i]] > f_values[-1-i]:
f_values[idx[i]] += m
else:
f_values[-1-i] += m
return f_values
def mirror_idx_cov(self, f_values, idx1): # will most likely be removed
"""obsolete and subject to removal (TODO),
return indices for negative ("active") update of the covariance matrix
assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
the corresponding mirrored values
computes the index of the worse solution sorted by the f-value of the
better solution.
TODO: when the actual mirror was rejected, it is better
to return idx1 instead of idx2.
Remark: this function might not be necessary at all: if the worst solution
is the best mirrored, the covariance matrix updates cancel (cave: weights
and learning rates), which seems what is desirable. If the mirror is bad,
as strong negative update is made, again what is desirable.
And the fitness--step-length correlation is in part addressed by
using flat weights.
"""
idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
f = []
for i in xrange(len(idx1)):
f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
# idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
return idx2[np.argsort(f)][-1::-1]
#____________________________________________________________
#____________________________________________________________
#
def ask_and_eval(self, func, args=(), number=None, xmean=None, sigma_fac=1,
evaluations=1, aggregation=np.median):
"""samples `number` solutions and evaluates them on `func`, where
each solution `s` is resampled until ``func(s) not in (numpy.NaN, None)``.
Arguments
---------
`func`
objective function
`args`
additional parameters for `func`
`number`
number of solutions to be sampled, by default
population size ``popsize`` (AKA lambda)
`xmean`
mean for sampling the solutions, by default ``self.mean``.
`sigma_fac`
multiplier for sampling width, standard deviation, for example
to get a small perturbation of solution `xmean`
`evaluations`
number of evaluations for each sampled solution
`aggregation`
function that aggregates `evaluations` values to
as single value.
Return
------
``(X, fit)``, where
X -- list of solutions
fit -- list of respective function values
Details
-------
When ``func(x)`` returns `NaN` or `None` a new solution is sampled until
``func(x) not in (numpy.NaN, None)``. The argument to `func` can be
freely modified within `func`.
Depending on the ``CMA_mirrors`` option, some solutions are not sampled
independently but as mirrors of other bad solutions. This is a simple
derandomization that can save 10-30% of the evaluations in particular
with small populations, for example on the cigar function.
Example
-------
>>> import cma
>>> x0, sigma0 = 8*[10], 1 # 8-D
>>> es = cma.CMAEvolutionStrategy(x0, sigma0)
>>> while not es.stop():
... X, fit = es.ask_and_eval(cma.fcts.elli) # handles NaN with resampling
... es.tell(X, fit) # pass on fitness values
... es.disp(20) # print every 20-th iteration
>>> print('terminated on ' + str(es.stop()))
<output omitted>
A single iteration step can be expressed in one line, such that
an entire optimization after initialization becomes
::
while not es.stop():
es.tell(*es.ask_and_eval(cma.fcts.elli))
"""
# initialize
popsize = self.sp.popsize
if number is not None:
popsize = number
selective_mirroring = True
nmirrors = self.sp.lam_mirr
if popsize != self.sp.popsize:
nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
# TODO: now selective mirroring might be impaired
assert nmirrors <= popsize // 2
self.mirrors_idx = np.arange(nmirrors) # might never be used
self.mirrors_rejected_idx = [] # might never be used
if xmean is None:
xmean = self.mean
# do the work
fit = [] # or np.NaN * np.empty(number)
X_first = self.ask(popsize)
X = []
for k in xrange(int(popsize)):
nreject = -1
f = np.NaN
while f in (np.NaN, None): # rejection sampling
nreject += 1
if k < popsize - nmirrors or nreject:
if nreject:
x = self.ask(1, xmean, sigma_fac)[0]
else:
x = X_first.pop(0)
else: # mirrored sample
if k == popsize - nmirrors and selective_mirroring:
self.mirrors_idx = np.argsort(fit)[-1:-1-nmirrors:-1]
x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
if nreject == 1 and k >= popsize - nmirrors:
self.mirrors_rejected_idx.append(k)
# contraints handling test hardwired ccccccccccc
if 11 < 3 and self.opts['vv'] and nreject < 2: # trying out negative C-update as constraints handling
try:
_tmp = self.constraints_paths
except:
k = 1
self.constraints_paths = [np.zeros(self.N) for _i in xrange(k)]
Izero = np.zeros([self.N, self.N])
for i in xrange(self.N):
if x[i] < 0:
Izero[i][i] = 1
self.C -= self.opts['vv'] * Izero
Izero[i][i] = 0
if 1 < 3 and sum([ (9 + i + 1) * x[i] for i in xrange(self.N)]) > 50e3:
self.constraints_paths[0] = 0.9 * self.constraints_paths[0] + 0.1 * (x - self.mean) / self.sigma
self.C -= (self.opts['vv'] / self.N) * np.outer(self.constraints_paths[0], self.constraints_paths[0])
f = func(x, *args)
if f not in (np.NaN, None) and evaluations > 1:
f = aggregation([f] + [func(x, *args) for _i in xrange(int(evaluations-1))])
if nreject + 1 % 1000 == 0:
print(' %d solutions rejected (f-value NaN or None) at iteration' %
(nreject, self.countiter))
fit.append(f)
X.append(x)
self.evaluations_per_f_value = int(evaluations)
return X, fit
#____________________________________________________________
def tell(self, solutions, function_values,
function_values_reevaluated=None, check_points=None, copy=False):
"""pass objective function values to prepare for next
iteration. This core procedure of the CMA-ES algorithm updates
all state variables: two evolution paths, the distribution mean,
the covariance matrix and a step-size.
Arguments
---------
`solutions`
list or array of candidate solution points (of
type `numpy.ndarray`), most presumably before
delivered by method `ask()` or `ask_and_eval()`.
`function_values`
list or array of objective function values
corresponding to the respective points. Beside for termination
decisions, only the ranking of values in `function_values`
is used.
`check_points`
if ``True``, allows to savely pass solutions that are
not necessarily generated using `ask()`. Might just as well be a
list of indices to be checked in solutions. Value ``None`` defaults
to ``False``.
`copy`
``solutions`` might be modified, if ``copy is False``
Details
-------
`tell()` updates the parameters of the multivariate
normal search distribution, namely covariance matrix and
step-size and updates also the attributes `countiter` and
`countevals`. To check the points for consistency is quadratic
in the dimension (like sampling points).
Bugs
----
The effect of changing the solutions delivered by `ask()` depends on whether
boundary handling is applied. With boundary handling, modifications are
disregarded. This is necessary to apply the default boundary handling that
uses unrepaired solutions but might change in future.
Example
-------
::
import cma
func = cma.fcts.elli # choose objective function
es = cma.CMAEvolutionStrategy(cma.np.random.rand(10), 1)
while not es.stop():
X = es.ask()
es.tell(X, [func(x) for x in X])
es.result() # where the result can be found
:See: class `CMAEvolutionStrategy`, `ask()`, `ask_and_eval()`, `fmin()`
"""
#____________________________________________________________
# TODO: consider an input argument that flags injected trust-worthy solutions (which means
# that they can be treated "absolut" rather than "relative")
if self.flgtelldone:
raise _Error('tell should only be called once per iteration')
if check_points is None:
check_points = self.opts['check_points']
if check_points is None:
check_points = False
lam = len(solutions)
if lam != array(function_values).shape[0]:
raise _Error('for each candidate solution '
+ 'a function value must be provided')
if lam + self.sp.lam_mirr < 3:
raise _Error('population size ' + str(lam) + ' is too small when option CMA_mirrors * popsize < 0.5')
if not np.isscalar(function_values[0]):
if np.isscalar(function_values[0][0]):
if self.countiter <= 1:
print('WARNING: function values are not a list of scalars (further warnings are suppressed)')
function_values = [val[0] for val in function_values]
else:
raise _Error('objective function values must be a list of scalars')
### prepare
N = self.N
sp = self.sp
if 11 < 3 and lam != sp.popsize: # turned off, because mu should stay constant, still not desastrous
print('WARNING: population size has changed, recomputing parameters')
self.sp.set(self.opts, lam) # not really tested
if lam < sp.mu: # rather decrease cmean instead of having mu > lambda//2
raise _Error('not enough solutions passed to function tell (mu>lambda)')
self.countiter += 1 # >= 1 now
self.countevals += sp.popsize * self.evaluations_per_f_value
self.best.update(solutions, self.sent_solutions, function_values, self.countevals)
flgseparable = self.opts['CMA_diagonal'] is True \
or self.countiter <= self.opts['CMA_diagonal']
if not flgseparable and len(self.C.shape) == 1: # C was diagonal ie 1-D
# enter non-separable phase (no easy return from here)
self.B = np.eye(N) # identity(N)
self.C = np.diag(self.C)
idx = np.argsort(self.D)
self.D = self.D[idx]
self.B = self.B[:,idx]
self.Zneg = np.zeros((N, N))
### manage fitness
fit = self.fit # make short cut
# CPU for N,lam=20,200: this takes 10s vs 7s
fit.bndpen = self.boundPenalty.update(function_values, self)(solutions, self.sent_solutions, self.gp)
# for testing:
# fit.bndpen = self.boundPenalty.update(function_values, self)([s.unrepaired for s in solutions])
fit.idx = np.argsort(array(fit.bndpen) + array(function_values))
fit.fit = array(function_values, copy=False)[fit.idx]
# update output data TODO: this is obsolete!? However: need communicate current best x-value?
# old: out['recent_x'] = self.gp.pheno(pop[0])
self.out['recent_x'] = array(solutions[fit.idx[0]]) # TODO: change in a data structure(?) and use current as identify
self.out['recent_f'] = fit.fit[0]
# fitness histories
fit.hist.insert(0, fit.fit[0])
# if len(self.fit.histbest) < 120+30*N/sp.popsize or # does not help, as tablet in the beginning is the critical counter-case
if ((self.countiter % 5) == 0): # 20 percent of 1e5 gen.
fit.histbest.insert(0, fit.fit[0])
fit.histmedian.insert(0, np.median(fit.fit) if len(fit.fit) < 21
else fit.fit[self.popsize // 2])
if len(fit.histbest) > 2e4: # 10 + 30*N/sp.popsize:
fit.histbest.pop()
fit.histmedian.pop()
if len(fit.hist) > 10 + 30*N/sp.popsize:
fit.hist.pop()
if self.opts['CMA_AII']:
self.aii.tell(solutions, function_values)
self.flgtelldone = True
# for output:
self.mean = self.aii.mean
self.dC = self.aii.sigmai**2
self.sigma = self.aii.sigma
self.D = 1e-11 + (self.aii.r**2)**0.5
self.more_to_write = [self.aii.sigma_r]
return
# TODO: clean up inconsistency when an unrepaired solution is available and used
pop = [] # create pop from input argument solutions
for s in solutions: # use phenotype before Solution.repair()
if use_sent_solutions:
x = self.sent_solutions.pop(s, None) # 12.7s vs 11.3s with N,lambda=20,200
if x is not None:
pop.append(x['geno'])
# TODO: keep additional infos or don't pop s from sent_solutions in the first place
else:
print 'WARNING: solution not found in ``self.sent_solutions``'
pop.append(self.gp.geno(s, copy=copy)) # cannot recover the original genotype with boundary handling
self.repair_genotype(pop[-1]) # necessary if pop[-1] was changed or injected by the user.
print 'repaired'
else: # TODO: to be removed? How about the case with injected solutions?
print 'WARNING: ``geno`` mapping depreciated'
pop.append(self.gp.geno(s, copy=copy))
# self.repair_genotype(pop[-1]) # necessary or not?
# print 'repaired'
mold = self.mean
sigma_fac = 1
# check and normalize each x - m
# check_points is a flag or an index list
# should also a number possible (first check_points points)?
if check_points not in (None, False, 0, [], ()): # useful in case of injected solutions and/or adaptive encoding
try:
if len(check_points):
idx = check_points
except:
idx = xrange(sp.popsize)
for k in idx:
self.repair_genotype(pop[k])
# sort pop
if type(pop) is not array: # only arrays can be multiple indexed
pop = array(pop, copy=False)
pop = pop[fit.idx]
if self.opts['CMA_elitist'] and self.best.f < fit.fit[0]:
xp = [self.best.xdict['geno']]
# xp = [self.gp.geno(self.best.x[:])] # TODO: remove
# print self.mahalanobisNorm(xp[0]-self.mean)
self.clip_or_fit_solutions(xp, [0])
pop = array([xp[0]] + list(pop))
# compute new mean
self.mean = mold + self.sp.cmean * \
(sum(sp.weights * pop[0:sp.mu].T, 1) - mold)
# check Delta m (this is not default, but could become at some point)
# CAVE: upper_length=sqrt(2)+2 is too restrictive, test upper_length = sqrt(2*N) thoroughly.
# simple test case injecting self.mean:
# self.mean = 1e-4 * self.sigma * np.random.randn(N)
if 11 < 3 and self.opts['vv'] and check_points: # TODO: check_points might be an index-list
cmean = self.sp.cmean / min(1, (sqrt(self.opts['vv']*N)+2) / ( # abuse of cmean
(sqrt(self.sp.mueff) / self.sp.cmean) *
self.mahalanobisNorm(self.mean - mold)))
else:
cmean = self.sp.cmean
if 11 < 3: # plot length of mean - mold
self.more_to_write = [sqrt(sp.mueff) *
sum(((1./self.D) * dot(self.B.T, self.mean - mold))**2)**0.5 /
self.sigma / sqrt(N) / cmean]
# get learning rate constants
cc, c1, cmu = sp.cc, sp.c1, sp.cmu
if flgseparable:
cc, c1, cmu = sp.cc_sep, sp.c1_sep, sp.cmu_sep
# now the real work can start
# evolution paths
self.ps = (1-sp.cs) * self.ps + \
(sqrt(sp.cs*(2-sp.cs)*sp.mueff) / self.sigma / cmean) * \
dot(self.B, (1./self.D) * dot(self.B.T, self.mean - mold))
# "hsig", correction with self.countiter seems not necessary, also pc starts with zero
hsig = sum(self.ps**2) / (1-(1-sp.cs)**(2*self.countiter)) / self.N < 2 + 4./(N+1)
if 11 < 3:
# hsig = 1
# sp.cc = 4 / (N + 4)
# sp.cs = 4 / (N + 4)
# sp.cc = 1
# sp.damps = 2 #
# sp.CMA_on = False
# c1 = 0 # 2 / ((N + 1.3)**2 + 0 * sp.mu) # 1 / N**2
# cmu = min([1 - c1, cmu])
if self.countiter == 1:
print 'parameters modified'
# hsig = sum(self.ps**2) / self.N < 2 + 4./(N+1)
# adjust missing variance due to hsig, in 4-D with damps=1e99 and sig0 small
# hsig leads to premature convergence of C otherwise
#hsiga = (1-hsig**2) * c1 * cc * (2-cc) # to be removed in future
c1a = c1 - (1-hsig**2) * c1 * cc * (2-cc) # adjust for variance loss
if 11 < 3: # diagnostic data
self.out['hsigcount'] += 1 - hsig
if not hsig:
self.hsiglist.append(self.countiter)
if 11 < 3: # diagnostic message
if not hsig:
print(str(self.countiter) + ': hsig-stall')
if 11 < 3: # for testing purpose
hsig = 1 # TODO:
# put correction term, but how?
if self.countiter == 1:
print('hsig=1')
self.pc = (1-cc) * self.pc + \
hsig * (sqrt(cc*(2-cc)*sp.mueff) / self.sigma / cmean) * \
(self.mean - mold)
# covariance matrix adaptation/udpate
if sp.CMA_on:
# assert sp.c1 + sp.cmu < sp.mueff / N # ??
assert c1 + cmu <= 1
# default full matrix case
if not flgseparable:
Z = (pop[0:sp.mu] - mold) / self.sigma
Z = dot((cmu * sp.weights) * Z.T, Z) # learning rate integrated
if self.sp.neg.cmuexp:
tmp = (pop[-sp.neg.mu:] - mold) / self.sigma
self.Zneg *= 1 - self.sp.neg.cmuexp # for some reason necessary?
self.Zneg += dot(sp.neg.weights * tmp.T, tmp) - self.C
# self.update_exponential(dot(sp.neg.weights * tmp.T, tmp) - 1 * self.C, -1*self.sp.neg.cmuexp)
if 11 < 3: # ?3 to 5 times slower??
Z = np.zeros((N,N))
for k in xrange(sp.mu):
z = (pop[k]-mold)
Z += np.outer((cmu * sp.weights[k] / self.sigma**2) * z, z)
self.C *= 1 - c1a - cmu
self.C += np.outer(c1 * self.pc, self.pc) + Z
self.dC = np.diag(self.C) # for output and termination checking
else: # separable/diagonal linear case
assert(c1+cmu <= 1)
Z = np.zeros(N)
for k in xrange(sp.mu):
z = (pop[k]-mold) / self.sigma # TODO see above
Z += sp.weights[k] * z * z # is 1-D
self.C = (1-c1a-cmu) * self.C + c1 * self.pc * self.pc + cmu * Z
# TODO: self.C *= exp(cmuneg * (N - dot(sp.neg.weights, **2)
self.dC = self.C
self.D = sqrt(self.C) # C is a 1-D array
self.itereigenupdated = self.countiter
# idx = self.mirror_idx_cov() # take half of mirrored vectors for negative update
# step-size adaptation, adapt sigma
if 11 < 3: #
self.sigma *= sigma_fac * \
np.exp((min((1000, (sp.cs/sp.damps/2) *
(sum(self.ps**2)/N - 1)))))
else:
self.sigma *= sigma_fac * \
np.exp((min((1, (sp.cs/sp.damps) *
(sqrt(sum(self.ps**2))/self.const.chiN - 1)))))
if 11 < 3:
# derandomized MSR = natural gradient descent
lengths = array([sum(z**2)**0.5 for z in self.arz[fit.idx[:self.sp.mu]]])
# print lengths[0::int(self.sp.mu/5)]
self.sigma *= np.exp(self.sp.mueff**0.5 * dot(self.sp.weights, lengths / self.const.chiN - 1))**(2/(N+1))
if 11 < 3 and self.opts['vv']:
if self.countiter < 2:
print('constant sigma applied')
print(self.opts['vv']) # N=10,lam=10: 0.8 is optimal
self.sigma = self.opts['vv'] * self.sp.mueff * sum(self.mean**2)**0.5 / N
if self.sigma * min(self.dC)**0.5 < self.opts['minstd']:
self.sigma = self.opts['minstd'] / min(self.dC)**0.5
# g = self.countiter
# N = self.N
mindx = eval(self.opts['mindx']) if type(self.opts['mindx']) == type('') else self.opts['mindx']
if self.sigma * min(self.D) < mindx:
self.sigma = mindx / min(self.D)
if self.sigma > 1e9 * self.sigma0:
alpha = self.sigma / max(self.D)
self.multiplyC(alpha)
self.sigma /= alpha**0.5
self.opts['tolupsigma'] /= alpha**0.5 # to be compared with sigma
# TODO increase sigma in case of a plateau?
# Uncertainty noise measurement is done on an upper level, was: tobe-inserted
# output, has moved up, e.g. as part of fmin, TODO to be removed
if 11 < 3 and self.opts['verb_log'] > 0 and (self.countiter < 4 or
self.countiter % self.opts['verb_log'] == 0):
# this assumes that two logger with the same name access the same data!
CMADataLogger(self.opts['verb_filenameprefix']).register(self, append=True).add()
# self.writeOutput(solutions[fit.idx[0]])
self.flgtelldone = True
# end tell()
def result(self):
    """Return the optimization result as a tuple
    ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations,
    pheno(xmean), effective_stds)``.
    """
    # TODO: how about xcurrent?
    best_part = self.best.get()
    # effective standard deviations in phenotypic space
    stds = self.gp.scales * self.sigma * self.dC**0.5
    extra = (self.countevals, self.countiter, self.gp.pheno(self.mean), stds)
    return best_part + extra
def clip_or_fit_solutions(self, pop, idx):
    """Limit how far the solutions ``pop[k]`` for ``k in idx`` may lie
    from the sample distribution; this interface will probably change.

    In particular the frequency of long vectors appearing in
    ``pop[idx] - self.mean`` is limited, delegating the actual work to
    `repair_genotype`.
    """
    repair = self.repair_genotype
    for position in idx:
        repair(pop[position])
def repair_genotype(self, x):
    """make sure that solutions fit to sample distribution, this interface will probably change.

    In particular the frequency of x - self.mean being long is limited:
    the Mahalanobis length of ``x - self.mean`` is hard-clipped at an
    internal ``upper_length``. Returns ``1.0`` (a sigma correction
    factor, currently unused).

    NOTE(review): ``x = (x - mold) / fac + mold`` rebinds the local name
    only; the caller's array is NOT modified in place -- confirm whether
    that is intended.
    """
    mold = self.mean
    if 1 < 3: # hard clip at upper_length
        # `1 < 3` keeps this branch always active; the alternatives
        # below are preserved experiments
        upper_length = self.N**0.5 + 2 * self.N / (self.N+2) # should become an Option
        fac = self.mahalanobisNorm(x - mold) / upper_length
        if fac > 1:
            # rescale the step onto the clipping radius
            x = (x - mold) / fac + mold
            # print self.countiter, k, fac, self.mahalanobisNorm(pop[k] - mold)
            # adapt also sigma: which are the trust-worthy/injected solutions?
    elif 11 < 3:
        # dead branch (11 < 3 is always False): smooth sigma-correction experiment
        return exp(np.tanh(((upper_length*fac)**2/self.N-1)/2) / 2)
    else:
        # unreachable while the first condition is `1 < 3`
        if 'checktail' not in self.__dict__: # hasattr(self, 'checktail')
            from check_tail_smooth import CheckTail # for the time being
            self.checktail = CheckTail()
            print('untested feature checktail is on')
        fac = self.checktail.addchin(self.mahalanobisNorm(x - mold))
        if fac < 1:
            x = fac * (x - mold) + mold
    return 1.0 # sigma_fac, not in use
#____________________________________________________________
#____________________________________________________________
#
def updateBD(self):
    """update internal variables for sampling the distribution with the
    current covariance matrix C. This method is O(N^3), if C is not diagonal.

    Recomputes the eigendecomposition ``C = B * diag(D**2) * B.T`` using
    the backend selected by option ``CMA_eigenmethod`` and stores the
    sorted eigenvalue square roots in ``self.D`` and the eigenvectors as
    columns of ``self.B``.
    """
    # itereigenupdated is always up-to-date in the diagonal case
    # just double check here
    if self.itereigenupdated == self.countiter:
        return
    # apply the pending "active CMA" negative update before the
    # eigendecomposition, then reset the accumulator
    if self.sp.neg.cmuexp: # cave:
        self.update_exponential(self.Zneg, -self.sp.neg.cmuexp)
        # self.C += self.Zpos # pos update after Zneg would be the correct update, overall:
        # self.C = self.Zpos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Zneg*Csi) * Cs
        self.Zneg = np.zeros((self.N, self.N))
    if 11 < 3: # normalize trace of C (disabled experiment)
        s = sum(self.dC)
        self.C *= self.N / s
        self.dC *= self.N / s
    # enforce symmetry against numerical drift before the decomposition
    self.C = (self.C + self.C.T) / 2
    # self.C = np.triu(self.C) + np.triu(self.C,1).T # should work as well
    # self.D, self.B = eigh(self.C) # hermitian, ie symmetric C is assumed
    # dispatch on the configured eigendecomposition backend; integers are
    # a deprecated way to select a backend
    if type(self.opts['CMA_eigenmethod']) == type(1):
        print('WARNING: option CMA_eigenmethod should be a function, not an integer')
        if self.opts['CMA_eigenmethod'] == -1:
            # pygsl
            # easy to install (well, in Windows install gsl binaries first,
            # set system path to respective libgsl-0.dll (or cp the dll to
            # python\DLLS ?), in unzipped pygsl edit
            # gsl_dist/gsl_site_example.py into gsl_dist/gsl_site.py
            # and run "python setup.py build" and "python setup.py install"
            # in MINGW32)
            if 1 < 3: # import pygsl on the fly
                try:
                    import pygsl.eigen.eigenvectors # TODO efficient enough?
                except ImportError:
                    print('WARNING: could not find pygsl.eigen module, either install pygsl \n' +
                          ' or set option CMA_eigenmethod=1 (is much slower), option set to 1')
                    self.opts['CMA_eigenmethod'] = 0 # use 0 if 1 is too slow
                self.D, self.B = pygsl.eigen.eigenvectors(self.C)
        elif self.opts['CMA_eigenmethod'] == 0:
            # TODO: thoroughly test np.linalg.eigh
            # numpy.linalg.eig crashes in 200-D
            # and EVecs with same EVals are not orthogonal
            self.D, self.B = np.linalg.eigh(self.C) # self.B[i] is a row and not an eigenvector
        else: # is overall two;ten times slower in 10;20-D
            self.D, self.B = Misc.eig(self.C) # def eig, see below
    else:
        # the option already is a callable eigendecomposition backend
        self.D, self.B = self.opts['CMA_eigenmethod'](self.C)
    # assert(sum(self.D-DD) < 1e-6)
    # assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6)
    # assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6)
    # sort eigenpairs ascending by eigenvalue
    idx = np.argsort(self.D)
    self.D = self.D[idx]
    self.B = self.B[:,idx] # self.B[i] is a row, columns self.B[:,i] are eigenvectors
    # assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:]))
    if 11 < 3 and any(abs(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0)) > 1e-6):
        # disabled orthogonality diagnostic
        print('B is not orthogonal')
        print(self.D)
        print(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0))
    else:
        # is O(N^3)
        # assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11)
        pass
    # store square roots of the eigenvalues: sampling uses B * D * z
    self.D **= 0.5
    self.itereigenupdated = self.countiter
def multiplyC(self, alpha):
    """Scale the covariance matrix ``C`` by the scalar ``alpha`` and keep
    the derived quantities (``dC``, ``D``) consistent with it."""
    root = alpha**0.5
    self.C *= alpha
    # in the separable/diagonal case dC may alias C and was scaled above
    if self.dC is not self.C:
        self.dC *= alpha
    # eigenvalue square roots scale with sqrt(alpha)
    self.D *= root
def update_exponential(self, Z, eta, BDpair=None):
    """exponential update of C that guarantees positive definiteness, that is,
    instead of the assignment ``C = C + eta * Z``,
    C gets C**.5 * exp(eta * C**-.5 * Z * C**-.5) * C**.5.

    Parameter Z should have expectation zero, e.g. sum(w[i] * z[i] * z[i].T) - C
    if E z z.T = C.

    This function conducts two eigendecompositions, assuming that
    B and D are not up to date, unless `BDpair` is given. Given BDpair,
    B is the eigensystem and D is the vector of sqrt(eigenvalues), one
    eigendecomposition is omitted.

    Reference: Glasmachers et al 2010, Exponential Natural Evolution Strategies
    """
    # a zero learning rate is an exact no-op
    if eta == 0:
        return
    if BDpair:
        B, D = BDpair
    else:
        # decompose C on the fly; D holds sqrt(eigenvalues) afterwards
        D, B = self.opts['CMA_eigenmethod'](self.C)
        D **= 0.5
    # Csi = C**-0.5 and Cs = C**0.5, reconstructed from the eigensystem
    Csi = dot(B, (B / D).T)
    Cs = dot(B, (B * D).T)
    # C <- C**0.5 * expm(eta * C**-0.5 * Z * C**-0.5) * C**0.5
    self.C = dot(Cs, dot(Mh.expms(eta * dot(Csi, dot(Z, Csi)), self.opts['CMA_eigenmethod']), Cs))
#____________________________________________________________
#____________________________________________________________
#
def _updateCholesky(self, A, Ainv, p, alpha, beta):
    """not yet implemented

    Rank-one Cholesky factor update: raises `_Error` unconditionally;
    everything after the `raise` is unreachable draft code kept for
    reference only.
    """
    # BD is A, p is A*Normal(0,I) distributed
    # input is assumed to be numpy arrays
    # Ainv is needed to compute the evolution path
    # this is a stump and is not tested
    raise _Error("not yet implemented")
    # --- unreachable draft below ---
    # prepare
    alpha = float(alpha)
    beta = float(beta)
    y = np.dot(Ainv, p)
    y_sum = sum(y**2)
    # compute scalars
    tmp = sqrt(1 + beta * y_sum / alpha)
    fac = (sqrt(alpha) / sum(y**2)) * (tmp - 1)
    facinv = (1. / (sqrt(alpha) * sum(y**2))) * (1 - 1. / tmp)
    # update matrices
    A *= sqrt(alpha)
    A += np.outer(fac * p, y)
    Ainv /= sqrt(alpha)
    Ainv -= np.outer(facinv * y, np.dot(y.T, Ainv))
#____________________________________________________________
#____________________________________________________________
def feedForResume(self, X, function_values):
    """Given all "previous" candidate solutions and their respective
    function values, the state of a `CMAEvolutionStrategy` object
    can be reconstructed from this history. This is the purpose of
    function `feedForResume`.

    Arguments
    ---------
        `X`
            (all) solution points in chronological order, phenotypic
            representation. The number of points must be a multiple
            of popsize.
        `function_values`
            respective objective function values

    Details
    -------
    `feedForResume` can be called repeatedly with only parts of
    the history. The part must have the length of a multiple
    of the population size.
    `feedForResume` feeds the history in popsize-chunks into `tell`.
    The state of the random number generator might not be
    reconstructed, but this would be only relevant for the future.

    Example
    -------
    ::

        import cma

        # prepare
        (x0, sigma0) = ...  # initial values from previous trial
        X = ...  # list of generated solutions from a previous trial
        f = ...  # respective list of f-values

        # resume
        es = cma.CMAEvolutionStrategy(x0, sigma0)
        es.feedForResume(X, f)

        # continue with func as objective function
        while not es.stop():
            X = es.ask()
            es.tell(X, [func(x) for x in X])

    Credits to Dirk Bueche and Fabrice Marchal for the feeding idea.

    :See: class `CMAEvolutionStrategy` for a simple dump/load to resume
    """
    if self.countiter > 0:
        print('WARNING: feed should generally be used with a new object instance')
    if len(X) != len(function_values):
        raise _Error('number of solutions ' + str(len(X)) +
                     ' and number function values ' +
                     str(len(function_values)) + ' must not differ')
    popsize = self.sp.popsize
    if (len(X) % popsize) != 0:
        raise _Error('number of solutions ' + str(len(X)) +
                     ' must be a multiple of popsize (lambda) ' +
                     str(popsize))
    # use floor division and range(): `len(X) / popsize` is a float under
    # true division (Python 3 or `from __future__ import division`) and
    # would break the loop even though len(X) is an exact multiple of
    # popsize; on Python 2 ints `//` is identical to `/` here
    for i in range(len(X) // popsize):
        # feed in chunks of size popsize
        self.ask()  # a fake ask, mainly for a conditioned calling of updateBD
                    # and secondary to get possibly the same random state
        self.tell(X[i*popsize:(i+1)*popsize], function_values[i*popsize:(i+1)*popsize])
#____________________________________________________________
#____________________________________________________________
def readProperties(self):
    """Read dynamic parameters from a property file.

    Placeholder: the feature is not implemented yet.
    """
    message = 'not yet implemented'
    print(message)
#____________________________________________________________
#____________________________________________________________
def mahalanobisNorm(self, dx):
    """Compute the Mahalanobis norm that is induced by the adapted
    covariance matrix C times sigma**2.

    Argument
    --------
    A *genotype* difference `dx`.

    Example
    -------
    >>> import cma, numpy
    >>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1)
    >>> xx = numpy.random.randn(2, 10)
    >>> d = es.mahalanobisNorm(es.gp.geno(xx[0]-xx[1]))

    `d` is the distance "in" the true sample distribution, sampled
    points have a typical distance of ``sqrt(2*es.N)``, where `N` is
    the dimension. In the example, `d` is the Euclidean distance,
    because C = I and sigma = 1.
    """
    # rotate dx into the eigenbasis of C and whiten each coordinate by
    # the corresponding eigenvalue square root, then take the 2-norm
    rotated = np.dot(self.B.T, dx)
    whitened = self.D**-1 * rotated
    return sqrt(sum(whitened**2)) / self.sigma
#____________________________________________________________
#____________________________________________________________
def disp_annotation(self):
    """Print the one-line column header used by `disp()`."""
    header = 'Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec'
    print(header)
    sys.stdout.flush()  # make the header visible immediately
#____________________________________________________________
#____________________________________________________________
def disp(self, modulo=None): # TODO: rather assign opt['verb_disp'] as default?
    """prints some infos according to `disp_annotation()`, if
    ``iteration_counter % modulo == 0``

    With ``modulo is None`` the option ``verb_disp`` is used; a falsy
    `modulo` disables all console output.
    """
    if modulo is None:
        modulo = self.opts['verb_disp']
    # console display
    if modulo:
        # re-print the column header every 10*modulo iterations
        if (self.countiter-1) % (10 * modulo) < 1:
            self.disp_annotation()
        # print the first few iterations, every modulo-th one, and on stop
        if self.countiter > 0 and (self.stop() or self.countiter < 4
                                   or self.countiter % modulo < 1):
            if self.opts['verb_time']:
                toc = self.elapsed_time()
                # elapsed time as "min:sec"
                stime = str(int(toc//60))+':'+str(round(toc%60,1))
            else:
                stime = ''
            print(' '.join((repr(self.countiter).rjust(5),
                            repr(self.countevals).rjust(7),
                            '%.15e' % (min(self.fit.fit)),
                            '%4.1e' % (self.D.max()/self.D.min()),  # axis ratio
                            '%6.2e' % self.sigma,
                            '%6.0e' % (self.sigma * sqrt(min(self.dC))),  # minstd
                            '%6.0e' % (self.sigma * sqrt(max(self.dC))),  # maxstd
                            stime)))
            # if self.countiter < 4:
            sys.stdout.flush()
class Options(dict):
    """``Options()`` returns a dictionary with the available options and their
    default values for function fmin and for class CMAEvolutionStrategy.

    ``Options(opts)`` returns the subset of recognized options in dict(opts).

    ``Options('pop')`` returns a subset of recognized options that contain
    'pop' in their keyword name, value or description.

    Option values can be "written" in a string and, when passed to fmin
    or CMAEvolutionStrategy, are evaluated using "N" and "popsize" as
    known values for dimension and population size (sample size, number
    of new solutions per iteration). All default option values are such
    a string.

    Details
    -------
    All Options are originally defined via the input arguments of
    `fmin()`.

    Options starting with ``tol`` are termination "tolerances".

    For `tolstagnation`, the median over the first and the second half
    of at least `tolstagnation` iterations are compared for both, the
    per-iteration best and per-iteration median function value.

    Some options are, as mentioned (`restarts`,...), only used with `fmin`.

    Example
    -------
    ::

        import cma
        cma.Options('tol')

    is a shortcut for cma.Options().match('tol') that returns all options
    that contain 'tol' in their name or description.

    :See: `fmin`(), `CMAEvolutionStrategy`, `CMAParameters`
    """
    # @classmethod # self is the class, not the instance
    # @property
    # def default(self):
    #     """returns all options with defaults"""
    #     return fmin([],[])

    @staticmethod
    def defaults():
        """return a dictionary with default option values and description,
        calls `fmin([], [])`"""
        return fmin([], [])

    @staticmethod
    def versatileOptions():
        """return list of options that can be changed at any time (not only be
        initialized), however the list might not be entirely up to date. The
        string ' #v ' in the default value indicates a 'versatile' option
        that can be changed any time.
        """
        return tuple(sorted(i[0] for i in Options.defaults().items() if i[1].find(' #v ') > 0))

    def __init__(self, s=None, unchecked=False):
        """return an `Options` instance, either with the default options,
        if ``s is None``, or with all options whose name or description
        contains `s`, if `s` is a string (case is disregarded),
        or with entries from dictionary `s` as options, not complemented
        with default options or settings

        Returns: see above.
        """
        # if not Options.defaults: # this is different from self.defaults!!!
        #     Options.defaults = fmin([],[])
        if s is None:
            super(Options, self).__init__(Options.defaults())
            # self = Options.defaults()
        elif type(s) is str:
            # substring lookup: keep only matching known options
            super(Options, self).__init__(Options().match(s))
            # we could return here
        else:
            super(Options, self).__init__(s)
        if not unchecked:
            # NOTE(review): popping while iterating self.keys() is safe on
            # Python 2 (keys() returns a fresh list); a Python 3 dict view
            # would raise here -- confirm the targeted Python version
            for key in self.keys():
                if key not in Options.defaults():
                    print('Warning in cma.Options.__init__(): invalid key ``' + str(key) + '`` popped')
                    self.pop(key)
        # self.evaluated = False # would become an option entry

    def init(self, dict_or_str, val=None, warn=True):
        """initialize one or several options.

        Arguments
        ---------
            `dict_or_str`
                a dictionary if ``val is None``, otherwise a key.
                If `val` is provided `dict_or_str` must be a valid key.
            `val`
                value for key

        Details
        -------
        Only known keys are accepted. Known keys are in `Options.defaults()`
        """
        #dic = dict_or_key if val is None else {dict_or_key:val}
        dic = dict_or_str
        if val is not None:
            dic = {dict_or_str:val}
        for key, val in dic.items():
            if key not in Options.defaults():
                # TODO: find a better solution?
                if warn:
                    print('Warning in cma.Options.init(): key ' +
                          str(key) + ' ignored')
            else:
                self[key] = val
        return self

    def set(self, dic, val=None, warn=True):
        """set can assign versatile options from `Options.versatileOptions()`
        with a new value, use `init()` for the others.

        Arguments
        ---------
            `dic`
                either a dictionary or a key. In the latter
                case, val must be provided
            `val`
                value for key
            `warn`
                bool, print a warning if the option cannot be changed
                and is therefore omitted

        This method will be most probably used with the ``opts`` attribute of
        a `CMAEvolutionStrategy` instance.
        """
        if val is not None: # dic is a key in this case
            dic = {dic:val} # compose a dictionary
        for key, val in dic.items():
            if key in Options.versatileOptions():
                self[key] = val
            elif warn:
                print('Warning in cma.Options.set(): key ' + str(key) + ' ignored')
        return self # to allow o = Options(o).set(new)

    def complement(self):
        """add all missing options with their default values"""
        for key in Options.defaults():
            if key not in self:
                self[key] = Options.defaults()[key]
        return self

    def settable(self):
        """return the subset of those options that are settable at any
        time.

        Settable options are in `versatileOptions()`, but the
        list might be incomplete.
        """
        return Options([i for i in self.items()
                        if i[0] in Options.versatileOptions()])

    def __call__(self, key, default=None, loc=None):
        """evaluate and return the value of option `key` on the fly, or
        returns those options whose name or description contains `key`,
        case disregarded.

        Details
        -------
        Keys that contain `filename` are not evaluated.
        For ``loc==None``, `self` is used as environment
        but this does not define `N`.

        :See: `eval()`, `evalall()`
        """
        try:
            val = self[key]
        except:
            # unknown key: fall back to substring matching over all options
            return self.match(key)
        if loc is None:
            loc = self # TODO: this hack is not so useful: popsize could be there, but N is missing
        try:
            if type(val) is str:
                val = val.split('#')[0].strip() # remove comments
                if type(val) == type('') and key.find('filename') < 0 and key.find('mindx') < 0:
                    val = eval(val, globals(), loc)
            # invoke default
            # TODO: val in ... fails with array type, because it is applied element wise!
            # elif val in (None,(),[],{}) and default is not None:
            elif val is None and default is not None:
                val = eval(str(default), globals(), loc)
        except:
            pass # slighly optimistic: the previous is bug-free
        return val

    def eval(self, key, default=None, loc=None):
        """Evaluates and sets the specified option value in
        environment `loc`. Many options need `N` to be defined in
        `loc`, some need `popsize`.

        Details
        -------
        Keys that contain 'filename' are not evaluated.
        For `loc` is None, the self-dict is used as environment

        :See: `evalall()`, `__call__`
        """
        self[key] = self(key, default, loc)
        return self[key]

    def evalall(self, loc=None):
        """Evaluates all option values in environment `loc`.

        :See: `eval()`
        """
        # TODO: this needs rather the parameter N instead of loc
        if 'N' in loc.keys(): # TODO: __init__ of CMA can be simplified
            popsize = self('popsize', Options.defaults()['popsize'], loc)
            for k in self.keys():
                self.eval(k, Options.defaults()[k],
                          {'N':loc['N'], 'popsize':popsize})
        return self

    def match(self, s=''):
        """return all options that match, in the name or the description,
        with string `s`, case is disregarded.

        Example: ``cma.Options().match('verb')`` returns the verbosity options.
        """
        match = s.lower()
        res = {}
        for k in sorted(self):
            s = str(k) + '=\'' + str(self[k]) + '\''
            if match in s.lower():
                res[k] = self[k]
        return Options(res)

    def pp(self):
        """pretty-print all entries via `pprint`"""
        pprint(self)

    def printme(self, linebreak=80):
        """print all default options, wrapping each entry at `linebreak` columns"""
        for i in sorted(Options.defaults().items()):
            s = str(i[0]) + "='" + str(i[1]) + "'"
            a = s.split(' ')
            # print s in chunks
            l = '' # start entire to the left
            while a:
                while a and len(l) + len(a[0]) < linebreak:
                    l += ' ' + a.pop(0)
                print(l)
                l = ' ' # tab for subsequent lines
#____________________________________________________________
#____________________________________________________________
#
def timesCroot(self, mat):
    """Return ``C**0.5`` times `mat`, where `mat` can be a vector or matrix.

    Not functional, because _Croot=C**0.5 is never computed (should be
    in updateBD).
    """
    diagonal_phase = (self.opts['CMA_diagonal'] is True
                      or self.countiter <= self.opts['CMA_diagonal'])
    if diagonal_phase:
        # _Croot is a vector here: scale each coordinate
        return (self._Croot * mat.T).T
    return np.dot(self._Croot, mat)
def divCroot(self, mat):
    """Return ``C**-1/2`` times `mat`, where `mat` can be a vector or matrix."""
    diagonal_phase = (self.opts['CMA_diagonal'] is True
                      or self.countiter <= self.opts['CMA_diagonal'])
    if diagonal_phase:
        # _Crootinv is a vector here: scale each coordinate
        return (self._Crootinv * mat.T).T
    return np.dot(self._Crootinv, mat)
#____________________________________________________________
#____________________________________________________________
class CMAParameters(object):
    """strategy parameters like population size and learning rates.

    Note:
    contrary to `Options`, `CMAParameters` is not (yet) part of the
    "user-interface" and subject to future changes (it might become
    a `collections.namedtuple`)

    Example
    -------
    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(20 * [0.1], 1)
    (6_w,12)-CMA-ES (mu_w=3.7,w_1=40%) in dimension 20 (seed=504519190) # the seed is "random" by default
    >>>
    >>> type(es.sp)  # sp contains the strategy parameters
    <class 'cma.CMAParameters'>
    >>>
    >>> es.sp.disp()
    {'CMA_on': True,
     'N': 20,
     'c1': 0.004181139918745593,
     'c1_sep': 0.034327992810300939,
     'cc': 0.17176721127681213,
     'cc_sep': 0.25259494835857677,
     'cmean': 1.0,
     'cmu': 0.0085149624979034746,
     'cmu_sep': 0.057796356229390715,
     'cs': 0.21434997799189287,
     'damps': 1.2143499779918929,
     'mu': 6,
     'mu_f': 6.0,
     'mueff': 3.7294589343030671,
     'popsize': 12,
     'rankmualpha': 0.3,
     'weights': array([ 0.40240294,  0.25338908,  0.16622156,  0.10437523,  0.05640348,
            0.01720771])}
    >>>
    >> es.sp == cma.CMAParameters(20, 12, cma.Options().evalall({'N': 20}))
    True

    :See: `Options`, `CMAEvolutionStrategy`
    """
    def __init__(self, N, opts, ccovfac=1, verbose=True):
        """Compute strategy parameters, mainly depending on
        dimension and population size, by calling `set`
        """
        self.N = N
        if ccovfac == 1:
            ccovfac = opts['CMA_on'] # that's a hack
        self.set(opts, ccovfac=ccovfac, verbose=verbose)

    def set(self, opts, popsize=None, ccovfac=1, verbose=True):
        """Compute strategy parameters as a function
        of dimension and population size """
        alpha_cc = 1.0 # cc-correction for mueff, was zero before

        # local helpers for the learning-rate formulas
        def cone(df, mu, N, alphacov=2.0):
            """rank one update learning rate, ``df`` is disregarded and obsolete, reduce alphacov on noisy problems, say to 0.5"""
            return alphacov / ((N + 1.3)**2 + mu)

        def cmu(df, mu, alphamu=0.0, alphacov=2.0):
            """rank mu learning rate, disregarding the constrant cmu <= 1 - cone"""
            c = alphacov * (alphamu + mu - 2 + 1/mu) / ((N + 2)**2 + alphacov * mu / 2)
            # c = alphacov * (alphamu + mu - 2 + 1/mu) / (2 * (N + 2)**1.5 + alphacov * mu / 2)
            # print 'cmu =', c
            return c

        def conedf(df, mu, N):
            """used for computing separable learning rate"""
            return 1. / (df + 2.*sqrt(df) + float(mu)/N)

        def cmudf(df, mu, alphamu):
            """used for computing separable learning rate"""
            return (alphamu + mu - 2. + 1./mu) / (df + 4.*sqrt(df) + mu/2.)

        sp = self
        N = sp.N
        if popsize:
            opts.evalall({'N':N, 'popsize':popsize})
        else:
            popsize = opts.evalall({'N':N})['popsize'] # the default popsize is computed in Options()
        sp.popsize = popsize
        # number of mirrored samples, derived from option CMA_mirrors
        if opts['CMA_mirrors'] < 0.5:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)
        elif opts['CMA_mirrors'] > 1:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])
        else:
            sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29) # 0.158650... * popsize is optimal
            # lam = arange(2,22)
            # mirr = 0.16 + 0.29/lam
            # print(lam); print([int(0.5 + l) for l in mirr*lam])
            # [ 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]
            # [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]
        sp.mu_f = sp.popsize / 2.0 # float value of mu
        if opts['CMA_mu'] is not None:
            sp.mu_f = opts['CMA_mu']
        sp.mu = int(sp.mu_f + 0.499999) # round down for x.5
        # in principle we have mu_opt = popsize/2 + lam_mirr/2,
        # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
        if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:
            print("WARNING: pairwise selection is not implemented, therefore " +
                  " mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % (
                      sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))
        if sp.lam_mirr > sp.popsize // 2:
            raise _Error("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, " +
                         "theoretically optimal is 0.159")
        # log-linear recombination weights, normalized below
        sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))
        if 11 < 3: # equal recombination weights (disabled experiment)
            sp.mu = sp.popsize // 4
            sp.weights = np.ones(sp.mu)
            print sp.weights[:10]
        sp.weights /= sum(sp.weights)
        sp.mueff = 1 / sum(sp.weights**2)
        # step-size and covariance-path time constants
        sp.cs = (sp.mueff + 2) / (N + sp.mueff + 3)
        # sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)
        sp.cc = (4 + alpha_cc * sp.mueff / N) / (N + 4 + alpha_cc * 2 * sp.mueff / N)
        sp.cc_sep = (1 + 1/N + alpha_cc * sp.mueff / N) / (N**0.5 + 1/N + alpha_cc * 2 * sp.mueff / N) # \not\gg\cc
        sp.rankmualpha = opts['CMA_rankmualpha']
        # sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)
        # rank-one and rank-mu learning rates (full and separable variants)
        sp.c1 = ccovfac * min(1, sp.popsize/6) * cone((N**2 + N) / 2, sp.mueff, N) # 2. / ((N+1.3)**2 + sp.mucov)
        sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)
        if 11 < 3:
            sp.c1 = 0.
            print('c1 is zero')
        if opts['CMA_rankmu'] != 0: # also empty
            sp.cmu = min(1 - sp.c1, ccovfac * cmu((N**2+N)/2, sp.mueff, sp.rankmualpha))
            sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))
        else:
            sp.cmu = sp.cmu_sep = 0
        # parameters for the "active CMA" negative covariance update
        sp.neg = BlancClass()
        if opts['CMA_active']:
            # in principle we have mu_opt = popsize/2 + lam_mirr/2,
            # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
            sp.neg.mu_f = popsize - (popsize + sp.lam_mirr) / 2 if popsize > 2 else 1
            sp.neg.weights = log(sp.mu_f + 0.5) - log(1 + np.arange(sp.popsize - int(sp.neg.mu_f), sp.popsize))
            sp.neg.mu = len(sp.neg.weights) # maybe never useful?
            sp.neg.weights /= sum(sp.neg.weights)
            sp.neg.mueff = 1 / sum(sp.neg.weights**2)
            sp.neg.cmuexp = opts['CMA_activefac'] * 0.25 * sp.neg.mueff / ((N+2)**1.5 + 2 * sp.neg.mueff)
            assert sp.neg.mu >= sp.lam_mirr # not really necessary
            # sp.neg.minresidualvariance = 0.66 # not it use, keep at least 0.66 in all directions, small popsize is most critical
        else:
            sp.neg.cmuexp = 0
        sp.CMA_on = sp.c1 + sp.cmu > 0
        # print(sp.c1_sep / sp.cc_sep)
        if not opts['CMA_on'] and opts['CMA_on'] not in (None,[],(),''):
            sp.CMA_on = False
            # sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0
        # step-size damping, with corrections for mirroring and large mueff
        sp.damps = opts['CMA_dampfac'] * (0.5 +
                                          0.5 * min([1, (sp.lam_mirr/(0.159*sp.popsize) - 1)**2])**1 +
                                          2 * max([0, ((sp.mueff-1) / (N+1))**0.5 - 1]) + sp.cs
                                          )
        if 11 < 3:
            # this is worse than damps = 1 + sp.cs for the (1,10000)-ES on 40D parabolic ridge
            sp.damps = 0.3 + 2 * max([sp.mueff/sp.popsize, ((sp.mueff-1)/(N+1))**0.5 - 1]) + sp.cs
        if 11 < 3:
            # this does not work for lambda = 4*N^2 on the parabolic ridge
            sp.damps = opts['CMA_dampfac'] * (2 - 0*sp.lam_mirr/sp.popsize) * sp.mueff/sp.popsize + 0.3 + sp.cs # nicer future setting
            print 'damps =', sp.damps
        if 11 < 3:
            sp.damps = 10 * sp.damps # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;
            # sp.damps = 20 # 1. + 20 * sp.cs**-1 # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;
            print('damps is %f' % (sp.damps))
        sp.cmean = float(opts['CMA_cmean'])
        # sp.kappa = 1 # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate
        # in larger dim it does, 15-D with defaults, kappa=8 factor 2
        if sp.cmean != 1:
            print(' cmean = %f' % (sp.cmean))
        if verbose:
            if not sp.CMA_on:
                print('covariance matrix adaptation turned off')
            if opts['CMA_mu'] != None:
                print('mu = %f' % (sp.mu_f))
        # return self # the constructor returns itself

    def disp(self):
        """pretty-print all computed strategy parameters"""
        pprint(self.__dict__)
#____________________________________________________________
#____________________________________________________________
class CMAStopDict(dict):
    """keep and update a termination condition dictionary, which is
    "usually" empty and returned by `CMAEvolutionStrategy.stop()`.

    Details
    -------
    This could be a nested class, but nested classes cannot be serialized.

    :See: `stop()`
    """
    def __init__(self, d={}):
        # NOTE: the mutable default ``{}`` is shared across calls but is
        # never modified below, so it is safe as written
        update = (type(d) == CMAEvolutionStrategy)
        inherit = (type(d) == CMAStopDict)
        super(CMAStopDict, self).__init__({} if update else d)
        self._stoplist = d._stoplist if inherit else [] # multiple entries
        self.lastiter = d.lastiter if inherit else 0 # probably not necessary
        if update:
            self._update(d)

    def __call__(self, es):
        """update the dictionary"""
        return self._update(es)

    def _addstop(self, key, cond, val=None):
        # record criterion `key` if condition `cond` holds; `val` defaults
        # to the corresponding option value when available
        # NOTE(review): appends to self.stoplist (reset in _update); the
        # _stoplist attribute set in __init__ is not touched here -- confirm
        # whether that asymmetry is intended
        if cond:
            self.stoplist.append(key) # can have the same key twice
            if key in self.opts.keys():
                val = self.opts[key]
            self[key] = val

    def _update(self, es):
        """Test termination criteria and update dictionary.
        """
        if es.countiter == self.lastiter:
            # already updated for this iteration: return the cached result
            if es.countiter == 0:
                self.__init__()
                return self
            try:
                if es == self.es:
                    return self
            except: # self.es not yet assigned
                pass
        self.lastiter = es.countiter
        self.es = es
        self.stoplist = []
        N = es.N
        opts = es.opts
        self.opts = opts # a hack to get _addstop going
        # fitness: generic criterion, user defined w/o default
        self._addstop('ftarget',
                      es.best.f < opts['ftarget'])
        # maxiter, maxfevals: generic criteria
        self._addstop('maxfevals',
                      es.countevals - 1 >= opts['maxfevals'])
        self._addstop('maxiter',
                      es.countiter >= opts['maxiter'])
        # tolx, tolfacupx: generic criteria
        # tolfun, tolfunhist (CEC:tolfun includes hist)
        self._addstop('tolx',
                      all([es.sigma*xi < opts['tolx'] for xi in es.pc]) and \
                      all([es.sigma*xi < opts['tolx'] for xi in sqrt(es.dC)]))
        self._addstop('tolfacupx',
                      any([es.sigma * sig > es.sigma0 * opts['tolfacupx']
                           for sig in sqrt(es.dC)]))
        self._addstop('tolfun',
                      es.fit.fit[-1] - es.fit.fit[0] < opts['tolfun'] and \
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfun'])
        self._addstop('tolfunhist',
                      len(es.fit.hist) > 9 and \
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfunhist'])
        # worst seen false positive: table N=80,lam=80, getting worse for fevals=35e3 \approx 50 * N**1.5
        # but the median is not so much getting worse
        # / 5 reflects the sparsity of histbest/median
        # / 2 reflects the left and right part to be compared
        l = int(max(opts['tolstagnation'] / 5. / 2, len(es.fit.histbest) / 10));
        # TODO: why max(..., len(histbest)/10) ???
        # TODO: the problem in the beginning is only with best ==> ???
        if 11 < 3: # disabled stagnation diagnostic
            print(es.countiter, (opts['tolstagnation'], es.countiter > N * (5 + 100 / es.popsize),
                  len(es.fit.histbest) > 100,
                  np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2*l]),
                  np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2*l])))
        # equality should handle flat fitness
        self._addstop('tolstagnation', # leads sometimes early stop on ftablet, fcigtab, N>=50?
                      1 < 3 and opts['tolstagnation'] and es.countiter > N * (5 + 100 / es.popsize) and
                      len(es.fit.histbest) > 100 and 2*l < len(es.fit.histbest) and
                      np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2*l]) and
                      np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2*l]))
        # iiinteger: stagnation termination can prevent to find the optimum
        self._addstop('tolupsigma', opts['tolupsigma'] and
                      es.sigma / es.sigma0 / np.max(es.D) > opts['tolupsigma'])
        if 11 < 3 and 2*l < len(es.fit.histbest): # TODO: this might go wrong, because the nb of written columns changes
            tmp = np.array((-np.median(es.fit.histmedian[:l]) + np.median(es.fit.histmedian[l:2*l]),
                            -np.median(es.fit.histbest[:l]) + np.median(es.fit.histbest[l:2*l])))
            es.more_to_write = [(10**t if t < 0 else t + 1) for t in tmp] # the latter to get monotonicy
        if 1 < 3:
            # non-user defined, method specific
            # noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC:0.2sigma), conditioncov
            self._addstop('noeffectcoord',
                          any([es.mean[i] == es.mean[i] + 0.2*es.sigma*sqrt(es.dC[i])
                               for i in xrange(N)]))
            if opts['CMA_diagonal'] is not True and es.countiter > opts['CMA_diagonal']:
                # cycle through the axes, one per iteration
                i = es.countiter % N
                self._addstop('noeffectaxis',
                              sum(es.mean == es.mean + 0.1 * es.sigma * es.D[i] * es.B[:, i]) == N)
            self._addstop('conditioncov',
                          es.D[-1] > 1e7 * es.D[0], 1e14) # TODO
            self._addstop('callback', es.callbackstop) # termination_callback
        if len(self):
            # only hint at flat fitness when another criterion already fired
            self._addstop('flat fitness: please (re)consider how to compute the fitness more elaborate',
                          len(es.fit.hist) > 9 and \
                          max(es.fit.hist) == min(es.fit.hist))
        if 11 < 3 and opts['vv'] == 321:
            self._addstop('||xmean||^2<ftarget', sum(es.mean**2) <= opts['ftarget'])
        return self
#_____________________________________________________________________
#_____________________________________________________________________
#
class BaseDataLogger(object):
    """interface ("abstract" base class) for a data logger that can be
    attached to an `OOOptimizer` instance"""
    def add(self, optim=None, more_data=[]):
        """abstract method: record one "data point" from the state of `optim`
        into the logger; may be used like an event handler. The `optim`
        argument can be omitted when an optimizer was `register()`-ed before."""
        OOOptimizer.abstract()
    def register(self, optim):
        """abstract method: remember optimizer `optim`, so that subsequent
        calls of `add()` need not pass the `optim` argument explicitly"""
        self.optim = optim
    def disp(self):
        """display some data trace (not implemented)"""
        msg = 'method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self))
        print(msg)
    def plot(self):
        """plot data (not implemented)"""
        msg = 'method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self))
        print(msg)
    def data(self):
        """return logged data in a dictionary (not implemented)"""
        msg = 'method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self))
        print(msg)
#_____________________________________________________________________
#_____________________________________________________________________
#
class CMADataLogger(BaseDataLogger): # might become a dict at some point
"""data logger for class `CMAEvolutionStrategy`. The logger is
identified by its name prefix and writes or reads according
data files.
Examples
========
::
import cma
es = cma.CMAEvolutionStrategy(...)
data = cma.CMADataLogger().register(es)
while not es.stop():
...
data.add() # add can also take an argument
data.plot() # or a short cut can be used:
cma.plot() # plot data from logger with default name
data2 = cma.CMADataLogger(another_filename_prefix).load()
data2.plot()
data2.disp()
::
import cma
from pylab import *
res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0)
dat = res[-1] # the CMADataLogger
dat.load() # by "default" data are on disk
semilogy(dat.f[:,0], dat.f[:,5]) # plot f versus iteration, see file header
show()
Details
=======
After loading data, the logger has the attributes `xmean`, `xrecent`, `std`, `f`, and `D`,
corresponding to xmean, xrecentbest, stddev, fit, and axlen filename trails.
:See: `disp()`, `plot()`
"""
default_prefix = 'outcmaes'
names = ('axlen','fit','stddev','xmean','xrecentbest')
def __init__(self, name_prefix=default_prefix, modulo=1, append=False):
"""initialize logging of data from a `CMAEvolutionStrategy` instance,
default modulo expands to 1 == log with each call
"""
# super(CMAData, self).__init__({'iter':[], 'stds':[], 'D':[], 'sig':[], 'fit':[], 'xm':[]})
# class properties:
self.counter = 0 # number of calls of add
self.modulo = modulo # allows calling with None
self.append = append
self.name_prefix = name_prefix if name_prefix else CMADataLogger.default_prefix
if type(self.name_prefix) == CMAEvolutionStrategy:
self.name_prefix = self.name_prefix.opts.eval('verb_filenameprefix')
self.registered = False
def register(self, es, append=None, modulo=None):
"""register a `CMAEvolutionStrategy` instance for logging,
``append=True`` appends to previous data logged under the same name,
by default previous data are overwritten.
"""
if type(es) != CMAEvolutionStrategy:
raise TypeError("only class CMAEvolutionStrategy can be registered for logging")
self.es = es
if append is not None:
self.append = append
if modulo is not None:
self.modulo = modulo
if not self.append and self.modulo != 0:
self.initialize() # write file headers
self.registered = True
return self
def initialize(self, modulo=None):
"""reset logger, overwrite original files, `modulo`: log only every modulo call"""
if modulo is not None:
self.modulo = modulo
try:
es = self.es # must have been registered
except AttributeError:
pass # TODO: revise usage of es... that this can pass
raise _Error('call register() before initialize()')
# write headers for output
fn = self.name_prefix + 'fit.dat'
strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime())
try:
with open(fn, 'w') as f:
f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' +
'bestever, best, median, worst objective function value, ' +
'further objective values of best", ' +
strseedtime +
# strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do
'\n')
except (IOError, OSError):
print('could not open file ' + fn)
fn = self.name_prefix + 'axlen.dat'
try:
f = open(fn, 'w')
f.write('% columns="iteration, evaluation, sigma, max axis length, ' +
' min axis length, all principle axes lengths ' +
' (sorted square roots of eigenvalues of C)", ' +
strseedtime +
'\n')
f.close()
except (IOError, OSError):
print('could not open file ' + fn)
finally:
f.close()
fn = self.name_prefix + 'stddev.dat'
try:
f = open(fn, 'w')
f.write('% # columns=["iteration, evaluation, sigma, void, void, ' +
' stds==sigma*sqrt(diag(C))", ' +
strseedtime +
'\n')
f.close()
except (IOError, OSError):
print('could not open file ' + fn)
finally:
f.close()
fn = self.name_prefix + 'xmean.dat'
try:
with open(fn, 'w') as f:
f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' +
strseedtime)
f.write(' # scaling_of_variables: ')
if np.size(es.gp.scales) > 1:
f.write(' '.join(map(str, es.gp.scales)))
else:
f.write(str(es.gp.scales))
f.write(', typical_x: ')
if np.size(es.gp.typical_x) > 1:
f.write(' '.join(map(str, es.gp.typical_x)))
else:
f.write(str(es.gp.typical_x))
f.write('\n')
f.close()
except (IOError, OSError):
print('could not open/write file ' + fn)
fn = self.name_prefix + 'xrecentbest.dat'
try:
with open(fn, 'w') as f:
f.write('% # iter+eval+sigma+0+fitness+xbest, ' +
strseedtime +
'\n')
except (IOError, OSError):
print('could not open/write file ' + fn)
return self
# end def __init__
def load(self, filenameprefix=None):
"""loads data from files written and return a data dictionary, *not*
a prerequisite for using `plot()` or `disp()`.
Argument `filenameprefix` is the filename prefix of data to be loaded (five files),
by default ``'outcmaes'``.
Return data dictionary with keys `xrecent`, `xmean`, `f`, `D`, `std`
"""
if not filenameprefix:
filenameprefix = self.name_prefix
dat = self # historical
dat.xrecent = _fileToMatrix(filenameprefix + 'xrecentbest.dat')
dat.xmean = _fileToMatrix(filenameprefix + 'xmean.dat')
dat.std = _fileToMatrix(filenameprefix + 'stddev' + '.dat')
# a hack to later write something into the last entry
for key in ['xmean', 'xrecent', 'std']:
dat.__dict__[key].append(dat.__dict__[key][-1])
dat.__dict__[key] = array(dat.__dict__[key])
dat.f = array(_fileToMatrix(filenameprefix + 'fit.dat'))
dat.D = array(_fileToMatrix(filenameprefix + 'axlen' + '.dat'))
return dat
def add(self, es=None, more_data=[], modulo=None): # TODO: find a different way to communicate current x and f
"""append some logging data from `CMAEvolutionStrategy` class instance `es`,
if ``number_of_times_called % modulo`` equals to zero, never if ``modulo==0``.
The sequence ``more_data`` must always have the same length.
"""
self.counter += 1
mod = modulo if modulo is not None else self.modulo
if mod == 0 or (self.counter > 3 and self.counter % mod):
return
if es is None:
try:
es = self.es # must have been registered
except AttributeError :
raise _Error('call register() before add() or add(es)')
elif not self.registered:
self.register(es)
if type(es) is not CMAEvolutionStrategy:
raise TypeError('<type \'CMAEvolutionStrategy\'> expected, found '
+ str(type(es)) + ' in CMADataLogger.add')
if 1 < 3:
try: # TODO: find a more decent interface to store and pass recent_x
xrecent = es.best.last.x
except:
if self.counter == 2: # by now a recent_x should be available
print('WARNING: es.out[\'recent_x\'] not found in CMADataLogger.add, count='
+ str(self.counter))
try:
# fit
if es.countiter > 0:
fit = es.fit.fit[0]
if es.fmean_noise_free != 0:
fit = es.fmean_noise_free
fn = self.name_prefix + 'fit.dat'
with open(fn, 'a') as f:
f.write(str(es.countiter) + ' '
+ str(es.countevals) + ' '
+ str(es.sigma) + ' '
+ str(es.D.max()/es.D.min()) + ' '
+ str(es.best.f) + ' '
+ '%.16e' % fit + ' '
+ str(es.fit.fit[es.sp.popsize//2]) + ' '
+ str(es.fit.fit[-1]) + ' '
# + str(es.sp.popsize) + ' '
# + str(10**es.noiseS) + ' '
# + str(es.sp.cmean) + ' '
+ ' '.join(str(i) for i in es.more_to_write)
+ ' '.join(str(i) for i in more_data)
+ '\n')
# axlen
fn = self.name_prefix + 'axlen.dat'
with open(fn, 'a') as f: # does not rely on reference counting
f.write(str(es.countiter) + ' '
+ str(es.countevals) + ' '
+ str(es.sigma) + ' '
+ str(es.D.max()) + ' '
+ str(es.D.min()) + ' '
+ ' '.join(map(str, es.D))
+ '\n')
# stddev
fn = self.name_prefix + 'stddev.dat'
with open(fn, 'a') as f:
f.write(str(es.countiter) + ' '
+ str(es.countevals) + ' '
+ str(es.sigma) + ' '
+ '0 0 '
+ ' '.join(map(str, es.sigma*sqrt(es.dC)))
+ '\n')
# xmean
fn = self.name_prefix + 'xmean.dat'
with open(fn, 'a') as f:
if es.countevals < es.sp.popsize:
f.write('0 0 0 0 0 '
+ ' '.join(map(str,
# TODO should be optional the phenotyp?
# es.gp.geno(es.x0)
es.mean))
+ '\n')
else:
f.write(str(es.countiter) + ' '
+ str(es.countevals) + ' '
# + str(es.sigma) + ' '
+ '0 '
+ str(es.fmean_noise_free) + ' '
+ str(es.fmean) + ' ' # TODO: this does not make sense
# TODO should be optional the phenotyp?
+ ' '.join(map(str, es.mean))
+ '\n')
# xrecent
fn = self.name_prefix + 'xrecentbest.dat'
if es.countiter > 0 and xrecent is not None:
with open(fn, 'a') as f:
f.write(str(es.countiter) + ' '
+ str(es.countevals) + ' '
+ str(es.sigma) + ' '
+ '0 '
+ str(es.fit.fit[0]) + ' '
+ ' '.join(map(str, xrecent))
+ '\n')
except (IOError, OSError):
if es.countiter == 1:
print('could not open/write file')
    def closefig(self):
        """close the figure last used by `plot()` (which sets
        `self.fighandle`); raises AttributeError if `plot()` was
        never called on this logger"""
        pylab.close(self.fighandle)
def save(self, nameprefix, switch=False):
"""saves logger data to a different set of files, for
``switch=True`` also the loggers name prefix is switched to
the new value
"""
if not nameprefix or type(nameprefix) is not str:
_Error('filename prefix must be a nonempty string')
if nameprefix == self.default_prefix:
_Error('cannot save to default name "' + nameprefix + '...", chose another name')
if nameprefix == self.name_prefix:
return
for name in CMADataLogger.names:
open(nameprefix+name+'.dat', 'w').write(open(self.name_prefix+name+'.dat').read())
if switch:
self.name_prefix = nameprefix
    def plot(self, fig=None, iabscissa=1, iteridx=None, plot_mean=True,  # TODO: plot_mean default should be False
             foffset=1e-19, x_opt = None, fontsize=10):
        """
        plot data from a `CMADataLogger` (using the files written by the logger).
        Arguments
        ---------
        `fig`
            figure number, by default 325
        `iabscissa`
            ``0==plot`` versus iteration count,
            ``1==plot`` versus function evaluation number
        `iteridx`
            iteration indices to plot
        `plot_mean`
            if True plot the distribution mean, otherwise the recent best
        `foffset`
            offset added to f-values before the log-plot
        `x_opt`
            if given, absolute differences to `x_opt` are plotted
        `fontsize`
            font size used for all plot texts
        Return `CMADataLogger` itself.
        Examples
        --------
        ::
            import cma
            logger = cma.CMADataLogger()  # with default name
            # try to plot the "default logging" data (e.g. from previous fmin calls)
            logger.plot()  # to continue you might need to close the pop-up window
                           # once and call plot() again.
                           # This behavior seems to disappear in subsequent
                           # calls of plot(). Also using ipython with -pylab
                           # option might help.
            cma.savefig('fig325.png')  # save current figure
            logger.closefig()
        Dependencies: matlabplotlib/pylab.
        """
        dat = self.load(self.name_prefix)
        try:
            # pylab: prodedural interface for matplotlib
            from matplotlib.pylab import figure, ioff, ion, subplot, semilogy, hold, plot, grid, \
                axis, title, text, xlabel, isinteractive, draw, gcf
        except ImportError:
            # NOTE(review): the ImportError instance is constructed but not
            # raised — looks like a missing `raise`; confirm intent
            ImportError('could not find matplotlib.pylab module, function plot() is not available')
            return
        if fontsize and pylab.rcParams['font.size'] != fontsize:
            print('global variable pylab.rcParams[\'font.size\'] set (from ' +
                  str(pylab.rcParams['font.size']) + ') to ' + str(fontsize))
            pylab.rcParams['font.size'] = fontsize  # subtracted in the end, but return can happen inbetween
        if fig:
            figure(fig)
        else:
            figure(325)
            # show() # should not be necessary
        self.fighandle = gcf()  # fighandle.number
        if iabscissa not in (0,1):
            iabscissa = 1
        interactive_status = isinteractive()
        ioff()  # prevents immediate drawing
        dat.x = dat.xrecent
        if len(dat.x) < 2:
            print('not enough data to plot')
            return {}
        if plot_mean:
            dat.x = dat.xmean  # this is the genotyp
        if iteridx is not None:
            # restrict all data arrays to the requested iteration indices
            # NOTE(review): `map` inside np.where relies on Python 2 (list) semantics
            dat.f = dat.f[np.where(map(lambda x: x in iteridx, dat.f[:,0]))[0],:]
            dat.D = dat.D[np.where(map(lambda x: x in iteridx, dat.D[:,0]))[0],:]
            iteridx.append(dat.x[-1,1])  # last entry is artificial
            dat.x = dat.x[np.where(map(lambda x: x in iteridx, dat.x[:,0]))[0],:]
            dat.std = dat.std[np.where(map(lambda x: x in iteridx, dat.std[:,0]))[0],:]
        if iabscissa == 0:
            xlab = 'iterations'
        elif iabscissa == 1:
            xlab = 'function evaluations'
        # use fake last entry in x and std for line extension-annotation
        if dat.x.shape[1] < 100:
            minxend = int(1.06*dat.x[-2, iabscissa])
            # write y-values for individual annotation into dat.x
            dat.x[-1, iabscissa] = minxend  # TODO: should be ax[1]
            idx = np.argsort(dat.x[-2,5:])
            idx2 = np.argsort(idx)
            if x_opt is None:
                dat.x[-1,5+idx] = np.linspace(np.min(dat.x[:,5:]),
                            np.max(dat.x[:,5:]), dat.x.shape[1]-5)
            else:
                dat.x[-1,5+idx] = np.logspace(np.log10(np.min(abs(dat.x[:,5:]))),
                            np.log10(np.max(abs(dat.x[:,5:]))), dat.x.shape[1]-5)
        else:
            minxend = 0
        if len(dat.f) == 0:
            print('nothing to plot')
            return
        # not in use anymore, see formatter above
        # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
        # dfit(dfit<1e-98) = NaN;
        ioff()  # turns update off
        # TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
        subplot(2,2,1)
        self.plotdivers(dat, iabscissa, foffset)
        # TODO: modularize also the remaining subplots
        # object variables (upper right subplot)
        subplot(2,2,2)
        hold(False)
        if x_opt is not None:  # TODO: differentate neg and pos?
            semilogy(dat.x[:, iabscissa], abs(dat.x[:,5:]) - x_opt, '-')
        else:
            plot(dat.x[:, iabscissa], dat.x[:,5:],'-')
        hold(True)
        grid(True)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        ax[1] -= 1e-6
        if dat.x.shape[1] < 100:
            # annotate each coordinate at the artificial last data point
            yy = np.linspace(ax[2]+1e-6, ax[3]-1e-6, dat.x.shape[1]-5)
            #yyl = np.sort(dat.x[-1,5:])
            idx = np.argsort(dat.x[-1,5:])
            idx2 = np.argsort(idx)
            if x_opt is not None:
                semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1,5:]), yy[idx2]], 'k-')  # line from last data point
                semilogy(np.dot(dat.x[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-')
            else:
                # plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
                plot(np.dot(dat.x[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-')
            # plot(array([dat.x[-1, iabscissa], ax[1]]),
            #      reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
            for i in range(len(idx)):
                # TODOqqq: annotate phenotypic value!?
                # text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
                text(dat.x[-1,iabscissa], dat.x[-1,5+i], 'x(' + str(i) + ')=' + str(dat.x[-2,5+i]))
        i = 2  # find smallest i where iteration count differs (in case the same row appears twice)
        while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
            i += 1
        title('Object Variables (' + ('mean' if plot_mean else 'curr best') +
                ', ' + str(dat.x.shape[1]-5) + '-D, popsize~' +
                (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
                    if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
                + ')')
        # pylab.xticks(xticklocs)
        # Scaling
        subplot(2,2,3)
        hold(False)
        semilogy(dat.D[:, iabscissa], dat.D[:,5:], '-b')
        hold(True)
        grid(True)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        title('Scaling (All Main Axes)')
        # pylab.xticks(xticklocs)
        xlabel(xlab)
        # standard deviations
        subplot(2,2,4)
        hold(False)
        # remove sigma from stds (graphs become much better readible)
        dat.std[:,5:] = np.transpose(dat.std[:,5:].T / dat.std[:,2].T)
        # ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        # axis(ax)
        if 1 < 2 and dat.std.shape[1] < 100:
            # use fake last entry in x and std for line extension-annotation
            minxend = int(1.06*dat.x[-2, iabscissa])
            dat.std[-1, iabscissa] = minxend  # TODO: should be ax[1]
            idx = np.argsort(dat.std[-2,5:])
            idx2 = np.argsort(idx)
            dat.std[-1,5+idx] = np.logspace(np.log10(np.min(dat.std[:,5:])),
                            np.log10(np.max(dat.std[:,5:])), dat.std.shape[1]-5)
            dat.std[-1, iabscissa] = minxend  # TODO: should be ax[1]
            yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1]-5)
            #yyl = np.sort(dat.std[-1,5:])
            idx = np.argsort(dat.std[-1,5:])
            idx2 = np.argsort(idx)
            # plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator
            # vertical separator
            plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([np.min(dat.std[-2,5:]), np.max(dat.std[-2,5:])]), 'k-')
            hold(True)
            # plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point
            for i in xrange(len(idx)):
                # text(ax[1], yy[i], ' '+str(idx[i]))
                text(dat.std[-1, iabscissa], dat.std[-1, 5+i], ' '+str(i))
        semilogy(dat.std[:, iabscissa], dat.std[:,5:], '-')
        grid(True)
        title('Standard Deviations in All Coordinates')
        # pylab.xticks(xticklocs)
        xlabel(xlab)
        draw()  # does not suffice
        if interactive_status:
            ion()  # turns interactive mode on (again)
            draw()
            # NOTE(review): `show` is not among the names imported above —
            # confirm it resolves (e.g. from a module-level pylab import)
            show()
        return self
#____________________________________________________________
#____________________________________________________________
#
    @staticmethod
    def plotdivers(dat, iabscissa, foffset):
        """helper function for `plot()` that plots all what is
        in the upper left subplot like fitness, sigma, etc.
        Arguments
        ---------
        `dat`
            the loaded data object (attributes `f`, `std` are used)
        `iabscissa` in ``(0,1)``
            0==versus fevals, 1==versus iteration
        `foffset`
            offset to fitness for log-plot
        :See: `plot()`
        """
        from matplotlib.pylab import semilogy, hold, grid, \
            axis, title, text
        fontsize = pylab.rcParams['font.size']
        hold(False)
        # difference of f-values to the overall best (for the cyan graph)
        dfit = dat.f[:,5]-min(dat.f[:,5])
        dfit[dfit<1e-98] = np.NaN
        if dat.f.shape[1] > 7:
            # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
            semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7]])+foffset,'-k')
            hold(True)
        # (larger indices): additional fitness data, for example constraints values
        if dat.f.shape[1] > 8:
            # dd = abs(dat.f[:,7:]) + 10*foffset
            # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
            semilogy(dat.f[:, iabscissa], np.abs(dat.f[:,8:]) + 10*foffset, 'm')
            hold(True)
        idx = np.where(dat.f[:,5]>1e-98)[0]  # positive values
        semilogy(dat.f[idx, iabscissa], dat.f[idx,5]+foffset, '.b')
        hold(True)
        grid(True)
        idx = np.where(dat.f[:,5] < -1e-98)  # negative values
        semilogy(dat.f[idx, iabscissa], abs(dat.f[idx,5])+foffset,'.r')
        semilogy(dat.f[:, iabscissa],abs(dat.f[:,5])+foffset,'-b')
        semilogy(dat.f[:, iabscissa], dfit, '-c')
        if 11 < 3:  # delta-fitness as points (disabled experimental branch)
            dfit = dat.f[1:, 5] - dat.f[:-1,5]  # should be negative usually
            semilogy(dat.f[1:,iabscissa],  # abs(fit(g) - fit(g-1))
                np.abs(dfit)+foffset, '.c')
            i = dfit > 0
            # print(np.sum(i) / float(len(dat.f[1:,iabscissa])))
            semilogy(dat.f[1:,iabscissa][i],  # abs(fit(g) - fit(g-1))
                np.abs(dfit[i])+foffset, '.r')
        # overall minimum
        i = np.argmin(dat.f[:,5])
        semilogy(dat.f[i, iabscissa]*np.ones(2), dat.f[i,5]*np.ones(2), 'rd')
        # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')
        # AR and sigma
        semilogy(dat.f[:, iabscissa], dat.f[:,3], '-r')  # AR
        semilogy(dat.f[:, iabscissa], dat.f[:,2],'-g')  # sigma
        # max/min standard deviations (magenta); NOTE(review): `map` relies
        # on Python 2 returning a list here
        semilogy(dat.std[:-1, iabscissa], np.vstack([map(max, dat.std[:-1,5:]), map(min, dat.std[:-1,5:])]).T,
                     '-m', linewidth=2)
        text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std', fontsize=fontsize)
        text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std', fontsize=fontsize)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        text(ax[0]+0.01, ax[2],  # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
             '.f_recent=' + repr(dat.f[-1,5]) )
        # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
        title('blue:abs(f), cyan:f-min(f), green:sigma, red:axis ratio', fontsize=fontsize-1)
        # pylab.xticks(xticklocs)
def downsampling(self, factor=10, first=3, switch=True):
"""
rude downsampling of a `CMADataLogger` data file by `factor`, keeping
also the first `first` entries. This function is a stump and subject
to future changes.
Arguments
---------
- `factor` -- downsampling factor
- `first` -- keep first `first` entries
- `switch` -- switch the new logger name to oldname+'down'
Details
-------
``self.name_prefix+'down'`` files are written
Example
-------
::
import cma
cma.downsampling() # takes outcmaes* files
cma.plot('outcmaesdown')
"""
newprefix = self.name_prefix + 'down'
for name in CMADataLogger.names:
f = open(newprefix+name+'.dat','w')
iline = 0
cwritten = 0
for line in open(self.name_prefix+name+'.dat'):
if iline < first or iline % factor == 0:
f.write(line)
cwritten += 1
iline += 1
f.close()
print('%d' % (cwritten) + ' lines written in ' + newprefix+name+'.dat')
if switch:
self.name_prefix += 'down'
return self
#____________________________________________________________
#____________________________________________________________
#
    def disp(self, idx=100):  # r_[0:5,1e2:1e9:1e2,-10:0]):
        """displays selected data from (files written by) the class `CMADataLogger`.
        Arguments
        ---------
        `idx`
            indices corresponding to rows in the data file;
            if idx is a scalar (int), the first two, then every idx-th,
            and the last three rows are displayed. Too large index values are removed.
        Example
        -------
        >>> import cma, numpy as np
        >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, verb_disp=1e9)  # generate data
        >>> assert res[1] < 1e-9
        >>> assert res[2] < 4400
        >>> l = cma.CMADataLogger()  # == res[-1], logger with default name, "points to" above data
        >>> l.disp([0,-1])  # first and last
        >>> l.disp(20)  # some first/last and every 20-th line
        >>> l.disp(np.r_[0:999999:100, -1])  # every 100-th and last
        >>> l.disp(np.r_[0, -10:0])  # first and ten last
        >>> cma.disp(l.name_prefix, np.r_[0::100, -10:])  # the same as l.disp(...)
        Details
        -------
        The data line with the best f-value is displayed as last line.
        :See: `disp()`
        """
        filenameprefix=self.name_prefix
        def printdatarow(dat, iter):
            # NOTE: parameter `iter` shadows the builtin of the same name
            """print the data row whose iteration number equals `iter`"""
            i = np.where(dat.f[:, 0] == iter)[0][0]
            j = np.where(dat.std[:, 0] == iter)[0][0]
            print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +
                  ' %5.1e' % (dat.f[i,3]) +
                  ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))
        dat = CMADataLogger(filenameprefix).load()
        ndata = dat.f.shape[0]
        # map index to iteration number, is difficult if not all iteration numbers exist
        # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
        # otherwise:
        if idx is None:
            idx = 100
        if np.isscalar(idx):
            # idx = np.arange(0, ndata, idx)
            if idx:
                idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]
            else:
                idx = np.r_[0, 1, -3:0]
        idx = array(idx)
        # drop indices out of range in either direction
        idx = idx[idx<=ndata]  # TODO: shouldn't this be "<"?
        idx = idx[-idx<=ndata]
        iters = dat.f[idx, 0]
        idxbest = np.argmin(dat.f[:,5])
        iterbest = dat.f[idxbest, 0]
        heading = 'Iterat Nfevals function value axis ratio maxstd minstd'
        print(heading)
        for i in iters:
            printdatarow(dat, i)
        print(heading)
        printdatarow(dat, iterbest)
# end class CMADataLogger
#____________________________________________________________
#____________________________________________________________
#
def irg(ar):
    """return the index range ``xrange(len(ar))`` of sequence `ar` (Python 2)"""
    return xrange(len(ar))
class AII(object):
    """unstable experimental code, updates ps, sigma, sigmai, pr, r, sigma_r, mean,
    all from self.
    Depends on that the ordering of solutions has not change upon calling update
    should become a OOOptimizer in far future?
    """
    # Try: ps**2 - 1 instead of (ps**2)**0.5 / chi1 - 1: compare learning rate etc
    # and dito for psr
    def __init__(self, x0, sigma0, randn=np.random.randn):
        """TODO: check scaling of r-learing: seems worse than linear: 9e3 25e3 65e3 (10,20,40-D)

        `x0`: initial solution (its length defines the dimension N),
        `sigma0`: initial overall step-size,
        `randn`: generator for standard normal samples
        """
        self.N = len(x0)
        N = self.N
        # parameters to play with:
        # PROBLEM: smaller eta_r even fails on *axparallel* cigar!! Also dampi needs to be smaller then!
        self.dampi = 4 * N  # two times smaller is
        # NOTE: 0 / N / 3 evaluates to 0, i.e. the direction update is disabled by default
        self.eta_r = 0 / N / 3  # c_r learning rate for direction, cigar: 4/N/3 is optimal in 10-D, 10/N/3 still works (15 in 20-D) but not on the axparallel cigar with recombination
        self.mu = 1
        self.use_abs_sigma = 1  # without it is a problem on 20=D axpar-cigar!!, but why?? Because dampi is just boarderline
        self.use_abs_sigma_r = 1  #
        self.randn = randn
        self.x0 = array(x0, copy=True)
        self.sigma0 = sigma0
        self.cs = 1 / N**0.5  # evolution path for step-size(s)
        self.damps = 1
        self.use_sign = 0
        self.use_scalar_product = 0  # sometimes makes it somewhat worse on Rosenbrock, don't know why
        self.csr = 1 / N**0.5  # cumulation for sigma_r
        self.dampsr = (4 * N)**0.5
        self.chi1 = (2/np.pi)**0.5  # expectation of |N(0,1)|
        self.chiN = N**0.5*(1-1./(4.*N)+1./(21.*N**2))  # expectation of norm(randn(N,1))
        self.initialize()
    def initialize(self):
        """alias ``reset``, set all state variables to initial values"""
        N = self.N
        self.mean = array(self.x0, copy=True)
        self.sigma = self.sigma0
        self.sigmai = np.ones(N)  # per-coordinate step-sizes
        self.ps = np.zeros(N)  # path for individual and globalstep-size(s)
        self.r = np.zeros(N)  # search direction vector
        self.pr = 0  # cumulation for zr = N(0,1)
        self.sigma_r = 0
    def ask(self, popsize):
        """sample and return `popsize` new candidate solutions;
        popsize == 1 is explicitly unsupported"""
        if popsize == 1:
            raise NotImplementedError()
        self.Z = [self.randn(self.N) for _i in xrange(popsize)]
        self.zr = list(self.randn(popsize))
        pop = [self.mean + self.sigma * (self.sigmai * self.Z[k])
               + self.zr[k] * self.sigma_r * self.r
               for k in xrange(popsize)]
        if not np.isfinite(pop[0][0]):
            raise ValueError()
        return pop
    def tell(self, X, f):
        """update all strategy state from solutions `X` with f-values `f`;
        assumes `X` is the population returned by the last `ask()` in
        unchanged order (see class docstring)"""
        mu = 1 if self.mu else int(len(f) / 4)
        idx = np.argsort(f)[:mu]  # select the mu best
        zr = [self.zr[i] for i in idx]
        Z = [self.Z[i] for i in idx]
        X = [X[i] for i in idx]
        xmean = np.mean(X, axis=0)
        # cumulate the evolution path ps and adapt sigma / sigmai
        self.ps *= 1 - self.cs
        self.ps += (self.cs*(2-self.cs))**0.5 * mu**0.5 * np.mean(Z, axis=0)
        self.sigma *= np.exp((self.cs/self.damps) * (sum(self.ps**2)**0.5 / self.chiN - 1))
        if self.use_abs_sigma:
            self.sigmai *= np.exp((1/self.dampi) * (np.abs(self.ps) / self.chi1 - 1))
        else:
            self.sigmai *= np.exp((1.3/self.dampi/2) * (self.ps**2 - 1))
        # cumulate pr and update the direction vector r
        self.pr *= 1 - self.csr
        self.pr += (self.csr*(2-self.csr))**0.5 * mu**0.5 * np.mean(zr)
        fac = 1
        if self.use_sign:
            fac = np.sign(self.pr)  # produces readaptations on the cigar
        else:
            self.pr = max([0, self.pr])
        if self.use_scalar_product:
            if np.sign(sum(self.r * (xmean - self.mean))) < 0:  # and self.pr > 1:
                # if np.sign(sum(self.r * self.ps)) < 0:
                self.r *= -1
        if self.eta_r:
            self.r *= (1 - self.eta_r) * self.sigma_r
            self.r += fac * self.eta_r * mu**0.5 * (xmean - self.mean)
            self.r /= sum(self.r**2)**0.5  # keep r normalized
        if self.use_abs_sigma_r:
            self.sigma_r *= np.exp((1/self.dampsr) * ((self.pr**2)**0.5 / self.chi1 - 1))
        else:
            # this is worse on the cigar, where the direction vector(!) behaves strangely
            self.sigma_r *= np.exp((1/self.dampsr) * (self.pr**2 - 1) / 2)
        self.sigma_r = max([self.sigma * sum(self.sigmai**2)**0.5 / 3, self.sigma_r])
        # self.sigma_r = 0
        self.mean = xmean
def fmin(func, x0, sigma0=None, args=()
# the follow string arguments are evaluated, besides the verb_filenameprefix
, CMA_active='False # exponential negative update, conducted after the original update'
, CMA_activefac='1 # learning rate multiplier for active update'
, CMA_cmean='1 # learning rate for the mean value'
, CMA_diagonal='0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always' # TODO 4/ccov_separable?
, CMA_eigenmethod='np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)'
, CMA_elitist='False # elitism likely impairs global search performance'
, CMA_mirrors='popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used'
, CMA_mu='None # parents selection parameter, default is popsize // 2'
, CMA_on='True # False or 0 for no adaptation of the covariance matrix'
, CMA_rankmu='True # False or 0 for omitting rank-mu update of covariance matrix'
, CMA_rankmualpha='0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0'
, CMA_dampfac='1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere'
, CMA_teststds='None # factors for non-isotropic initial distr. mainly for test purpose, see scaling_...'
, CMA_AII='False # not yet tested'
, bounds='[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector'
, check_points='None # when repairing or injecting solutions, they should be checked (index-list or True)'
, eval_parallel='False # when True, func might be called with more than one solution as first argument'
, eval_initial_x='False # '
, fixed_variables='None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized'
, ftarget='-inf #v target function value, minimization'
, incpopsize='2 # in fmin(): multiplier for increasing popsize before each restart'
, maxfevals='inf #v maximum number of function evaluations'
, maxiter='100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations'
, mindx='0 #v minimal std in any direction, cave interference with tol*'
, minstd='0 #v minimal std in any coordinate direction, cave interference with tol*'
, noise_handling='False # maximal number of evaluations for noise treatment, only fmin'
, noise_reevals=' 1.5 + popsize/20 # number of solution to be reevaluated for noise measurement, only fmin'
, noise_eps='1e-7 # perturbation factor for noise handling reevaluations, only fmin'
, noise_change_sigma='True # exponent to default sigma increment'
, popsize='4+int(3*log(N)) # population size, AKA lambda, number of new solution per iteration'
, randn='np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)'
, restarts='0 # in fmin(): number of restarts'
, scaling_of_variables='None # scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is ones(N)'
, seed='None # random number seed'
, termination_callback='None #v a function returning True for termination, called after each iteration step and could be abused for side effects'
, tolfacupx='1e3 #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0'
, tolupsigma='1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements'
, tolfun='1e-11 #v termination criterion: tolerance in function value, quite useful'
, tolfunhist='1e-12 #v termination criterion: tolerance in function value history'
, tolstagnation='int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations'
, tolx='1e-11 #v termination criterion: tolerance in x-changes'
, transformation='None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation, t1 is the (optional) back transformation, see class GenoPheno'
, typical_x='None # used with scaling_of_variables'
, updatecovwait='None #v number of iterations without distribution update, name is subject to future changes' # TODO: rename: iterwaitupdatedistribution?
, verb_append='0 # initial evaluation counter, if append, do not overwrite output files'
, verb_disp='100 #v verbosity: display console output every verb_disp iteration'
, verb_filenameprefix='outcmaes # output filenames prefix'
, verb_log='1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions'
, verb_plot='0 #v in fmin(): plot() is called every verb_plot iteration'
, verb_time='True #v output timings on console'
, vv='0 #? versatile variable for hacking purposes, value found in self.opts[\'vv\']'
):
"""functional interface to the stochastic optimizer CMA-ES
for non-convex function minimization.
Calling Sequences
=================
``fmin([],[])``
returns all optional arguments, that is,
all keyword arguments to fmin with their default values
in a dictionary.
``fmin(func, x0, sigma0)``
minimizes `func` starting at `x0` and with standard deviation
`sigma0` (step-size)
``fmin(func, x0, sigma0, ftarget=1e-5)``
minimizes `func` up to target function value 1e-5
``fmin(func, x0, sigma0, args=('f',), **options)``
minimizes `func` called with an additional argument ``'f'``.
`options` is a dictionary with additional keyword arguments, e.g.
delivered by `Options()`.
``fmin(func, x0, sigma0, **{'ftarget':1e-5, 'popsize':40})``
the same as ``fmin(func, x0, sigma0, ftarget=1e-5, popsize=40)``
``fmin(func, esobj, **{'maxfevals': 1e5})``
uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
`func`, similar to `CMAEvolutionStrategy.optimize()`.
Arguments
=========
`func`
function to be minimized. Called as
``func(x,*args)``. `x` is a one-dimensional `numpy.ndarray`. `func`
can return `numpy.NaN`,
which is interpreted as outright rejection of solution `x`
and invokes an immediate resampling and (re-)evaluation
of a new solution not counting as function evaluation.
`x0`
list or `numpy.ndarray`, initial guess of minimum solution
or `cma.CMAEvolutionStrategy` object instance. In this case
`sigma0` can be omitted.
`sigma0`
scalar, initial standard deviation in each coordinate.
`sigma0` should be about 1/4 of the search domain width where the
optimum is to be expected. The variables in `func` should be
scaled such that they presumably have similar sensitivity.
See also option `scaling_of_variables`.
Keyword Arguments
=================
All arguments besides `args` and `verb_filenameprefix` are evaluated
if they are of type `str`, see class `Options` for details. The following
list might not be fully up-to-date, use ``cma.Options()`` or
``cma.fmin([],[])`` to get the actual list.
::
args=() -- additional arguments for func, not in `cma.Options()`
CMA_active='False # exponential negative update, conducted after the original
update'
CMA_activefac='1 # learning rate multiplier for active update'
CMA_cmean='1 # learning rate for the mean value'
CMA_dampfac='1 #v positive multiplier for step-size damping, 0.3 is close to
optimal on the sphere'
CMA_diagonal='0*100*N/sqrt(popsize) # nb of iterations with diagonal
covariance matrix, True for always'
CMA_eigenmethod='np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, alternative: Misc.eig (slower)'
CMA_elitist='False # elitism likely impairs global search performance'
CMA_mirrors='0 # values <0.5 are interpreted as fraction, values >1 as numbers
(rounded), otherwise about 0.16 is used'
CMA_mu='None # parents selection parameter, default is popsize // 2'
CMA_on='True # False or 0 for no adaptation of the covariance matrix'
CMA_rankmu='True # False or 0 for omitting rank-mu update of covariance
matrix'
CMA_rankmualpha='0.3 # factor of rank-mu update if mu=1, subject to removal,
default might change to 0.0'
CMA_teststds='None # factors for non-isotropic initial distr. mainly for test
purpose, see scaling_...'
bounds='[None, None] # lower (=bounds[0]) and upper domain boundaries, each a
scalar or a list/vector'
check_points='None # when repairing or injecting solutions, they should be checked
(index-list or True)'
eval_initial_x='False # '
fixed_variables='None # dictionary with index-value pairs like {0:1.1, 2:0.1}
that are not optimized'
ftarget='-inf #v target function value, minimization'
incpopsize='2 # in fmin(): multiplier for increasing popsize before each
restart'
maxfevals='inf #v maximum number of function evaluations'
maxiter='long(1e3*N**2/sqrt(popsize)) #v maximum number of iterations'
mindx='0 #v minimal std in any direction, cave interference with tol*'
minstd='0 #v minimal std in any coordinate direction, cave interference with
tol*'
noise_eps='1e-7 # perturbation factor for noise handling reevaluations, only
fmin'
noise_handling='False # maximal number of evaluations for noise treatment,
only fmin'
noise_reevals=' 1.5 + popsize/20 # number of solution to be reevaluated for
noise measurement, only fmin'
popsize='4+int(3*log(N)) # population size, AKA lambda, number of new solution
per iteration'
randn='np.random.standard_normal #v randn((lam, N)) must return an np.array of
shape (lam, N)'
restarts='0 # in fmin(): number of restarts'
scaling_of_variables='None # scale for each variable, sigma0 is interpreted
w.r.t. this scale, in that effective_sigma0 = sigma0*scaling.
Internally the variables are divided by scaling_of_variables and sigma
is unchanged, default is ones(N)'
seed='None # random number seed'
termination_callback='None #v in fmin(): a function returning True for
termination, called after each iteration step and could be abused for
side effects'
tolfacupx='1e3 #v termination when step-size increases by tolfacupx
(diverges). That is, the initial step-size was chosen far too small and
better solutions were found far away from the initial solution x0'
tolupsigma='1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C)))
indicates "creeping behavior" with usually minor improvements'
tolfun='1e-11 #v termination criterion: tolerance in function value, quite
useful'
tolfunhist='1e-12 #v termination criterion: tolerance in function value
history'
tolstagnation='int(100 * N**1.5 / popsize) #v termination if no improvement
over tolstagnation iterations'
tolx='1e-11 #v termination criterion: tolerance in x-changes'
transformation='None # [t0, t1] are two mappings, t0 transforms solutions from
CMA-representation to f-representation, t1 is the back transformation,
see class GenoPheno'
typical_x='None # used with scaling_of_variables'
updatecovwait='None #v number of iterations without distribution update, name
is subject to future changes'
verb_append='0 # initial evaluation counter, if append, do not overwrite
output files'
verb_disp='100 #v verbosity: display console output every verb_disp iteration'
verb_filenameprefix='outcmaes # output filenames prefix'
verb_log='1 #v verbosity: write data to files every verb_log iteration,
writing can be time critical on fast to evaluate functions'
verb_plot='0 #v in fmin(): plot() is called every verb_plot iteration'
verb_time='True #v output timings on console'
vv='0 #? versatile variable for hacking purposes, value found in
self.opts['vv']'
Subsets of options can be displayed, for example like ``cma.Options('tol')``,
see also class `Options`.
Return
======
Similar to `OOOptimizer.optimize()` and/or `CMAEvolutionStrategy.optimize()`, return the
list provided by `CMAEvolutionStrategy.result()` appended with an `OOOptimizer` and an
`BaseDataLogger`::
res = optim.result() + (optim.stop(), optim, logger)
where
- ``res[0]`` (``xopt``) -- best evaluated solution
- ``res[1]`` (``fopt``) -- respective function value
- ``res[2]`` (``evalsopt``) -- respective number of function evaluations
- ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations
- ``res[4]`` (``iterations``) -- number of overall conducted iterations
- ``res[5]`` (``xmean``) -- mean of the final sample distribution
- ``res[6]`` (``stds``) -- effective stds of the final sample distribution
- ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
- ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
- ``res[-1]`` (``logger``) -- class `CMADataLogger` instance
Details
=======
This function is an interface to the class `CMAEvolutionStrategy`. The
class can be used when full control over the iteration loop of the
optimizer is desired.
The noise handling follows closely [Hansen et al 2009, A Method for Handling
Uncertainty in Evolutionary Optimization...] in the measurement part, but the
implemented treatment is slightly different: for ``noiseS > 0``, ``evaluations``
(time) and sigma are increased by ``alpha``. For ``noiseS < 0``, ``evaluations``
(time) is decreased by ``alpha**(1/4)``. The option ``noise_handling`` switches
the uncertainty handling on/off, the given value defines the maximal number
of evaluations for a single fitness computation. If ``noise_handling`` is a list,
the smallest element defines the minimal number and if the list has three elements,
the median value is the start value for ``evaluations``. See also class
`NoiseHandler`.
Examples
========
The following example calls `fmin` optimizing the Rosenbrock function
in 10-D with initial solution 0.1 and initial step-size 0.5. The
options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.Options() # returns all possible options
>>> options = {'CMA_diagonal':10, 'seed':1234, 'verb_time':0}
>>>
>>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, **options)
(5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 10 1.264232686260072e+02 1.1e+00 4.40e-01 4e-01 4e-01
2 20 1.023929748193649e+02 1.1e+00 4.00e-01 4e-01 4e-01
3 30 1.214724267489674e+02 1.2e+00 3.70e-01 3e-01 4e-01
100 1000 6.366683525319511e+00 6.2e+00 2.49e-02 9e-03 3e-02
200 2000 3.347312410388666e+00 1.2e+01 4.52e-02 8e-03 4e-02
300 3000 1.027509686232270e+00 1.3e+01 2.85e-02 5e-03 2e-02
400 4000 1.279649321170636e-01 2.3e+01 3.53e-02 3e-03 3e-02
500 5000 4.302636076186532e-04 4.6e+01 4.78e-03 3e-04 5e-03
600 6000 6.943669235595049e-11 5.1e+01 5.41e-06 1e-07 4e-06
650 6500 5.557961334063003e-14 5.4e+01 1.88e-07 4e-09 1e-07
termination on tolfun : 1e-11
final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
mean solution: [ 1. 1.00000001 1. 1.
1. 1.00000001 1.00000002 1.00000003 ...]
std deviation: [ 3.9193387e-09 3.7792732e-09 4.0062285e-09 4.6605925e-09
5.4966188e-09 7.4377745e-09 1.3797207e-08 2.6020765e-08 ...]
>>>
>>> print('best solutions fitness = %f' % (res[1]))
best solutions fitness = 2.62435631419e-14
>>> assert res[1] < 1e-12
The method ::
cma.plot();
(based on `matplotlib.pylab`) produces a plot of the run and, if necessary::
cma.show()
shows the plot in a window. To continue you might need to
close the pop-up window. This behavior seems to disappear in
subsequent calls of `cma.plot()` and is avoided by using
`ipython` with `-pylab` option. Finally ::
cma.savefig('myfirstrun') # savefig from matplotlib.pylab
will save the figure in a png.
:See: `CMAEvolutionStrategy`, `OOOptimizer.optimize(), `plot()`, `Options`, `scipy.optimize.fmin()`
""" # style guides say there should be the above empty line
try: # pass on KeyboardInterrupt
opts = locals() # collect all local variables (i.e. arguments) in a dictionary
del opts['func'] # remove those without a default value
del opts['args']
del opts['x0'] # is not optional, no default available
del opts['sigma0'] # is not optional for the constructor CMAEvolutionStrategy
if not func: # return available options in a dictionary
return Options(opts, True) # these opts are by definition valid
# TODO: this is very ugly:
incpopsize = Options({'incpopsize':incpopsize}).eval('incpopsize')
restarts = Options({'restarts':restarts}).eval('restarts')
del opts['restarts']
noise_handling = Options({'noise_handling': noise_handling}).eval('noise_handling')
del opts['noise_handling']# otherwise CMA throws an error
irun = 0
best = BestSolution()
while 1:
# recover from a CMA object
if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
es = x0
x0 = es.inputargs['x0'] # for the next restarts
if sigma0 is None or not np.isscalar(array(sigma0)):
sigma0 = es.inputargs['sigma0'] # for the next restarts
# ignore further input args and keep original options
else: # default case
es = CMAEvolutionStrategy(x0, sigma0, opts)
if opts['eval_initial_x']:
x = es.gp.pheno(es.mean, bounds=es.gp.bounds)
es.best.update([x], None, [func(x, *args)], 1)
es.countevals += 1
opts = es.opts # processed options, unambiguous
append = opts['verb_append'] or es.countiter > 0 or irun > 0
logger = CMADataLogger(opts['verb_filenameprefix'], opts['verb_log'])
logger.register(es, append).add() # initial values, not fitness values
# if es.countiter == 0 and es.opts['verb_log'] > 0 and not es.opts['verb_append']:
# logger = CMADataLogger(es.opts['verb_filenameprefix']).register(es)
# logger.add()
# es.writeOutput() # initial values for sigma etc
noisehandler = NoiseHandler(es.N, noise_handling, np.median, opts['noise_reevals'], opts['noise_eps'], opts['eval_parallel'])
while not es.stop():
X, fit = es.ask_and_eval(func, args, evaluations=noisehandler.evaluations,
aggregation=np.median) # treats NaN with resampling
# TODO: check args and in case use args=(noisehandler.evaluations, )
if 11 < 3 and opts['vv']: # inject a solution
# use option check_point = [0]
if 0 * np.random.randn() >= 0:
X[0] = 0 + opts['vv'] * es.sigma**0 * np.random.randn(es.N)
fit[0] = func(X[0], *args)
# print fit[0]
es.tell(X, fit) # prepare for next iteration
if noise_handling:
es.sigma *= noisehandler(X, fit, func, es.ask, args)**opts['noise_change_sigma']
es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though
es.disp()
logger.add(more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
modulo=1 if es.stop() and logger.modulo else None)
if opts['verb_log'] and opts['verb_plot'] and \
(es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop()):
logger.plot(324, fontsize=10)
# end while not es.stop
mean_pheno = es.gp.pheno(es.mean, bounds=es.gp.bounds)
fmean = func(mean_pheno, *args)
es.countevals += 1
es.best.update([mean_pheno], None, [fmean], es.countevals)
best.update(es.best) # in restarted case
# final message
if opts['verb_disp']:
for k, v in es.stop().items():
print('termination on %s=%s (%s)' % (k, str(v), time.asctime()))
print('final/bestever f-value = %e %e' % (es.best.last.f, best.f))
if es.N < 9:
print('mean solution: ' + str(es.gp.pheno(es.mean)))
print('std deviation: ' + str(es.sigma * sqrt(es.dC) * es.gp.scales))
else:
print('mean solution: %s ...]' % (str(es.gp.pheno(es.mean)[:8])[:-1]))
print('std deviations: %s ...]' % (str((es.sigma * sqrt(es.dC) * es.gp.scales)[:8])[:-1]))
irun += 1
if irun > restarts or 'ftarget' in es.stopdict or 'maxfunevals' in es.stopdict:
break
opts['verb_append'] = es.countevals
opts['popsize'] = incpopsize * es.sp.popsize # TODO: use rather options?
opts['seed'] += 1
# while irun
es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell
if 1 < 3:
return es.result() + (es.stop(), es, logger)
else: # previously: to be removed
return (best.x.copy(), best.f, es.countevals,
dict((('stopdict', CMAStopDict(es.stopdict))
,('mean', es.gp.pheno(es.mean))
,('std', es.sigma * sqrt(es.dC) * es.gp.scales)
,('out', es.out)
,('opts', es.opts) # last state of options
,('cma', es)
,('inputargs', es.inputargs)
))
)
# TODO refine output, can #args be flexible?
# is this well usable as it is now?
except KeyboardInterrupt: # Exception, e:
if opts['verb_disp'] > 0:
print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')
raise # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit
def plot(name=None, fig=None, abscissa=1, iteridx=None, plot_mean=True, # TODO: plot_mean default should be False
        foffset=1e-19, x_opt=None, fontsize=10):
    """plot data recorded on disk by a `CMADataLogger`.

    The call ``cma.plot(name, **argsdict)`` is a convenience shortcut
    for ``cma.CMADataLogger(name).plot(**argsdict)``.

    Arguments
    ---------
    `name`
        name of the logger, i.e. the output filename prefix; `None`
        selects the default ``'outcmaes'``
    `fig`
        filename or figure number, or both as a tuple (any order)
    `abscissa`
        0 == plot versus iteration count,
        1 == plot versus function evaluation number
    `iteridx`
        iteration indices to plot

    Return `None`

    Examples
    --------
    ::
        cma.plot();  # the optimization might be still
                     # running in a different shell
        cma.show()   # to continue you might need to close the pop-up window
                     # once and call cma.plot() again.
                     # This behavior seems to disappear in subsequent
                     # calls of cma.plot(). Also using ipython with -pylab
                     # option might help.
        cma.savefig('fig325.png')
        cma.close()
        cdl = cma.CMADataLogger().downsampling().plot()

    Details
    -------
    Data written by the codes in other languages (C, Java, Matlab, Scilab)
    share the same format and can be plotted just the same.

    :See: `CMADataLogger`, `CMADataLogger.plot()`
    """
    # delegate everything to the logger bound to the given filename prefix
    logger = CMADataLogger(name)
    logger.plot(fig, abscissa, iteridx, plot_mean, foffset, x_opt, fontsize)
def disp(name=None, idx=None):
    """display selected data rows from files written by the class `CMADataLogger`.

    The call ``cma.disp(name, idx)`` is a convenience shortcut for
    ``cma.CMADataLogger(name).disp(idx)``.

    Arguments
    ---------
    `name`
        name of the logger, i.e. the filename prefix; `None` (or any
        falsy value) selects the default ``'outcmaes'``
    `idx`
        indices corresponding to rows in the data file; by
        default the first five, then every 100-th, and the last
        10 rows. Too large index values are removed.

    Examples
    --------
    ::
        import cma, numpy
        # assume some data are available from previous runs
        cma.disp(None, numpy.r_[0, -1])  # first and last
        cma.disp(None, numpy.r_[0:1e9:100, -1])  # every 100-th and last
        cma.disp(idx=numpy.r_[0, -10:0])  # first and ten last
        cma.disp(idx=numpy.r_[0:1e9:1e3, -10:0])

    :See: `CMADataLogger.disp()`
    """
    prefix = name or 'outcmaes'  # falsy name falls back to the default prefix
    logger = CMADataLogger(prefix)
    return logger.disp(idx)
#____________________________________________________________
def _fileToMatrix(file_name):
"""rudimentary method to read in data from a file"""
# TODO: np.loadtxt() might be an alternative
# try:
if 1 < 3:
lres = []
for line in open(file_name, 'r').readlines():
if len(line) > 0 and line[0] not in ('%', '#'):
lres.append(map(float, line.split()))
res = lres
else:
fil = open(file_name, 'r')
fil.readline() # rudimentary, assume one comment line
lineToRow = lambda line: map(float, line.split())
res = map(lineToRow, fil.readlines())
fil.close() # close file could be omitted, reference counting should do during garbage collection, but...
while res != [] and res[0] == []: # remove further leading empty lines
del res[0]
return res
# except:
print('could not read file ' + file_name)
#____________________________________________________________
#____________________________________________________________
class NoiseHandler(object):
    """Noise handling according to [Hansen et al 2009, A Method for Handling
    Uncertainty in Evolutionary Optimization...]

    The interface of this class is yet versatile and subject to changes.
    The attribute ``evaluations`` serves to control the noise via number of
    evaluations, for example with `ask_and_eval()`, see also parameter
    ``maxevals`` and compare the example.

    Example
    -------
    >>> import cma, numpy as np
    >>> func = cma.Fcts.noisysphere
    >>> es = cma.CMAEvolutionStrategy(np.ones(10), 1)
    >>> logger = cma.CMADataLogger().register(es)
    >>> nh = cma.NoiseHandler(es.N, maxevals=[1, 30])
    >>> while not es.stop():
    ...     X, fit = es.ask_and_eval(func, evaluations=nh.evaluations)
    ...     es.tell(X, fit)  # prepare for next iteration
    ...     es.sigma *= nh(X, fit, func, es.ask)  # see method __call__
    ...     es.countevals += nh.evaluations_just_done  # this is a hack, not important though
    ...     logger.add(more_data = [nh.evaluations, nh.noiseS])  # add a data point
    ...     es.disp()
    ...     # nh.maxevals = ... it might be useful to start with smaller values and then increase
    >>> print(es.stop())
    >>> print(es.result()[-2])  # take mean value, the best solution is totally off
    >>> assert sum(es.result()[-2]**2) < 1e-9
    >>> print(X[np.argmin(fit)])  # not bad, but probably worse than the mean
    >>> logger.plot()

    The noise options of `fmin()` control a `NoiseHandler` instance similar to this
    example. The command ``cma.Options('noise')`` lists in effect the parameters of
    `__init__` apart from ``aggregate``.

    Details
    -------
    The parameters reevals, theta, c_s, and alpha_t are set differently
    than in the original publication, see method `__init__()`. For a
    very small population size, say popsize <= 5, the measurement
    technique based on rank changes is likely to fail.

    Missing Features
    ----------------
    In case no noise is found, ``self.lam_reeval`` should be adaptive
    and get at least as low as 1 (however the possible savings from this
    are rather limited). Another option might be to decide during the
    first call by a quantitative analysis of fitness values whether
    ``lam_reeval`` is set to zero. More generally, an automatic noise
    mode detection might also set the covariance matrix learning rates
    to smaller values.

    :See: `fmin()`, `ask_and_eval()`
    """
    def __init__(self, N, maxevals=10, aggregate=np.median, reevals=None, epsilon=1e-7, parallel=False):
        """parameters are

        `N`
            dimension
        `maxevals`
            maximal value for ``self.evaluations``, where
            ``self.evaluations`` function calls are aggregated for
            noise treatment. With ``maxevals == 0`` the noise
            handler is (temporarily) "switched off". If `maxevals`
            is a list, min value and (for >2 elements) median are
            used to define minimal and initial value of
            ``self.evaluations``. Choosing ``maxevals > 1`` is only
            reasonable, if also the original ``fit`` values (that
            are passed to `__call__`) are computed by aggregation of
            ``self.evaluations`` values (otherwise the values are
            not comparable), as it is done within `fmin()`.
        `aggregate`
            function to aggregate single f-values to a 'fitness', e.g.
            ``np.median``.
        `reevals`
            number of solutions to be reevaluated for noise measurement,
            can be a float, by default set to ``1.5 + popsize/20``,
            zero switches noise handling off.
        `epsilon`
            multiplier for perturbation of the reevaluated solutions
        `parallel`
            a single f-call with all resampled solutions

        :See: `fmin()`, `Options`, `CMAEvolutionStrategy.ask_and_eval()`
        """
        self.lam_reeval = reevals # 2 + popsize/20, see method indices(), originally 2 + popsize/10
        self.epsilon = epsilon
        self.parallel = parallel
        self.theta = 0.5 # originally 0.2
        self.cum = 0.3 # originally 1, 0.3 allows one disagreement of current point with resulting noiseS
        # NOTE(review): the two rates below assume true division; with
        # Python-2 integer division and integer N, ``2 / (N+10)`` would
        # truncate to 0 -- confirm the module enables true division
        self.alphasigma = 1 + 2 / (N+10)
        self.alphaevals = 1 + 2 / (N+10) # originally 1.5
        self.alphaevalsdown = self.alphaevals**-0.25 # originally 1/1.5
        self.evaluations = 1 # to aggregate for a single f-evaluation
        self.minevals = 1
        self.maxevals = int(np.max(maxevals))  # np.max also accepts a plain scalar
        if hasattr(maxevals, '__contains__'): # i.e. can deal with ``in``
            if len(maxevals) > 1:
                self.minevals = min(maxevals)
                self.evaluations = self.minevals
            if len(maxevals) > 2:
                self.evaluations = np.median(maxevals)
        self.f_aggregate = aggregate
        self.evaluations_just_done = 0 # actually conducted evals, only for documentation
        self.noiseS = 0  # smoothed noise measurement, updated in update_measure()
    def __call__(self, X, fit, func, ask=None, args=()):
        """proceed with noise measurement, set anew attributes ``evaluations``
        (proposed number of evaluations to "treat" noise) and ``evaluations_just_done``
        and return a factor for increasing sigma.

        Parameters
        ----------
        `X`
            a list/sequence/vector of solutions
        `fit`
            the respective list of function values
        `func`
            the objective function, ``fit[i]`` corresponds to ``func(X[i], *args)``
        `ask`
            a method to generate a new, slightly disturbed solution. The argument
            is mandatory if ``epsilon`` is not zero, see `__init__()`.
        `args`
            optional additional arguments to `func`

        Details
        -------
        Calls the methods ``reeval()``, ``update_measure()`` and ``treat()`` in this order.
        ``self.evaluations`` is adapted within the method `treat()`.
        """
        self.evaluations_just_done = 0
        if not self.maxevals or self.lam_reeval == 0:
            return 1.0  # noise handling switched off: leave sigma unchanged
        res = self.reeval(X, fit, func, ask, args)
        if not len(res):  # reeval returned an empty index array: nothing measured
            return 1.0
        self.update_measure()
        return self.treat()
    def get_evaluations(self):
        """return ``self.evaluations``, the number of evalutions to get a single fitness measurement"""
        return self.evaluations
    def treat(self):
        """adapt self.evaluations depending on the current measurement value
        and return ``sigma_fac in (1.0, self.alphasigma)``
        """
        if self.noiseS > 0:
            # noise detected: aggregate more evaluations and inflate sigma
            self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))
            return self.alphasigma
        else:
            # no evidence of noise: spend fewer evaluations, keep sigma
            self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))
            return 1.0
    def reeval(self, X, fit, func, ask, args=()):
        """store two fitness lists, `fit` and ``fitre`` reevaluating some
        solutions in `X`.
        ``self.evaluations`` evaluations are done for each reevaluated
        fitness value.
        See `__call__()`, where `reeval()` is called.
        """
        self.fit = list(fit)
        self.fitre = list(fit)
        self.idx = self.indices(fit)
        if not len(self.idx):
            return self.idx  # early out with the empty index array
        evals = int(self.evaluations) if self.f_aggregate else 1
        fagg = np.median if self.f_aggregate is None else self.f_aggregate
        # NOTE(review): ``xrange`` below is Python-2-only; under Python 3
        # the module must provide an alias (e.g. ``xrange = range``) -- confirm
        for i in self.idx:
            if self.epsilon:  # reevaluate at slightly perturbed solutions
                if self.parallel:  # a single func call with all resampled solutions
                    self.fitre[i] = fagg(func(ask(evals, X[i], self.epsilon), *args))
                else:
                    self.fitre[i] = fagg([func(ask(1, X[i], self.epsilon)[0], *args) for _k in xrange(evals)])
            else:  # reevaluate at the unperturbed solution
                self.fitre[i] = fagg([func(X[i], *args) for _k in xrange(evals)])
        self.evaluations_just_done = evals * len(self.idx)
        return self.fit, self.fitre, self.idx
    def update_measure(self):
        """updated noise level measure using two fitness lists ``self.fit`` and
        ``self.fitre``, return ``self.noiseS, all_individual_measures``.

        Assumes that `self.idx` contains the indices where the fitness
        lists differ
        """
        lam = len(self.fit)
        # joint ranking of original and reevaluated fitness values
        idx = np.argsort(self.fit + self.fitre)
        ranks = np.argsort(idx).reshape((2, lam))
        rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
        # compute rank change limits using both ranks[0] and ranks[1]
        r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
        limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0,i] + 1 - (ranks[0,i] > ranks[1,i]))),
                                    self.theta*50) +
                         Mh.prctile(np.abs(r - (ranks[1,i] + 1 - (ranks[1,i] > ranks[0,i]))),
                                    self.theta*50))
                  for i in self.idx]
        # compute measurement
        # max: 1 rankchange in 2*lambda is always fine
        s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda
        # exponential smoothing with rate self.cum
        self.noiseS += self.cum * (np.mean(s) - self.noiseS)
        return self.noiseS, s
    def indices(self, fit):
        """return the set of indices to be reevaluted for noise measurement,
        taking the ``lam_reeval`` best from the first ``2 * lam_reeval + 2``
        values.

        Given the first values are the earliest, this is a useful policy also
        with a time changing objective.
        """
        # NOTE(review): ``len(fit) / 20`` assumes true division, see __init__
        lam = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20
        # probabilistic rounding of the possibly fractional lam
        reev = int(lam) + ((lam % 1) > np.random.rand())
        return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]
#____________________________________________________________
#____________________________________________________________
class Sections(object):
    """plot sections through an objective function. A first
    rational thing to do, when facing an (expensive) application.
    By default 6 points in each coordinate are evaluated.
    This class is still experimental.

    Examples
    --------
    >>> import cma, numpy as np
    >>> s = cma.Sections(cma.Fcts.rosen, np.zeros(3)).do(plot=False)
    >>> s.do(plot=False) # evaluate the same points again, i.e. check for noise
    >>> try:
    ...     s.plot()
    ... except:
    ...     print('plotting failed: pylab package is missing?')

    Details
    -------
    Data are saved after each function call during `do()`. The filename is attribute
    ``name`` and by default ``str(func)``, see `__init__()`.
    A random (orthogonal) basis can be generated with ``cma.Rotation()(np.eye(3))``.
    The default name is unique in the function name, but it should be unique in all
    parameters of `__init__()` but `plot_cmd` and `load`.
    ``self.res`` is a dictionary with an entry for each "coordinate" ``i`` and with an
    entry ``'x'``, the middle point. Each entry ``i`` is again a dictionary with keys
    being different dx values and the value being a sequence of f-values.
    For example ``self.res[2][0.1] == [0.01, 0.01]``, which is generated using the
    difference vector ``self.basis[2]`` like
    ``self.res[2][dx] += func(self.res['x'] + dx * self.basis[2])``.

    :See: `__init__()`
    """
    def __init__(self, func, x, args=(), basis=None, name=None,
                 plot_cmd=pylab.plot if pylab else None, load=True):
        """
        Parameters
        ----------
        `func`
            objective function
        `x`
            point in search space, middle point of the sections
        `args`
            arguments passed to `func`
        `basis`
            evaluated points are ``func(x + locations[j] * basis[i]) for i in len(basis) for j in len(locations)``,
            see `do()`
        `name`
            filename where to save the result
        `plot_cmd`
            command used to plot the data, typically matplotlib pylabs `plot` or `semilogy`
        `load`
            load previous data from file ``str(func) + '.pkl'``
        """
        self.func = func
        self.args = args
        self.x = x
        # default name: func's repr sanitized for use as a filename
        self.name = name if name else str(func).replace(' ', '_').replace('>', '').replace('<', '')
        self.plot_cmd = plot_cmd  # or semilogy
        self.basis = np.eye(len(x)) if basis is None else basis
        # start with a fresh result dict; load() below may replace it
        self.res = {}
        self.res['x'] = x  # TODO: res['x'] does not look perfect
        if load:  # fix: this parameter was documented but previously ignored
            try:
                self.load()  # on success this replaces self.res entirely
                if any(self.res['x'] != x):
                    # stored data belong to a different middle point: discard them
                    self.res = {}
                    self.res['x'] = x
                else:
                    print(self.name + ' loaded')
            except Exception:  # narrowed from a bare ``except:`` so that
                # KeyboardInterrupt/SystemExit still propagate
                self.res = {}
                self.res['x'] = x
    def do(self, repetitions=1, locations=np.arange(-0.5, 0.6, 0.2), plot=True):
        """generates, plots and saves function values ``func(y)``,
        where ``y`` is 'close' to `x` (see `__init__()`). The data are stored in
        the ``res`` attribute and the class instance is saved in a file
        with (the weired) name ``str(func)``.

        Parameters
        ----------
        `repetitions`
            for each point, only for noisy functions is >1 useful. For
            ``repetitions==0`` only already generated data are plotted.
        `locations`
            coordinated wise deviations from the middle point given in `__init__`
        """
        # NOTE: the mutable default `locations` is only iterated, never
        # modified, so sharing it between calls is safe
        if not repetitions:
            self.plot()
            return
        res = self.res
        for i in range(len(self.basis)): # i-th coordinate
            if i not in res:
                res[i] = {}
            # xx = np.array(self.x)
            # TODO: store res[i]['dx'] = self.basis[i] here?
            for dx in locations:
                xx = self.x + dx * self.basis[i]
                xkey = dx # xx[i] if (self.basis == np.eye(len(self.basis))).all() else dx
                if xkey not in res[i]:
                    res[i][xkey] = []
                n = repetitions
                while n > 0:
                    n -= 1
                    res[i][xkey].append(self.func(xx, *self.args))
                    if plot:
                        self.plot()
                    self.save()  # data are saved after each function call
        return self
    def plot(self, plot_cmd=None, tf=lambda y: y):
        """plot the data we have, return ``self``"""
        if not plot_cmd:
            plot_cmd = self.plot_cmd
        colors = 'bgrcmyk'
        # NOTE(review): pylab.hold() was removed in matplotlib >= 3.0;
        # this method therefore requires an old matplotlib -- confirm
        pylab.hold(False)
        res = self.res
        flatx, flatf = self.flattened()
        minf = np.inf
        for i in flatf:
            minf = min((minf, min(flatf[i])))
        addf = 1e-9 - minf if minf <= 0 else 0  # shift so values stay positive (e.g. for semilogy)
        for i in sorted(res.keys()): # we plot not all values here
            if type(i) is int:
                color = colors[i % len(colors)]
                arx = sorted(res[i].keys())
                plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
                pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
                pylab.hold(True)
                plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
        pylab.ylabel('f + ' + str(addf))
        pylab.draw()
        show()
        # raw_input('press return')
        return self
    def flattened(self):
        """return flattened data ``(x, f)`` such that for the sweep through
        coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])``
        """
        flatx = {}
        flatf = {}
        for i in self.res:
            if type(i) is int:
                flatx[i] = []
                flatf[i] = []
                for x in sorted(self.res[i]):
                    for d in sorted(self.res[i][x]):
                        flatx[i].append(x)
                        flatf[i].append(d)
        return flatx, flatf
    def save(self, name=None):
        """pickle the instance (without ``func``) to file ``name + '.pkl'``"""
        import pickle
        name = name if name else self.name
        fun = self.func
        del self.func # instance method produces error
        try:
            with open(name + '.pkl', "wb") as fil:
                pickle.dump(self, fil)
        finally:
            # restore func even if pickling fails (previously a failure
            # left the instance permanently without its func attribute)
            self.func = fun
        return self
    def load(self, name=None):
        """load the ``res`` data from file ``name + '.pkl'``"""
        import pickle
        # fix: open in binary mode -- save() writes with "wb"; text mode
        # breaks the pickle protocol (always under Python 3)
        name = name if name else self.name
        with open(name + '.pkl', 'rb') as fil:
            s = pickle.load(fil)
        self.res = s.res # disregard the class
        return self
#____________________________________________________________
#____________________________________________________________
class _Error(Exception):
"""generic exception of cma module"""
pass
#____________________________________________________________
#____________________________________________________________
#
class ElapsedTime(object):
    """callable returning the seconds elapsed since construction,
    compensating for overflow of the underlying C clock.

    32-bit C overflows after int(2**32/1e6) == 4294s about 72 min
    """
    # fix: time.clock() was removed in Python 3.8; fall back to
    # time.perf_counter(), which is likewise monotonically increasing,
    # so the overflow bookkeeping below still applies (it just never
    # triggers with a 64-bit monotonic clock)
    _clock = staticmethod(getattr(time, 'clock', None) or time.perf_counter)

    def __init__(self):
        clock = self._clock
        self.tic0 = clock()        # reference time at construction, kept for diagnostics
        self.tic = self.tic0       # start of the current (non-overflowed) segment
        self.lasttoc = clock()     # most recent clock reading
        self.lastdiff = clock() - self.lasttoc  # last observed inter-call difference
        self.time_to_add = 0       # time accumulated from segments before an overflow
        self.messages = 0          # number of overflow warnings printed so far
    def __call__(self):
        """return seconds elapsed since construction as a float"""
        toc = self._clock()
        if toc - self.tic >= self.lasttoc - self.tic:  # clock advanced: no overflow
            self.lastdiff = toc - self.lasttoc
            self.lasttoc = toc
        else: # overflow, reset self.tic
            if self.messages < 3:
                self.messages += 1
                print(' in cma.ElapsedTime: time measure overflow, last difference estimated from',
                        self.tic0, self.tic, self.lasttoc, toc, toc - self.lasttoc, self.lastdiff)
            # credit the completed segment (plus an estimate of the lost
            # interval) before restarting the measurement from toc
            self.time_to_add += self.lastdiff + self.lasttoc - self.tic
            self.tic = toc  # reset
            self.lasttoc = toc
        self.elapsedtime = toc - self.tic + self.time_to_add
        return self.elapsedtime
#____________________________________________________________
#____________________________________________________________
#
class TimeIt(object):
    """Unfinished placeholder for a timing utility.

    Constructing an instance currently has no effect: the arguments
    are accepted and ignored.
    """
    def __init__(self, fct, args=(), seconds=1):
        """accept and ignore `fct`, `args` and `seconds` (not yet implemented)"""
class Misc(object):
    """Namespace container for assorted helper utilities."""

    # ____________________________________________________________
    class MathHelperFunctions(object):
        """static convenience math helper functions, if the function name
        is preceded with an "a", a numpy array is returned
        """
        @staticmethod
        def aclamp(x, upper):
            """clamp *x* (scalar or array) from above at *upper*"""
            return -Misc.MathHelperFunctions.apos(-x, -upper)

        @staticmethod
        def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T)

        @staticmethod
        def amax(vec, vec_or_scalar):
            """elementwise max, returned as a numpy array"""
            return np.array(Misc.MathHelperFunctions.max(vec, vec_or_scalar))

        @staticmethod
        def max(vec, vec_or_scalar):
            """elementwise max of *vec* against a scalar or same-length sequence"""
            b = vec_or_scalar
            if np.isscalar(b):
                return [max(x, b) for x in vec]
            return [max(vec[i], b[i]) for i in range(len(vec))]

        @staticmethod
        def amin(vec_or_scalar, vec_or_scalar2):
            """elementwise min, returned as a numpy array"""
            return np.array(Misc.MathHelperFunctions.min(vec_or_scalar, vec_or_scalar2))

        @staticmethod
        def min(a, b):
            """elementwise min of scalars and/or same-length sequences"""
            iss = np.isscalar
            if iss(a) and iss(b):
                return min(a, b)
            if iss(a):
                a, b = b, a
            # now only b can still be a scalar
            if iss(b):
                return [min(x, b) for x in a]
            # two non-scalars must have the same length
            return [min(a[i], b[i]) for i in range(len(a))]

        @staticmethod
        def norm(vec, expo=2):
            """p-norm of *vec* (a numpy array) with exponent *expo*"""
            # float exponent: 1/expo would be integer division on Python 2
            return sum(vec**expo)**(1.0 / expo)

        @staticmethod
        def apos(x, lower=0):
            """clips argument (scalar or array) from below at lower"""
            if lower == 0:
                return (x > 0) * x
            else:
                return lower + (x > lower) * (x - lower)

        @staticmethod
        def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
            """``prctile(data, 50)`` returns the median, but p_vals can
            also be a sequence.

            Provides for small samples better values than
            matplotlib.mlab.prctile, however also slower.
            """
            ps = [p_vals] if np.isscalar(p_vals) else p_vals
            if not sorted_:
                data = sorted(data)
            n = len(data)
            d = []
            for p in ps:
                # float division: integer division here would shift percentiles
                fi = p * n / 100. - 0.5
                if fi <= 0:  # maybe extrapolate?
                    d.append(data[0])
                elif fi >= n - 1:
                    d.append(data[-1])
                else:
                    i = int(fi)
                    d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
            return d[0] if np.isscalar(p_vals) else d

        @staticmethod
        def sround(nb):  # TODO: to be vectorized
            """return stochastic round: floor(nb) + (rand()<remainder(nb))"""
            return nb // 1 + (np.random.rand(1)[0] < (nb % 1))

        @staticmethod
        def cauchy_with_variance_one():
            """sample from a Cauchy-like distribution, truncated and rescaled"""
            n = np.random.randn() / np.random.randn()
            while abs(n) > 1000:
                n = np.random.randn() / np.random.randn()
            return n / 25

        @staticmethod
        def standard_finite_cauchy(size=1):
            """array of truncated-Cauchy samples; *size* is a scalar or up to 2-d shape"""
            try:
                l = len(size)
            except TypeError:
                l = 0
            if l == 0:
                return np.array([Mh.cauchy_with_variance_one() for _i in range(size)])
            elif l == 1:
                return np.array([Mh.cauchy_with_variance_one() for _i in range(size[0])])
            elif l == 2:
                return np.array([[Mh.cauchy_with_variance_one() for _i in range(size[1])]
                                 for _j in range(size[0])])
            else:
                raise _Error('len(size) cannot be large than two')

        @staticmethod
        def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
            """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
            # testing: MC integrate must be one:
            #   mean(p(x_i)) * volume(where x_i are uniformly sampled)
            if m is None:
                dx = x
            else:
                dx = x - m
            n = len(x)
            s2pi = (2 * np.pi)**(n / 2.)
            if Cinv is None:
                return np.exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
            if detC is None:
                # np.linalg.det: the private np.linalg.linalg path was removed in NumPy 2.0
                detC = 1. / np.linalg.det(Cinv)
            return (np.exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2)
                    / s2pi / abs(detC)**0.5 / sigma**n)

        @staticmethod
        def loglikelihood(self, x, previous=False):
            """return log-likelihood of `x` regarding the current sample distribution

            NOTE(review): declared @staticmethod yet takes ``self`` — it is
            meant to be called with a CMAEvolutionStrategy-like object as
            first argument (reads sigma, mean, D, _Crootinv); kept as-is.
            """
            if previous and hasattr(self, 'lastiter'):
                sigma = self.lastiter.sigma
                Crootinv = self.lastiter._Crootinv
                xmean = self.lastiter.mean
                D = self.lastiter.D
            elif previous and self.countiter > 1:
                raise _Error('no previous distribution parameters stored, check options importance_mixing')
            else:
                sigma = self.sigma
                Crootinv = self._Crootinv
                xmean = self.mean
                D = self.D
            dx = np.array(x) - xmean
            n = self.N
            logs2pi = n * np.log(2 * np.pi) / 2.
            logdetC = 2 * sum(np.log(D))
            dx = np.dot(Crootinv, dx)
            res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * np.log(sigma)
            # consistency check against the direct (non-log) formulation;
            # the original assert used `or`, which was tautologically true
            s2pi = (2 * np.pi)**(n / 2.)
            detC = np.prod(D)**2
            res2 = -sum(dx**2) / sigma**2 / 2 - np.log(s2pi * abs(detC)**0.5 * sigma**n)
            assert abs(res2 - res) < 1e-6 * (1 + abs(res))
            return res
#____________________________________________________________
#____________________________________________________________
#
# C and B are arrays rather than matrices, because they are
# addressed via B[i][j], matrices can only be addressed via B[i,j]
# tred2(N, B, diagD, offdiag);
# tql2(N, diagD, offdiag, B);
# Symmetric Householder reduction to tridiagonal form, translated from JAMA package.
@staticmethod
def eig(C):
"""eigendecomposition of a symmetric matrix, much slower than
`numpy.linalg.eigh`, return ``(EVals, Basis)``, the eigenvalues
and an orthonormal basis of the corresponding eigenvectors, where
``Basis[i]``
the i-th row of ``Basis``
columns of ``Basis``, ``[Basis[j][i] for j in range(len(Basis))]``
the i-th eigenvector with eigenvalue ``EVals[i]``
"""
# class eig(object):
# def __call__(self, C):
# Householder transformation of a symmetric matrix V into tridiagonal form.
# -> n : dimension
# -> V : symmetric nxn-matrix
# <- V : orthogonal transformation matrix:
# tridiag matrix == V * V_in * V^t
# <- d : diagonal
# <- e[0..n-1] : off diagonal (elements 1..n-1)
# Symmetric tridiagonal QL algorithm, iterative
# Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations
# -> n : Dimension.
# -> d : Diagonale of tridiagonal matrix.
# -> e[1..n-1] : off-diagonal, output from Householder
# -> V : matrix output von Householder
# <- d : eigenvalues
# <- e : garbage?
# <- V : basis of eigenvectors, according to d
# tred2(N, B, diagD, offdiag); B=C on input
# tql2(N, diagD, offdiag, B);
# private void tred2 (int n, double V[][], double d[], double e[]) {
def tred2 (n, V, d, e):
# This is derived from the Algol procedures tred2 by
# Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
# Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
# Fortran subroutine in EISPACK.
num_opt = False # factor 1.5 in 30-D
for j in range(n):
d[j] = V[n-1][j] # d is output argument
# Householder reduction to tridiagonal form.
for i in range(n-1,0,-1):
# Scale to avoid under/overflow.
h = 0.0
if not num_opt:
scale = 0.0
for k in range(i):
scale = scale + abs(d[k])
else:
scale = sum(abs(d[0:i]))
if scale == 0.0:
e[i] = d[i-1]
for j in range(i):
d[j] = V[i-1][j]
V[i][j] = 0.0
V[j][i] = 0.0
else:
# Generate Householder vector.
if not num_opt:
for k in range(i):
d[k] /= scale
h += d[k] * d[k]
else:
d[:i] /= scale
h = np.dot(d[:i],d[:i])
f = d[i-1]
g = h**0.5
if f > 0:
g = -g
e[i] = scale * g
h = h - f * g
d[i-1] = f - g
if not num_opt:
for j in range(i):
e[j] = 0.0
else:
e[:i] = 0.0
# Apply similarity transformation to remaining columns.
for j in range(i):
f = d[j]
V[j][i] = f
g = e[j] + V[j][j] * f
if not num_opt:
for k in range(j+1, i):
g += V[k][j] * d[k]
e[k] += V[k][j] * f
e[j] = g
else:
e[j+1:i] += V.T[j][j+1:i] * f
e[j] = g + np.dot(V.T[j][j+1:i],d[j+1:i])
f = 0.0
if not num_opt:
for j in range(i):
e[j] /= h
f += e[j] * d[j]
else:
e[:i] /= h
f += np.dot(e[:i],d[:i])
hh = f / (h + h)
if not num_opt:
for j in range(i):
e[j] -= hh * d[j]
else:
e[:i] -= hh * d[:i]
for j in range(i):
f = d[j]
g = e[j]
if not num_opt:
for k in range(j, i):
V[k][j] -= (f * e[k] + g * d[k])
else:
V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
d[j] = V[i-1][j]
V[i][j] = 0.0
d[i] = h
# end for i--
# Accumulate transformations.
for i in range(n-1):
V[n-1][i] = V[i][i]
V[i][i] = 1.0
h = d[i+1]
if h != 0.0:
if not num_opt:
for k in range(i+1):
d[k] = V[k][i+1] / h
else:
d[:i+1] = V.T[i+1][:i+1] / h
for j in range(i+1):
if not num_opt:
g = 0.0
for k in range(i+1):
g += V[k][i+1] * V[k][j]
for k in range(i+1):
V[k][j] -= g * d[k]
else:
g = np.dot(V.T[i+1][0:i+1], V.T[j][0:i+1])
V.T[j][:i+1] -= g * d[:i+1]
if not num_opt:
for k in range(i+1):
V[k][i+1] = 0.0
else:
V.T[i+1][:i+1] = 0.0
if not num_opt:
for j in range(n):
d[j] = V[n-1][j]
V[n-1][j] = 0.0
else:
d[:n] = V[n-1][:n]
V[n-1][:n] = 0.0
V[n-1][n-1] = 1.0
e[0] = 0.0
# Symmetric tridiagonal QL algorithm, taken from JAMA package.
# private void tql2 (int n, double d[], double e[], double V[][]) {
# needs roughly 3N^3 operations
def tql2 (n, d, e, V):
# This is derived from the Algol procedures tql2, by
# Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
# Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
# Fortran subroutine in EISPACK.
num_opt = False # using vectors from numpy makes it faster
if not num_opt:
for i in range(1,n): # (int i = 1; i < n; i++):
e[i-1] = e[i]
else:
e[0:n-1] = e[1:n]
e[n-1] = 0.0
f = 0.0
tst1 = 0.0
eps = 2.0**-52.0
for l in range(n): # (int l = 0; l < n; l++) {
# Find small subdiagonal element
tst1 = max(tst1, abs(d[l]) + abs(e[l]))
m = l
while m < n:
if abs(e[m]) <= eps*tst1:
break
m += 1
# If m == l, d[l] is an eigenvalue,
# otherwise, iterate.
if m > l:
iiter = 0
while 1: # do {
iiter += 1 # (Could check iteration count here.)
# Compute implicit shift
g = d[l]
p = (d[l+1] - g) / (2.0 * e[l])
r = (p**2 + 1)**0.5 # hypot(p,1.0)
if p < 0:
r = -r
d[l] = e[l] / (p + r)
d[l+1] = e[l] * (p + r)
dl1 = d[l+1]
h = g - d[l]
if not num_opt:
for i in range(l+2, n):
d[i] -= h
else:
d[l+2:n] -= h
f = f + h
# Implicit QL transformation.
p = d[m]
c = 1.0
c2 = c
c3 = c
el1 = e[l+1]
s = 0.0
s2 = 0.0
# hh = V.T[0].copy() # only with num_opt
for i in range(m-1, l-1, -1): # (int i = m-1; i >= l; i--) {
c3 = c2
c2 = c
s2 = s
g = c * e[i]
h = c * p
r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i])
e[i+1] = s * r
s = e[i] / r
c = p / r
p = c * d[i] - s * g
d[i+1] = h + s * (c * g + s * d[i])
# Accumulate transformation.
if not num_opt: # overall factor 3 in 30-D
for k in range(n): # (int k = 0; k < n; k++) {
h = V[k][i+1]
V[k][i+1] = s * V[k][i] + c * h
V[k][i] = c * V[k][i] - s * h
else: # about 20% faster in 10-D
hh = V.T[i+1].copy()
# hh[:] = V.T[i+1][:]
V.T[i+1] = s * V.T[i] + c * hh
V.T[i] = c * V.T[i] - s * hh
# V.T[i] *= c
# V.T[i] -= s * hh
p = -s * s2 * c3 * el1 * e[l] / dl1
e[l] = s * p
d[l] = c * p
# Check for convergence.
if abs(e[l]) <= eps*tst1:
break
# } while (Math.abs(e[l]) > eps*tst1);
d[l] = d[l] + f
e[l] = 0.0
# Sort eigenvalues and corresponding vectors.
if 11 < 3:
for i in range(n-1): # (int i = 0; i < n-1; i++) {
k = i
p = d[i]
for j in range(i+1, n): # (int j = i+1; j < n; j++) {
if d[j] < p: # NH find smallest k>i
k = j
p = d[j]
if k != i:
d[k] = d[i] # swap k and i
d[i] = p
for j in range(n): # (int j = 0; j < n; j++) {
p = V[j][i]
V[j][i] = V[j][k]
V[j][k] = p
# tql2
N = len(C[0])
if 11 < 3:
V = np.array([x[:] for x in C]) # copy each "row"
N = V[0].size
d = np.zeros(N)
e = np.zeros(N)
else:
V = [[x[i] for i in xrange(N)] for x in C] # copy each "row"
d = N * [0.]
e = N * [0.]
tred2(N, V, d, e)
tql2(N, d, e, V)
return (array(d), array(V))
Mh = Misc.MathHelperFunctions
def pprint(to_be_printed):
    """Nicely formatted print of *to_be_printed*; returns ``None``.

    NOTE(review): this function shadows the stdlib ``pprint`` module name
    at module level; the local import alias ``pp`` avoids the collision.
    Falls back to the plain ``print`` builtin if the module is unavailable.
    """
    try:
        import pprint as pp
        pp.pprint(to_be_printed)
    except ImportError:
        print('could not use pprint module, will apply regular print')
        print(to_be_printed)
class Rotation(object):
    """Rotation class that implements an orthogonal linear transformation,
    one for each dimension.  Used to implement non-separable test functions.

    The rotation matrix for each dimension is created lazily on first use
    (from a Gram-Schmidt-orthonormalized Gaussian random matrix) and cached
    in ``self.dicMatrices``, keyed by ``str(N)``.

    Example:
        >>> import cma, numpy as np
        >>> R = cma.Rotation()
        >>> R2 = cma.Rotation() # another rotation
        >>> x = np.array((1,2,3))
        >>> print(R(R(x), inverse=1))
        [ 1.  2.  3.]

    """
    dicMatrices = {}  # class-level cache, kept for backward compatibility

    def __init__(self):
        # per-instance cache: otherwise instances would share bases,
        # which is probably not what we want
        self.dicMatrices = {}

    def __call__(self, x, inverse=False):
        """Rotate array `x` with the fixed rotation matrix for its dimension
        (``self.dicMatrices[str(len(x))]``); ``inverse=True`` applies the
        inverse (transpose) rotation.
        """
        N = x.shape[0]  # can be an array or matrix; TODO: accept also a list of arrays?
        key = str(N)
        if key not in self.dicMatrices:  # dict.has_key was removed in Python 3
            # create a new orthonormal N-basis once and for all
            B = np.random.randn(N, N)
            for i in range(N):
                for j in range(0, i):
                    B[i] -= np.dot(B[i], B[j]) * B[j]
                B[i] /= sum(B[i]**2)**0.5
            self.dicMatrices[key] = B
        if inverse:
            return np.dot(self.dicMatrices[key].T, x)
        return np.dot(self.dicMatrices[key], x)
# Use rotate(x) to rotate x
rotate = Rotation()  # module-level default rotation instance used by the test functions
#____________________________________________________________
#____________________________________________________________
#
class FitnessFunctions(object):
    """ versatile container for test objective functions """

    def __init__(self):
        self.counter = 0  # number of calls or any other practical use

    def rot(self, x, fun, rot=1, args=()):
        """returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument"""
        if len(np.shape(np.array(x))) > 1:  # parallelized: list of candidates
            res = []
            for x in x:
                res.append(self.rot(x, fun, rot, args))
            return res
        if rot:
            return fun(rotate(x, *args))
        else:
            return fun(x)

    def somenan(self, x, fun, p=0.1):
        """returns sometimes np.nan, otherwise fun(x)"""
        # np.nan: the np.NaN alias was removed in NumPy 2.0
        if np.random.rand(1) < p:
            return np.nan
        else:
            return fun(x)

    def rand(self, x):
        """Random test objective function"""
        return np.random.random(1)[0]

    def linear(self, x):
        """negative of the first coordinate"""
        return -x[0]

    def lineard(self, x):
        """linear function with domain constraints (nan outside)"""
        if any(np.array(x) < 0):
            return np.nan
        if sum([(10 + i) * x[i] for i in range(len(x))]) > 50e3:
            return np.nan
        return -sum(x)

    def sphere(self, x):
        """Sphere (squared norm) test objective function"""
        return sum((x + 0)**2)

    def spherewithoneconstraint(self, x):
        return sum((x + 0)**2) if x[0] > 1 else np.nan

    def elliwithoneconstraint(self, x, idx=[-1]):
        return self.ellirot(x) if all(np.array(x)[idx] > 1) else np.nan

    def spherewithnconstraints(self, x):
        return sum((x + 0)**2) if all(np.array(x) > 1) else np.nan

    def noisysphere(self, x, noise=5.0):
        """sphere with multiplicative Gaussian noise"""
        return sum((x + 0)**2) * (1 + noise * np.random.randn() / len(x))

    def spherew(self, x):
        """Sphere (squared norm) with sum x_i = 1 test objective function"""
        return -0.01 * x[0] + abs(x[0])**-2 * sum(x[1:]**2)

    def partsphere(self, x):
        """Sphere (squared norm) on a random subset of 8 (wrapped) coordinates"""
        self.counter += 1
        dim = len(x)
        x = np.array([x[i % dim] for i in range(2 * dim)])
        N = 8
        f = sum(x[np.random.randint(dim, size=N)]**2)
        return f

    def sectorsphere(self, x):
        """asymmetric Sphere (squared norm) test objective function"""
        return sum(x**2) + (1e6 - 1) * sum(x[x < 0]**2)

    def cornersphere(self, x):
        """Sphere (squared norm) test objective function constraint to the corner"""
        nconstr = len(x) - 0
        if any(x[:nconstr] < 1):
            return np.nan
        return sum(x**2) - nconstr

    def cornerelli(self, x):
        """ellipsoid constrained to the corner x >= 1"""
        if any(x < 1):
            return np.nan
        return self.elli(x) - self.elli(np.ones(len(x)))

    def cornerellirot(self, x):
        """rotated ellipsoid constrained to the corner x >= 1"""
        if any(x < 1):
            return np.nan
        return self.ellirot(x)

    def normalSkew(self, f):
        N = np.random.randn(1)[0]**2
        if N < 1:
            N = f * N  # diminish blow up lower part
        return N

    def noiseC(self, x, func=sphere, fac=10, expon=0.8):
        """additive Cauchy-like noise on *func*"""
        f = func(self, x)
        N = np.random.randn(1)[0] / np.random.randn(1)[0]
        return max(1e-19, f + (float(fac) / len(x)) * f**expon * N)

    def noise(self, x, func=sphere, fac=10, expon=1):
        """multiplicative heavy-tailed noise on *func*"""
        f = func(self, x)
        R = np.log10(f) + expon * abs(10 - np.log10(f)) * np.random.rand(1)[0]
        return f + 10**R

    def cigar(self, x, rot=0, cond=1e6):
        """Cigar test objective function"""
        if rot:
            x = rotate(x)
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [x[0]**2 + cond * sum(x[1:]**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar

    def tablet(self, x, rot=0):
        """Tablet test objective function"""
        if rot:
            x = rotate(x)
        x = [x] if np.isscalar(x[0]) else x
        f = [1e6 * x[0]**2 + sum(x[1:]**2) for x in x]
        return f if len(f) > 1 else f[0]

    def cigtab(self, y):
        """Cigtab test objective function"""
        X = [y] if np.isscalar(y[0]) else y
        f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X]
        return f if len(f) > 1 else f[0]

    def twoaxes(self, y):
        """two-axes (half ill-conditioned) test objective function"""
        X = [y] if np.isscalar(y[0]) else y
        N2 = len(X[0]) // 2
        f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X]
        return f if len(f) > 1 else f[0]

    def ellirot(self, x):
        return fcts.elli(np.array(x), 1)

    def hyperelli(self, x):
        N = len(x)
        return sum((np.arange(1, N + 1) * x)**2)

    def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
        """Ellipsoid test objective function"""
        if not np.isscalar(x[0]):  # parallel evaluation
            return [self.elli(xi, rot) for xi in x]  # could save 20% overall
        if rot:
            x = rotate(x)
        N = len(x)
        if actuator_noise:
            x = x + actuator_noise * np.random.randn(N)
        ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)
        alpha = 0.49 + 1. / N
        beta = 1
        felli = np.random.rand(1)[0]**beta * ftrue * \
            max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
        if both:
            return (felli, ftrue)
        else:
            return ftrue  # the possibly noisy value felli is discarded

    def elliconstraint(self, x, cfac=1e8, tough=True, cond=1e6):
        """ellipsoid test objective function with "constraints" """
        N = len(x)
        f = sum(cond**(np.arange(N)[-1::-1] / (N - 1)) * x**2)
        cvals = (x[0] + 1,
                 x[0] + 1 + 100 * x[1],
                 x[0] + 1 - 100 * x[1])
        if tough:
            f += cfac * sum(max(0, c) for c in cvals)
        else:
            f += cfac * sum(max(0, c + 1e-3)**2 for c in cvals)
        return f

    def rosen(self, x):
        """Rosenbrock test objective function"""
        x = [x] if np.isscalar(x[0]) else x
        f = [sum(100. * (x[:-1]**2 - x[1:])**2 + (1. - x[:-1])**2) for x in x]
        return f if len(f) > 1 else f[0]

    def diffpow(self, x, rot=0):
        """Diffpow test objective function"""
        N = len(x)
        if rot:
            x = rotate(x)
        return sum(np.abs(x)**(2. + 4. * np.arange(N) / (N - 1.)))**0.5

    def ridge(self, x, expo=2):
        x = [x] if np.isscalar(x[0]) else x
        f = [x[0] + 100 * np.sum(x[1:]**2)**(expo / 2.) for x in x]
        return f if len(f) > 1 else f[0]

    def ridgecircle(self, x, expo=0.5):
        """happy cat by HG Beyer"""
        a = len(x)
        s = sum(x**2)
        return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a

    def flat(self, x):
        """constant (flat) objective function"""
        # dead code after the return in the original (random variants) removed
        return 1

    def branin(self, x):
        """Branin test function, domain [0,15]**2"""
        y = x[1]
        x = x[0] + 5
        return (y - 5.1 * x**2 / 4 / np.pi**2 + 5 * x / np.pi - 6)**2 \
            + 10 * (1 - 1 / 8 / np.pi) * np.cos(x) + 10 - 0.397887357729738160000

    def goldsteinprice(self, x):
        x1 = x[0]
        x2 = x[1]
        return (1 + (x1 + x2 + 1)**2 * (19 - 14 * x1 + 3 * x1**2 - 14 * x2 + 6 * x1 * x2 + 3 * x2**2)) * (
            30 + (2 * x1 - 3 * x2)**2 * (18 - 32 * x1 + 12 * x1**2 + 48 * x2 - 36 * x1 * x2 + 27 * x2**2)) - 3

    def griewank(self, x):
        # was in [-600 600]
        x = (600. / 5) * x
        return 1 - np.prod(np.cos(x / np.sqrt(1. + np.arange(len(x))))) + sum(x**2) / 4e3

    def rastrigin(self, x):
        """Rastrigin test objective function"""
        if not np.isscalar(x[0]):
            N = len(x[0])
            return [10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x]
        N = len(x)
        return 10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x))

    def schwefelelli(self, x):
        s = 0
        f = 0
        for i in range(len(x)):
            s += x[i]
            f += s**2
        return f

    def schwefelmult(self, x, pen_fac=1e4):
        """multimodal Schwefel function with domain -500..500"""
        y = [x] if np.isscalar(x[0]) else x
        N = len(y[0])
        f = np.array([418.9829 * N - 1.27275661e-5 * N - sum(x * np.sin(np.abs(x)**0.5))
                      + pen_fac * sum((abs(x) > 500) * (abs(x) - 500)**2) for x in y])
        return f if len(f) > 1 else f[0]

    def optprob(self, x):
        n = np.arange(len(x)) + 1
        f = n * x * (1 - x)**(n - 1)
        return sum(1 - f)

    def lincon(self, x, theta=0.01):
        """ridge like linear function with one linear constraint"""
        if x[0] < 0:
            return np.nan
        return theta * x[1] + x[0]
fcts = FitnessFunctions()  # module-level singleton of benchmark objective functions
Fcts = fcts # for cross compatibility, as if the functions were static members of class Fcts
#____________________________________________
#____________________________________________________________
def _test(module=None):  # None is fine when called from inside the module
    """Run the doctests of *module* (default: the calling module) and
    print the summary result."""
    import doctest
    print(doctest.testmod(module))  # this is pretty cool!
def process_test(stream=None):
"""Post-process a doctest report read from *stream* (a filename, or
stdin/argv via ``fileinput``), reprinting the sections for failed
examples and collecting expected/got text into accumulators.

NOTE(review): order-dependent state machine; state meanings appear to
be 1='Expected:' lines, 2='Got:' lines, -1=failed example,
-2=exception raised — left byte-identical.
"""
import fileinput
s1 = ""
s2 = ""
s3 = ""
state = 0
for line in fileinput.input(stream): # takes argv as file or stdin
if 1 < 3:
s3 += line
if state < -1 and line.startswith('***'):
print(s3)
if line.startswith('***'):
s3 = ""
if state == -1: # found a failed example line
s1 += '\n\n*** Failed Example:' + line
s2 += '\n\n\n' # line
# state = 0 # wait for 'Expected:' line
if line.startswith('Expected:'):
state = 1
continue
elif line.startswith('Got:'):
state = 2
continue
elif line.startswith('***'): # marks end of failed example
state = 0
elif line.startswith('Failed example:'):
state = -1
elif line.startswith('Exception raised'):
state = -2
# in effect more else:
if state == 1:
s1 += line + ''
if state == 2:
s2 += line + ''
#____________________________________________________________
#____________________________________________________________
#
def main(argv=None):
    """to install and/or test from the command line use::

        python cma.py [options | func dim sig0 [optkey optval][optkey optval]...]

    --test (or -t) to run the doctest, ``--test -v`` to get (much) verbosity
    and ``--test -q`` to run it quietly with output only in case of errors.

    install to install cma.py (uses setup from distutils.core).

    --fcts and --doc for more infos or start ipython --pylab.

    Examples
    --------
    First, testing with the local python distribution::

        python cma.py --test --quiet

    If succeeded install (uses setup from distutils.core)::

        python cma.py install

    A single run on the ellipsoid function::

        python cma.py elli 10 1
    """
    if argv is None:
        argv = sys.argv  # should have better been sys.argv[1:]

    # handle input arguments, getopt might be helpful ;-)
    if len(argv) >= 1:  # function and help
        if len(argv) == 1 or argv[1].startswith('-h') or argv[1].startswith('--help'):
            print(main.__doc__)
            fun = None
        elif argv[1].startswith('-t') or argv[1].startswith('--test'):
            import doctest
            if len(argv) > 2 and (argv[2].startswith('--qu') or argv[2].startswith('-q')):
                print('doctest for cma.py: launching (it might be necessary to close a few pop up windows to finish)')
                fn = '__cma_doctest__.txt'
                stdout = sys.stdout
                try:
                    # with-block closes the report file (original leaked the handle)
                    with open(fn, 'w') as f:
                        sys.stdout = f
                        doctest.testmod(report=True)  # this is quite cool!
                finally:
                    sys.stdout = stdout  # always restore stdout
                process_test(fn)
                print('doctest for cma.py: finished (no other output should be seen after launching)')
            else:
                print('doctest for cma.py: due to different platforms and python versions')
                print('and in some cases due to a missing unique random seed')
                print('many examples will "fail". This is OK, if they give a similar')
                print('to the expected result and if no exception occurs. ')
                doctest.testmod(report=True)  # this is quite cool!
            return
        elif argv[1] == '--doc':
            print(__doc__)
            print(CMAEvolutionStrategy.__doc__)
            print(fmin.__doc__)
            fun = None
        elif argv[1] == '--fcts':
            print('List of valid function names:')
            print([d for d in dir(fcts) if not d.startswith('_')])
            fun = None
        elif argv[1] in ('install', '--install'):
            from distutils.core import setup
            setup(name="cma",
                  version=__version__,
                  author="Nikolaus Hansen",
                  py_modules=["cma"],
                  )
            fun = None
        elif len(argv) > 3:
            # NOTE(review): eval of CLI input — acceptable for a dev tool,
            # unsafe with untrusted input
            fun = eval('fcts.' + argv[1])
        else:
            print('try -h option')
            fun = None

    if fun is not None:
        if len(argv) > 2:  # dimension
            x0 = np.ones(eval(argv[2]))
        if len(argv) > 3:  # sigma
            sig0 = eval(argv[3])
        opts = {}
        for i in range(5, len(argv), 2):
            opts[argv[i - 1]] = eval(argv[i])

    # run fmin
    if fun is not None:
        tic = time.time()
        fmin(fun, x0, sig0, **opts)  # ftarget=1e-9, tolfacupx=1e9, verb_log=10
        # BUG FIX: original passed the value as a second argument to print(),
        # producing a tuple-style output instead of formatting it
        print('elapsed time [s]: %.2f' % round(time.time() - tic, 2))
    elif not len(argv):
        fmin(fcts.elli, np.ones(6) * 0.1, 0.1, ftarget=1e-9)
#____________________________________________________________
#____________________________________________________________
#
# mainly for testing purpose
# executed when called from an OS shell
# entry point: run the command-line interface only when executed as a script
if __name__ == "__main__":
# for i in range(1000): # how to find the memory leak
# main(["cma.py", "rastrigin", "10", "5", "popsize", "200", "maxfevals", "24999", "verb_log", "0"])
main()
| mit |
robin-lai/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note that the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
             C=1.0, multi_class='ovr', fit_intercept=True,
             intercept_scaling=1, class_weight=None, verbose=0,
             random_state=None, max_iter=1000):
    # Store hyper-parameters verbatim; per scikit-learn convention all
    # validation happens in `fit`, so `set_params`/`get_params` work.
    self.dual = dual
    self.tol = tol
    self.C = C
    self.multi_class = multi_class
    self.fit_intercept = fit_intercept
    self.intercept_scaling = intercept_scaling
    self.class_weight = class_weight
    self.verbose = verbose
    self.random_state = random_state
    self.max_iter = max_iter
    self.penalty = penalty
    self.loss = loss
    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        loss_l = self.loss.lower()

        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            # Map the deprecated aliases onto the canonical loss names and
            # warn.  Note this deliberately rewrites the public ``loss``
            # attribute so the rest of fit sees only canonical names.
            old_loss = self.loss
            self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        if self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        # liblinear requires C-contiguous float64 data (CSR when sparse).
        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        self.classes_ = np.unique(y)

        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            self.class_weight, self.penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, self.multi_class,
            self.loss)

        if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
            # crammer_singer produces one coefficient row per class even for
            # binary problems; collapse to the usual single (w, b) row.
            self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
            if self.fit_intercept:
                intercept = self.intercept_[1] - self.intercept_[0]
                self.intercept_ = np.array([intercept])

        return self
class LinearSVR(LinearModel, RegressorMixin):
    """Linear Support Vector Regression.

    Similar to SVR with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.

    This class supports both dense and sparse input.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term. The penalty is a squared
        l2 penalty. The bigger this parameter, the less regularization is used.

    loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
        (default='epsilon_insensitive')
        Specifies the loss function. The epsilon-insensitive loss
        ('epsilon_insensitive', standard SVR) is the L1 loss, while the
        squared epsilon-insensitive loss ('squared_epsilon_insensitive')
        is the L2 loss.

    epsilon : float, optional (default=0.1)
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int seed, RandomState instance, or None (default=None)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    max_iter : int, (default=1000)
        The maximum number of iterations to be run.

    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2
            else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    See also
    --------
    LinearSVC
        Implementation of Support Vector Machine classifier using the
        same library as this class (liblinear).

    SVR
        Implementation of Support Vector Machine regression using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.

    sklearn.linear_model.SGDRegressor
        SGDRegressor can optimize the same cost function as LinearSVR
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """

    def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
                 loss='epsilon_insensitive', fit_intercept=True,
                 intercept_scaling=1., dual=True, verbose=0,
                 random_state=None, max_iter=1000):
        # Store-only constructor (scikit-learn convention); all validation
        # is deferred to ``fit``.
        self.tol = tol
        self.C = C
        self.epsilon = epsilon
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.dual = dual
        self.loss = loss

    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        loss_l = self.loss.lower()

        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            # Rewrite deprecated aliases to the canonical loss names and warn.
            old_loss = self.loss
            self.loss = {'l1': 'epsilon_insensitive',
                         'l2': 'squared_epsilon_insensitive'
                         }.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        if self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        # liblinear requires C-contiguous float64 data (CSR when sparse).
        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        penalty = 'l2'  # SVR only accepts l2 penalty
        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            None, penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, loss=self.loss,
            epsilon=self.epsilon)
        # Regression has a single output: flatten the (1, n_features) row.
        self.coef_ = self.coef_.ravel()

        return self
class SVC(BaseSVC):
    """C-Support Vector Classification.

    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples which makes it hard
    to scale to dataset with more than a couple of 10000 samples.

    The multiclass support is handled according to a one-vs-one scheme.

    For details on the precise mathematical formulation of the provided
    kernel functions and how `gamma`, `coef0` and `degree` affect each
    other, see the corresponding section in the narrative documentation:
    :ref:`svm_kernels`.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to pre-compute the kernel matrix from data matrices; that matrix
        should be an array of shape ``(n_samples, n_samples)``.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in the
        SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import SVC
    >>> clf = SVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
        decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
        max_iter=-1, probability=False, random_state=None, shrinking=True,
        tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVR
        Support Vector Machine for Regression implemented using libsvm.

    LinearSVC
        Scalable Linear Support Vector Machine for classification
        implemented using liblinear. Check the See also section of
        LinearSVC for more comparison element.
    """

    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1, decision_function_shape=None,
                 random_state=None):
        # ``nu=0.`` is a placeholder: the c_svc formulation is parameterised
        # by C, not nu; the argument only satisfies the shared base signature.
        super(SVC, self).__init__(
            impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class NuSVC(BaseSVC):
    """Nu-Support Vector Classification.

    Similar to SVC but uses a parameter to control the number of support
    vectors.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    nu : float, optional (default=0.5)
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1].

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The 'auto' mode uses the values of y to
        automatically adjust weights inversely proportional to
        class frequencies.

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in
        the SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import NuSVC
    >>> clf = NuSVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVC(cache_size=200, class_weight=None, coef0=0.0,
          decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
          max_iter=-1, nu=0.5, probability=False, random_state=None,
          shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVC
        Support Vector Machine for classification using libsvm.

    LinearSVC
        Scalable linear Support Vector Machine for classification using
        liblinear.
    """

    def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None, verbose=False,
                 max_iter=-1, decision_function_shape=None, random_state=None):
        # ``C=0.`` is a placeholder: the nu_svc formulation is parameterised
        # by nu, not C; the argument only satisfies the shared base signature.
        super(NuSVC, self).__init__(
            impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
    """Epsilon-Support Vector Regression.

    The free parameters in the model are C and epsilon.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    epsilon : float, optional (default=0.1)
        Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
        within which no penalty is associated in the training loss function
        with points predicted within a distance epsilon from the actual
        value.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = SVR(C=1.0, epsilon=0.2)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
        kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)

    See also
    --------
    NuSVR
        Support Vector Machine for regression implemented using libsvm
        using a parameter to control the number of support vectors.

    LinearSVR
        Scalable Linear Support Vector Machine for regression
        implemented using liblinear.
    """

    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
                 cache_size=200, verbose=False, max_iter=-1):
        # ``nu=0.`` and ``probability=False`` are fixed placeholders:
        # epsilon-SVR is driven by C/epsilon and does not expose them.
        super(SVR, self).__init__(
            'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
            shrinking=shrinking, probability=False, cache_size=cache_size,
            class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
    """Nu Support Vector Regression.

    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of epsilon-SVR.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import NuSVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = NuSVR(C=1.0, nu=0.1)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
          kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
          verbose=False)

    See also
    --------
    NuSVC
        Support Vector Machine for classification implemented with libsvm
        with a parameter to control the number of support vectors.

    SVR
        epsilon Support Vector Machine for regression implemented with libsvm.
    """

    def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
                 gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
                 cache_size=200, verbose=False, max_iter=-1):
        # ``epsilon=0.`` is a fixed placeholder: in nu-SVR the nu parameter
        # takes over the role of epsilon.
        super(NuSVR, self).__init__(
            'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
            tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
            probability=False, cache_size=cache_size, class_weight=None,
            verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
    """Unsupervised Outlier Detection.

    Estimate the support of a high-dimensional distribution.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_outlier_detection>`.

    Parameters
    ----------
    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    nu : float, optional
        An upper bound on the fraction of training
        errors and a lower bound of the fraction of support
        vectors. Should be in the interval (0, 1]. By default 0.5
        will be taken.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    tol : float, optional
        Tolerance for stopping criterion.

    shrinking : boolean, optional
        Whether to use the shrinking heuristic.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [n_classes-1, n_SV]
        Coefficients of the support vectors in the decision function.

    coef_ : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`

    intercept_ : array, shape = [n_classes-1]
        Constants in decision function.
    """

    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
                 verbose=False, max_iter=-1, random_state=None):
        # Positional pass-through to BaseLibSVM; the literal 0. values fill
        # the C and epsilon slots, which one-class SVM does not expose.
        super(OneClassSVM, self).__init__(
            'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
            shrinking, False, cache_size, None, verbose, max_iter,
            random_state)

    def fit(self, X, y=None, sample_weight=None, **params):
        """
        Detects the soft boundary of the set of samples X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Set of samples, where n_samples is the number of samples and
            n_features is the number of features.

        y : ignored
            Present only for API consistency with supervised estimators;
            a constant vector of ones is used internally instead.

        sample_weight : array-like, shape (n_samples,)
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Returns self.

        Notes
        -----
        If X is not a C-ordered contiguous array it is copied.
        """
        # One-class fitting is unsupervised: feed a dummy target of ones so
        # the shared BaseLibSVM.fit machinery can be reused unchanged.
        super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
                                     **params)
        return self

    def decision_function(self, X):
        """Distance of the samples X to the separating hyperplane.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X : array-like, shape (n_samples,)
            Returns the decision function of the samples.
        """
        dec = self._decision_function(X)
        return dec
| bsd-3-clause |
amolkahat/pandas | pandas/tests/api/test_api.py | 2 | 7464 | # -*- coding: utf-8 -*-
import sys
import pytest
import pandas as pd
from pandas import api
from pandas.util import testing as tm
class Base(object):
    """Shared helper for the API-surface tests in this module."""

    def check(self, namespace, expected, ignored=None):
        """Assert that the public names of *namespace* match *expected*.

        Underscore-prefixed names are never considered; names listed in
        *ignored* (if any) are dropped before the comparison.
        """
        public = [name for name in dir(namespace)
                  if not name.startswith('_')]
        if ignored is not None:
            public = set(public) - set(ignored)
        tm.assert_almost_equal(sorted(public), sorted(expected))
class TestPDApi(Base):
    """Pin down the exact set of names exported at the top ``pandas`` level.

    Any addition to or removal from the public namespace must be reflected
    in one of the lists below, which makes accidental API changes visible.
    """

    # these are optionally imported based on testing
    # & need to be ignored
    ignored = ['tests', 'locale', 'conftest']

    # top-level sub-packages
    lib = ['api', 'compat', 'core', 'errors', 'pandas',
           'plotting', 'test', 'testing', 'tools', 'tseries',
           'util', 'options', 'io']

    # these are already deprecated; awaiting removal
    deprecated_modules = ['parser', 'lib', 'tslib']

    # misc
    misc = ['IndexSlice', 'NaT']

    # top-level classes
    classes = ['Categorical', 'CategoricalIndex', 'DataFrame', 'DateOffset',
               'DatetimeIndex', 'ExcelFile', 'ExcelWriter', 'Float64Index',
               'Grouper', 'HDFStore', 'Index', 'Int64Index', 'MultiIndex',
               'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index',
               'Series', 'SparseArray', 'SparseDataFrame', 'SparseDtype',
               'SparseSeries', 'Timedelta',
               'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex']

    # these are already deprecated; awaiting removal
    deprecated_classes = ['TimeGrouper', 'Expr', 'Term']

    # these should be deprecated in the future
    deprecated_classes_in_future = ['Panel']

    # external modules exposed in pandas namespace
    modules = ['np', 'datetime']

    # top-level functions
    funcs = ['bdate_range', 'concat', 'crosstab', 'cut',
             'date_range', 'interval_range', 'eval',
             'factorize', 'get_dummies',
             'infer_freq', 'isna', 'isnull', 'lreshape',
             'melt', 'notna', 'notnull', 'offsets',
             'merge', 'merge_ordered', 'merge_asof',
             'period_range',
             'pivot', 'pivot_table', 'qcut',
             'show_versions', 'timedelta_range', 'unique',
             'value_counts', 'wide_to_long']

    # top-level option funcs
    funcs_option = ['reset_option', 'describe_option', 'get_option',
                    'option_context', 'set_option',
                    'set_eng_float_format']

    # top-level read_* funcs
    funcs_read = ['read_clipboard', 'read_csv', 'read_excel', 'read_fwf',
                  'read_gbq', 'read_hdf', 'read_html', 'read_json',
                  'read_msgpack', 'read_pickle', 'read_sas', 'read_sql',
                  'read_sql_query', 'read_sql_table', 'read_stata',
                  'read_table', 'read_feather', 'read_parquet']

    # top-level to_* funcs
    funcs_to = ['to_datetime', 'to_msgpack',
                'to_numeric', 'to_pickle', 'to_timedelta']

    # top-level to deprecate in the future
    deprecated_funcs_in_future = []

    # these are already deprecated; awaiting removal
    deprecated_funcs = ['pnow', 'match', 'groupby', 'get_store',
                        'plot_params', 'scatter_matrix']

    def test_api(self):
        # The union of all the lists above must equal dir(pd) minus the
        # optional/ignored names.
        self.check(pd,
                   self.lib + self.misc +
                   self.modules + self.deprecated_modules +
                   self.classes + self.deprecated_classes +
                   self.deprecated_classes_in_future +
                   self.funcs + self.funcs_option +
                   self.funcs_read + self.funcs_to +
                   self.deprecated_funcs_in_future +
                   self.deprecated_funcs,
                   self.ignored)
class TestApi(Base):
    """``pandas.api`` should expose exactly these sub-namespaces."""

    allowed = ['types', 'extensions']

    def test_api(self):
        self.check(api, self.allowed)
class TestTesting(Base):
    """``pandas.testing`` should expose exactly these assert helpers."""

    funcs = ['assert_frame_equal', 'assert_series_equal',
             'assert_index_equal']

    def test_testing(self):
        from pandas import testing
        self.check(testing, self.funcs)
class TestTopLevelDeprecations(object):
    """Each deprecated top-level entry point must emit a FutureWarning."""

    # top-level API deprecations
    # GH 13790

    def test_pnow(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.pnow(freq='M')

    def test_term(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.Term('index>=date')

    def test_expr(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.Expr('2>1')

    def test_match(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.match([1, 2, 3], [1])

    def test_groupby(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1])

    def test_TimeGrouper(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.TimeGrouper(freq='D')

    # GH 15940

    def test_get_store(self):
        # Requires PyTables; skip (not fail) when it is unavailable.
        pytest.importorskip('tables')
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                s = pd.get_store(path)
                s.close()
# Accessing the deprecated pd.parser shim module should still work;
# the deprecation warning itself is silenced via the filterwarnings mark.
class TestParser(object):
@pytest.mark.filterwarnings("ignore")
def test_deprecation_access_func(self):
pd.parser.na_values
# Accessing the deprecated pd.lib shim module should still dispatch to
# the real implementation (warning silenced by the mark).
class TestLib(object):
@pytest.mark.filterwarnings("ignore")
def test_deprecation_access_func(self):
pd.lib.infer_dtype('foo')
# Accessing the deprecated pd.tslib shim module should still dispatch to
# the real implementation (warning silenced by the mark).
class TestTSLib(object):
@pytest.mark.filterwarnings("ignore")
def test_deprecation_access_func(self):
pd.tslib.Timestamp('20160101')
# Importing from the deprecated pandas.types location must emit a
# FutureWarning, and the imported function must remain usable.
class TestTypes(object):
def test_deprecation_access_func(self):
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
from pandas.types.concat import union_categoricals
c1 = pd.Categorical(list('aabc'))
c2 = pd.Categorical(list('abcd'))
union_categoricals(
[c1, c2],
sort_categories=True,
ignore_order=True)
# cdate_range is deprecated (GH17596): calling it must emit a
# FutureWarning.
class TestCDateRange(object):
def test_deprecation_cdaterange(self):
# GH17596
from pandas.core.indexes.datetimes import cdate_range
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
cdate_range('2017-01-01', '2017-12-31')
# Importing names from the moved pandas.core.categorical module must emit
# a FutureWarning.  The module is popped from sys.modules before each
# import because a cached module would short-circuit the warning.
class TestCategoricalMove(object):
def test_categorical_move(self):
# May have been cached by another import, e.g. pickle tests.
sys.modules.pop("pandas.core.categorical", None)
with tm.assert_produces_warning(FutureWarning):
from pandas.core.categorical import Categorical # noqa
sys.modules.pop("pandas.core.categorical", None)
with tm.assert_produces_warning(FutureWarning):
from pandas.core.categorical import CategoricalDtype # noqa
| bsd-3-clause |
Jinkeycode/DeeplearningAI_AndrewNg | Course1 Neural Networks and Deep Learning/Week3 Shallow Neural Networks/planar_utils.py | 3 | 2253 | import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
def plot_decision_boundary(model, X, y):
    """Draw the decision regions of *model* over the 2-D point cloud (X, y).

    model -- callable mapping an (m, 2) array of points to class predictions
    X     -- (2, m) array of input coordinates (row 0 = x1, row 1 = x2)
    y     -- labels used to colour the scattered training points
    """
    pad = 1
    step = 0.01
    # Axis-aligned bounding box of the data, padded on every side.
    x_lo, x_hi = X[0, :].min() - pad, X[0, :].max() + pad
    y_lo, y_hi = X[1, :].min() - pad, X[1, :].max() + pad
    # Dense grid covering the box, with spacing `step` between samples.
    grid_x, grid_y = np.meshgrid(np.arange(x_lo, x_hi, step),
                                 np.arange(y_lo, y_hi, step))
    # Evaluate the model on every grid point, then restore the grid shape.
    flat_points = np.c_[grid_x.ravel(), grid_y.ravel()]
    preds = model(flat_points).reshape(grid_x.shape)
    # Filled contour of the predictions plus the training examples on top.
    plt.contourf(grid_x, grid_y, preds, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
def sigmoid(x):
    """Element-wise logistic function.

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    The value 1 / (1 + exp(-x)), same shape as the input.
    """
    return 1.0 / (1.0 + np.exp(-x))
def load_planar_dataset():
    """Generate the deterministic two-class "flower" toy dataset.

    Returns:
    X -- (2, 400) float array of point coordinates
    Y -- (1, 400) uint8 label array (0 for red, 1 for blue)
    """
    np.random.seed(1)          # fixed seed -> same dataset on every call
    m = 400                    # total number of examples
    per_class = m // 2         # points per class
    dim = 2                    # dimensionality of the inputs
    petal = 4                  # maximum ray of the flower
    X = np.zeros((m, dim))
    Y = np.zeros((m, 1), dtype='uint8')
    for label in range(2):
        rows = range(per_class * label, per_class * (label + 1))
        # Angle and radius of each point, both jittered with gaussian noise.
        # (The RNG is consumed in exactly this order to stay reproducible.)
        theta = (np.linspace(label * 3.12, (label + 1) * 3.12, per_class)
                 + np.random.randn(per_class) * 0.2)
        radius = petal * np.sin(4 * theta) + np.random.randn(per_class) * 0.2
        X[rows] = np.c_[radius * np.sin(theta), radius * np.cos(theta)]
        Y[rows] = label
    # Transpose so examples are columns, matching the course convention.
    return X.T, Y.T
def load_extra_datasets():
    """Build a small zoo of alternative 2-D toy datasets.

    Returns a 5-tuple of (data, labels) pairs: noisy circles, noisy moons,
    blobs, gaussian quantiles, and an unstructured uniform-random pair.
    """
    n_samples = 200
    circles = sklearn.datasets.make_circles(
        n_samples=n_samples, factor=.5, noise=.3)
    moons = sklearn.datasets.make_moons(n_samples=n_samples, noise=.2)
    blob_data = sklearn.datasets.make_blobs(
        n_samples=n_samples, random_state=5, n_features=2, centers=6)
    quantiles = sklearn.datasets.make_gaussian_quantiles(
        mean=None, cov=0.5, n_samples=n_samples, n_features=2,
        n_classes=2, shuffle=True, random_state=None)
    # No generating structure at all: two independent uniform clouds.
    unstructured = np.random.rand(n_samples, 2), np.random.rand(n_samples, 2)
    return circles, moons, blob_data, quantiles, unstructured
adammenges/statsmodels | statsmodels/graphics/tukeyplot.py | 33 | 2473 | from statsmodels.compat.python import range
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.lines as lines
def tukeyplot(results, dim=None, yticklabels=None):
    """Plot Tukey-style simultaneous confidence intervals for pairwise
    comparisons of means.

    Parameters
    ----------
    results : array_like, shape (npairs, 2)
        Lower and upper confidence limits, one row per pairwise comparison.
    dim : sequence of 4 floats, optional
        Axis limits ``[xmin, xmax, ymin, ymax]``.  Defaults to
        ``[-50, 50, 0.5, npairs + 0.5]`` (the historical hard-coded range).
    yticklabels : sequence of str, optional
        One label per comparison, plotted top to bottom.  Defaults to the
        labels of the classic 10-comparison example this plot was written
        for.

    Notes
    -----
    ``dim`` and ``yticklabels`` were previously accepted but silently
    ignored; they are now honoured, with defaults that reproduce the old
    hard-coded behaviour.
    """
    npairs = len(results)

    fig = plt.figure()
    fsp = fig.add_subplot(111)
    if dim is None:
        dim = [-50, 50, 0.5, npairs + 0.5]
    fsp.axis(dim)
    fsp.set_title('95 % family-wise confidence level')
    fsp.title.set_y(1.025)
    fsp.set_yticks(np.arange(1, npairs + 1))
    if yticklabels is None:
        yticklabels = ['V-T', 'V-S', 'T-S', 'V-P', 'T-P', 'S-P', 'V-M',
                       'T-M', 'S-M', 'P-M']
    fsp.set_yticklabels(yticklabels)
    #fsp.yaxis.set_major_locator(mticker.MaxNLocator(npairs))
    fsp.yaxis.grid(True, linestyle='-', color='gray')
    fsp.set_xlabel('Differences in mean levels of Var', labelpad=8)
    fsp.xaxis.tick_bottom()
    fsp.yaxis.tick_left()

    # Restyle the tick marks so they point out of the frame, and nudge the
    # tick labels away from the axes.
    for xtickline in fsp.get_xticklines():
        xtickline.set_marker(lines.TICKDOWN)
        xtickline.set_markersize(10)
    for xlabel in fsp.get_xticklabels():
        xlabel.set_y(-.04)
    for ytickline in fsp.get_yticklines():
        ytickline.set_marker(lines.TICKLEFT)
        ytickline.set_markersize(10)
    for ylabel in fsp.get_yticklabels():
        ylabel.set_x(-.04)

    # axhline takes xmin/xmax as axes fractions, so map the confidence
    # limits from data coordinates into [0, 1] using the x-axis range
    # (with the default dim this reduces to the old ``.5 + v/100``).
    # Each interval is drawn as two half-segments so a "|" marker appears
    # at both endpoints and at the interval midpoint.
    span = float(dim[1] - dim[0])
    for pair in range(npairs):
        data = (results[pair] - dim[0]) / span
        fsp.axhline(y=npairs - pair, xmin=data.mean(), xmax=data[1],
                    linewidth=1.25, color='blue', marker="|", markevery=1)
        fsp.axhline(y=npairs - pair, xmin=data[0], xmax=data.mean(),
                    linewidth=1.25, color='blue', marker="|", markevery=1)

    # Reference line at zero difference.
    fsp.axvline(x=0, linestyle="--", color='black')
    fig.subplots_adjust(bottom=.125)
# Example data: lower/upper 95 % simultaneous confidence limits for the ten
# pairwise mean comparisons that the default tukeyplot() labels refer to.
results = np.array([[-10.04391794, 26.34391794],
[-21.45225794, 14.93557794],
[ 5.61441206, 42.00224794],
[-13.40225794, 22.98557794],
[-29.60225794, 6.78557794],
[ -2.53558794, 33.85224794],
[-21.55225794, 14.83557794],
[ 8.87275206, 45.26058794],
[-10.14391794, 26.24391794],
[-37.21058794, -0.82275206]])
# Rendering left to the caller; uncomment to display interactively.
#plt.show()
| bsd-3-clause |
dpshelio/sunpy | examples/map/plot_frameless_image.py | 1 | 1349 | """
===============================
Plotting a Map without any Axes
===============================
This examples shows you how to plot a Map without any annotations at all, i.e.
to save as an image.
"""
##############################################################################
# Start by importing the necessary modules.
import astropy.units as u
import matplotlib.pyplot as plt
import sunpy.map
from sunpy.data.sample import AIA_171_IMAGE
##############################################################################
# Create a `sunpy.map.GenericMap`.
smap = sunpy.map.Map(AIA_171_IMAGE)
##############################################################################
# Plot the Map without a frame.
# Setup a frameless figure and an axes which spans the whole canvas.
figure = plt.figure(frameon=False)
# [left, bottom, width, height] in figure fraction: the axes fill the canvas.
axes = plt.Axes(figure, [0., 0., 1., 1.])
# Disable the axis and add them to the figure.
axes.set_axis_off()
figure.add_axes(axes)
# Plot the map without any annotations
# This might raise a warning about the axes being wrong but we can ignore this
# as we are not plotting any axes.
# clip_interval presumably clips the displayed intensities to the 1-99.99
# percentile range for contrast -- confirm against sunpy's plot() docs.
im = smap.plot(axes=axes, annotate=False, clip_interval=(1, 99.99)*u.percent)
##############################################################################
# At this point you could save the figure with ``plt.savefig()`` or show it:
plt.show()
| bsd-2-clause |
jstraub/dpMM | python/evalWikiWordVectors.py | 1 | 8101 | # Copyright (c) 2015, Julian Straub <jstraub@csail.mit.edu>
# Licensed under the MIT license. See the license file LICENSE.
import numpy as np
from scipy.linalg import eig, logm
import subprocess as subp
import matplotlib.pyplot as plt
import mayavi.mlab as mlab
from matplotlib.patches import Ellipse
import ipdb, re, time
import os.path
from js.utils.plot.colors import colorScheme
from js.utils.config import Config2String
from vpCluster.manifold.karcherMean import karcherMeanSphere_propper
from vpCluster.manifold.sphere import Sphere
from pytagcloudLocal import create_tag_image, make_tags
import matplotlib as mpl
mpl.rc('font',size=20)
mpl.rc('lines',linewidth=3.)
x=np.loadtxt(dataPath,delimiter=' ')
N = x.shape[1]
D = x.shape[0]
norms = np.sqrt((x**2).sum(axis=0))
print norms
for d in range(D):
x[d,:] /= norms
dataPath = './vectors_wiki.20_prune100_onlyVects_colVects.csv'; #20D
cfg=dict()
cfg['base'] = 'DpNiwSphereFull';
cfg['K'] = 20;
cfg['T'] = 660;
cfg['alpha'] = 1.;
plotWordClouds = False
plot2D = False
outName,_ = os.path.splitext(dataPath)
outName += '_'+Config2String(cfg).toString()
print outName
#zAll = np.loadtxt(outName+'.lbl',dtype=int,delimiter=' ')
#logLike = np.loadtxt(outName+'.lbl_jointLikelihood.csv',delimiter=' ')
#T = z.shape[0]
#z = zAll[-1,:]
# Word list plus per-word maximum-likelihood cluster indices and
# log-likelihoods produced by a previous clustering run (absolute lab paths).
pathWords = '/data/vision/fisher/expres1/jstraub/results/wordVectorClustering/vectors_wiki.20_prune100_onlyWords.txt'
pathInds = '/data/vision/fisher/expres1/jstraub/results/wordVectorClustering/wikipediaWordVectors/vectors_wiki.20_prune100_onlyVects_colVects_alpha_1.0-K_20-base_DpNiwSphereFull-T_600.lblmlInds.csv'
pathLogLikes = '/data/vision/fisher/expres1/jstraub/results/wordVectorClustering/wikipediaWordVectors/vectors_wiki.20_prune100_onlyVects_colVects_alpha_1.0-K_20-base_DpNiwSphereFull-T_600.lblmlLogLikes.csv'
fid=open(pathWords)
words=fid.readlines()
# strip the trailing newline from every word
words = [word[:-1] for word in words]
inds = np.loadtxt(pathInds)
logLikes = np.loadtxt(pathLogLikes)
M = Sphere(D)
# Cluster ids to inspect; only the last assignment is effective (the
# earlier ones are leftovers from previous experiments).
Ks=[0,10,11,12,13,14,15,16,17,18,19,1,20]
Ks=[71]
Ks = np.arange(71,96)
Ks = np.arange(74,75)
Ks = np.arange(0,96)
# Main inspection loop: for every cluster id k, load that cluster's label
# vector, mean and covariance, project the member word vectors into the
# tangent space at the cluster mean, and visualise / print the words that
# are extreme along the principal eigen-directions.
for i,k in enumerate(Ks):
z = np.loadtxt(outName+'largeCovForTest{}_z.csv'.format(k),dtype=int)
zFull = z.copy();
# labels come in sub-cluster pairs; floor-divide by 2 to get the cluster id
z = np.floor(z/2)
cov = np.loadtxt(outName+'largeCovForTest{}_cov.csv'.format(k))
mu = np.loadtxt(outName+'largeCovForTest{}_mu.csv'.format(k))
inds = np.arange(N)[z==k]
print k, (z==k).sum()
xs_k = x[:,z==k]
# LogTo2D presumably maps the member vectors into the tangent plane at mu
# (log map) -- confirm against vpCluster.manifold.sphere.
x_k = M.LogTo2D(mu,xs_k)
# parity of the full label distinguishes the two sub-clusters
subClInd = zFull[z==k]%2
# if (z==k).sum() < 200 or (z==k).sum() > 300:
# continue
e,V = eig(cov)
std = np.sqrt(e.real)
iEigs = np.argsort(e);
# --- optional 3D word cloud of the top-3 eigen-directions (mayavi) ---
if plotWordClouds:
vTop = np.c_[V[:,iEigs[-1]], V[:,iEigs[-2]], V[:,iEigs[-3]]]
xProj = vTop.T.dot(x_k)*180.0/np.pi
mlab.figure(bgcolor=(1,1,1))
mlab.points3d(xProj[0,:],xProj[1,:],xProj[2,:],mode='point')
s = 1.
# NOTE(review): this inner loop shadows the outer loop variable k
for k,ind in enumerate(inds):
print words[ind]
mlab.text3d(xProj[0,k],xProj[1,k],xProj[2,k],
words[ind],color=(0,0,0),scale=s)
mlab.show(stop=True)
# --- optional 2D projection onto the top-2 eigen-directions ---
if plot2D:
iEigs = np.argsort(e);
vTop = np.c_[V[:,iEigs[-1]], V[:,iEigs[-2]]]
p = (vTop.T.dot(x_k)*180.0/np.pi).T
xSort = np.argsort(p[:,0])
ySort = np.argsort(p[:,1])
xWords = [words[ind] for ind in inds[xSort]]
yWords = [words[ind] for ind in inds[ySort]]
fig = plt.figure()
plt.subplot(2,1,1)
plt.plot(p[subClInd==0,0],p[subClInd==0,1],'xr')
plt.plot(p[subClInd==1,0],p[subClInd==1,1],'xb')
for l,word in enumerate(xWords[0:12]):
plt.text(p[xSort[l],0],p[xSort[l],1],word)
for l,word in enumerate(xWords[-13:-1]):
plt.text(p[xSort[-l-1],0],p[xSort[-l-1],1],word)
plt.subplot(2,1,2)
plt.plot(p[subClInd==0,0],p[subClInd==0,1],'xr')
plt.plot(p[subClInd==1,0],p[subClInd==1,1],'xb')
for l,word in enumerate(yWords[0:12]):
plt.text(p[ySort[l],0],p[ySort[l],1],word)
for l,word in enumerate(yWords[-13:-1]):
plt.text(p[ySort[-l-1],0],p[ySort[-l-1],1],word)
fig.show()
fig = plt.figure()
for l,word in enumerate(xWords[0:12]):
plt.text(-1,l-6,word)
plt.plot(-1,l-6,'x')
for l,word in enumerate(xWords[-13:-1]):
plt.text(1,l-6,word)
plt.plot(1,l-6,'x')
for l,word in enumerate(yWords[0:12]):
plt.text(0,-l-1,word)
plt.plot(0,-1-l,'x')
for l,word in enumerate(yWords[-13:-1]):
plt.text(0,13-l,word)
plt.plot(0,13-l,'x')
fig.show()
# --- tag-cloud images of the words at both extremes of each direction ---
# word weight decays with rank; the **5 exaggerates the ordering
wordcounts =[]
for l,w in enumerate(xWords[0:50]):
wordcounts.append((w,(len(xWords)-l)**5))#((x_k[0,l]**2).sum())*180.0/np.pi))
create_tag_image(make_tags(wordcounts, maxsize=60),
'cloud_large_xWordsBegin.png', size=(1000, 600), fontname='Cantarell')
wordcounts =[]
# NOTE(review): [-1:-51] without a negative step is an empty slice --
# probably [-1:-51:-1] was intended (same issue below for yWords)
for l,w in enumerate(xWords[-1:-51]):
wordcounts.append((w,(len(xWords)-l)**5))#((x_k[0,l]**2).sum())*180.0/np.pi))
create_tag_image(make_tags(wordcounts, maxsize=60),
'cloud_large_xWordsEnd.png', size=(1000, 600), fontname='Cantarell')
wordcounts =[]
for l,w in enumerate(yWords[0:50]):
wordcounts.append((w,(len(yWords)-l)**5)) #((x_k[1,l]**2).sum())*180.0/np.pi))
create_tag_image(make_tags(wordcounts, maxsize=60),
'cloud_large_yWordsBegin.png', size=(1000, 600), fontname='Cantarell')
wordcounts =[]
for l,w in enumerate(yWords[-1:-51]):
wordcounts.append((w,(len(yWords)-l)**5)) #((x_k[1,l]**2).sum())*180.0/np.pi))
create_tag_image(make_tags(wordcounts, maxsize=60),
'cloud_large_yWordsEnd.png', size=(1000, 600), fontname='Cantarell')
# wordcounts =[]
# for l,ind in enumerate(inds):
# if len(words[ind]) > 3 and not words[ind] == 'domingo':
# wordcounts.append((words[ind].encode('iso-8859-1'),
# ((x_k[:,l]**2).sum())*180.0/np.pi))
#
## print words[ind]
# tags = make_tags(wordcounts, maxsize=60)
## for font in ["Nobile", "Old Standard TT", "Cantarell", "Reenie Beanie", "Cuprum", "Molengo", "Neucha", "Yanone Kaffeesatz", "Cardo", "Neuton", "Inconsolata", "Crimson Text", "Josefin Sans", "Droid Sans", "Lobster", "IM Fell DW Pica", "Vollkorn", "Tangerine", "Coustard", "PT Sans Regular"]:
# font = 'Cuprum' #'Cantarell'
# try:
# create_tag_image(tags, outName+'cloud_large_'+font+'_{}.png'.format(k),
# size=(1200, 800)) #, fontname=font)
# except:
# pass
# --- textual report: extreme words along the two leading eigen-directions ---
# the successive reassignments keep only the last definition of iEigs
iEigs = np.argsort(e)[-3:-1]
iEigs = [np.argmax(e), np.argmin(e)]
iEigs = [np.argsort(e)[-1], np.argsort(e)[-2]]
print iEigs
print np.sort(e)
# angular distance of every member word from the cluster mean, in degrees
vals = np.sort( np.sqrt((x_k**2).sum(axis=0))/np.pi*180.0)
# print vals
iGlobal = np.argsort( vals )
wordsGlobal = []
for j,ind in enumerate(inds[iGlobal]):
wordsGlobal.append(words[ind])
# if j < 40:
# print words[ind]+":"+"{}".format(int(np.floor(vals[j])))
#(x_k**2).sum(axis=0)(x_k**2).sum(axis=0) print np.sort( (x_k**2).sum(axis=0) )
for l,iEig in enumerate(iEigs):
# iMax = np.argmax(e)
print '------------ eigVal {} at dim {} ---------- '.format(e[iEig],iEig)
v = V[:,iEig]
# project all members onto this eigen-direction and sort along it
xProj = v.dot(x_k)
iSorted = np.argsort(xProj)
# print inds
# print inds[iSorted]
# print xProj[iSorted]
wordsSorted = []
for j,ind in enumerate(inds[iSorted]):
wordsSorted.append(words[ind])
# print xProj[iSorted[j]],words[ind]
# print wordsSorted
mid = np.argmin(np.abs(xProj[iSorted]))
# print mid
nr = min(24,len(wordsSorted)/3)
print wordsSorted[0:nr]
# for i,word in enumerate(wordsSorted[0:nr]):
# print word+':{}'.format(int(nr-i)+10)
print wordsGlobal[0:nr]
for i,word in enumerate(wordsGlobal[0:nr]):
print word+':{}'.format(int(nr-i)+10)
print wordsSorted[-1:(-nr-1):-1]
# for i,word in enumerate(wordsSorted[-1:-1-nr:-1]):
# print word+':{}'.format(int(nr-i)+10)
# print xProj[iSorted[-nr:-1]]
# ipdb.set_trace()
| mit |
samuelleblanc/flight_planning_dist | flight_planning/write_utils.py | 2 | 20750 |
# coding: utf-8
# In[1]:
def __init__():
"""
Name:
write_utils
Purpose:
Module grouping helpers used to write out certain types of files
(primarily NASA ICARTT '.ict' archive files).
Contains the following functions:
- write_ict: write an ICARTT (.ict) archive file
- merge_dicts: shallow-merge multiple dicts
- ict_tester: smoke test that writes a dummy .ict file
- prep_data_for_ict: regularise a time series onto a fixed grid
- nearest_neighbor: distance-limited nearest-neighbour interpolation
- make_plots_ict: diagnostic plots of the variables to be archived
Dependencies:
- numpy
- datetime
Needed Files:
None
Modification History:
Written: Samuel LeBlanc, NASA Ames, Santa Cruz, CA, 2016-03-25
"""
pass
# In[366]:
def write_ict(header_dict,data_dict,filepath,data_id,loc_id,date,rev,order=[],default_format='.3f',file_comment=''):
"""
Purpose:
to write out a file in the ICARTT file format used by NASA archiving
Input:
filepath: full path of folder of the file to be saved
data_id: for the prefix of the file name the instrumnet identifier
loc_id: for the prefix of the file name, the location identifier
date: date of the data, used in file naming convention
rev: revision value of the file (RA,RB for infield, R0,R1,... for archiving)
data_dict: dictionary with each key representing a different variable to be saved
each key is a dictionary of its own with the following keys:
- data: time series numpy array of the data to be saved
- unit: string value of the unit, if 'None' or '' set to be 'unitless' by default
- long_description: the long description of the variable
- format: (optional) the format used for writing out the data, ex: 4.2f
header_dict: dictionary with a set of predefined keys for writing the header information, each is a string
- PI: name of PI
- Institution: name of the institution
- Instrument: full name of instrument/data source
- campaign: name of mission/campaign
- volume: (default 1) volume number
- file_num: (default 1) number of files
- time_interval: (default 1 second) the number of seconds between each archive
- indep_var_name: (defaults to Start_UTC) name of the data_dict variable which is the independant variable
- missing_val: (defaults to -9999) value that replaces the missing data
- special_comments: (optional) string of special comments to be written out.
Newline must be indicated by '\n'
- PI_contact: contact info for the PI
- platform: full platform name
- location: full name of location : refer to campaign location or plane location
- associated_data: (defaults to 'N/A') list files of associated data
- instrument_info: more detailed information on the instrument
- data_info: any specific info on the data, like time averging, ppm by volume
- uncertainty: specific notes about data uncertainty
- ULOD_flag: (defaults to -7777) value written when variable is past the upper limit of detection
- ULOD_value: (defaults to 'N/A') value of the upper limit of detection
- LLOD_flag: (defaults to -8888) value written when the measurement is below the lower limit of detection
- LLOD_value: (defaults to 'N/A') value of the lower limit of detection
- DM_contact: Contact information of the data manager for this data
Name, affiliation, phone number, mailing address, email address and/or fax number.
- project_info: Information on the project
- stipulations: Details the stipulations on the use of this data
- Comments: (optional, if empty, returns 'N/A') any specific comments that is wanted to be included.
if multiple lines, seperate with '\n'
- rev_comments: Comments related to each revision, newest first. Each ine is seperate by a '\n'
Output:
ict file with name format: '{data_id}_{loc_id}_{date}_{rev}.ict'
Keywords:
order:(optional) list of names of the data variables put in order that will be saved in the file.
if omitted, variables will be saved in random order.
default_format: (defaults to '.3f') The format to use when writing out numbers
when no specific format is defined for each data variable
file_comment: (optional) If you want to put in a comment in the file name
Dependencies:
Numpy
datetime
write_utils (this module)
Needed Files:
None
Example:
see code for ict_tester function in this module
Modification History:
Written: Samuel LeBlanc, NASA Ames, Santa Cruz, 2016-03-25, Holy Friday
"""
# module loads
import numpy as np
from datetime import datetime
#from write_utils import merge_dicts
# Should do input checking...
# set up file path to write to
# ICARTT file names use '_' as a field separator, so any '_' inside the
# identifier fields has to be replaced with '-'.
data_id = data_id.replace('_','-')
loc_id = loc_id.replace('_','-')
if file_comment:
file_comment = '_'+file_comment.replace(' ','-').replace('_','-')
fname = filepath+'{data_id}_{loc_id}_{date}_{rev}{file_comment}.ict'.format( data_id=data_id,loc_id=loc_id,date=date,rev=rev,file_comment=file_comment)
#f = open(fname,'w')
# set the default dict values
# defaults first, then user-supplied header values override them
def_dict = {'rev':rev,
'volume':1,
'file_num':1,
'time_interval':1.0,
'indep_var_name':'Start_UTC',
'missing_val':-9999,
'special_comments':'',
'associated_data':'N/A',
'ULOD_flag':-7777,'ULOD_value':'N/A',
'LLOD_flag':-8888,'LLOD_value':'N/A',
'Comments':'N/A',
'nlines':'{nlines}',
'date_y':date[:4],'date_m':date[4:6],'date_d':date[6:],
'now':datetime.now(),
'num_data':len(data_dict)-1, # remove one for the independent variable
'num_special_comments':len(header_dict['special_comments'].splitlines())}
head = merge_dicts(def_dict,header_dict)
# Compile the header information and verify some inputs
# first header block: one scale factor (1) and one missing value per variable
head['data_head'] = ','.join(('1 '*head['num_data']).split())+'\n'+ ','.join(('{missing_val} '*head['num_data']).split()).format(**head)
head['data_format'] = '{t:.0f}'
head['data_names'] = '{indep_var_name}'.format(**head)
nv = head['indep_var_name']
head['indep_var_unit'],head['indep_var_desc'] = data_dict[nv]['unit'],data_dict[nv]['long_description']
head['rev_comments'] = head['rev_comments'].strip()
# sanity checks: the current revision must appear first in rev_comments
if head['rev_comments'].find(head['rev'])<0:
print "*** Revision comments don't include the current revision, please update ***"
print '*** exiting, file not saved ***'
return
if head['rev_comments'].find(head['rev'])>0:
print """*** Revision comments are not in the right order please update
Have the current revision identifier in the top place ***"""
print '*** exiting, file not saved ***'
return
dnames = []
if not order:
order = data_dict.keys()
# build, per dependent variable: its header line, its column format and
# its position in the column-name row
for n in order:
print n
if not n==head['indep_var_name']:
stemp = '{n}, {unit}, {long_description}'.format(n=n,**data_dict[n])
head['data_head'] = head['data_head']+'\n'+stemp
if 'format' in data_dict[n]:
fmt = data_dict[n]['format']
else:
fmt = default_format
head['data_format'] = head['data_format']+',{:'+'{fmt}'.format(fmt=fmt)+'}'
head['data_names'] = head['data_names']+','+n
dnames.append(str(n))
try:
head['support_info'] = """-----------------------------------------------------------------------------
PI_CONTACT_INFO: {PI_contact}
PLATFORM: {platform}
LOCATION: {location}
ASSOCIATED_DATA: {associated_data}
INSTRUMENT_INFO: {instrument_info}
DATA_INFO: {data_info}
UNCERTAINTY: {uncertainty}
ULOD_FLAG: {ULOD_flag}
ULOD_VALUE: {ULOD_value}
LLOD_FLAG: {LLOD_flag}
LLOD_VALUE: {LLOD_value}
DM_CONTACT_INFO: {DM_contact}
PROJECT_INFO: {project_info}
STIPULATIONS_ON_USE: {stipulations}
OTHER_COMMENTS: {Comments}
REVISION: {rev}
{rev_comments}
-----------------------------------------------------------------------------
{data_names}""".format(**head)
except KeyError as v:
print '*** problem with header value of {v} ***'.format(v=v)
print '*** exiting, file not saved ***'
return
head['num_info'] = len(head['support_info'].splitlines())
try:
head_str = """{nlines}, 1001
{PI}
{Institution}
{Instrument}
{campaign}
{volume},{file_num}
{date_y},{date_m},{date_d},{now:%Y,%m,%d}
{time_interval}
{indep_var_name}, {indep_var_unit}, {indep_var_desc}
{num_data}
{data_head}
{num_special_comments}
{special_comments}
{num_info}
{support_info}
""".format(**head)
except KeyError as v:
print '*** problem with header value of {v} ***'.format(v=v)
print '*** exiting, file not saved ***'
return
# Now open and write out the header and data to the file
# the '{nlines}' placeholder survived the first format pass and is filled
# in here, once the final header line count is known
with open(fname,'w') as f:
f.write(head_str.format(nlines=len(head_str.splitlines())))
for i,t in enumerate(data_dict[head['indep_var_name']]['data']):
dat = [] # build each line and run checks on the data
for n in dnames:
d = data_dict[n]['data'][i]
# replace NaN/inf and out-of-detection-limit values with flags
if not np.isfinite(d):
d = head['missing_val']
if not type(head['ULOD_value']) is str:
if d>head['ULOD_value']:
d = head['ULOD_flag']
if not type(head['LLOD_value']) is str:
if d<head['LLOD_value']:
d = head['LLOD_flag']
dat.append(float(d))
try:
f.write(head['data_format'].format(*dat,t=t)+'\n')
except:
import pdb; pdb.set_trace()
print 'File writing successful to: {}'.format(fname)
return
# In[265]:
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into one new dict.

    Later dicts win on key collisions; the input dicts are not modified.
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
# In[282]:
def ict_tester():
"""
Simple function to test the write_ict file function
makes a file with dummy variables
"""
import numpy as np
# three-sample dummy time series; X3 contains a NaN on purpose so the
# missing-value substitution in write_ict is exercised
d_dict = {'Start_UTC':{'data':[230,231,232],'unit':'seconds from midnight UTC','long_description':'time keeping'},
'X1':{'data':[1,2,3],'unit':'None','long_description':'test 1'},
'X2':{'data':[10.9,11.9,12.9],'unit':'None','long_description':'test2'},
'X3':{'data':[-2,-3,np.NaN],'unit':'somethinf','long_description':'tutor3'}
}
print d_dict
# minimal header; rev_comments lists the current revision (RA) first, as
# required by write_ict's sanity checks
hdict = {'PI':'Samuel LeBlanc',
'Institution':'NASA Ames',
'Instrument':'tester',
'campaign':'NAAMES tester',
'special_comments':'Only for testing with 3 data points',
'PI_contact':'Samuel LeBlanc, samuel.leblanc@nasa.gov',
'platform':'C130',
'location':'based out of Santa Cruz, actual location in C130 file',
'instrument_info':'None',
'data_info':'made up',
'uncertainty':'Undefined',
'DM_contact':'See PI',
'project_info':'NAAMES tester, made up data',
'stipulations':'None',
'rev_comments':""" RA: first test of it\nR0: older"""
}
print hdict
order = ['X1','X2','X3']
# NOTE(review): hard-coded local Windows path; adjust before running
write_ict(hdict,d_dict,filepath='C:/Users/sleblan2/Research/NAAMES/',
data_id='4STAR_test',loc_id='C130',date='20160402',rev='RA',order=order)
# In[369]:
def prep_data_for_ict(data_dict,Start_UTC=None,End_UTC=None,
in_var_name='utc',out_var_name='Start_UTC', in_input=True,time_interval=1.0):
"""
Purpose:
To create the time variable that matches the requirement of a ict file (ICARTT) for NASA archiving
converts the in_var_name variable from utc hours, or datetime object to secdonds from utc save to the out_var_name
Takes in the data_dict and makes sure each variable has data corresponding to each time entry
Does nearest neighbor interpolation for linking each second measurement to a array value
Creates an uninterupted data stream from the first point to the last, with missing data identifiers in between.
Returns a modified data array.
Input:
data_dict: dictionary with each key representing a different variable to be saved
each key is a dictionary of its own with the following keys:
- data: time series numpy array of the data to be saved
- unit (not used)
- long_description (not used)
- format (not used)
Output:
modified data array in data_dict, to have a continuous time series without time gaps
Keywords:
in_var_name: (defaults to utc) the name of the variable holding the time series at the native resolution
out_var_name: (defaults to Start_UTC) the name of the variable holding the seconds from midnight values
Start_UTC: (optional) the start point of the time series, if not the first time point of the measurements
End_UTC: (optional) the end point of the time series, if not the last point of the measurements
in_input: (defaults to True) if set to True the Start_UTC and End_UTC use the same time series
units as the in_var_name, if False, uses the out_var_name (seconds from midnight)
time_interval: (defaults to 1 second) the time interval of the nearest neighbor interpolation and to be saved
Dependencies:
Numpy
write_utils (this module)
Needed Files:
None
Example:
...
Modification History:
Written: Samuel LeBlanc, NASA Ames, Santa Cruz, 2016-04-04
"""
import numpy as np
from write_utils import nearest_neighbor
# check input
if not in_var_name in data_dict:
print "*** the variable defined by '{}' should be included in the data_dict ***".format(in_var_name)
iv = in_var_name
ov = out_var_name
# accept either a float numpy array or a plain float of utc hours, and
# convert to seconds from midnight (x3600)
if type(data_dict[iv]['data']) is np.ndarray:
if data_dict[iv]['data'].dtype is np.dtype(object):
print 'input variable not a recognized type'
return
elif data_dict[iv]['data'].dtype is np.dtype(float):
# manageable type of float utc hours
utcs = data_dict[iv]['data']*3600.0
else:
if type(data_dict[iv]['data']) is float:
utcs = np.array(data_dict[iv]['data'])*3600.0
else:
print 'non manageable input type, please make utc hours'
return
# get the limits of the time series
# when in_input is True the user supplied the limits in hours, so they
# also need the x3600 conversion
if not Start_UTC:
Start_UTC = utcs[0]
else:
if in_input:
Start_UTC = Start_UTC*3600.0
if not End_UTC:
End_UTC = utcs[-1]
else:
if in_input:
End_UTC = End_UTC*3600.0
# create the out_var_name array
utc_out = np.arange(Start_UTC,End_UTC+1,time_interval)
data_out = data_dict.copy()
# now run through each data_dict variable to get the nearest neighbor
# points farther than half a time step from any sample become NaN
for n in data_out:
new = nearest_neighbor(utcs,data_out[n]['data'],utc_out,dist=time_interval/2.0)
data_out[n]['data'] = new
data_out[ov] = {'data':utc_out,'unit':'Seconds',
'long_description':'Time of measurement continuous starting from midnight UTC'}
# the native-resolution time variable is superseded by out_var_name
del(data_out[iv])
return data_out
# In[330]:
def nearest_neighbor(X,Y,Xnew,dist=1):
    """
    Purpose:
        Nearest-neighbour interpolation with a cap on the allowed distance
        between a requested point and its matched sample.
    Input:
        X: initial independent variable
        Y: initial dependant variable
        Xnew: new independant variable to interpolate over
        dist: max distance allowed
    Output:
        Ynew: Y sampled at the nearest X for each Xnew; NaN where the
              nearest sample is farther than dist
    Keywords:
        dist: (default 1) see above
    Dependencies:
        Numpy
        Sp_parameters
    Modification History:
        Written: Samuel LeBlanc, NASA Ames, Santa Cruz, 2016-04-04
    """
    from Sp_parameters import find_closest
    import numpy as np
    matched = find_closest(X,Xnew)
    Ynew = Y[matched]
    # mask out matches that are farther away than the allowed distance
    too_far = abs(X[matched]-Xnew) > dist
    try:
        Ynew[too_far] = np.nan
    except ValueError:
        # integer (or otherwise non-float) arrays cannot hold NaN:
        # promote to float64 first, then apply the mask
        promoted = Ynew.astype('float64')
        promoted[too_far] = np.nan
        Ynew = promoted
    return Ynew
# In[365]:
def make_plots_ict(data_dict,filepath,data_id,loc_id,date,rev,plot_together=[],plot_together2=[],indep_var_name='Start_UTC'):
"""
Purpose:
To plot the variables saved in the data_dict for prepping when saving
Input:
filepath: full path of folder of the file to be saved
data_id: for the prefix of the file name the instrumnet identifier
loc_id: for the prefix of the file name, the location identifier
date: date of the data, used in file naming convention
rev: revision value
data_dict : see description in write_ict, uses data and unit
Output:
plots (png files saved next to the ict file)
Keywords:
plot_together: (optional), list of names of variables to be plotted together on the same figure
plot_together2: (optional), second list of names of variables to be plotted together on the same figure
indep_var_name: (defaults to Start_UTC) the variable name of the independent variable in data_dict
Dependencies:
Numpy
matplotlib
Example:
...
Modification History:
Written: Samuel LeBlanc, NASA Ames, Santa Cruz, 2016-04-04
"""
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text', usetex=False)
utc = data_dict[indep_var_name]['data']
# ll tracks the variables not yet plotted; grouped variables are removed
# from it as they are drawn so the final loop only plots the leftovers
ll = data_dict.keys()
ll.remove(indep_var_name)
if plot_together:
fig = plt.figure()
for n in plot_together:
ll.remove(n)
plt.plot(utc,data_dict[n]['data'],'x',label=n)
plt.legend(frameon=False)
plt.xlabel('UTC [seconds from midnight]')
plt.ylabel('Values')
plt.title(u'{data_id}_{loc_id}_{date}_{rev}.ict'.format(data_id=data_id,loc_id=loc_id,date=date,rev=rev))
print 'plotting the togethers'
fig.savefig(filepath+'{data_id}_{loc_id}_{date}_{rev}_together.png'.format( data_id=data_id,loc_id=loc_id,date=date,rev=rev),dpi=600,transparent=True)
if plot_together2:
fig = plt.figure()
for n in plot_together2:
ll.remove(n)
plt.plot(utc,data_dict[n]['data'],'+',label=n)
plt.legend(frameon=False)
plt.xlabel('UTC [seconds from midnight]')
plt.ylabel('Values')
plt.title(u'{data_id}_{loc_id}_{date}_{rev}.ict'.format(data_id=data_id,loc_id=loc_id,date=date,rev=rev))
print 'plotting the togethers 2'
fig.savefig(filepath+'{data_id}_{loc_id}_{date}_{rev}_together2.png'.format( data_id=data_id,loc_id=loc_id,date=date,rev=rev),dpi=600,transparent=True)
# one individual figure per remaining variable
for n in ll:
fig = plt.figure()
plt.plot(utc,data_dict[n]['data'],'s',label=n)
plt.legend(frameon=False)
plt.xlabel('UTC [seconds from midnight]')
plt.ylabel('{n} [{unit}]'.format(n=n,unit=data_dict[n].get('unit')))
plt.title(u'{data_id}_{loc_id}_{date}_{rev} for {n}'.format(data_id=data_id,loc_id=loc_id,date=date,rev=rev,n=n))
print 'plotting {}'.format(n)
fig.savefig(filepath+'{data_id}_{loc_id}_{date}_{rev}_{n}.png'.format( data_id=data_id,loc_id=loc_id,date=date,rev=rev,n=n),dpi=600,transparent=True)
| gpl-3.0 |
Candihub/pixel | apps/data/io/parsers.py | 1 | 7594 | import logging
import pandas
from django.utils.translation import ugettext
from hashlib import blake2b
from ..models import Entry, Repository
logger = logging.getLogger(__name__)
class ChrFeatureParser(object):
    """Base parser turning a tab-separated chromosome-feature file into
    ``Entry`` rows attached to a ``Repository``.

    Subclasses describe a concrete file layout by implementing
    ``_get_headers()`` (see the CGD/SGD parsers for examples).
    """

    def __init__(self, file_path, database_name, root_url, skip_rows=0):
        """
        :param file_path: path of the tab-separated file to parse
        :param database_name: name of the ``Repository`` the entries belong to
        :param root_url: URL prefix; the feature ``id`` column is appended to
            it to build each entry's URL
        :param skip_rows: number of banner lines to skip at the top of the
            file before the tabular data starts
        """
        self.file_path = file_path
        self.database_name = database_name
        self.root_url = root_url
        # Filled by parse() with a pandas DataFrame; None until then.
        self.features = None
        self.skip_rows = skip_rows
        # Entry instances staged by _to_entries(), split by whether their
        # identifier is already known in the repository.
        self.entries = {
            'new': [],
            'update': [],
        }

    def _get_headers(self):
        """
        This method MUST return the ordered column names of the file to parse
        as a tuple. All the column names have to be supplied and the following
        values are expected:

        - aliases
        - description
        - gene_name
        - id
        - name
        """
        raise NotImplementedError(
            ugettext(
                "You should define the _get_headers() method on your parser."
            )
        )

    def parse(self):
        """Load the feature file into ``self.features`` as a DataFrame."""
        self.features = pandas.read_table(
            self.file_path,
            header=None,
            names=self._get_headers(),
            skiprows=self.skip_rows
        )

    def _to_entries(self, ignore_aliases):
        """Convert parsed features into ``Entry`` objects.

        Fills ``self.entries['new']`` and ``self.entries['update']``
        depending on whether each identifier already exists in the
        repository. Does nothing when :meth:`parse` has not been called.

        :param ignore_aliases: when True, only the primary feature name is
            indexed; otherwise one Entry is created per alias as well
        """
        if self.features is None:
            return

        repository, _ = Repository.objects.get_or_create(
            name=self.database_name
        )
        known_entries = repository.entries.values_list('identifier', flat=True)

        entries = {
            'update': [],
            'new': [],
        }
        for idx, feature in self.features.iterrows():
            url = '{}{}'.format(self.root_url, feature['id'])
            aliases = []

            # A feature without a name cannot be indexed: skip it.
            if pandas.isna(feature['name']):
                logger.warning(f"Invalid feature name for id={feature['id']}")
                continue

            if not pandas.isna(feature['aliases']) and not ignore_aliases:
                splitted_aliases = feature['aliases'].split('|')
                # Drop aliases longer than 100 characters (presumably the
                # identifier column limit — verify against the Entry model)
                # and log when any were discarded.
                aliases = list(
                    filter(
                        lambda a: len(a) <= 100,
                        map(str, splitted_aliases)
                    )
                )
                if len(aliases) != len(splitted_aliases):
                    logger.warning(
                        'Ignored long aliases (100+ chars) in {}'.format(
                            feature['aliases']
                        )
                    )

            for identifier in (str(feature['name']), *aliases):
                # The Entry primary key is precomputed given an identifier and
                # a repository. By doing so, we will be able to create a set()
                # to remove duplicates potentially generated by aliases.
                pk = blake2b(
                    bytes(
                        '/'.join((identifier, repository.pk.hex)),
                        encoding='utf8'
                    ),
                    digest_size=16
                ).hexdigest()

                description = feature['description']
                # Prefix the description with the gene name when present.
                if not pandas.isna(feature['gene_name']):
                    gene_name = feature['gene_name'].strip()
                    description = '{} | {}'.format(gene_name, description)

                entry = Entry(
                    pk=pk,
                    identifier=identifier,
                    description=description,
                    url=url,
                    repository=repository,
                )
                if identifier in known_entries:
                    entries['update'].append(entry)
                else:
                    entries['new'].append(entry)

        # The set() relies on the precomputed pk above to collapse
        # duplicates generated by aliases.
        self.entries['new'] = list(set(entries.get('new')))
        self.entries['update'] = list(set(entries.get('update')))

    def save(self, ignore_aliases=True):
        """Persist parsed features as ``Entry`` rows.

        New entries are bulk-created; already-known entries only get their
        description refreshed.
        """
        self._to_entries(ignore_aliases=ignore_aliases)

        # Create new entries
        Entry.objects.bulk_create(self.entries['new'], batch_size=500)

        # Update old entries
        for updated_entry in self.entries['update']:
            entry = Entry.objects.get(
                identifier=updated_entry.identifier,
                repository=updated_entry.repository
            )
            entry.description = updated_entry.description
            entry.save(update_fields=['description', ])
class CGDParser(ChrFeatureParser):
    """Parser for Candida Genome Database (CGD) chromosomal feature files.

    The tab-separated columns are, in order: feature name (primary
    systematic name), gene name (locus name), aliases ('|'-separated),
    feature type, chromosome, start coordinate, stop coordinate, strand,
    primary CGDID, secondary CGDID, description, creation date, sequence
    coordinate version date, two blank columns, date of gene-name
    reservation, whether the reserved name became the standard name (Y/N),
    and the S. cerevisiae ortholog(s) ('|'-separated).
    """

    def __init__(self, file_path):
        # CGD exports start with an 8-line banner that has to be skipped
        # before the tabular data begins.
        super().__init__(
            file_path,
            database_name='CGD',
            root_url='http://www.candidagenome.org/cgi-bin/locus.pl?dbid=',
            skip_rows=8
        )

    def _get_headers(self):
        """Return the ordered CGD column names (see class docstring)."""
        return (
            'name',
            'gene_name',
            'aliases',
            'type',
            'chromosome',
            'start',
            'stop',
            'strand',
            'id',  # primary CGDID
            'cgdid_2',
            'description',
            'created',
            'crd_versionned',
            'blk_1',
            'blk_2',
            'reserved',
            'is_standard',
            'orthologs',
        )
class SGDParser(ChrFeatureParser):
    """Parser for Saccharomyces Genome Database (SGD) feature files.

    The tab-separated columns are, in order: primary SGDID, feature type,
    feature qualifier, feature name, standard gene name, aliases
    ('|'-separated), parent feature name, secondary SGDID(s), chromosome,
    start coordinate, stop coordinate, strand, genetic position,
    coordinate version, sequence version, and description.
    """

    def __init__(self, file_path):
        # SGD exports carry no banner, so no row is skipped.
        super().__init__(
            file_path,
            database_name='SGD',
            root_url='https://www.yeastgenome.org/locus/'
        )

    def _get_headers(self):
        """Return the ordered SGD column names (see class docstring)."""
        return (
            'id',  # primary SGDID
            'feature_type',
            'feature_qualifier',
            'name',
            'gene_name',
            'aliases',
            'parent_feature_name',
            'sgdid_2',
            'chromosome',
            'start',
            'stop',
            'strand',
            'position',
            'coordinate_version',
            'sequence_version',
            'description',
        )
| bsd-3-clause |
joernhees/scikit-learn | examples/decomposition/plot_image_denoising.py | 70 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first fitting an online :ref:`DictionaryLearning`
model and then applying various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)

from time import time

import matplotlib.pyplot as plt
import numpy as np
import scipy as sp

from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version

# scipy.misc.face() is only available from SciPy 0.12 onwards.
if sp_version < (0, 12):
    raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                   "thus does not include the scipy.misc.face() image.")

###############################################################################
try:
    from scipy import misc
    face = misc.face(gray=True)
except AttributeError:
    # Old versions of scipy have face in the top level package
    face = sp.face(gray=True)

# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255

# downsample for higher speed: average each 2x2 pixel block
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape

# Distort the right half of the image with additive Gaussian noise.
# NOTE(review): np.random is not seeded here, so results vary between runs.
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)

# Extract all reference patches from the (noise-free) left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
# Normalize the patches: zero mean and unit variance per pixel position.
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))

###############################################################################
# Learn the dictionary from reference patches

print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)

# Display the 100 learned atoms as 7x7 images on a 10x10 grid.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
             'Train time %.1fs on %d patches' % (dt, len(data)),
             fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
    """Plot *image* side by side with its difference to *reference*.

    Left panel: the image itself (grayscale, displayed over [0, 1]).
    Right panel: ``image - reference`` on a diverging colormap, with the
    Frobenius norm of the difference reported in the subplot title.
    """
    diff = image - reference
    diff_norm = np.sqrt(np.sum(diff ** 2))

    plt.figure(figsize=(5, 3.3))

    # Left: the (distorted or reconstructed) image.
    plt.subplot(1, 2, 1)
    plt.title('Image')
    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

    # Right: the residual against the reference image.
    plt.subplot(1, 2, 2)
    plt.title('Difference (norm: %.2f)' % diff_norm)
    plt.imshow(diff, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

    plt.suptitle(title, size=16)
    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')

###############################################################################
# Extract noisy patches and reconstruct them using the dictionary

print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
# Only center the patches here (no variance scaling); the intercept is
# added back after reconstruction.
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))

# Sparse-coding strategies to compare: (plot title, algorithm, params).
transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = face.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    # Sparse-code the noisy patches against the learned dictionary V,
    # then rebuild the patches from the code.
    code = dico.transform(data)
    patches = np.dot(code, V)

    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        # Thresholding output is unscaled: rescale patches to [0, 1].
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
        patches, (height, width // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], face,
                   title + ' (time: %.1fs)' % dt)

plt.show()
| bsd-3-clause |
mhdella/scikit-learn | sklearn/tree/tests/test_tree.py | 57 | 47417 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    """Assert that two fitted tree structures *d* and *s* are identical.

    Compares node counts, topology (children arrays), split features and
    thresholds of internal nodes, per-node sample counts, impurities, and
    the values stored at the leaves. *message* prefixes every failure
    report.
    """
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of node ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))

    assert_array_equal(d.children_right, s.children_right,
                       message + ": inequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": inequal children_left")

    # Leaves are tagged with TREE_LEAF in children_right; everything else
    # is an internal (split) node.
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)

    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": inequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": inequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": inequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": inequal n_node_samples")

    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": inequal impurity")

    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": inequal value")
def test_classification_toy():
    """Check classification on a toy dataset, with and without a
    max_features restriction."""
    for name, Tree in CLF_TREES.items():
        for params in ({"random_state": 0},
                       {"max_features": 1, "random_state": 1}):
            estimator = Tree(**params)
            estimator.fit(X, y)
            assert_array_equal(estimator.predict(T), true_result,
                               "Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
    """Check regression on a toy dataset, with and without a
    max_features restriction."""
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))

        # BUG FIX: the original asserted on ``reg`` (the estimator already
        # checked above) instead of the max_features=1 tree fitted here,
        # so the restricted estimator was never actually tested.
        reg_restricted = Tree(max_features=1, random_state=1)
        reg_restricted.fit(X, y)
        assert_almost_equal(reg_restricted.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
    """Check that estimators fitted on a pure (constant) target predict
    exactly that constant."""
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]

    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))

    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # BUG FIX: the original asserted on ``clf`` (left over from the
        # classifier loop above) instead of the regressor under test.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimator are pickable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
    """n_classes_ and classes_ must reflect single- vs multi-output fits."""
    for name, TreeClassifier in CLF_TREES.items():
        # Single output: scalar class count, flat class array.
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_equal(clf.n_classes_, 2)
        assert_array_equal(clf.classes_, [-1, 1])

        # Two outputs: one entry per output.
        _y = np.vstack((y, np.array(y) * 2)).T
        clf = TreeClassifier(random_state=0)
        clf.fit(X, _y)
        assert_equal(len(clf.n_classes_), 2)
        assert_equal(len(clf.classes_), 2)
        assert_array_equal(clf.n_classes_, [2, 2])
        assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
    """Rebalancing an unbalanced dataset via sample weights must let every
    classifier recover the training labels."""
    unbalanced_X = iris.data[:125]
    unbalanced_y = iris.target[:125]
    weights = _balance_weights(unbalanced_y)

    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(unbalanced_X, unbalanced_y, sample_weight=weights)
        assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
    """Fitting and predicting must be memory-layout agnostic."""
    for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
                                                [np.float64, np.float32]):
        est = TreeEstimator(random_state=0)
        y = iris.target

        # Dense inputs in the various layouts (default, C, F, contiguous).
        builders = [
            lambda: np.asarray(iris.data, dtype=dtype),
            lambda: np.asarray(iris.data, order="C", dtype=dtype),
            lambda: np.asarray(iris.data, order="F", dtype=dtype),
            lambda: np.ascontiguousarray(iris.data, dtype=dtype),
        ]
        for build in builders:
            X = build()
            assert_array_equal(est.fit(X, y).predict(X), y)

        # Sparse inputs, when the splitter supports them.
        if est.splitter in SPARSE_SPLITTERS:
            for sparse_format in (csr_matrix, csc_matrix):
                X = sparse_format(iris.data, dtype=dtype)
                assert_array_equal(est.fit(X, y).predict(X), y)

        # Strided view of the data.
        X = np.asarray(iris.data[::3], dtype=dtype)
        y = iris.target[::3]
        assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
    """Sample weights: zero weight removes a sample, weights mimic duplication."""
    # Zero-weighted samples must not influence the fit at all.
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0
    weights = np.ones(100)
    weights[y == 0] = 0.0
    est = DecisionTreeClassifier(random_state=0)
    est.fit(X, y, sample_weight=weights)
    assert_array_equal(est.predict(X), np.ones(100))

    # At depth one the split must follow the weightier class.
    X = np.arange(200)[:, np.newaxis]
    y = np.zeros(200)
    y[50:100] = 1
    y[100:200] = 2
    X[100:200, 0] = 200
    weights = np.ones(200)

    weights[y == 2] = .51  # samples of class '2' are still weightier
    est = DecisionTreeClassifier(max_depth=1, random_state=0)
    est.fit(X, y, sample_weight=weights)
    assert_equal(est.tree_.threshold[0], 149.5)

    weights[y == 2] = .5  # samples of class '2' are no longer weightier
    est = DecisionTreeClassifier(max_depth=1, random_state=0)
    est.fit(X, y, sample_weight=weights)
    assert_equal(est.tree_.threshold[0], 49.5)  # threshold should have moved

    # Integer weights must be equivalent to literally duplicating samples.
    X = iris.data
    y = iris.target
    duplicates = rng.randint(0, X.shape[0], 200)
    est = DecisionTreeClassifier(random_state=1)
    est.fit(X[duplicates], y[duplicates])
    weights = np.bincount(duplicates, minlength=X.shape[0])
    est2 = DecisionTreeClassifier(random_state=1)
    est2.fit(X, y, sample_weight=weights)
    internal = est.tree_.children_left != tree._tree.TREE_LEAF
    assert_array_almost_equal(est.tree_.threshold[internal],
                              est2.tree_.threshold[internal])
def test_sample_weight_invalid():
    """Malformed sample_weight arrays must raise ValueError at fit time."""
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0
    est = DecisionTreeClassifier(random_state=0)

    bad_weights = [
        np.random.rand(100, 1),  # 2-D instead of 1-D
        np.array(0),             # 0-D scalar
        np.ones(101),            # too long
        np.ones(99),             # too short
    ]
    for weights in bad_weights:
        assert_raises(ValueError, est.fit, X, y, sample_weight=weights)
def check_class_weights(name):
    """Check that class_weight mirrors the behavior of sample_weight."""
    TreeClassifier = CLF_TREES[name]

    # Iris is balanced, so 'balanced' weighting must be a no-op.
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = TreeClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Multi-output problem built from three copies of iris.
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # User-defined weights chosen to balance over the outputs.
    clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                        {0: 2., 1: 1., 2: 2.},
                                        {0: 1., 1: 2., 2: 2.}],
                          random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Multi-output 'balanced' should likewise have no effect.
    clf4 = TreeClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)

    # Inflating class 1 with sample_weight must match the class_weight dict.
    weights = np.ones(iris.target.shape)
    weights[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, weights)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # sample_weight and class_weight must compose multiplicatively.
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, weights ** 2)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, weights)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Yield one class_weight consistency check per classifier variant."""
    for name in CLF_TREES:
        yield (check_class_weights, name)
def check_class_weight_errors(name):
    """Invalid class_weight values must raise ValueError on fit."""
    TreeClassifier = CLF_TREES[name]
    _y = np.vstack((y, np.array(y) * 2)).T

    # Unknown preset string fails for single and multi output alike.
    clf = TreeClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)

    # Multi-output requires a list of dicts (or a preset string).
    clf = TreeClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)

    # A list with the wrong number of entries is rejected too.
    clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """Yield one class_weight error check per classifier variant."""
    for name in CLF_TREES:
        yield (check_class_weight_errors, name)
def test_max_leaf_nodes():
    """max_leaf_nodes caps the leaf count; values below 2 are rejected."""
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        tree = est.tree_
        assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)

    # max_leaf_nodes in (0, 1) should raise ValueError
    for bad_value in (0, 1, 0.1):
        est = TreeEstimator(max_depth=None, max_leaf_nodes=bad_value)
        assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    """max_leaf_nodes must take precedence over max_depth."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        # With best-first growth the tree is allowed past max_depth=1.
        assert_greater(est.tree_.max_depth, 1)
def test_arrays_persist():
    """Tree property arrays must stay alive after the tree is collected.

    Non-regression test for issue #2726.
    """
    properties = ['n_classes', 'value', 'children_left', 'children_right',
                  'threshold', 'impurity', 'feature', 'n_node_samples']
    for attr in properties:
        value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
        # If the array pointed to freed memory its contents would be junk.
        assert_true(-2 <= value.flat[0] < 2,
                    'Array points to arbitrary memory')
def test_only_constant_features():
    """All-constant features leave nothing to split on: depth must be 0."""
    rs = check_random_state(0)
    X = np.zeros((10, 20))
    y = rs.randint(0, 2, (10, ))
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(random_state=0)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
    """With a single varying feature among constants, the depth stays at 1."""
    X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
                   np.zeros((4, 1000))])
    y = np.array([0., 1., 0., 1.0])

    for name, TreeEstimator in CLF_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        # The varying feature carries no class signal, hence 50/50 proba.
        assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))

    for name, TreeEstimator in REG_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    """Inputs overflowing float32 must raise ValueError mentioning "float32".

    The tree code casts inputs to float32 internally; values that overflow
    that range should be reported with a helpful message rather than being
    silently accepted.
    """
    X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
    clf = DecisionTreeClassifier()
    try:
        clf.fit(X, [0, 1, 0, 1])
    except ValueError as e:
        assert_in("float32", str(e))
    else:
        # Previously the test passed silently when no error was raised,
        # which made it vacuous; fail explicitly instead.
        raise AssertionError("ValueError not raised for overflowing input")
def test_realloc():
    """The internal realloc helper must raise MemoryError (not crash)."""
    from sklearn.tree._tree import _realloc_test
    assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
    """Absurd max_leaf_nodes values must fail cleanly, never crash."""
    n_bits = int(platform.architecture()[0].rstrip('bit'))
    X = np.random.randn(10, 2)
    y = np.random.randint(0, 2, 10)

    # Requesting more memory than the address space can hold.
    # Currently raises OverflowError.
    huge = 2 ** (n_bits + 1)
    clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
    assert_raises(Exception, clf.fit, X, y)

    # Non-regression: MemoryError used to be dropped by Cython because of
    # a missing "except *" declaration.
    huge = 2 ** (n_bits - 1) - 1
    clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
    assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
    """Dense and sparse inputs must yield identical trees and predictions."""
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Trim the larger datasets to keep the test fast.
    if dataset in ["digits", "boston"]:
        n_samples = X.shape[0] // 5
        X = X[:n_samples]
        X_sparse = X_sparse[:n_samples]
        y = y[:n_samples]

    for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
        X_sparse = sparse_format(X_sparse)

        # Fit the default depth-first builder on both representations.
        d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
        s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)

        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))

        y_pred = d.predict(X)
        if tree in CLF_TREES:
            y_proba = d.predict_proba(X)
            y_log_proba = d.predict_log_proba(X)

        # Predictions must also agree for every test-time sparse format.
        for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
            X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)

            assert_array_almost_equal(s.predict(X_sparse_test), y_pred)

            if tree in CLF_TREES:
                assert_array_almost_equal(s.predict_proba(X_sparse_test),
                                          y_proba)
                assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
                                          y_log_proba)
def test_sparse_input():
    """Yield dense/sparse equivalence checks for all sparse-capable trees."""
    datasets_to_test = ("clf_small", "toy", "digits", "multilabel",
                        "sparse-pos", "sparse-neg", "sparse-mix", "zeros")
    for tree, dataset in product(SPARSE_TREES, datasets_to_test):
        max_depth = 3 if dataset == "digits" else None
        yield (check_sparse_input, tree, dataset, max_depth)

    # MSE is numerically unstable and the comparison strict, so cap the
    # depth for the regression datasets.
    for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
        if tree in SPARSE_TREES:
            yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
    """Parameter handling must not depend on input sparsity.

    For several estimator configurations, fits the same estimator on dense
    and sparse versions of *dataset* and asserts that the resulting trees
    and predictions agree.  The repeated fit/compare boilerplate from the
    original version is factored into a local helper.
    """
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    def assert_dense_sparse_agree(**params):
        # One estimator per representation, identical configuration.
        d = TreeEstimator(random_state=0, **params).fit(X, y)
        s = TreeEstimator(random_state=0, **params).fit(X_sparse, y)
        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(s.predict(X), d.predict(X))

    # Check max_features
    assert_dense_sparse_agree(max_features=1, max_depth=2)
    # Check min_samples_split
    assert_dense_sparse_agree(max_features=1, min_samples_split=10)
    # Check min_samples_leaf
    assert_dense_sparse_agree(min_samples_leaf=X_sparse.shape[0] // 2)
    # Check best-first search
    assert_dense_sparse_agree(max_leaf_nodes=3)
def test_sparse_parameters():
    """Yield one parameter equivalence check per sparse tree and dataset."""
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
    """Every split criterion must behave identically on dense and sparse X."""
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Pick the criteria relevant to this estimator type.
    CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
    for criterion in CRITERIONS:
        dense_est = TreeEstimator(random_state=0, max_depth=3,
                                  criterion=criterion).fit(X, y)
        sparse_est = TreeEstimator(random_state=0, max_depth=3,
                                   criterion=criterion).fit(X_sparse, y)

        assert_tree_equal(dense_est.tree_, sparse_est.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(sparse_est.predict(X), dense_est.predict(X))
def test_sparse_criterion():
    """Yield one criterion equivalence check per sparse tree and dataset."""
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
                                n_features=10):
    """Explicitly stored zeros in a sparse matrix must not change results."""
    TreeEstimator = ALL_TREES[tree]
    # Use n_samples == n_features so that the same (data, indices, indptr)
    # arrays can build both the CSC train matrix and the CSR test matrix.
    n_samples = n_features
    samples = np.arange(n_samples)
    # Generate X, y with a random number of non-zero entries per column.
    random_state = check_random_state(0)
    indices = []
    data = []
    offset = 0
    indptr = [offset]
    for i in range(n_features):
        n_nonzero_i = random_state.binomial(n_samples, 0.5)
        indices_i = random_state.permutation(samples)[:n_nonzero_i]
        indices.append(indices_i)
        # Values in {-1, 0, 1, 2}: zeros here are stored explicitly.
        data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
        data.append(data_i)
        offset += n_nonzero_i
        indptr.append(offset)
    indices = np.concatenate(indices)
    data = np.array(np.concatenate(data), dtype=np.float32)
    X_sparse = csc_matrix((data, indices, indptr),
                          shape=(n_samples, n_features))
    X = X_sparse.toarray()
    # Reinterpreting the same arrays as CSR yields a different matrix,
    # used as held-out test data below.
    X_sparse_test = csr_matrix((data, indices, indptr),
                               shape=(n_samples, n_features))
    X_test = X_sparse_test.toarray()
    y = random_state.randint(0, 3, size=(n_samples, ))
    # Ensure that X_sparse_test owns its data, indices and indptr arrays
    X_sparse_test = X_sparse_test.copy()
    # Ensure that we really have explicitly stored zeros
    assert_greater((X_sparse.data == 0.).sum(), 0)
    assert_greater((X_sparse_test.data == 0.).sum(), 0)
    # Perform the comparison: dense vs sparse fit, then every combination
    # of dense/sparse test input must agree for apply/predict/proba.
    d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
    s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))
    Xs = (X_test, X_sparse_test)
    for X1, X2 in product(Xs, Xs):
        assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
        assert_array_almost_equal(s.apply(X1), d.apply(X2))
        assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
        assert_array_almost_equal(s.predict(X1), d.predict(X2))
        if tree in CLF_TREES:
            assert_array_almost_equal(s.predict_proba(X1),
                                      d.predict_proba(X2))
def test_explicit_sparse_zeros():
    """Yield the explicit-zeros check for every sparse-capable tree."""
    for tree in SPARSE_TREES:
        yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
    """1-D feature arrays are invalid for both fit and predict."""
    TreeEstimator = ALL_TREES[name]

    X = iris.data[:, 0].ravel()              # 1-D: must be rejected
    X_2d = iris.data[:, 0].reshape((-1, 1))  # same values, valid shape
    y = iris.target

    assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)

    est = TreeEstimator(random_state=0)
    est.fit(X_2d, y)
    assert_raises(ValueError, est.predict, X)
def test_1d_input():
    """Yield the 1-D input rejection check for every tree variant."""
    for name in ALL_TREES:
        yield (check_raise_error_on_1d_input, name)
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
    """Assert that min_weight_fraction_leaf=0.4 blocks the only viable split.

    Kept private so that nose pretty-prints the yielding test's name rather
    than this helper's.
    """
    # Without the constraint a single split is made...
    est = TreeEstimator(random_state=0)
    est.fit(X, y, sample_weight=sample_weight)
    assert_equal(est.tree_.max_depth, 1)

    # ...but requiring 40% of the total weight in each leaf forbids it.
    est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
    est.fit(X, y, sample_weight=sample_weight)
    assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
    """Run the min_weight_fraction_leaf split check on dense and sparse X."""
    TreeEstimator = ALL_TREES[name]
    X = np.array([[0], [0], [0], [0], [1]])
    y = [0, 0, 0, 0, 1]
    sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
    _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)

    if TreeEstimator().splitter in SPARSE_SPLITTERS:
        _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
                                           sample_weight)
def test_min_weight_leaf_split_level():
    """Yield the split-level weight check for every tree variant."""
    for name in ALL_TREES:
        yield (check_min_weight_leaf_split_level, name)
def check_public_apply(name):
    """est.apply must match the low-level tree_.apply on dense input."""
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)

    # tree_.apply requires the internal float32 dtype.
    X_small32 = X_small.astype(tree._tree.DTYPE)
    assert_array_equal(est.apply(X_small),
                       est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
    """est.apply must match the low-level tree_.apply on sparse input."""
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)

    # tree_.apply requires a CSR matrix of the internal float32 dtype.
    X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
    assert_array_equal(est.apply(X_small),
                       est.tree_.apply(X_small32))
def test_public_apply():
    """Yield dense and sparse apply() consistency checks."""
    for name in ALL_TREES:
        yield (check_public_apply, name)

    for name in SPARSE_TREES:
        yield (check_public_apply_sparse, name)
| bsd-3-clause |
wlamond/scikit-learn | sklearn/cluster/k_means_.py | 9 | 60297 | """K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++
    Parameters
    -----------
    X : array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).
    n_clusters : integer
        The number of seeds to choose
    x_squared_norms : array, shape (n_samples,)
        Squared Euclidean norm of each data point.
    random_state : numpy.RandomState
        The generator used to initialize the centers.
    n_local_trials : integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.
    Returns
    -------
    centers : array, shape (n_clusters, n_features)
        The initial centers for k-means, drawn from rows of X.
    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007
    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape
    centers = np.empty((n_clusters, n_features), dtype=X.dtype)
    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))
    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]
    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()
    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center.
        # stable_cumsum + searchsorted implement the weighted draw: each
        # rand_val lands in the cumulative-distance bin of one sample.
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
                                        rand_vals)
        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()
            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq
        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq
    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            algorithm="auto", return_n_iter=False):
    """K-means clustering algorithm.
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    best_n_iter : int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % max_iter)
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)
    # Validate init array; explicit centers imply a single init run.
    if hasattr(init, '__array__'):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)
        if n_init != 1:
            warnings.warn(
                'Explicit initial center position passed: '
                'performing only one init in k-means instead of n_init=%d'
                % n_init, RuntimeWarning, stacklevel=2)
            n_init = 1
    # Subtract the mean of X for more accurate distance computations
    # (dense data only; this shift is undone before returning).
    if not sp.issparse(X):
        X_mean = X.mean(axis=0)
        # The copy was already done above
        X -= X_mean
        if hasattr(init, '__array__'):
            init -= X_mean
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    best_labels, best_inertia, best_centers = None, None, None
    if n_clusters == 1:
        # elkan doesn't make sense for a single cluster, full will produce
        # the right result.
        algorithm = "full"
    if algorithm == "auto":
        algorithm = "full" if sp.issparse(X) else 'elkan'
    if algorithm == "full":
        kmeans_single = _kmeans_single_lloyd
    elif algorithm == "elkan":
        kmeans_single = _kmeans_single_elkan
    else:
        raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
                         " %s" % str(algorithm))
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
                                   verbose=verbose, tol=tol,
                                   precompute_distances=precompute_distances,
                                   x_squared_norms=x_squared_norms,
                                   # Change seed to ensure variety
                                   random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    # Undo the mean shift: restore the caller's data (if requested) and
    # translate the centers back into the original coordinate space.
    if not sp.issparse(X):
        if not copy_x:
            X += X_mean
        best_centers += X_mean
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """A single k-means run using Elkan's triangle-inequality algorithm.

    Dense ``X`` only.  Returns ``(labels, inertia, centers, n_iter)`` with
    the same meaning as ``_kmeans_single_lloyd``.  ``precompute_distances``
    is accepted for signature compatibility but unused by this variant.
    """
    if sp.issparse(X):
        raise ValueError("algorithm='elkan' not supported for sparse input X")
    X = check_array(X, order="C")
    random_state = check_random_state(random_state)
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    # The Cython implementation requires a C-contiguous centers array.
    centers = np.ascontiguousarray(centers)
    if verbose:
        print('Initialization complete')
    centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
                                            max_iter=max_iter, verbose=verbose)
    # Inertia: sum of squared distances of samples to their assigned center.
    inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
    return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """A single run of k-means (Lloyd's algorithm), assumes preparation
    completed prior.

    Parameters
    ----------
    X : array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode
    x_squared_norms : array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Track the best (lowest-inertia) state seen so far, since inertia
        # is not guaranteed to be monotone across the copy/update below.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Total squared movement of all centers during this iteration.
        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    # i + 1 is the number of iterations actually executed.
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Compute cluster labels and inertia via a dense distance computation.

    The caller-provided ``distances`` buffer is overwritten in place when its
    length matches the number of samples.

    Parameters
    ----------
    X : numpy array, shape (n_sample, n_features)
        Input data.
    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X (accepted for signature uniformity;
        not referenced in this dense path).
    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.
    distances : numpy array, shape (n_samples,)
        Pre-allocated array in which distances are stored.

    Returns
    -------
    labels : numpy array, dtype=np.int, shape (n_samples,)
        Indices of clusters that samples are assigned to.
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # pairwise_distances_argmin_min batches the nearest-neighbor computation
    # internally, which prevents memory blowup with many samples/clusters.
    # TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
    labels, closest_dist = pairwise_distances_argmin_min(
        X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
    # The cython k-means routines expect int32 label arrays.
    labels = labels.astype(np.int32)
    if distances.shape[0] == n_samples:
        # Fill the caller's buffer in place.
        distances[:] = closest_dist
    return labels, closest_dist.sum()
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.

    Compute the labels and the inertia of the given samples and centers.
    The ``distances`` buffer, when provided, is filled in place.

    Parameters
    ----------
    X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    x_squared_norms : array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.
    centers : float array, shape (k, n_features)
        The cluster centers.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    distances : float array, shape (n_samples,)
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels : int array of shape(n)
        The resulting assignment
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # Initialize every label to the sentinel -1 so unassigned samples are
    # easy to detect.
    labels = -np.ones(n_samples, np.int32)
    if distances is None:
        distances = np.zeros(shape=(0,), dtype=X.dtype)
    # The helpers below write into `distances` in place.
    if sp.issparse(X):
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
        return labels, inertia
    if precompute_distances:
        return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                centers, distances)
    inertia = _k_means._assign_labels_array(
        X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
    k : int
        number of centroids
    init : {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    x_squared_norms : array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.

    Returns
    -------
    centers : array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    # Optionally restrict initialization to a random subsample of the data
    # (used by MiniBatchKMeans to keep initialization cheap).
    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        init_indices = random_state.randint(0, n_samples, init_size)
        X = X[init_indices]
        x_squared_norms = x_squared_norms[init_indices]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    # Dispatch on the requested initialization scheme.
    if isinstance(init, string_types) and init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif isinstance(init, string_types) and init == 'random':
        # k observations drawn without replacement from the (sub)sample.
        seeds = random_state.permutation(n_samples)[:k]
        centers = X[seeds]
    elif hasattr(init, '__array__'):
        # ensure that the centers have the same dtype as X
        # this is a requirement of fused types of cython
        centers = np.array(init, dtype=X.dtype)
    elif callable(init):
        centers = init(X, k, random_state=random_state)
        centers = np.asarray(centers, dtype=X.dtype)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    # Always return dense centers, whatever form init was given in.
    if sp.issparse(centers):
        centers = centers.toarray()
    _validate_center_shape(X, k, centers)
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering

    Read more in the :ref:`User Guide <k_means>`.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    Examples
    --------
    >>> from sklearn.cluster import KMeans
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 4], [4, 0]])
    >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
    >>> kmeans.labels_
    array([0, 0, 0, 1, 1, 1], dtype=int32)
    >>> kmeans.predict([[0, 0], [4, 4]])
    array([0, 1], dtype=int32)
    >>> kmeans.cluster_centers_
    array([[ 1.,  2.],
           [ 4.,  2.]])

    See also
    --------
    MiniBatchKMeans
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.

    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    """
    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True,
                 n_jobs=1, algorithm='auto'):
        # Hyper-parameters are stored verbatim; validation is deferred to
        # fit() (see _check_fit_data and the k_means function).
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.algorithm = algorithm
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X
    def _check_test_data(self, X):
        # Prediction-time input must carry the same feature count as the
        # fitted centers.
        X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        return X
    def fit(self, X, y=None):
        """Compute k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster.
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # Delegate to the module-level k_means function, which runs n_init
        # initializations and keeps the lowest-inertia result.
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
                precompute_distances=self.precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs, algorithm=self.algorithm,
                return_n_iter=True)
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)
    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Opposite of the value of X on the K-means objective.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # Negated so that a larger score means a better fit, as is the
        # convention for estimator score methods.
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.
    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : bool, optional, default False
        Controls the verbosity.
    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.
    old_center_buffer : int
        Copy of old centers for monitoring convergence.

    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.
    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
                                 random_state=random_state)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        # Centers with no assigned points in this minibatch keep their
        # previous position and count.
        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            # Note: numpy >= 1.10 does not support '/=' for the following
            # expression for a mixture of int and float (see numpy issue #6464)
            centers[center_idx] = centers[center_idx] / counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
measured by a smoothed, variance-normalized of the mean center
squared position changes. This early stopping heuristics is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
only algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of square distances of samples to their nearest
neighbor.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):
        # Shared hyper-parameters (n_clusters, init, max_iter, verbose,
        # random_state, tol, n_init) are stored by the KMeans base class.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
        # Minibatch-specific parameters; see the class docstring for details.
        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio
    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C',
                        dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")
        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            # Explicit centers make multiple inits pointless: each init would
            # start from the same position.
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
        x_squared_norms = row_norms(X, squared=True)
        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated buffer which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, dtype=X.dtype)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, dtype=X.dtype)
        distances = np.zeros(self.batch_size, dtype=X.dtype)
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)
        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size
        # Hold out a random subset on which the competing inits are scored.
        validation_indices = random_state.randint(0, n_samples, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]
        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)
            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.
            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)
            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)
            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia
        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}
        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.randint(
                0, n_samples, self.batch_size)
            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)
            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break
        self.n_iter_ = iteration_idx + 1
        if self.compute_labels:
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
        return self
    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.

        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : array, shape (n_samples,)
            Cluster labels for each point.

        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        # Process the data in batch_size slices so only one batch of
        # pairwise distances is ever materialised at a time.
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        labels, inertia = zip(*results)
        # Concatenate per-batch labels; total inertia is the sum over batches.
        return np.hstack(labels), np.sum(inertia)
    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.

        y : Ignored
            Present only for API consistency with other estimators.

        Returns
        -------
        self
        """
        X = check_array(X, accept_sparse="csr")
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
        # An empty mini-batch is a no-op.
        if n_samples == 0:
            return self
        x_squared_norms = row_norms(X, squared=True)
        # Create the RNG lazily so that successive partial_fit calls share
        # the same random state.
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)
            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
            distances = None
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
            distances = np.zeros(X.shape[0], dtype=X.dtype)
        # Single mini-batch update of the centers (tol=0: no convergence
        # tracking buffer is needed, hence the empty old_center_buffer).
        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, dtype=X.dtype), 0,
                         random_reassign=random_reassign, distances=distances,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)
        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        # _labels_inertia_minibatch returns (labels, inertia); only the
        # labels are of interest here.
        return self._labels_inertia_minibatch(X)[0]
| bsd-3-clause |
blueskyjunkie/timeTools | timetools/synchronization/compliance/ituTG82611/__init__.py | 1 | 6978 | #
# Copyright 2017 Russell Smiley
#
# This file is part of timetools.
#
# timetools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# timetools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with timetools. If not, see <http://www.gnu.org/licenses/>.
#
import matplotlib.pyplot as mpp
import multiprocessing as mtp
import numpy
import timetools.signalProcessing.jobQueue as tsj
def calculateFloorPacketPercent( pdvMicroseconds, thisClusterRangeThresholdMicroseconds ):
    '''
    Percentage of packets in the window whose delay lies within the floor
    cluster, i.e. in the band [ floor, floor + threshold ] where floor is
    the minimum delay observed in the window.
    '''
    floorDelayMicroseconds = numpy.min( pdvMicroseconds )
    clusterUpperBoundMicroseconds = floorDelayMicroseconds + thisClusterRangeThresholdMicroseconds
    inFloorCluster = ( pdvMicroseconds >= floorDelayMicroseconds ) \
                     & ( pdvMicroseconds <= clusterUpperBoundMicroseconds )
    return inFloorCluster.sum() / len( pdvMicroseconds ) * 100
def calculateClusterPeak( pdvMicroseconds, thisFloorPacketPercentThreshold ):
    '''
    Delay range, relative to the window floor, spanned by the requested
    percentage of packets (counted over the sorted unique delay values).
    '''
    floorDelayMicroseconds = numpy.min( pdvMicroseconds )
    numberClusterPackets = int( numpy.floor(
        ( thisFloorPacketPercentThreshold / 100 ) * len( pdvMicroseconds ) ) )
    # numpy.unique returns the sorted unique delay values.
    uniqueDelaysMicroseconds = numpy.unique( pdvMicroseconds )
    return uniqueDelaysMicroseconds[ numberClusterPackets ] - floorDelayMicroseconds
class _Worker( object ):
    '''
    Picklable callable used by the worker pool: analyses the PDV of one
    observation window per start index handed to __call__.
    '''
    def __init__ ( self,
                   timebaseSeconds,
                   pdvMicroseconds,
                   floorPacketPercentThreshold,
                   clusterRangeThresholdMicroseconds,
                   windowDurationSeconds ):
        self._timebaseSeconds = timebaseSeconds
        self._pdvMicroseconds = pdvMicroseconds
        self._floorPacketPercentThreshold = floorPacketPercentThreshold
        self._clusterRangeThresholdMicroseconds = clusterRangeThresholdMicroseconds
        self._windowDurationSeconds = windowDurationSeconds

    def __call__ ( self, thisIntervalIndex ):
        '''
        Analyse the window starting at each index in thisIntervalIndex.

        Returns ( thisIntervalIndex, [ ( floorPacketPercent,
        clusterPeakMicroseconds ), ... ] ) with one tuple per index.
        '''
        pdvAnalysis = []
        # The original zip over an unused result index has been removed;
        # only the window start index is needed per iteration.
        for thisIndex in thisIntervalIndex:
            lowerWindowBoundSeconds = self._timebaseSeconds[ thisIndex ]
            upperWindowBoundSeconds = lowerWindowBoundSeconds + self._windowDurationSeconds
            # Select the samples falling in [ lower, upper ).
            windowTimeIndex = ( self._timebaseSeconds >= lowerWindowBoundSeconds ) \
                              & ( self._timebaseSeconds < upperWindowBoundSeconds )
            windowPdvMicroseconds = self._pdvMicroseconds[ windowTimeIndex ]
            calculatedFloorPacketPercent = calculateFloorPacketPercent( windowPdvMicroseconds,
                                                                        self._clusterRangeThresholdMicroseconds )
            calculatedClusterPeakMicroseconds = calculateClusterPeak( windowPdvMicroseconds,
                                                                      self._floorPacketPercentThreshold )
            pdvAnalysis.append( ( calculatedFloorPacketPercent, calculatedClusterPeakMicroseconds ) )
        return ( thisIntervalIndex, pdvAnalysis )
def scatterIndices( indexArray, numberGroups ):
    '''
    Partition an array of indices into numberGroups chunks for
    multiprocessing. The chunks need not be exactly equal in size.
    '''
    # numpy.array_split may yield float arrays; cast each chunk back to int
    # so the results remain usable for indexing.
    return [ group.astype( int )
             for group in numpy.array_split( indexArray, numberGroups ) ]
def _calculateResultSize( thisSplitResult ):
thisSize = 0
for thisResult in thisSplitResult:
thisSize += len( thisResult[0] )
return thisSize
def gatherResults( splitResultArray ):
    '''
    Reassemble per-group worker results into a single ordered array.

    Assume splitResultArray is an iterable of tuples: the first element of
    each tuple is the group of indices calculated, the second element is
    the corresponding result of arbitrary type.
    '''
    totalSize = sum( len( group[ 0 ] ) for group in splitResultArray )
    resultArray = [ () ] * totalSize
    orderedIndexArray = numpy.zeros( totalSize )
    for groupIndices, groupResults in splitResultArray:
        # Scatter each group's results back to their original positions.
        for arrayIndex, thisValue in zip( groupIndices, groupResults ):
            resultArray[ arrayIndex ] = thisValue
        orderedIndexArray[ groupIndices ] = groupIndices
    return ( resultArray, orderedIndexArray )
def evaluatePdvNetworkLimits( measurementTimeSeconds,
                              pdvMagnitudeMicroseconds,
                              neededNumberWorkers = mtp.cpu_count() ):
    # Evaluate a packet delay variation measurement against windowed floor
    # packet percentage / cluster peak limits — presumably the ITU-T
    # G.8261.1 network limit, per the module path; confirm against the
    # recommendation text.
    # NOTE: the default worker count is evaluated once at import time.
    windowDurationSeconds = 200
    floorPacketPercentThreshold = 1
    clusterRangeThresholdMicroseconds = 150
    # Only windows that fit entirely inside the measurement are analysed.
    endIndex = numpy.searchsorted( measurementTimeSeconds,
                                   ( measurementTimeSeconds[ -1 ] - windowDurationSeconds ) )
    indices = numpy.arange( 0, endIndex )
    # Spread the window start indices across the worker pool.
    splitIndices = scatterIndices( indices, neededNumberWorkers )
    thisWorker = _Worker( measurementTimeSeconds,
                          pdvMagnitudeMicroseconds,
                          floorPacketPercentThreshold,
                          clusterRangeThresholdMicroseconds,
                          windowDurationSeconds )
    thisPool = tsj.NoDaemonPool( neededNumberWorkers )
    splitResults = thisPool.map( thisWorker, splitIndices )
    assert( len( splitResults ) == len( splitIndices ) )
    # Restore per-window results to measurement order and sanity-check.
    results, orderedIndex = gatherResults( splitResults )
    assert( len( results ) == len( indices ) )
    assert( len( orderedIndex ) == len( indices ) )
    assert( numpy.all( indices == orderedIndex ) )
    calculatedFloorPacketPercent = numpy.array( [ x[0] for x in results ] )
    calculatedClusterPeakMicroseconds = numpy.array( [ x[1] for x in results ] )
    # Returns: (floor-percentage criterion met for all windows,
    #           cluster-peak criterion met for all windows,
    #           per-window floor packet percent, per-window cluster peak).
    return ( numpy.all( calculatedFloorPacketPercent >= floorPacketPercentThreshold ),
             numpy.all( calculatedClusterPeakMicroseconds < clusterRangeThresholdMicroseconds ),
             calculatedFloorPacketPercent,
             calculatedClusterPeakMicroseconds )
| gpl-3.0 |
shikhardb/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
    """Time lasso path computation (lars_path / lasso_path, with and
    without a precomputed Gram matrix) over a grid of dataset sizes.

    Returns a dict mapping method label -> list of timings in seconds,
    one entry per (n_samples, n_features) combination.
    """
    it = 0
    results = defaultdict(lambda: [])
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            dataset_kwargs = {
                'n_samples': n_samples,
                'n_features': n_features,
                # Floor division keeps these integral on Python 3
                # (make_regression expects integer counts) and matches the
                # Python 2 behaviour of the original `/`.
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)
            # gc.collect() before each timing keeps collector pauses out of
            # the measured interval.
            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (with Gram)'].append(delta)
            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (without Gram)'].append(delta)
            gc.collect()
            print("benchmarking lasso_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (with Gram)'].append(delta)
            gc.collect()
            print("benchmarking lasso_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (without Gram)'].append(delta)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt
    # The builtin int replaces np.int, which was removed in NumPy 1.24
    # (np.int was always an alias of the builtin).
    samples_range = np.linspace(10, 2000, 5).astype(int)
    features_range = np.linspace(10, 2000, 5).astype(int)
    results = compute_bench(samples_range, features_range)
    # Common z-limit so the four surfaces are visually comparable.
    max_time = max(max(t) for t in results.values())
    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        #ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        #ax.legend()
        i += 1
    plt.show()
| bsd-3-clause |
flightgong/scikit-learn | sklearn/utils/fixes.py | 1 | 8311 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import numpy as np
import scipy.sparse as sp
def _version_component(part):
    """Parse one dot-separated component of the numpy version string.

    Numeric components become ints; anything else (e.g. 'dev-1ea1592')
    is kept as a string.
    """
    try:
        return int(part)
    except ValueError:
        return part


np_version = tuple(_version_component(part)
                   for part in np.__version__.split('.'))
try:
    from scipy.special import expit     # SciPy >= 0.10
    with np.errstate(invalid='ignore', over='ignore'):
        if np.isnan(expit(1000)):       # SciPy < 0.14
            raise ImportError("no stable expit in scipy.special")
except ImportError:
    def expit(x, out=None):
        """Logistic sigmoid function, ``1 / (1 + exp(-x))``.

        See sklearn.utils.extmath.log_logistic for the log of this function.
        """
        if out is None:
            out = np.copy(x)
        elif out is not x:
            # Bug fix: a caller-supplied ``out`` was previously never
            # initialised from ``x``, so the result was computed from
            # whatever ``out`` already contained.
            out[:] = x
        # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
        # This way of computing the logistic is both fast and stable.
        out *= .5
        np.tanh(out, out)
        out += 1
        out *= .5
        return out
# Little dance to see if np.copy has an 'order' keyword argument.
# inspect.getargspec was removed in Python 3.11, so probe the signature
# instead.
if 'order' in inspect.signature(np.copy).parameters:
    def safe_copy(X):
        """Copy X while preserving its memory layout ('K' keeps the order)."""
        return np.copy(X, order='K')
else:
    # Before an 'order' argument was introduced, numpy wouldn't muck with
    # the ordering
    safe_copy = np.copy
try:
    # Probe whether np.divide handles the dtype argument correctly
    # (https://github.com/numpy/numpy/issues/3484). The builtin float is
    # used because the np.float alias was removed in NumPy 1.24.
    if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                        np.divide(.4, 1, casting="unsafe", dtype=float))
            or not np.allclose(np.divide(.4, 1), .4)):
        raise TypeError('Divide not working with dtype: '
                        'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide
except TypeError:
    # Compat for old versions of np.divide that do not provide support for
    # the dtype args
    def divide(x1, x2, out=None, dtype=None):
        """Division with np.divide's out/dtype semantics for old numpy."""
        out_orig = out
        if out is None:
            out = np.asarray(x1, dtype=dtype)
            if out is x1:
                out = x1.copy()
        else:
            if out is not x1:
                out[:] = x1
        if dtype is not None and out.dtype != dtype:
            out = out.astype(dtype)
        out /= x2
        if out_orig is None and np.isscalar(x1):
            # NOTE(review): np.asscalar was removed in NumPy 1.23; this
            # branch only exists for old numpy where it is still present.
            out = np.asscalar(out)
        return out
try:
    # NumPy >= 1.7 accepts the ``copy`` keyword on ndarray.astype; use the
    # native method directly when it does.
    np.array(5).astype(float, copy=False)
    astype = np.ndarray.astype
except TypeError:
    # Compat where astype accepted no copy argument: emulate copy=False by
    # returning the input unchanged when no cast is required.
    def astype(array, dtype, copy=True):
        return array if array.dtype == dtype else array.astype(dtype)
try:
    # scipy >= 0.14 supports the ``axis`` argument on sparse min/max.
    sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
    # the following code is taken from the scipy 0.14 codebase

    def _minor_reduce(X, ufunc):
        """Reduce along the minor axis of a compressed sparse matrix.

        Returns the indices of the non-empty major rows/columns and the
        reduced value for each of them.
        """
        major_index = np.flatnonzero(np.diff(X.indptr))
        if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 don't handle empty arrays in reduceat
            value = np.zeros_like(X.data)
        else:
            value = ufunc.reduceat(X.data, X.indptr[major_index])
        return major_index, value

    def _min_or_max_axis(X, axis, min_or_max):
        """Min or max along one axis, accounting for implicit zeros."""
        N = X.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = X.shape[1 - axis]
        mat = X.tocsc() if axis == 0 else X.tocsr()
        mat.sum_duplicates()
        major_index, value = _minor_reduce(mat, min_or_max)
        # Rows/columns with stored entries but also implicit zeros must
        # include 0 in the reduction.
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)
        from scipy.sparse import coo_matrix
        if axis == 0:
            res = coo_matrix((value, (np.zeros(len(value)), major_index)),
                             dtype=X.dtype, shape=(1, M))
        else:
            res = coo_matrix((value, (major_index, np.zeros(len(value)))),
                             dtype=X.dtype, shape=(M, 1))
        return res.A.ravel()

    def _sparse_min_or_max(X, axis, min_or_max):
        if axis is None:
            if 0 in X.shape:
                raise ValueError("zero-size array to reduction operation")
            zero = X.dtype.type(0)
            if X.nnz == 0:
                return zero
            m = min_or_max.reduce(X.data.ravel())
            # np.prod replaces np.product, which was removed in NumPy 2.0.
            if X.nnz != np.prod(X.shape):
                m = min_or_max(zero, m)
            return m
        if axis < 0:
            axis += 2
        if (axis == 0) or (axis == 1):
            return _min_or_max_axis(X, axis, min_or_max)
        else:
            raise ValueError("invalid axis, use 0 for rows, or 1 for columns")

    def sparse_min_max(X, axis):
        """Return (per-axis minima, per-axis maxima) of a sparse matrix."""
        return (_sparse_min_or_max(X, axis, np.minimum),
                _sparse_min_or_max(X, axis, np.maximum))
else:
    def sparse_min_max(X, axis):
        """Return (per-axis minima, per-axis maxima) of a sparse matrix."""
        return (X.min(axis=axis).toarray().ravel(),
                X.max(axis=axis).toarray().ravel())
if hasattr(np, 'argpartition'):
    argpartition = np.argpartition
else:
    # numpy.argpartition was introduced in numpy 1.8.0; a full argsort is
    # a correct (if slower) stand-in for older versions.
    def argpartition(a, kth, axis=-1, kind='introselect', order=None):
        return np.argsort(a, axis=axis, order=order)
try:
    from itertools import combinations_with_replacement
except ImportError:
    # Backport of itertools.combinations_with_replacement for Python 2.6,
    # from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
    # Python Software Foundation (https://docs.python.org/3/license.html)
    def combinations_with_replacement(iterable, r):
        # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
        pool = tuple(iterable)
        n = len(pool)
        # An empty pool can only yield the empty combination (r == 0).
        if not n and r:
            return
        # Start from the lexicographically smallest tuple: all indices 0.
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be incremented.
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                # Every index is at its maximum: enumeration is complete.
                return
            # Increment it and reset everything to its right to the same
            # value, keeping the index tuple non-decreasing.
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)
try:
    from numpy import isclose
except ImportError:
    def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
        """
        Returns a boolean array where two arrays are element-wise equal within
        a tolerance.

        This function was added to numpy v1.7.0, and the version you are
        running has been backported from numpy v1.8.1. See its documentation
        for more details.
        """
        def within_tol(x, y, atol, rtol):
            # The tolerance test itself; invalid-value warnings (e.g. from
            # comparisons involving inf) are deliberately silenced.
            with np.errstate(invalid='ignore'):
                result = np.less_equal(abs(x-y), atol + rtol * abs(y))
            if np.isscalar(a) and np.isscalar(b):
                result = bool(result)
            return result
        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if all(xfin) and all(yfin):
            # Fast path: no inf/nan anywhere.
            return within_tol(x, y, atol, rtol)
        else:
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Since we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond
| bsd-3-clause |
liangz0707/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    """Two clearly separated blocks in a precomputed affinity matrix are
    recovered by every solver / label-assignment / matrix-format
    combination, and a fitted model survives pickling."""
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary: normalise so sample 0 ends up
                # in cluster 1 before comparing.
                if labels[0] == 0:
                    labels = 1 - labels
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
                # A pickle round-trip must preserve the fitted state.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    """The 'amg' eigen_solver works when pyamg is installed, and raises
    ValueError otherwise."""
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        # Without pyamg, requesting the amg solver must raise.
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    """spectral_clustering must reject an unrecognised eigen_solver."""
    blob_centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, _ = make_blobs(n_samples=100, centers=blob_centers,
                      cluster_std=1., random_state=42)
    # Convert pairwise distances into a (dense then sparse) similarity.
    distances = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    """spectral_clustering must reject an unrecognised assign_labels."""
    blob_centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, _ = make_blobs(n_samples=100, centers=blob_centers,
                      cluster_std=1., random_state=42)
    # Convert pairwise distances into a (dense then sparse) similarity.
    distances = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """A precomputed sparse affinity must yield a perfect two-blob split."""
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    # RBF affinities, thresholded and sparsified.
    affinity = rbf_kernel(X, gamma=1)
    affinity = sparse.coo_matrix(np.maximum(affinity - 1e-4, 0))
    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed').fit(affinity).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
    """Every supported affinity mode (nearest_neighbors, rbf via gamma,
    all kernel_metrics entries, and user callables) produces labels of the
    right shape; unknown affinity names raise."""
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)
    # A callable affinity (here a constant kernel) must also be accepted.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """discretize recovers noisy class-assignment matrices well (ARI > 0.8)
    across a range of sample counts and class counts."""
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels
            # randint's upper bound is exclusive, so n_class + 1 reproduces
            # the inclusive range of the removed random_integers method.
            y_true = random_state.randint(0, n_class + 1, n_samples)
            # np.float64 replaces np.float, removed in NumPy 1.24.
            y_true = np.array(y_true, np.float64)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/tree/unveil_tree_structure.py | 1 | 4786 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
# Load the iris data and fit a small decision tree on a train split.
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Limiting the number of leaves keeps the printed structure small.
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold

# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
# Depths must be integral: they are multiplied by "\t" for indentation
# below, and sequence repetition requires an integer (a float dtype here
# raises TypeError).
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)]  # seed is the root node id and its parent depth
while len(stack) > 0:
    node_id, parent_depth = stack.pop()
    node_depth[node_id] = parent_depth + 1
    # If we have a test node
    if (children_left[node_id] != children_right[node_id]):
        stack.append((children_left[node_id], parent_depth + 1))
        stack.append((children_right[node_id], parent_depth + 1))
    else:
        is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
      "the following tree structure:"
      % n_nodes)
for i in range(n_nodes):
    if is_leaves[i]:
        print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
    else:
        # "%s" replaces the "%ss" typo that printed a stray 's' after the
        # threshold value.
        print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
              "node %s."
              % (node_depth[i] * "\t",
                 i,
                 children_left[i],
                 feature[i],
                 threshold[i],
                 children_right[i],
                 ))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows to retrieve the node indicator functions. A non zero element of
# indicator matrix at the position (i, j) indicates that the sample i goes
# through the node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
                                    node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # Skip the terminal leaf: only internal nodes carry a decision test.
    # (The previous "!=" skipped every test node and printed a bogus rule
    # for the leaf instead.)
    if leave_id[sample_id] == node_id:
        continue
    if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
        threshold_sign = "<="
    else:
        threshold_sign = ">"
    print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
          % (node_id,
             sample_id,
             feature[node_id],
             # sample_id — not the stale loop variable i from the previous
             # section — selects the sample being explained.
             X_test[sample_id, feature[node_id]],
             threshold_sign,
             threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
                len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
      % (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. The child artist are meant
to be drawn at a relative position to its parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer classes automatically adjust the relative positions of their
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG = False


def bbox_artist(*args, **kwargs):
    # Debugging helper: forwards to matplotlib's bbox_artist only when the
    # module-level DEBUG flag is switched on; otherwise it is a no-op.
    if not DEBUG:
        return
    mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Geiven a list of (width, xdescent) of each boxes, calculate the
total width and the x-offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Geiven a list of (height, descent) of each boxes, align the boxes
with *align* and calculate the y-offsets of each boxes.
total width and the offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*hd_list* : list of (width, xdescent) of boxes to be aligned.
*sep* : spacing between boxes
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
    """
    The OffsetBox is a simple container artist.  The child artists are
    meant to be drawn at a relative position to their parent.
    """
    def __init__(self, *args, **kwargs):
        super(OffsetBox, self).__init__(*args, **kwargs)

        self._children = []
        self._offset = (0, 0)

    def set_figure(self, fig):
        """
        Set the figure of the box and of all of its children.

        accepts a :class:`~matplotlib.figure.Figure` instance
        """
        martist.Artist.set_figure(self, fig)
        for c in self.get_children():
            c.set_figure(fig)

    def set_offset(self, xy):
        """
        Set the offset.

        accepts x, y, tuple, or a callable object.
        """
        self._offset = xy

    def get_offset(self, width, height, xdescent, ydescent):
        """
        Return the offset as an (x, y) pair.

        The extent of the box is passed in so that a callable offset can
        position the box depending on its final size.
        """
        if callable(self._offset):
            return self._offset(width, height, xdescent, ydescent)
        else:
            return self._offset

    def set_width(self, width):
        """
        Set the width of the box.

        accepts float
        """
        self.width = width

    def set_height(self, height):
        """
        Set the height of the box.

        accepts float
        """
        self.height = height

    def get_children(self):
        """
        Return the list of child artists.
        """
        return self._children

    def get_extent_offsets(self, renderer):
        # There is no sensible default layout; concrete containers must
        # override this.  (The original raised a bare, message-less
        # ``Exception("")`` -- NotImplementedError is still caught by any
        # ``except Exception`` handler, so this is backward compatible.)
        raise NotImplementedError(
            "get_extent_offsets must be overridden in derived classes")

    def get_extent(self, renderer):
        """
        Return the width, height, xdescent and ydescent of the box.
        """
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        return w, h, xd, yd

    def get_window_extent(self, renderer):
        """
        Return the bounding box in display space.
        """
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        px, py = self.get_offset(w, h, xd, yd)
        return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)

    def draw(self, renderer):
        """
        Update the location of the children if necessary and draw them
        to the given *renderer*.
        """
        width, height, xdescent, ydescent, offsets = \
            self.get_extent_offsets(renderer)

        px, py = self.get_offset(width, height, xdescent, ydescent)

        for c, (ox, oy) in zip(self.get_children(), offsets):
            c.set_offset((px + ox, py + oy))
            c.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
    """Shared constructor for the horizontal and vertical packers."""

    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align=None, mode=None, children=None):
        """
        *pad* : boundary pad
        *sep* : spacing between items
        *width*, *height* : width and height of the container box.
            calculated if None.
        *align* : alignment of boxes
        *mode* : packing mode
        """
        super(PackerBase, self).__init__()
        # Geometry first, then layout policy, then contents.
        self.pad = pad
        self.sep = sep
        self.width = width
        self.height = height
        self.align = align
        self.mode = mode
        self._children = children
class VPacker(PackerBase):
    """
    The VPacker has its children packed vertically.  It automatically
    adjusts the relative positions of the children at drawing time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        *pad* : boundary pad
        *sep* : spacing between items
        *width*, *height* : width and height of the container box.
            calculated if None.
        *align* : alignment of boxes
        *mode* : packing mode
        """
        super(VPacker, self).__init__(pad, sep, width, height,
                                      align, mode,
                                      children)

    def get_extent_offsets(self, renderer):
        """
        Update the offsets of the children and return the extent of the box.
        """
        # Extent of every child: (width, height, xdescent, ydescent).
        whd_list = [c.get_extent(renderer) for c in self.get_children()]
        # For vertical packing the descent is measured from the top of each
        # box, hence h - yd.
        whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]

        wd_list = [(w, xd) for w, h, xd, yd in whd_list]
        # Horizontal placement: align the children according to self.align.
        width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
                                                         self.width,
                                                         self.align)

        pack_list = [(h, yd) for w,h,xd,yd in whd_list]
        # Vertical placement: stack the children according to self.mode.
        height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
                                                self.sep, self.mode)

        # yoffsets_ is a numpy array, so the list on the right is added
        # elementwise.
        yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
        ydescent = height - yoffsets[0]
        # Flip into matplotlib's y-up convention.
        yoffsets = height - yoffsets

        #w, h, xd, h_yd = whd_list[-1]
        yoffsets = yoffsets - ydescent

        return width + 2*self.pad, height + 2*self.pad, \
               xdescent+self.pad, ydescent+self.pad, \
               zip(xoffsets, yoffsets)
class HPacker(PackerBase):
    """
    The HPacker has its children packed horizontally.  It automatically
    adjusts the relative positions of the children at drawing time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        *pad* : boundary pad
        *sep* : spacing between items
        *width*, *height* : width and height of the container box.
            calculated if None.
        *align* : alignment of boxes
        *mode* : packing mode
        """
        super(HPacker, self).__init__(pad, sep, width, height,
                                      align, mode, children)

    def get_extent_offsets(self, renderer):
        """
        Update the offsets of the children and return the extent of the box.
        """
        whd_list = [c.get_extent(renderer) for c in self.get_children()]

        if self.height is None:
            height_descent = max([h-yd for w,h,xd,yd in whd_list])
            ydescent = max([yd for w,h,xd,yd in whd_list])
            height = height_descent + ydescent
        else:
            # BUGFIX: this read ``self._pad``, but PackerBase stores the
            # value as ``self.pad``; an explicit height therefore raised
            # AttributeError.
            height = self.height - 2 * self.pad  # height w/o pad

        hd_list = [(h, yd) for w, h, xd, yd in whd_list]
        # Vertical placement: align the children according to self.align.
        height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
                                                          self.height,
                                                          self.align)

        pack_list = [(w, xd) for w,h,xd,yd in whd_list]
        # Horizontal placement: pack the children according to self.mode.
        width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
                                               self.sep, self.mode)

        # xoffsets_ is a numpy array; the list on the right is added
        # elementwise.
        xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]

        xdescent = whd_list[0][2]
        xoffsets = xoffsets - xdescent

        return width + 2*self.pad, height + 2*self.pad, \
               xdescent + self.pad, ydescent + self.pad, \
               zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
    """
    A container that can hold arbitrary artists as children.

    The DrawingArea has a fixed width and height; its children are
    positioned at fixed locations relative to the parent.
    """

    def __init__(self, width, height, xdescent=0.,
                 ydescent=0., clip=True):
        """
        *width*, *height* : size of the container box.
        *xdescent*, *ydescent* : descent of the box in x- and y-direction.
        """
        super(DrawingArea, self).__init__()

        self.width = width
        self.height = height
        self.xdescent = xdescent
        self.ydescent = ydescent

        # Children draw through this transform; it is re-targeted whenever
        # the container offset changes.
        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)

    def get_transform(self):
        """Return the transform applied to the children."""
        return self.offset_transform

    def set_transform(self, t):
        """set_transform is ignored."""
        pass

    def set_offset(self, xy):
        """Set the container offset; *xy* is an (x, y) pair in display units."""
        self._offset = xy

        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])

    def get_offset(self):
        """Return the container offset."""
        return self._offset

    def get_window_extent(self, renderer):
        """Return the bounding box in display space."""
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def get_extent(self, renderer):
        """Return the width, height, xdescent and ydescent of the box."""
        return self.width, self.height, self.xdescent, self.ydescent

    def add_artist(self, a):
        """Add any :class:`~matplotlib.artist.Artist` to the container box."""
        self._children.append(a)
        a.set_transform(self.get_transform())

    def draw(self, renderer):
        """Draw all children, then (in debug mode) the bounding box."""
        for c in self._children:
            c.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
    """
    The TextArea contains a single Text instance.  The text is placed at
    (0, 0) with baseline+left alignment.  The width and height of the
    TextArea instance are the width and height of its child text.
    """
    def __init__(self, s,
                 textprops=None,
                 multilinebaseline=None,
                 minimumdescent=True,
                 ):
        """
        *s* : a string to be displayed.
        *textprops* : property dictionary for the text
        *multilinebaseline* : If True, baseline for multiline text is
                              adjusted so that it is (approximately)
                              center-aligned with singleline text.
        *minimumdescent* : If True, the box has a minimum descent of "p".
        """
        if textprops is None:
            textprops = {}

        # BUGFIX: dict.has_key() does not exist on Python 3; the "in"
        # operator is equivalent on Python 2 as well.
        if "va" not in textprops:
            textprops["va"] = "baseline"

        self._text = mtext.Text(0, 0, s, **textprops)

        OffsetBox.__init__(self)

        self._children = [self._text]

        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)
        self._baseline_transform = mtransforms.Affine2D()
        self._text.set_transform(self.offset_transform +
                                 self._baseline_transform)

        self._multilinebaseline = multilinebaseline
        self._minimumdescent = minimumdescent

    def set_multilinebaseline(self, t):
        """
        Set multilinebaseline.

        If True, the baseline for multiline text is adjusted so that it is
        (approximately) center-aligned with singleline text.
        """
        self._multilinebaseline = t

    def get_multilinebaseline(self):
        """
        Get multilinebaseline.
        """
        return self._multilinebaseline

    def set_minimumdescent(self, t):
        """
        Set minimumdescent.

        If True, the extent of singleline text is adjusted so that it has
        a minimum descent of "p".
        """
        self._minimumdescent = t

    def get_minimumdescent(self):
        """
        Get minimumdescent.
        """
        return self._minimumdescent

    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass

    def set_offset(self, xy):
        """
        Set the container offset; *xy* is an (x, y) pair in display units.
        """
        self._offset = xy

        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])

    def get_offset(self):
        """
        Return the container offset.
        """
        return self._offset

    def get_window_extent(self, renderer):
        """
        Return the bounding box in display space.
        """
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def get_extent(self, renderer):
        """Return the width, height, xdescent and ydescent of the box."""
        clean_line, ismath = self._text.is_math_text(self._text._text)

        # Metrics of "lp" give a reference ascent/descent for the font.
        _, h_, d_ = renderer.get_text_width_height_descent(
            "lp", self._text._fontproperties, ismath=False)

        bbox, info = self._text._get_layout(renderer)
        w, h = bbox.width, bbox.height
        line = info[0][0]  # first line

        _, hh, dd = renderer.get_text_width_height_descent(
            clean_line, self._text._fontproperties, ismath=ismath)

        self._baseline_transform.clear()

        if len(info) > 1 and self._multilinebaseline:  # multi line
            d = h - (hh - dd)  # the baseline of the first line
            # Shift so multiline text is (approximately) center-aligned
            # with singleline text.
            d_new = 0.5 * h - 0.5 * (h_ - d_)
            self._baseline_transform.translate(0, d - d_new)
            d = d_new
        else:  # single line
            h_d = max(h_ - d_, h - dd)

            if self.get_minimumdescent():
                # To have a minimum descent, i.e., "l" and "p" have the
                # same descents.
                d = max(dd, d_)
            else:
                d = dd

            h = h_d + d

        return w, h, 0., d

    def draw(self, renderer):
        """
        Draw the child text, then (in debug mode) the bounding box.
        """
        self._text.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
| agpl-3.0 |
fzalkow/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
# Fix the global RNG seed so the benchmark data is reproducible.
np.random.seed(42)
def sparsity_ratio(X):
    """Fraction of nonzero entries relative to the full data-matrix size.

    NOTE: uses the module-level n_samples and n_features rather than X.size,
    so the ratio for the coefficient vector is also expressed relative to
    the full n_samples x n_features grid.
    """
    total_cells = float(n_samples * n_features)
    return np.count_nonzero(X) / total_cells
n_samples, n_features = 5000, 300

# Generate a partially-sparse input matrix.
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
# NOTE(review): zeroing *sample* rows from an n_features-based bound looks
# suspicious but is kept for benchmark continuity -- confirm intent.
X[inds[int(n_features / 1.2):]] = 0  # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
# // keeps this an int index on Python 3 (plain / yields a float there).
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)

# Add noise.  BUGFIX: the original called np.random.normal((n_samples,)),
# which passes the tuple as the *mean* and draws a single sample, adding a
# huge constant offset instead of per-sample noise; size= is what was meant.
y += 0.01 * np.random.normal(size=n_samples)

# Split data in train set and test set.
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))

###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
    """Time repeated predictions on the dense test matrix."""
    n_repeats = 300
    for _ in range(n_repeats):
        clf.predict(X_test)
def benchmark_sparse_predict():
    """Time repeated predictions on a CSR copy of the test matrix."""
    # Convert once up front so only predict() cost is measured per repeat.
    X_test_sparse = csr_matrix(X_test)
    n_repeats = 300
    for _ in range(n_repeats):
        clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
    """Print the r^2 of *y_pred* against *y_test*, labelled with *case*."""
    print("r^2 on test data (%s) : %f" % (case, r2_score(y_test, y_pred)))
# Dense model: score, then time repeated predictions.
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
# Convert the fitted coefficients to a sparse representation in place,
# then score and time again -- both r^2 values should match.
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
prheenan/Research | Perkins/Projects/WetLab/Demos/PCR_Optimizations/2016-7-8-DMSO-trials/main_dmso_opt.py | 1 | 1467 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../../../../")
from GeneralUtil.python import PlotUtilities as pPlotUtil
class DmsoInfo:
    """One PCR trial record: DMSO percentage, repeat count and yield."""

    def __init__(self, DMSO, Repeats, YieldConcentration, YieldVolumeUl=35):
        """
        Args:
            DMSO: DMSO concentration used, in percent.
            Repeats: number of PCR repeats run at this concentration.
            YieldConcentration: measured yield concentration (presumably
                ng/uL -- TODO confirm units against the lab notebook).
            YieldVolumeUl: eluted volume in microliters (defaults to 35).
        """
        self.YieldVolume = YieldVolumeUl
        self.YieldConc = YieldConcentration
        self.RepeatNumber = Repeats
        self.DmsoPercent = DMSO
def run():
    """Plot PCR yield (ug) versus DMSO percentage for the 2016-07-08 trials."""
    # Data from 7/8/2016, all overhang spacers.
    trials = [
        DmsoInfo(0, 40, 67),
        DmsoInfo(1, 40, 57),
        DmsoInfo(3, 40, 139),
        DmsoInfo(5, 40, 97),
    ]
    percentages = [t.DmsoPercent for t in trials]
    # Total mass = concentration * volume (ng), converted to micrograms.
    yields_ug = np.array([t.YieldConc * t.YieldVolume for t in trials]) / 1000
    fig = pPlotUtil.figure()
    plt.plot(percentages, yields_ug, 'ro')
    pPlotUtil.lazyLabel("DMSO %", "Yield (ug)",
                        "Increasing DMSO Percentage increases yield")
    fudge = 0.5
    plt.xlim([-fudge, max(percentages) + fudge])
    pPlotUtil.savefig(fig, "DMSO.png")
if __name__ == "__main__":
    # Run the analysis when executed as a script.
    run()
| gpl-3.0 |
GeographicaGS/daynight2geojson | daynight2geojson/daynight2geojson.py | 2 | 3599 | # -*- coding: utf-8 -*-
#
# Author: Cayetano Benavent, 2015.
# https://github.com/GeographicaGS/daynight2geojson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import json
import geojson
import shapely.geometry
import shapely.wkt
# To avoid errors if you run module without X11
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.basemap import Basemap
from datetime import datetime
class DayNight2Geojson(object):
    """
    Compute the day/night terminator geometry and dump it to a GeoJSON file.
    """
    def __init__(self, filepath, input_date=None):
        """
        filepath: destination file for the GeoJSON day/night geometry.
        input_date: None means "UTC now"; otherwise pass a
            datetime(year, month, day, hour, minute) object.
        """
        self.filepath = filepath
        self.input_date = input_date

    def getDayNight(self):
        """
        Compute the day/night geometry and dump it to a GeoJSON file.

        Default projection: Equirectangular (Cylindrical Equidistant).
        Default date: now (UTC).
        """
        # "is None" is the correct identity test (was "== None").
        if self.input_date is None:
            date = datetime.utcnow()
        else:
            date = self.input_date
        map_date = date.strftime("%d %b %Y %H:%M:%S")

        map = Basemap(projection='cyl', lon_0=0, ellps='WGS84',
                      resolution=None)
        contour_set = map.nightshade(date)

        self.__buildGeojson(contour_set, map_date)

        # print() call form is valid on both Python 2 and 3 (the original
        # used a Python-2-only print statement).
        print('Day/Night Map for %s (UTC)' % (map_date))

    def __buildGeojson(self, contour_set, map_date):
        """
        Convert the nightshade contour polygons to GeoJSON features.
        """
        n_coll = len(contour_set.collections)
        for cs_coll in range(n_coll):
            # Some collections are empty; only process those with a path.
            if len(contour_set.collections[cs_coll].get_paths()) > 0:
                cs_paths = contour_set.collections[cs_coll].get_paths()[0]
                vert = cs_paths.vertices
                lon = vert[:, 0]
                lat = vert[:, 1]
                # A polygon needs at least three vertices.
                if len(lon) > 2:
                    coord_list = [(coord[0], coord[1])
                                  for coord in zip(lon, lat)]
                    geom = shapely.geometry.polygon.Polygon(coord_list)
                    geom_wkt = shapely.wkt.loads(str(geom))
                    geom_geojson = geojson.Feature(
                        geometry=geom_wkt,
                        properties={'Date': map_date})
                    # NOTE(review): each qualifying polygon overwrites the
                    # previous file contents -- confirm this is intended.
                    self.__writeGeojsonFile(geom_geojson)

    def __writeGeojsonFile(self, geojson_str):
        """
        Write the GeoJSON feature to self.filepath (overwrites).
        """
        # Context manager guarantees the file is closed even on error.
        with open(self.filepath, 'w') as geojson_file:
            geojson_file.write(json.dumps(geojson_str))
| gpl-2.0 |
ctwj/crackCaptcha | captcha3.py | 1 | 4777 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import matplotlib.pyplot as plt
import numpy as np
default_font = "./font/DejaVuSans.ttf"
# Characters used in the captcha (no Chinese characters)
number = ['0','1','2','3','4','5','6','7','8','9']
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
# Captchas usually ignore case; captcha length is 4 characters
def random_captcha_text(char_set=number+alphabet+ALPHABET, captcha_size=4):
    """Return a list of *captcha_size* characters drawn at random from *char_set*."""
    return [random.choice(char_set) for _ in range(captcha_size)]
# Captcha-generation interface.
def generate_verify_image(size=(160, 60),
                          img_type="GIF",
                          mode="RGB",
                          bg_color=(255, 255, 255),
                          fg_color=(0, 0, 255),
                          font_size=20,
                          font_type=default_font,
                          length=4,
                          draw_lines=True,
                          n_line=(1, 2),
                          draw_points=True,
                          point_chance=2,
                          save_img=False):
    """
    Generate a captcha image.

    :param size: image size as (width, height); default (160, 60)
    :param img_type: image save format; default GIF (GIF, JPEG, TIFF or PNG)
    :param mode: image mode; default RGB
    :param bg_color: background color; default white
    :param fg_color: foreground (character) color; default blue #0000FF
    :param font_size: captcha font size
    :param font_type: captcha font; default DejaVuSans.ttf
    :param length: number of captcha characters
    :param draw_lines: whether to draw interference lines
    :param n_line: (min, max) number of interference lines; only used when
        draw_lines is True
    :param draw_points: whether to draw interference points
    :param point_chance: probability of an interference point, in [0, 100]
    :param save_img: whether to save the image (currently unused)
    :return: [0]: the captcha text, [1]: the captcha image as a numpy array
    """
    width, height = size  # width, height
    img = Image.new(mode, size, bg_color)  # create the image
    draw = ImageDraw.Draw(img)  # create the pen

    def create_lines():
        """Draw interference lines."""
        line_num = random.randint(*n_line)  # number of interference lines

        for i in range(line_num):
            # start point
            begin = (random.randint(0, size[0]), random.randint(0, size[1]))
            # end point
            end = (random.randint(0, size[0]), random.randint(0, size[1]))
            draw.line([begin, end], fill=(0, 0, 0))

    def create_points():
        """Draw interference points."""
        chance = min(100, max(0, int(point_chance)))  # clamp to [0, 100]

        for w in range(width):
            for h in range(height):
                tmp = random.randint(0, 100)
                if tmp > 100 - chance:
                    draw.point((w, h), fill=(0, 0, 0))

    def create_strs(length):
        """Draw the captcha characters."""
        c_chars = random_captcha_text(captcha_size=length)
        strs = ' %s ' % ' '.join(c_chars)  # pad every character with spaces

        font = ImageFont.truetype(font_type, font_size)
        # NOTE(review): ImageFont.getsize() was removed in Pillow 10; switch
        # to getbbox()/getlength() when upgrading Pillow.
        font_width, font_height = font.getsize(strs)

        draw.text(((width - font_width) / 3, (height - font_height) / 3),
                  strs, font=font, fill=fg_color)

        return ''.join(c_chars)

    if draw_lines:
        create_lines()
    if draw_points:
        create_points()
    strs = create_strs(length)

    # Perspective-distortion parameters (small random shear/scale).
    params = [1 - float(random.randint(1, 2)) / 100,
              0,
              0,
              0,
              1 - float(random.randint(1, 10)) / 100,
              float(random.randint(1, 2)) / 500,
              0.001,
              float(random.randint(1, 2)) / 500
              ]
    img = img.transform(size, Image.PERSPECTIVE, params)  # apply distortion

    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)  # edge-enhancement filter
    captcha_image = np.array(img)
    return strs, captcha_image
def gen_captcha_text_and_image():
    """Return (text, image) from generate_verify_image() with default settings."""
    return generate_verify_image()
if __name__ == '__main__':
    # Test: render a captcha and display it (the trailing break exits
    # the loop after a single iteration).
    while(1):
        text, image = generate_verify_image(length=4)
        print(image)
        f = plt.figure()
        ax = f.add_subplot(111)
        # Overlay the ground-truth text on the rendered captcha.
        ax.text(0.1, 0.9,text, ha='center', va='center', transform=ax.transAxes)
        plt.imshow(image)
        plt.show()
break | apache-2.0 |
treycausey/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 8 | 6264 | """
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to :math:`\\frac{n_samples - n_features-1}{2}` outliers) estimator of
covariance. The idea is to find :math:`\\frac{n_samples+n_features+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided n_samples > 5 * n_features
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import pylab as pl
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10

# Contamination levels to sweep: a fine grid up to n_samples/8, then a
# coarser grid up to n_samples/2 (the MCD breakdown point).
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))

# definition of arrays to store results (rows: contamination level,
# columns: repetition)
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
    for j in range(repeat):

        rng = np.random.RandomState(i * j)

        # generate data
        X = rng.randn(n_samples, n_features)

        # add some outliers
        # NOTE(review): n_outliers comes from np.linspace and is a float;
        # float slicing only works on old NumPy / Python 2 -- an int() cast
        # is needed when porting to Python 3.
        outliers_index = rng.permutation(n_samples)[:n_outliers]
        outliers_offset = 10. * \
            (np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset

        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False

        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        mcd = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        # (the data were drawn from N(0, I), so the truth is 0 / identity)
        err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
        err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
        # compare estimators learned from the full data set with true
        # parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))
        # compare with an empirical covariance learned from a pure data set
        # (i.e. "perfect" mcd)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)

# Top panel: location-estimation error vs. contamination.
pl.subplot(2, 1, 1)
pl.errorbar(range_n_outliers, err_loc_mcd.mean(1),
            yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
            label="Robust location", color='m')
pl.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
            yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
            label="Full data set mean", color='green')
pl.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
            yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
            label="Pure data set mean", color='black')
pl.title("Influence of outliers on the location estimation")
pl.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
pl.legend(loc="upper left", prop=font_prop)

# Bottom panel: covariance-estimation error vs. contamination.
pl.subplot(2, 1, 2)
x_size = range_n_outliers.size
pl.errorbar(range_n_outliers, err_cov_mcd.mean(1),
            yerr=err_cov_mcd.std(1),
            label="Robust covariance (mcd)", color='m')
# BUGFIX: integer division (//) keeps the slice bounds ints on Python 3;
# plain / produced ints on Python 2 but floats (a TypeError in a slice)
# on Python 3.  Semantics on Python 2 are unchanged.
pl.errorbar(range_n_outliers[:(x_size // 5 + 1)],
            err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
            yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
            label="Full data set empirical covariance", color='green')
pl.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
        err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
        color='green', ls='--')
pl.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
            yerr=err_cov_emp_pure.std(1),
            label="Pure data set empirical covariance", color='black')
pl.title("Influence of outliers on the covariance estimation")
pl.xlabel("Amount of contamination (%)")
pl.ylabel("RMSE")
pl.legend(loc="upper center", prop=font_prop)
pl.show()
| bsd-3-clause |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 30 | 4727 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
  """Stack the selected rows of *array* into a new 2-D array."""
  return np.vstack([array[i] for i in row_indices])
class FeedingQueueRunnerTestCase(tf.test.TestCase):
  """Tests for `FeedingQueueRunner`."""

  def testArrayFeeding(self):
    # Enqueue a numpy array and check that each dequeued batch carries the
    # expected (wrapped) row indices and the matching row contents.
    with tf.Graph().as_default():
      array = np.arange(32).reshape([16, 2])
      q = ff.enqueue_data(array, capacity=100)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          # Feeding cycles through the array, so indices wrap modulo its
          # number of rows.
          indices = [j % array.shape[0]
                     for j in range(batch_size * i, batch_size * (i + 1))]
          expected_dq = get_rows(array, indices)
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(indices, dq[0])
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testArrayFeedingMultiThread(self):
    # With several feeding threads and shuffling, batch order is not
    # deterministic; only check content consistency against the indices.
    with tf.Graph().as_default():
      array = np.arange(256).reshape([128, 2])
      q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_dq = get_rows(array, indices)
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeeding(self):
    # Skip silently when pandas is not installed (see HAS_PANDAS guard).
    if not HAS_PANDAS:
      return
    with tf.Graph().as_default():
      array1 = np.arange(32)
      array2 = np.arange(32, 64)
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
      q = ff.enqueue_data(df, capacity=100)
      batch_size = 5
      dq_op = q.dequeue_many(5)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [j % array1.shape[0]
                     for j in range(batch_size * i, batch_size * (i + 1))]
          expected_df_indices = df.index[indices]
          expected_rows = df.iloc[indices]
          dq = sess.run(dq_op)
          # dq[0] is the DataFrame index; dq[1:] are the columns in order.
          np.testing.assert_array_equal(expected_df_indices, dq[0])
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeedingMultiThread(self):
    # Multi-threaded, shuffled DataFrame feeding: validate rows by the
    # dequeued indices rather than by order.
    if not HAS_PANDAS:
      return
    with tf.Graph().as_default():
      array1 = np.arange(128, 256)
      array2 = 2 * array1
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
      q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
      batch_size = 5
      dq_op = q.dequeue_many(batch_size)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_rows = df.iloc[indices]
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)
if __name__ == "__main__":
  # Run the tests with TensorFlow's test runner.
  tf.test.main()
| apache-2.0 |
jayaneetha/crowdsource-platform | fixtures/createJson.py | 6 | 2462 | __author__ = 'Megha'
# Script to transfer csv containing data about various models to json
# Input csv file constituting of the model data
# Output json file representing the csv data as json object
# Assumes model name to be first line
# Field names of the model on the second line
# Data seperated by __DELIM__
# Example:
# L01 ModelName: registrationmodel
# L02 FieldNames: user,activation_key,created_timestamp,last_updated
# L03 Data: 1,qwer,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
# L04 Data: 2,assd,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
import numpy as np
import pandas as pd
import json as json
__MODULE_NAME__ = 7 #Number of lines after which Model Name
__INPUT_FILE__ = 'meghaWorkerData.csv'
__OUTPUT_FILE__ = 'meghaWorkerData.json'
__NEWLINE__ = '\n'
__KEY1__ = 0
__KEY2__ = 0
__DELIM__ = ','
__APPEND__ = 'crowdsourcing.'
__KEY_MODEL__ = 'model'
__KEY_FIELDS__ = 'fields'
__KEY_PK__ = 'pk'
def create_dict(input_dict, module, data_collection):
    """Append one Django-fixture record per (pk, fields) pair to data_collection.

    Each record carries a 'fields' mapping, the primary key, and the fully
    qualified model name (__APPEND__ + module). The mutated list is also
    returned for convenience.
    """
    model_name = __APPEND__ + module
    for pk, fields in input_dict.items():
        data_collection.append({
            __KEY_FIELDS__: fields,
            __KEY_PK__: pk,
            __KEY_MODEL__: model_name,
        })
    return data_collection
def create_data_json(__FILE__):
    """Convert the model-description CSV into a Django-style JSON fixture.

    The input is organized in groups of __MODULE_NAME__ lines: the model
    name, the column names, then five data rows. The collected records are
    written to __OUTPUT_FILE__.
    """
    # BUG FIX: the original ignored its argument and always read
    # __INPUT_FILE__; honor the caller-supplied path instead.  Context
    # managers guarantee the handles are closed.
    with open(__FILE__, 'r') as in_fp:
        file_lines = in_fp.readlines()
    data_collection = []
    # Each model description occupies exactly __MODULE_NAME__ lines.
    for line_no in range(0, len(file_lines), __MODULE_NAME__):
        columns = file_lines[line_no + 1].strip(__NEWLINE__).split(__DELIM__)
        # The five fixture rows following the column-name line.
        instances = [
            file_lines[line_no + offset].strip(__NEWLINE__).split(__DELIM__)
            for offset in range(2, 7)
        ]
        data = np.array(instances)
        df = pd.DataFrame(data, columns=columns)
        create_dict(df.transpose().to_dict(),
                    file_lines[line_no].strip(__NEWLINE__),
                    data_collection)
        del df
    print(data_collection)
    with open(__OUTPUT_FILE__, 'w') as out_fp:
        out_fp.write(json.dumps(data_collection, indent=2))
if __name__ == '__main__':
    # Entry point: build the JSON fixture from the bundled CSV.
    create_data_json (__INPUT_FILE__)
jpautom/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
    # test metric smacof using the data of "Modern Multidimensional Scaling",
    # Borg & Groenen, p 154
    sim = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    # Fixed starting configuration so a single majorization step is
    # deterministic (n_init=1, max_iter=1).
    Z = np.array([[-.266, -.539],
                  [.451, .252],
                  [.016, -.238],
                  [-.200, .524]])
    X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
    # Expected embedding after one SMACOF iteration (textbook values).
    X_true = np.array([[-1.415, -2.471],
                       [1.633, 1.107],
                       [.249, -.067],
                       [-.468, 1.431]])
    assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
    """smacof must reject malformed similarity matrices and bad inits."""
    # Not symmetric similarity matrix:
    sim = np.array([[0, 5, 9, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, sim)
    # Not squared similarity matrix:
    sim = np.array([[0, 5, 9, 4],
                    [5, 0, 2, 2],
                    [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, sim)
    # init not None and not correct format:
    sim = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    # Z has only 3 rows for a 4-point similarity matrix.
    Z = np.array([[-.266, -.539],
                  [.016, -.238],
                  [-.200, .524]])
    assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
    """Smoke test: non-metric MDS fits a precomputed dissimilarity matrix."""
    sim = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    # n_jobs=3 also exercises the parallel code path.
    mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
    mds_clf.fit(sim)
| bsd-3-clause |
coin-pan/Toodledo-graphical-activity-tracker | tracker.py | 1 | 12636 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
#
# Toodledo Activity Tracker & Plotter
# Copyright (C) 2011 Marc Chauvet (marc DOT chauvet AT gmail DOT com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import urllib2
import re
import hashlib
import json
import getpass
import sys
import os
import datetime
import pylab
import time
# From joeld and crazy2be on http://stackoverflow.com/a/287944
class bcolors:
    """ANSI escape sequences for coloured/bold terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = "\033[1m"
# From William Park on https://www.physics.rutgers.edu/~masud/computing/WPark_recipes_in_python.html
def conv(x, y):
    """Return the full discrete convolution of sequences x and y.

    Output length is len(x) + len(y) - 1, matching numpy.convolve's
    'full' mode, computed in pure Python.
    """
    n_out = len(x) + len(y) - 1
    out = []
    for k in range(n_out):
        lo = max(0, k - (len(y) - 1))
        hi = min(len(x) - 1, k)
        out.append(sum(x[i] * y[k - i] for i in range(lo, hi + 1)))
    return out
version = "0.1"
number_of_plotted_days = 30
def remove_comments(string):
    """Strip '#'-to-newline comments (the newline included) from *string*.

    A trailing comment on a final line that lacks a newline is left in
    place, because the pattern requires the terminating newline.
    """
    comment = re.compile(r"#.*\n")
    return comment.sub("", string)
def isOK_apptoken():
    """Return True if apptoken.dat exists and is readable, else False."""
    try:
        # Context manager closes the handle promptly (the original
        # leaked the open file object).
        with open("apptoken.dat", "r") as fp:
            remove_comments("".join(fp.readlines())).rstrip("\n")
        return True
    except IOError:
        # "apptoken.dat" file doesn't exist
        return False
def isOK_appid():
    """Return True if appid.dat exists and is readable, else False."""
    try:
        # Context manager closes the handle promptly (the original
        # leaked the open file object).
        with open("appid.dat", "r") as fp:
            remove_comments("".join(fp.readlines())).rstrip("\n")
        return True
    except IOError:
        # "appid.dat" file doesn't exist, need to create one
        return False
def isOK_hashedpassword():
    """Return True if hashedpassword.dat exists and is readable, else False."""
    try:
        # Context manager closes the handle promptly (the original
        # leaked the open file object).
        with open("hashedpassword.dat", "r") as fp:
            remove_comments("".join(fp.readlines())).rstrip("\n")
        return True
    except IOError:
        # "hashedpassword.dat" file doesn't exist
        return False
def isOK_userid():
    """Return True if userid.dat exists and is readable, else False."""
    try:
        # Context manager closes the handle promptly (the original
        # leaked the open file object).
        with open("userid.dat", "r") as fp:
            remove_comments("".join(fp.readlines())).rstrip("\n")
        return True
    except IOError:
        # "userid.dat" file doesn't exist, need to create one
        return False
def get_hashedpassword():
    """Return the stored MD5 password hash from hashedpassword.dat."""
    # `with` closes the handle the original left open; read() is
    # equivalent to "".join(readlines()).
    with open("hashedpassword.dat", "r") as fp:
        return remove_comments(fp.read()).rstrip("\n")
def get_userid():
    """Return the stored Toodledo user id from userid.dat."""
    # `with` closes the handle the original left open.
    with open("userid.dat", "r") as fp:
        return remove_comments(fp.read()).rstrip("\n")
def get_sessiontoken():
    """Return the cached API session token from sessiontoken.dat."""
    # `with` closes the handle the original left open.
    with open("sessiontoken.dat", "r") as fp:
        return remove_comments(fp.read()).rstrip("\n")
def get_key():
    """Return the cached API request key from key.dat."""
    # `with` closes the handle the original left open.
    with open("key.dat", "r") as fp:
        return remove_comments(fp.read()).rstrip("\n")
def lookup_userid(email,password):
    """Resolve the Toodledo user id for an account via the v2 lookup API.

    NOTE(review): relies on the module-level globals `appid` and `apptoken`,
    which are only assigned inside the __main__ block -- confirm before
    calling this from elsewhere.
    """
    # Signature per the Toodledo v2 API: md5(email + apptoken).
    signature = hashlib.md5(email + apptoken).hexdigest()
    # Credentials travel unencrypted over HTTP (the __main__ block warns
    # the user about this before prompting).
    out = json.loads(urllib2.urlopen("http://api.toodledo.com/2/account/lookup.php?appid="+appid+";sig="+signature+";email="+email+";pass="+password).read())
    userid = out["userid"]
    return userid
def lookup_sessiontoken(userid, appid, apptoken, version):
    """Request a fresh API session token for *userid* from Toodledo.

    The request signature is md5(userid + apptoken), per the v2
    account/token endpoint.
    """
    signature = hashlib.md5(userid + apptoken).hexdigest()
    out = json.loads(urllib2.urlopen("http://api.toodledo.com/2/account/token.php?userid="+userid+";appid="+appid+";vers="+version+";sig="+signature).read())
    sessiontoken = out["token"]
    return sessiontoken
def importance(task, timestamp):
    """Return the Toodledo importance score of *task* at *timestamp*.

    The score is -1 for tasks that are not live at that moment (not yet
    added or started, or already completed); otherwise it is a deadline
    bonus plus priority + 2 + star, mirroring Toodledo's own formula.
    """
    # Task is invisible before it was added or before its start date.
    if timestamp < task['added'] or timestamp < task['startdate']:
        return -1
    # Completed tasks stop mattering once the completion time has passed.
    if 0 < task['completed'] < timestamp:
        return -1
    deadline_bonus = 0
    if task['duedate'] > 0:
        days_left = (datetime.date.fromtimestamp(task['duedate'])
                     - datetime.date.fromtimestamp(timestamp)).days
        if days_left < 0:
            deadline_bonus = 6      # overdue
        elif days_left == 0:
            deadline_bonus = 5      # due today
        elif days_left == 1:
            deadline_bonus = 3      # due tomorrow
        elif days_left <= 6:
            deadline_bonus = 2      # due this week
        elif days_left <= 13:
            deadline_bonus = 1      # due next week
    return deadline_bonus + int(task['priority']) + 2 + int(task['star'])
def meta_importance(task, timestamp):
    """Collapse importance() levels into four coarse buckets.

    -1 passes through; 1-2 -> 0, 3-5 -> 1, 6-8 -> 2, 9-12 -> 3.
    """
    buckets = {-1: -1, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1,
               6: 2, 7: 2, 8: 2, 9: 3, 10: 3, 11: 3, 12: 3}
    return buckets[importance(task, timestamp)]
if __name__ == '__main__':
    # GPL notice shown at startup.
    print """
Toodledo Activity Tracker & Plotter Copyright (C) 2011 Marc Chauvet (marc DOT chauvet AT gmail DOT com)
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it
under certain conditions;
For further information, please refer to the "LICENSE" file"""
    print bcolors.BOLD+"\n\n\nToodledo Activity Tracker & Plotter"+bcolors.ENDC+"\n\nProceeding with initial checks...\n"
    # Column width for the aligned [OK]/[NOK] status report.
    padding = 20
    print "Application ID: ".ljust(padding) + ((bcolors.OKGREEN+"[OK]") if isOK_appid() else (bcolors.FAIL+bcolors.BOLD+"[NOK]")) + bcolors.ENDC
    print "Application token: ".ljust(padding) + ((bcolors.OKGREEN+"[OK]") if isOK_apptoken() else (bcolors.FAIL+bcolors.BOLD+"[NOK]")) + bcolors.ENDC
    if not (isOK_appid() and isOK_apptoken()):
        exit_message = """The application ID and the application token provided by Toodledo need to be provided to this script.
They should appear in files entitled respectively "appid.dat" and "apptoken.dat", located in the same folder as the script."""
        sys.exit(exit_message)
    appid = remove_comments("".join(open("appid.dat","r").readlines())).rstrip("\n")
    apptoken = remove_comments("".join(open("apptoken.dat","r").readlines())).rstrip("\n")
    print "Hashed password: ".ljust(padding) + ((bcolors.OKGREEN+"[OK]") if isOK_hashedpassword() else (bcolors.FAIL+bcolors.BOLD+"[NOK]")) + bcolors.ENDC
    print "User ID: ".ljust(padding) + ((bcolors.OKGREEN+"[OK]") if isOK_userid() else (bcolors.FAIL+bcolors.BOLD+"[NOK]")) + bcolors.ENDC
    # Interactive bootstrap: offer to derive and store the MD5 password
    # hash and the user id if they are missing.
    if not (isOK_hashedpassword() and isOK_userid()):
        print """
Your hashed Toodledo password and/or your Toodledo user ID are currently unknown.
But they are required to download all your completed tasks. These pieces of information
should appear in files entitled respectively "hashedpassword.dat" and "userid.dat",
located in the same folder as the script. The script will now offer to input these pieces
of information in the relevant files for you. But before doing so, you should be aware that:
* your password will be stored as a MD5 hash in your script folder
(but it will not be stored in plain text);
* your password will be sent unencrypted over HTTP to the Toodledo server to get your userID;
* you are encouraged to look into the source code of this script to ensure that
nothing bad is done with the information you will enter;
* you can do all this manually by:
- killing the script now
- putting in a file entitled "hashedpassword.dat" the MD5 hash of your Toodledo password
- putting in a file entitled "userid.dat" your Toodledo user ID
(found on http://www.toodledo.com/account_edit.php)
- launching this script again."""
        email = raw_input("\nIf you wish the script to do the job for you, please insert your Toodledo email address >\n")
        password = getpass.getpass("\nNow, please insert your Toodledo password (only stored as a MD5 hash) >\n")
        open("userid.dat","w").write(lookup_userid(email,password))
        open("hashedpassword.dat","w").write(hashlib.md5(password).hexdigest())
        print "\nHashed password: ".ljust(padding) + ((bcolors.OKGREEN+"[OK]") if isOK_hashedpassword() else (bcolors.FAIL+bcolors.BOLD+"[NOK]")) + bcolors.ENDC
        print "User ID: ".ljust(padding) + ((bcolors.OKGREEN+"[OK]") if isOK_userid() else (bcolors.FAIL+bcolors.BOLD+"[NOK]")) + bcolors.ENDC
        if not (isOK_hashedpassword() and isOK_userid()):
            exit_message = """Ending script as there is still an issue with your hashed Toodledo password and/or your Toodledo user ID"""
            sys.exit(exit_message)
    # A session token is only valid for a limited time: always purge and
    # re-request it, then derive the request key from it.
    try:
        os.remove('sessiontoken.dat')
        print "Session token: ".ljust(padding) + bcolors.OKGREEN + "[Purged]" + bcolors.ENDC
    except OSError as e:
        print "Session token: ".ljust(padding) + bcolors.OKGREEN + "[Inexistent]" + bcolors.ENDC
    open("sessiontoken.dat","w").write(lookup_sessiontoken(get_userid(), appid, apptoken, version))
    print "Session token: ".ljust(padding) + bcolors.OKGREEN + "[OK]" + bcolors.ENDC
    try:
        os.remove('key.dat')
        print "Key: ".ljust(padding) + bcolors.OKGREEN + "[Purged]" + bcolors.ENDC
    except OSError as e:
        print "Key: ".ljust(padding) + bcolors.OKGREEN + "[Inexistent]" + bcolors.ENDC
    open("key.dat","w").write(hashlib.md5(get_hashedpassword() + apptoken + get_sessiontoken()).hexdigest())
    print "Key: ".ljust(padding) + bcolors.OKGREEN + "[OK]" + bcolors.ENDC
    print "\n\nRetrieving all tasks..."
    # out = json.loads(urllib2.urlopen("http://api.toodledo.com/2/tasks/get.php?key="+get_key()+";comp=1").read())
    # The first element of the API answer is a summary record, hence [1:].
    all_tasks = json.loads(urllib2.urlopen("http://api.toodledo.com/2/tasks/get.php?key="+get_key()+";fields=priority,duedate,star,startdate,added").read())[1:]
    print "Retrieved "+str(len(all_tasks))+" tasks"
    print "\n\nPreparing plotting..."
    # Daily timestamps covering the last number_of_plotted_days days.
    last_completed_timestamp = max([t['completed'] for t in all_tasks])
    last_plotted_timestamp = last_completed_timestamp + (int((last_completed_timestamp-time.time())/(60*60*24))+1)*(60*60*24)
    first_plotted_timestamp = last_plotted_timestamp - number_of_plotted_days * (60*60*24)
    Plotted_timestamps = range(first_plotted_timestamp, last_plotted_timestamp +1, 60*60*24)
    # d[timestamp][bucket] = number of tasks in that meta-importance bucket.
    d={}
    for timestamp in Plotted_timestamps:
        a = [meta_importance(task,timestamp) for task in all_tasks]
        d[timestamp]={}
        for x in [-1,0,1,2,3]:
            d[timestamp][x]=a.count(x)
    # Cumulative counts so the stacked areas nest correctly.
    Meta_Imp3 = [d[timestamp][3] for timestamp in Plotted_timestamps]
    Meta_Imp2 = [d[timestamp][2]+d[timestamp][3] for timestamp in Plotted_timestamps]
    Meta_Imp1 = [d[timestamp][1]+d[timestamp][2]+d[timestamp][3] for timestamp in Plotted_timestamps]
    pylab.fill_between(Plotted_timestamps, 0, Meta_Imp3, facecolor='red')
    pylab.fill_between(Plotted_timestamps, Meta_Imp3, Meta_Imp2, facecolor='orange')
    pylab.fill_between(Plotted_timestamps, Meta_Imp2, Meta_Imp1, facecolor='green')
    pylab.xlim( min(Plotted_timestamps), max(Plotted_timestamps) )
    pylab.show()
    # Count per date the number of closed tasks on that date
    # per_day_activity={}
    # for d in [datetime.date.fromtimestamp(x[u'completed']) for x in all_tasks if x[u'completed']>0]:
    # try:
    # per_day_activity[d]+=1
    # except KeyError:
    # per_day_activity[d]=1
    #
    # last_plotted_day=datetime.date.today()
    #
    # Count per hour the number of closed tasks on that hour
    # NOTE(review): the loop variable `d` below clobbers the earlier dict
    # of the same name.
    per_hour_activity={}
    for d in [x[u'completed'] / 3600 for x in all_tasks if x[u'completed']>0]:
        try:
            per_hour_activity[d]+=1
        except KeyError:
            per_hour_activity[d]=1
    last_plotted_hour=int(round(time.time()/3600))
    # List all the days to be plotted, from the oldest to today
    # X = [last_plotted_day-datetime.timedelta(x) for x in range(number_of_plotted_days,-1,-1)]
    # List all the hours to be plotted, from the oldest to today
    XX = range(min(per_hour_activity),last_plotted_hour)
    # Plot the number of tasks done each day
    # Tasks = []
    # for x in X:
    # try:
    # Tasks.append(per_day_activity[x])
    # except KeyError:
    # Tasks.append(0)
    # Plot the number of tasks done each hour (None marks idle hours).
    Tasks = []
    for x in XX:
        try:
            Tasks.append(per_hour_activity[x])
        except KeyError:
            Tasks.append(None)
    # Minimum acceptable per day activity
    minimum_acceptable_per_day_activity = 1
    Minimum_Acceptable_Activity = [minimum_acceptable_per_day_activity for x in XX]
    print "Plotting prepared"
    print "\n\nPlotting..."
    pylab.bar([x for x in range(len(XX)) if Tasks[x]!=None], [t for t in Tasks if t!=None], width=3, bottom=0, color="g", align="center")
    pylab.plot(XX, Minimum_Acceptable_Activity, "r--")
    #pylab.bar([(x-last_plotted_day).days for x in X], Tasks, width=1, bottom=0, color="g", align="center")
    #pylab.fill_between(X,Tasks,Minimum_Acceptable_Activity,where=[Tasks[i]>=Minimum_Acceptable_Activity[i] for i in range(len(Tasks))],facecolor='green', interpolate=True)
    #pylab.fill_between(X,Tasks,Minimum_Acceptable_Activity,where=[Tasks[i]<Minimum_Acceptable_Activity[i] for i in range(len(Tasks))],facecolor='red', interpolate=True)
    # pylab.xlim( -number_of_plotted_days, 1 )
    #pylab.savefig("plot_toodledo_activity.png")
    # Look at http://matplotlib.sourceforge.net/examples/api/path_patch_demo.html
    #http://matplotlib.sourceforge.net/users/path_tutorial.html
    pylab.show()
    print "\n\nBye bye\n\n"
| gpl-3.0 |
theislab/scanpy | scanpy/plotting/_matrixplot.py | 1 | 12979 | from typing import Optional, Union, Mapping # Special
from typing import Sequence # ABCs
from typing import Tuple # Classes
import numpy as np
import pandas as pd
from anndata import AnnData
from matplotlib import pyplot as pl
from matplotlib import rcParams
from matplotlib.colors import Normalize
from .. import logging as logg
from .._utils import _doc_params
from .._compat import Literal
from ._utils import fix_kwds, check_colornorm
from ._utils import ColorLike, _AxesSubplot
from ._utils import savefig_or_show
from .._settings import settings
from ._docs import (
doc_common_plot_args,
doc_show_save_ax,
doc_vboundnorm,
)
from ._baseplot_class import BasePlot, doc_common_groupby_plot_args, _VarNames
@_doc_params(common_plot_args=doc_common_plot_args)
class MatrixPlot(BasePlot):
    """\
    Allows the visualization of values using a color map.

    Parameters
    ----------
    {common_plot_args}
    title
        Title for the figure.
    expression_cutoff
        Expression cutoff that is used for binarizing the gene expression and
        determining the fraction of cells expressing given genes. A gene is
        expressed only if the expression value is greater than this threshold.
    mean_only_expressed
        If True, gene expression is averaged only over the cells
        expressing the given genes.
    standard_scale
        Whether or not to standardize that dimension between 0 and 1,
        meaning for each variable or group,
        subtract the minimum and divide each by its maximum.
    values_df
        Optionally, a dataframe with the values to plot can be given. The
        index should be the grouby categories and the columns the genes names.
    kwds
        Are passed to :func:`matplotlib.pyplot.scatter`.

    See also
    --------
    :func:`~scanpy.pl.matrixplot`: Simpler way to call MatrixPlot but with less options.
    :func:`~scanpy.pl.rank_genes_groups_matrixplot`: to plot marker genes identified
        using the :func:`~scanpy.tl.rank_genes_groups` function.

    Examples
    --------
    Simple visualization of the average expression of a few genes grouped by
    the category 'bulk_labels'.

    .. plot::
        :context: close-figs

        import scanpy as sc
        adata = sc.datasets.pbmc68k_reduced()
        markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
        sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels').show()

    Same visualization but passing var_names as dict, which adds a grouping of
    the genes on top of the image:

    .. plot::
        :context: close-figs

        markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
        sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels').show()
    """

    DEFAULT_SAVE_PREFIX = 'matrixplot_'
    DEFAULT_COLOR_LEGEND_TITLE = 'Mean expression\nin group'
    # default style parameters
    DEFAULT_COLORMAP = rcParams['image.cmap']
    DEFAULT_EDGE_COLOR = 'gray'
    DEFAULT_EDGE_LW = 0.1

    def __init__(
        self,
        adata: AnnData,
        var_names: Union[_VarNames, Mapping[str, _VarNames]],
        groupby: Union[str, Sequence[str]],
        use_raw: Optional[bool] = None,
        log: bool = False,
        num_categories: int = 7,
        categories_order: Optional[Sequence[str]] = None,
        title: Optional[str] = None,
        figsize: Optional[Tuple[float, float]] = None,
        gene_symbols: Optional[str] = None,
        var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
        var_group_labels: Optional[Sequence[str]] = None,
        var_group_rotation: Optional[float] = None,
        layer: Optional[str] = None,
        standard_scale: Literal['var', 'group'] = None,
        ax: Optional[_AxesSubplot] = None,
        values_df: Optional[pd.DataFrame] = None,
        vmin: Optional[float] = None,
        vmax: Optional[float] = None,
        vcenter: Optional[float] = None,
        norm: Optional[Normalize] = None,
        **kwds,
    ):
        # Delegate tidy-frame construction and shared plot state to BasePlot.
        BasePlot.__init__(
            self,
            adata,
            var_names,
            groupby,
            use_raw=use_raw,
            log=log,
            num_categories=num_categories,
            categories_order=categories_order,
            title=title,
            figsize=figsize,
            gene_symbols=gene_symbols,
            var_group_positions=var_group_positions,
            var_group_labels=var_group_labels,
            var_group_rotation=var_group_rotation,
            layer=layer,
            ax=ax,
            vmin=vmin,
            vmax=vmax,
            vcenter=vcenter,
            norm=norm,
            **kwds,
        )
        if values_df is None:
            # compute mean value
            values_df = self.obs_tidy.groupby(level=0).mean()
            # Optional 0-1 scaling per group (rows) or per variable (columns).
            if standard_scale == 'group':
                values_df = values_df.sub(values_df.min(1), axis=0)
                values_df = values_df.div(values_df.max(1), axis=0).fillna(0)
            elif standard_scale == 'var':
                values_df -= values_df.min(0)
                values_df = (values_df / values_df.max(0)).fillna(0)
            elif standard_scale is None:
                pass
            else:
                logg.warning('Unknown type for standard_scale, ignored')
        self.values_df = values_df

        # Visual defaults; can be changed later via .style().
        self.cmap = self.DEFAULT_COLORMAP
        self.edge_color = self.DEFAULT_EDGE_COLOR
        self.edge_lw = self.DEFAULT_EDGE_LW

    def style(
        self,
        cmap: str = DEFAULT_COLORMAP,
        edge_color: Optional[ColorLike] = DEFAULT_EDGE_COLOR,
        edge_lw: Optional[float] = DEFAULT_EDGE_LW,
    ):
        """\
        Modifies plot visual parameters.

        Parameters
        ----------
        cmap
            String denoting matplotlib color map.
        edge_color
            Edge color between the squares of matrix plot. Default is gray
        edge_lw
            Edge line width.

        Returns
        -------
        :class:`~scanpy.pl.MatrixPlot`

        Examples
        -------
        .. plot::
            :context: close-figs

            import scanpy as sc
            adata = sc.datasets.pbmc68k_reduced()
            markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']

        Change color map and turn off edges:

        .. plot::
            :context: close-figs

            (
                sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels')
                .style(cmap='Blues', edge_color='none')
                .show()
            )
        """
        # change only the values that had changed
        if cmap != self.cmap:
            self.cmap = cmap
        if edge_color != self.edge_color:
            self.edge_color = edge_color
        if edge_lw != self.edge_lw:
            self.edge_lw = edge_lw
        # Return self so style() can be chained fluently.
        return self

    def _mainplot(self, ax):
        # work on a copy of the dataframes. This is to avoid changes
        # on the original data frames after repetitive calls to the
        # MatrixPlot object, for example once with swap_axes and other without
        _color_df = self.values_df.copy()
        if self.var_names_idx_order is not None:
            _color_df = _color_df.iloc[:, self.var_names_idx_order]
        if self.categories_order is not None:
            _color_df = _color_df.loc[self.categories_order, :]
        if self.are_axes_swapped:
            _color_df = _color_df.T
        # A user-supplied 'cmap' kwarg overrides the style's colormap;
        # it is removed from kwds so it is not passed twice to pcolor.
        cmap = pl.get_cmap(self.kwds.get('cmap', self.cmap))
        if 'cmap' in self.kwds:
            del self.kwds['cmap']
        normalize = check_colornorm(
            self.vboundnorm.vmin,
            self.vboundnorm.vmax,
            self.vboundnorm.vcenter,
            self.vboundnorm.norm,
        )
        for axis in ['top', 'bottom', 'left', 'right']:
            ax.spines[axis].set_linewidth(1.5)
        kwds = fix_kwds(
            self.kwds,
            cmap=cmap,
            edgecolor=self.edge_color,
            linewidth=self.edge_lw,
            norm=normalize,
        )
        _ = ax.pcolor(_color_df, **kwds)
        y_labels = _color_df.index
        x_labels = _color_df.columns
        # Ticks sit in the middle of each pcolor cell, hence the +0.5.
        y_ticks = np.arange(len(y_labels)) + 0.5
        ax.set_yticks(y_ticks)
        ax.set_yticklabels(y_labels)
        x_ticks = np.arange(len(x_labels)) + 0.5
        ax.set_xticks(x_ticks)
        ax.set_xticklabels(x_labels, rotation=90, ha='center', minor=False)
        ax.tick_params(axis='both', labelsize='small')
        ax.grid(False)
        # to be consistent with the heatmap plot, is better to
        # invert the order of the y-axis, such that the first group is on
        # top
        ax.set_ylim(len(y_labels), 0)
        ax.set_xlim(0, len(x_labels))
        return normalize
@_doc_params(
    show_save_ax=doc_show_save_ax,
    common_plot_args=doc_common_plot_args,
    groupby_plots_args=doc_common_groupby_plot_args,
    vminmax=doc_vboundnorm,
)
def matrixplot(
    adata: AnnData,
    var_names: Union[_VarNames, Mapping[str, _VarNames]],
    groupby: Union[str, Sequence[str]],
    use_raw: Optional[bool] = None,
    log: bool = False,
    num_categories: int = 7,
    figsize: Optional[Tuple[float, float]] = None,
    dendrogram: Union[bool, str] = False,
    title: Optional[str] = None,
    cmap: Optional[str] = MatrixPlot.DEFAULT_COLORMAP,
    colorbar_title: Optional[str] = MatrixPlot.DEFAULT_COLOR_LEGEND_TITLE,
    gene_symbols: Optional[str] = None,
    var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
    var_group_labels: Optional[Sequence[str]] = None,
    var_group_rotation: Optional[float] = None,
    layer: Optional[str] = None,
    standard_scale: Literal['var', 'group'] = None,
    values_df: Optional[pd.DataFrame] = None,
    swap_axes: bool = False,
    show: Optional[bool] = None,
    save: Union[str, bool, None] = None,
    ax: Optional[_AxesSubplot] = None,
    return_fig: Optional[bool] = False,
    vmin: Optional[float] = None,
    vmax: Optional[float] = None,
    vcenter: Optional[float] = None,
    norm: Optional[Normalize] = None,
    **kwds,
) -> Union[MatrixPlot, dict, None]:
    """\
    Creates a heatmap of the mean expression values per group of each var_names.

    This function provides a convenient interface to the :class:`~scanpy.pl.MatrixPlot`
    class. If you need more flexibility, you should use :class:`~scanpy.pl.MatrixPlot`
    directly.

    Parameters
    ----------
    {common_plot_args}
    {groupby_plots_args}
    {show_save_ax}
    {vminmax}
    kwds
        Are passed to :func:`matplotlib.pyplot.pcolor`.

    Returns
    -------
    If `return_fig` is `True`, returns a :class:`~scanpy.pl.MatrixPlot` object,
    else if `show` is false, return axes dict

    See also
    --------
    :class:`~scanpy.pl.MatrixPlot`: The MatrixPlot class can be used to to control
        several visual parameters not available in this function.
    :func:`~scanpy.pl.rank_genes_groups_matrixplot`: to plot marker genes
        identified using the :func:`~scanpy.tl.rank_genes_groups` function.

    Examples
    --------
    .. plot::
        :context: close-figs

        import scanpy as sc
        adata = sc.datasets.pbmc68k_reduced()
        markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
        sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)

    Using var_names as dict:

    .. plot::
        :context: close-figs

        markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
        sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)

    Get Matrix object for fine tuning:

    .. plot::
        :context: close-figs

        mp = sc.pl.matrixplot(adata, markers, 'bulk_labels', return_fig=True)
        mp.add_totals().style(edge_color='black').show()

    The axes used can be obtained using the get_axes() method

    .. plot::
        :context: close-figs

        axes_dict = mp.get_axes()
    """
    # Build the plot object; all heavy lifting happens in MatrixPlot.
    mp = MatrixPlot(
        adata,
        var_names,
        groupby=groupby,
        use_raw=use_raw,
        log=log,
        num_categories=num_categories,
        standard_scale=standard_scale,
        title=title,
        figsize=figsize,
        gene_symbols=gene_symbols,
        var_group_positions=var_group_positions,
        var_group_labels=var_group_labels,
        var_group_rotation=var_group_rotation,
        layer=layer,
        values_df=values_df,
        ax=ax,
        vmin=vmin,
        vmax=vmax,
        vcenter=vcenter,
        norm=norm,
        **kwds,
    )
    if dendrogram:
        mp.add_dendrogram(dendrogram_key=dendrogram)
    if swap_axes:
        mp.swap_axes()
    mp = mp.style(cmap=cmap).legend(title=colorbar_title)
    if return_fig:
        return mp
    else:
        # Render now; only return the axes dict when not showing.
        mp.make_figure()
        savefig_or_show(MatrixPlot.DEFAULT_SAVE_PREFIX, show=show, save=save)
        show = settings.autoshow if show is None else show
        if not show:
            return mp.get_axes()
| bsd-3-clause |
sbobovyc/JA-BiA-Tools | src/legacy/find_pkle.py | 1 | 4434 | """
Created on February 16, 2012
@author: sbobovyc
"""
"""
Copyright (C) 2012 Stanislav Bobovych
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx
# http://msdn.microsoft.com/en-us/library/windows/desktop/ee175820%28v=vs.85%29.aspx
# http://msdn.microsoft.com/en-us/library/windows/desktop/ee175819%28v=vs.85%29.aspx
import win32con
import winappdbg
from ctypes import *
from ctypes.wintypes import *
from win32com.client import GetObject
import binascii
from matplotlib.mlab import ma
MEM_MIN = 0x00400000
MEM_MAX = 0x7FFFFFFF
def get_pid_by_name(exe_name):
    """Return the ProcessId of the first running process named *exe_name*."""
    wmi = GetObject('winmgmts:')
    processes = wmi.InstancesOf('Win32_Process')
    len(processes)
    matches = wmi.ExecQuery('select * from Win32_Process where Name="%s"' % exe_name)
    # First match wins; Properties_ exposes the WMI property bag.
    return matches[0].Properties_('ProcessId').Value
def get_segment_size(pid, lpAddress):
    """Return the byte count from lpAddress to the end of its memory region.

    Queries the region via VirtualQueryEx; returns None (implicitly) and
    prints an error when the query fails.
    """
    # PROCESS_QUERY_INFORMATION is sufficient for VirtualQueryEx.
    hProcess = windll.kernel32.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, False, int(pid))
    lpBuffer = winappdbg.win32.MEMORY_BASIC_INFORMATION()
    dwLength = sizeof(lpBuffer)
    status = windll.kernel32.VirtualQueryEx(hProcess, lpAddress, byref(lpBuffer), dwLength)
    if status == 0:
        print "Error"
        return
    # print hex(lpBuffer.BaseAddress)
    # print hex(lpBuffer.RegionSize)
    # Distance from the queried address to the end of the containing region.
    size = (lpBuffer.BaseAddress + lpBuffer.RegionSize) - lpAddress
    # print hex(size)
    return size
def search_proc_memory(pid, search_data, match_limit=None, min=MEM_MIN, max=MEM_MAX):
    """Scan [min, max) of the target process for exact matches of search_data.

    Returns the list of matching addresses, stopping early once match_limit
    matches have been collected (None means unlimited).
    NOTE(review): the scan advances 32 bytes per probe, so matches that do
    not start on a 32-byte boundary are missed -- confirm this is intended.
    NOTE(review): the parameters `min`/`max` shadow the builtins.
    """
    hProcess = windll.kernel32.OpenProcess(win32con.PROCESS_ALL_ACCESS, False, int(pid))
    if not hProcess: #Just in case error occurs.
        raise IOError, "Couldn't acquire a handle to PID: %s" % pid
    # print hProcess
    # VIRTUAL_MEM = win32con.MEM_COMMIT | win32con.MEM_RESERVE
    # argument_address = windll.kernel32.VirtualAllocEx( hProcess, 0, len(str(search_data)), VIRTUAL_MEM, win32con.PAGE_READWRITE)
    # print hex(argument_address)
    # memory_addresses = map(int, range(argument_address)) #Program Memory Range.
    # print "First memory address", hex(memory_addresses[0])
    read_buffer = create_string_buffer(len(str(search_data)))
    count = c_ulong(0)
    match_address = []
    j = min
    while j < max:
        # ReadProcessMemory failures are ignored; the stale buffer simply
        # won't compare equal to search_data.
        windll.kernel32.ReadProcessMemory(hProcess, j, read_buffer, len(search_data), byref(count))
        data = read_buffer.raw
        if data == search_data:
            match_address.append(j)
            print "Found %s at" % search_data, hex(j)
        j += 32 #word alignment
        if len(match_address) == match_limit:
            break
    return match_address
def read_mem(pid, lpAddress, size):
    """Read *size* bytes from another process's memory and return them raw."""
    handle = windll.kernel32.OpenProcess(win32con.PROCESS_ALL_ACCESS, False, int(pid))
    out_buf = create_string_buffer(size)
    bytes_read = c_ulong(0)
    windll.kernel32.ReadProcessMemory(handle, lpAddress, out_buf, size, byref(bytes_read))
    return out_buf.raw
if __name__ == "__main__":
    bin_name = "GameJaBiA.exe"
    print "Searching %s" % bin_name
    pid = get_pid_by_name(bin_name)
    print "Found process ID:", pid
    data = "PKLE"
    # Locate the first two PKLE archive headers in the process image.
    match_address = search_proc_memory(pid, data, match_limit=2)
    size_list = []
    for address in match_address:
        size_list.append(get_segment_size(pid, address))
    # Dump each archive (header to end of its memory region) to disk.
    f = open("configs_win32.pak", "wb")
    data = read_mem(pid, match_address[0], size_list[0])
    f.write(data)
    f.close()
    f = open("interface_win32.pak", "wb")
    data = read_mem(pid, match_address[1], size_list[1])
    f.write(data)
    f.close()
| gpl-3.0 |
probml/pyprobml | scripts/ising_image_denoise_demo.py | 1 | 2759 | # -*- coding: utf-8 -*-
"""
Author: Ang Ming Liang
Based on: https://github.com/probml/pmtk3/blob/master/demos/isingImageDenoiseDemo.m
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from tqdm.notebook import tqdm
from tqdm import tqdm
from scipy.stats import norm
import pyprobml_utils as pml
# Reproducible noise for the demo figures.
np.random.seed(1)
# NOTE(review): sigma is used as a std-dev below (sigma*normal) but as a
# variance (scale=sqrt(sigma)) inside gibbs/meanfield -- confirm intent.
sigma =2
img = pd.read_csv("../data/letterA.csv").to_numpy()
mean = np.mean(img)
base = np.ones(img.shape)
# Binarize to the +/-1 spin convention of the Ising model.
img2 = base*(img > mean) - base*(img<mean)
# Noisy observation: clean spins plus Gaussian noise.
y = img2 + sigma*np.random.normal(size = img2.shape)
fig, axs = plt.subplots(1,2)
axs[0].imshow(img2, cmap="Greys")
axs[1].imshow(y, cmap="Greys")
plt.show()
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); maps any real input into (0, 1)."""
    z = np.exp(-x)
    return 1 / (1 + z)
def energy(ix, iy, X, J):
    """Ising neighbour energy 2*J*sum of the valid 4-neighbours of (iy, ix).

    Neighbours falling outside the grid boundary are simply skipped.
    """
    n_rows, n_cols = X.shape
    neighbours = [(iy - 1, ix), (iy + 1, ix), (iy, ix - 1), (iy, ix + 1)]
    total = sum(X[r, c] for r, c in neighbours
                if 0 <= r < n_rows and 0 <= c < n_cols)
    return 2 * J * total
def posterior_mean(f, nburnin=0):
    """Average a sequence of MCMC samples after discarding a burn-in prefix.

    BUG FIX: the original body referenced the undefined names `X` and
    `Nburin` and raised NameError when called; it now operates on its
    argument.

    Parameters
    ----------
    f : array-like, shape (n_samples, ...)
        Samples, first axis indexing the draws.
    nburnin : int, optional
        Number of leading samples to discard (default 0, matching the
        original single-argument call signature).
    """
    samples = np.asarray(f)
    return samples[nburnin:].mean(axis=0)
def gibbs(rng, img, J, niter=10, nburin=0):
    """Gibbs-sample the Ising denoising posterior; return the sweep average.

    rng is a numpy Generator (rng.random(1) drives the flip decisions);
    img is the noisy observation, J the coupling strength. Returns img
    unchanged when niter is falsy. Reads the module-level `sigma` (used
    as a variance: scale=sqrt(sigma)).
    """
    if not niter:
        return img
    assert niter>nburin, "niter cannot be the same or smaller then the nburin"
    img2 = img.copy()
    # Per-pixel log-likelihood ratio log p(y|x=-1) - log p(y|x=+1).
    normNeg = norm(loc=-1, scale=np.sqrt(sigma))
    normPos = norm(loc=1, scale=np.sqrt(sigma))
    logOdds = normNeg.logpdf(img)- normPos.logpdf(img)
    X = np.zeros(img.shape)
    for iter in tqdm(range(niter)):
        for ix in range(img.shape[1]):
            for iy in range(img.shape[0]):
                # Full conditional of pixel (iy, ix) given its neighbours.
                e = energy(ix, iy, img2, J)
                if rng.random(1) < sigmoid(e - logOdds[iy, ix]):
                    img2[ iy, ix] = 1
                else:
                    img2[ iy, ix] = -1
        # NOTE(review): every sweep is accumulated here, yet the normaliser
        # below is (niter - nburin); for nburin > 0 the burn-in sweeps are
        # not actually discarded -- confirm intent.
        X += img2
    return (1/(niter-nburin))*X
def meanfield(img, J, niter=10, rate=1):
    """Mean-field (variational) updates for the Ising denoising posterior.

    Returns the mean-field means mu (values in [-1, 1]); returns img
    unchanged when niter is falsy. `rate` is the damping factor of the
    updates. Reads the module-level `sigma`.
    """
    img2 = img.copy()  # NOTE(review): computed but never used
    normNeg = norm(loc=-1, scale=np.sqrt(sigma))
    normPos = norm(loc=1, scale=np.sqrt(sigma))
    logOdds = normNeg.logpdf(img)- normPos.logpdf(img)
    # Initialise mu from the pixel-wise evidence alone.
    p1 = sigmoid(logOdds)
    mu = 2*p1 - 1
    if not niter:
        return img
    for iter in tqdm(range(niter)):
        for ix in range(img.shape[1]):
            for iy in range(img.shape[0]):
                Sbar = energy(ix, iy, mu, J)
                # Damped tanh update of the mean-field parameter.
                mu[ iy, ix] = (1-rate)*mu[iy, ix] + rate*np.tanh(Sbar + 0.5*logOdds[iy, ix])
    return mu
# Mean-field figure: run the variational updater for increasing numbers of
# sweeps and show how the reconstruction sharpens.
seed = 10
rng = np.random.default_rng(seed)
# Number of sweeps for each panel (0 shows the raw noisy input).
iters = [0, 1, 5, 15]
fig, axs = plt.subplots(1,4)
for i, x in enumerate(iters):
    axs[i].imshow(meanfield(y, 1, niter=x), cmap="Greys")
    axs[i].set_title(f"sample {x}")
plt.suptitle('Mean field')
plt.tight_layout()
pml.savefig('meanFieldDenoising.pdf')
plt.show()

# Gibbs-sampling figure: same sweep counts, stochastic sampler.
fig, axs = plt.subplots(1,4)
for i, x in enumerate(iters):
    axs[i].set_title(f"sample {x}")
    axs[i].imshow(gibbs(rng, y, 1, niter=x), cmap="Greys")
plt.suptitle('Gibbs sampling')
plt.tight_layout()
pml.savefig('gibbsDenoising.pdf')
plt.show()
| mit |
jlegendary/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
# Public API of this module: the names exported by
# ``from sklearn.cross_validation import *``.
__all__ = ['KFold',
           'LeaveOneLabelOut',
           'LeaveOneOut',
           'LeavePLabelOut',
           'LeavePOut',
           'ShuffleSplit',
           'StratifiedKFold',
           'StratifiedShuffleSplit',
           'PredefinedSplit',
           'check_cv',
           'cross_val_score',
           'cross_val_predict',
           'permutation_test_score',
           'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
    """Base class for CV iterators whose train set is the complement of
    the test set.

    Subclasses must provide either `_iter_test_masks` or
    `_iter_test_indices`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    """

    def __init__(self, n):
        # Accept float n only when it is an exact integer value.
        if abs(n - int(n)) >= np.finfo('f').eps:
            raise ValueError("n must be an integer")
        self.n = int(n)

    def __iter__(self):
        indices = np.arange(self.n)
        for test_mask in self._iter_test_masks():
            train_mask = np.logical_not(test_mask)
            yield indices[train_mask], indices[test_mask]

    # Subclasses override one of the two generators below; neither is
    # abstract so either choice works.
    def _iter_test_masks(self):
        """Generate boolean test-set masks.

        Defaults to wrapping `_iter_test_indices()`.
        """
        for test_index in self._iter_test_indices():
            mask = self._empty_mask()
            mask[test_index] = True
            yield mask

    def _iter_test_indices(self):
        """Generate integer indices corresponding to test sets."""
        raise NotImplementedError

    def _empty_mask(self):
        # ``bool`` is what the old ``np.bool`` alias resolved to.
        return np.zeros(self.n, dtype=bool)
class LeaveOneOut(_PartitionIterator):
    """Leave-one-out cross-validation iterator.

    Each of the ``n`` samples is used once as a singleton test set while
    the remaining ``n - 1`` samples form the training set.  Equivalent to
    ``KFold(n, n_folds=n)`` and ``LeavePOut(n, p=1)``.  With ``n`` test
    sets this can be very costly; prefer KFold, StratifiedKFold or
    ShuffleSplit for large datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.

    See also
    --------
    LeaveOneLabelOut for splitting the data according to explicit,
    domain-specific stratification of the dataset.
    """

    def _iter_test_indices(self):
        # Sample i is the i-th (and only) member of test set i.
        return range(self.n)

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(n=%i)' % (cls.__module__, cls.__name__, self.n)

    def __len__(self):
        return self.n
class LeavePOut(_PartitionIterator):
    """Leave-P-out cross-validation iterator.

    Tests on every distinct subset of ``p`` samples while the remaining
    ``n - p`` samples form the training set.  Unlike
    ``KFold(n, n_folds=n // p)`` the test sets overlap, and the number of
    iterations grows combinatorially with ``n`` — prefer KFold,
    StratifiedKFold or ShuffleSplit for large datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    p : int
        Size of the test sets.
    """

    def __init__(self, n, p):
        super(LeavePOut, self).__init__(n)
        self.p = p

    def _iter_test_indices(self):
        # One test set per p-combination of sample indices.
        for subset in combinations(range(self.n), self.p):
            yield np.array(subset)

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(n=%i, p=%i)' % (cls.__module__, cls.__name__,
                                      self.n, self.p)

    def __len__(self):
        # Binomial coefficient C(n, p); kept as the original chained
        # division so the float rounding is identical.
        return int(factorial(self.n) / factorial(self.n - self.p)
                   / factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
    """Base class that validates the common KFold parameters."""

    @abstractmethod
    def __init__(self, n, n_folds, shuffle, random_state):
        super(_BaseKFold, self).__init__(n)

        # n_folds may be a float only when it is an exact integer value.
        if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
            raise ValueError("n_folds must be an integer")
        n_folds = int(n_folds)
        self.n_folds = n_folds

        if n_folds <= 1:
            raise ValueError(
                "k-fold cross validation requires at least one"
                " train / test split by setting n_folds=2 or more,"
                " got n_folds={0}.".format(n_folds))
        if n_folds > self.n:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of samples: {1}.").format(n_folds, n))

        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.shuffle = shuffle
        self.random_state = random_state
class KFold(_BaseKFold):
    """K-fold cross-validation iterator.

    Splits ``n`` samples into ``n_folds`` consecutive folds (shuffled once
    up front when requested).  Each fold is used as the test set exactly
    once while the remaining ``n_folds - 1`` folds form the training set.
    The first ``n % n_folds`` folds have size ``n // n_folds + 1``; the
    others have size ``n // n_folds``.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements.
    n_folds : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : None, int or RandomState
        Pseudo-random number generator state used for random
        sampling. If None, use default numpy RNG for shuffling.

    See also
    --------
    StratifiedKFold: take label information into account to avoid building
    folds with imbalanced class distributions (for binary or multiclass
    classification tasks).
    """

    def __init__(self, n, n_folds=3, shuffle=False,
                 random_state=None):
        super(KFold, self).__init__(n, n_folds, shuffle, random_state)
        self.idxs = np.arange(n)
        if shuffle:
            # One global shuffle; the folds themselves stay contiguous
            # slices of the permuted index array.
            check_random_state(self.random_state).shuffle(self.idxs)

    def _iter_test_indices(self):
        n, n_folds = self.n, self.n_folds
        # Base fold size, with the remainder spread over the first folds.
        fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=int)
        fold_sizes[:n % n_folds] += 1
        start = 0
        for size in fold_sizes:
            stop = start + size
            yield self.idxs[start:stop]
            start = stop

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
            cls.__module__,
            cls.__name__,
            self.n,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a variation of KFold that
    returns stratified folds. The folds are made by preserving
    the percentage of samples for each class.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    y : array-like, [n_samples]
        Samples to split in K folds.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.

    random_state : None, int or RandomState
        Pseudo-random number generator state used for random
        sampling. If None, use default numpy RNG for shuffling

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
    >>> len(skf)
    2
    >>> print(skf)  # doctest: +NORMALIZE_WHITESPACE
    sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
                                             shuffle=False, random_state=None)
    >>> for train_index, test_index in skf:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]

    Notes
    -----
    All the folds have size trunc(n_samples / n_folds), the last one has the
    complementary.
    """

    def __init__(self, y, n_folds=3, shuffle=False,
                 random_state=None):
        super(StratifiedKFold, self).__init__(
            len(y), n_folds, shuffle, random_state)
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_labels, y_inversed = np.unique(y, return_inverse=True)
        label_counts = bincount(y_inversed)
        min_labels = np.min(label_counts)
        # Warn (rather than raise) when some class is too small to appear
        # in every fold; the trimming below keeps the split usable anyway.
        if self.n_folds > min_labels:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of labels for any class cannot"
                           " be less than n_folds=%d."
                           % (min_labels, self.n_folds)), Warning)

        # don't want to use the same seed in each label's shuffle
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state

        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each label so as to respect the
        # balance of labels
        per_label_cvs = [
            KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
                  random_state=rng) for c in label_counts]
        test_folds = np.zeros(n_samples, dtype=np.int)
        for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
            for label, (_, test_split) in zip(unique_labels, per_label_splits):
                # Fancy indexing returns a copy, so the updated fold ids
                # must be written back into test_folds explicitly below.
                label_test_folds = test_folds[y == label]
                # the test split can be too big because we used
                # KFold(max(c, self.n_folds), self.n_folds) instead of
                # KFold(c, self.n_folds) to make it possible to not crash even
                # if the data is not 100% stratifiable for all the labels
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(label_test_folds)]
                label_test_folds[test_split] = test_fold_idx
                test_folds[y == label] = label_test_folds

        self.test_folds = test_folds
        self.y = y

    def _iter_test_masks(self):
        # Fold i's test set is all samples pre-assigned fold id i.
        for i in range(self.n_folds):
            yield self.test_folds == i

    def __repr__(self):
        return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.y,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
    """Leave-one-label-out cross-validation iterator.

    Splits the data according to a third-party provided label array:
    each iteration holds out every sample carrying one distinct label
    value as the test set.  The labels can encode arbitrary
    domain-specific stratifications (e.g. collection year for time-based
    splits).

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.
    """

    def __init__(self, labels):
        super(LeaveOneLabelOut, self).__init__(len(labels))
        # Copy so that mutating the caller's array cannot change the
        # splits mid-iteration.
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)

    def _iter_test_masks(self):
        for label in self.unique_labels:
            yield self.labels == label

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(labels=%s)' % (cls.__module__, cls.__name__,
                                     self.labels)

    def __len__(self):
        return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
    """Leave-P-labels-out cross-validation iterator.

    Like LeaveOneLabelOut, the split is driven by a third-party label
    array, but each test set gathers the samples of ``p`` distinct label
    values instead of one.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.
    p : int
        Number of samples to leave out in the test split.
    """

    def __init__(self, labels, p):
        # Copy so that mutating the caller's array cannot change the
        # splits mid-iteration.
        super(LeavePLabelOut, self).__init__(len(labels))
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)
        self.p = p

    def _iter_test_masks(self):
        # One test mask per p-combination of distinct label values.
        for chosen in combinations(range(self.n_unique_labels), self.p):
            mask = self._empty_mask()
            for label in self.unique_labels[np.array(chosen)]:
                mask[self.labels == label] = True
            yield mask

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(labels=%s, p=%s)' % (cls.__module__, cls.__name__,
                                           self.labels, self.p)

    def __len__(self):
        # Binomial coefficient C(n_unique_labels, p); same chained
        # division as the original to keep rounding identical.
        return int(factorial(self.n_unique_labels) /
                   factorial(self.n_unique_labels - self.p) /
                   factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        self.n = n
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state
        # Resolve the absolute train/test sizes once, up front; this also
        # validates the test_size/train_size combination.
        self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
                                                            train_size)

    def __iter__(self):
        for split in self._iter_indices():
            yield split

    @abstractmethod
    def _iter_indices(self):
        """Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
    """Random-permutation cross-validation iterator.

    Each iteration draws a fresh random permutation of the ``n`` indices
    and carves a test set and a train set out of it.  Contrary to other
    cross-validation strategies, the random splits are not guaranteed to
    all be different, although this is still very likely for sizeable
    datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.
    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.
    test_size : float (default 0.1), int, or None
        If float, proportion of the dataset in the test split; if int,
        absolute number of test samples; if None, the complement of
        train_size.
    train_size : float, int, or None (default is None)
        If float, proportion of the dataset in the train split; if int,
        absolute number of train samples; if None, the complement of
        test_size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    """

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            # One fresh permutation per split: test first, train next.
            order = rng.permutation(self.n)
            test_idx = order[:self.n_test]
            train_idx = order[self.n_test:self.n_test + self.n_train]
            yield train_idx, test_idx

    def __repr__(self):
        return ('%s(%d, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.n,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))

    def __len__(self):
        return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    y : array, [n_samples]
        Labels of samples.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.cross_validation import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
    >>> len(sss)
    3
    >>> print(sss)       # doctest: +ELLIPSIS
    StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
    >>> for train_index, test_index in sss:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2] TEST: [3 0]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 2] TEST: [3 1]
    """

    def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        super(StratifiedShuffleSplit, self).__init__(
            len(y), n_iter, test_size, train_size, random_state)
        self.y = np.array(y)
        self.classes, self.y_indices = np.unique(y, return_inverse=True)
        n_cls = self.classes.shape[0]

        # Every class must be representable in both the train and the
        # test split, hence the three guards below.
        if np.min(bincount(self.y_indices)) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of labels for any class cannot"
                             " be less than 2.")

        if self.n_train < n_cls:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_train, n_cls))
        if self.n_test < n_cls:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_test, n_cls))

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        cls_count = bincount(self.y_indices)
        # p_i: per-class proportions; n_i / t_i: per-class train/test
        # quotas (test quota is capped by what the class has left).
        p_i = cls_count / float(self.n)
        n_i = np.round(self.n_train * p_i).astype(int)
        t_i = np.minimum(cls_count - n_i,
                         np.round(self.n_test * p_i).astype(int))

        for n in range(self.n_iter):
            train = []
            test = []

            for i, cls in enumerate(self.classes):
                permutation = rng.permutation(cls_count[i])
                cls_i = np.where((self.y == cls))[0][permutation]

                train.extend(cls_i[:n_i[i]])
                test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])

            # Because of rounding issues (as n_train and n_test are not
            # dividers of the number of elements per class), we may end
            # up here with less samples in train and test than asked for.
            if len(train) < self.n_train or len(test) < self.n_test:
                # We complete by affecting randomly the missing indexes
                missing_idx = np.where(bincount(train + test,
                                                minlength=len(self.y)) == 0,
                                       )[0]
                missing_idx = rng.permutation(missing_idx)
                train.extend(missing_idx[:(self.n_train - len(train))])
                # NOTE(review): when only train is short, self.n_test -
                # len(test) is 0 and missing_idx[-0:] is the WHOLE array,
                # so test may receive extra samples here — verify against
                # later sklearn fixes of this helper.
                test.extend(missing_idx[-(self.n_test - len(test)):])

            train = rng.permutation(train)
            test = rng.permutation(test)

            yield train, test

    def __repr__(self):
        return ('%s(labels=%s, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.y,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))

    def __len__(self):
        return self.n_iter
class PredefinedSplit(_PartitionIterator):
    """Predefined-split cross-validation iterator.

    The caller assigns each sample to at most one test fold via
    ``test_fold``: ``test_fold[i]`` is the fold index of sample ``i``,
    and ``-1`` means the sample is never in a test set (always in the
    training fold).

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    test_fold : array-like, shape (n_samples,)
        Per-sample test fold index, or -1 to exclude the sample from all
        test sets.
    """

    def __init__(self, test_fold):
        super(PredefinedSplit, self).__init__(len(test_fold))
        self.test_fold = column_or_1d(np.array(test_fold, dtype=int))
        folds = np.unique(self.test_fold)
        # -1 is the "never in a test set" marker, not a fold of its own.
        self.unique_folds = folds[folds != -1]

    def _iter_test_indices(self):
        for fold in self.unique_folds:
            yield np.where(self.test_fold == fold)[0]

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(test_fold=%s)' % (
            cls.__module__,
            cls.__name__,
            self.test_fold)

    def __len__(self):
        return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
    """Private helper: slice fit-parameter ``v`` along the sample axis.

    Values that are not array-like, or whose length does not match X,
    are passed through unchanged (e.g. scalars).
    """
    aligned = _is_arraylike(v) and _num_samples(v) == _num_samples(X)
    if not aligned:
        # pass through: skip indexing
        return v
    # Sparse matrices need CSR for efficient row indexing.
    value = v.tocsr() if sp.issparse(v) else v
    return safe_indexing(value, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Generate a cross-validated prediction for every input sample.

    Each sample's prediction comes from a model fitted on the folds that
    do not contain it, so the CV generator must place every sample in
    exactly one test set; otherwise a ValueError is raised.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines the number
        of folds in StratifiedKFold if y is binary or multiclass and
        estimator is a classifier, or the number of folds in KFold
        otherwise. If None, it is equivalent to cv=3.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls the number of jobs dispatched during parallel execution
        (None, an int, or an expression in n_jobs such as '2*n_jobs').

    Returns
    -------
    preds : ndarray
        This is the result of calling 'predict', in the order of X.
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    # Clone the estimator per fold so the fits are independent and the
    # jobs are pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    prediction_blocks = parallel(
        delayed(_fit_and_predict)(clone(estimator), X, y, train, test,
                                  verbose, fit_params)
        for train, test in cv)

    predictions = np.concatenate([block for block, _ in prediction_blocks])
    test_indices = np.concatenate([idx for _, idx in prediction_blocks])

    if not _check_is_partition(test_indices, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')

    # Scatter the fold predictions back into the original sample order.
    ordered = predictions.copy()
    ordered[test_indices] = predictions
    return ordered
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
    """Fit the estimator on one train split and predict on its test split.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    verbose : integer
        The verbosity level.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    Returns
    -------
    preds : sequence
        Result of calling 'estimator.predict'
    test : array-like
        This is the value of the test parameter
    """
    # Slice per-sample fit parameters (e.g. sample weights) to the train
    # split; scalars pass through _index_param_value unchanged.
    fit_params = {} if fit_params is None else fit_params
    fit_params = {key: _index_param_value(X, value, train)
                  for key, value in fit_params.items()}

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)

    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    return estimator.predict(X_test), test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                    verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if y is binary
        or multiclass and estimator is a classifier, or the number
        of folds in KFold otherwise. If None, it is equivalent to cv=3.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # Each fold fits a clone of the estimator so the fits are independent
    # and the jobs are pickle-able for parallel dispatch.
    dispatcher = Parallel(n_jobs=n_jobs, verbose=verbose,
                          pre_dispatch=pre_dispatch)
    fold_results = dispatcher(
        delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test,
                                verbose, None, fit_params)
        for train, test in cv)
    # _fit_and_score returns [test_score, n_test_samples, scoring_time];
    # only the score column is of interest here.
    return np.array(fold_results)[:, 0]
class FitFailedWarning(RuntimeWarning):
    """Warning issued when an estimator fails to fit and ``error_score``
    is a numeric value instead of 'raise' (see ``_fit_and_score``)."""
    pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    verbose : integer
        The verbosity level.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    parameters : dict or None
        Parameters to be set on the estimator.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.
    return_train_score : boolean, optional, default: False
        Compute and return score on training set.
    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.

    Returns
    -------
    train_score : float, optional
        Score on training set, returned only if `return_train_score` is `True`.
    test_score : float
        Score on test set.
    n_test_samples : int
        Number of test samples.
    scoring_time : float
        Time spent for fitting and scoring in seconds.
    parameters : dict or None, optional
        The parameters that have been evaluated.
    """
    # NOTE: `msg` is only bound when verbose > 1; the later `verbose > 2`
    # use is safe because verbose > 2 implies verbose > 1.
    if verbose > 1:
        if parameters is None:
            msg = "no parameters to be set"
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    # Adjust length of sample weights (and other indexable fit params) to
    # match the training subset.
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                       for k, v in fit_params.items()])
    if parameters is not None:
        estimator.set_params(**parameters)
    start_time = time.time()
    X_train, y_train = _safe_split(estimator, X, y, train)
    # `train` is passed so precomputed kernels yield a (test x train) block.
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)
    except Exception as e:
        # A failed fit either propagates (error_score='raise') or is
        # replaced by the numeric error_score with a warning.
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)"
                             )
    else:
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)
    scoring_time = time.time() - start_time
    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
    # Result list layout: [train_score?, test_score, n_test, time, params?]
    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
    """Input checker utility for building a CV in a user friendly way.

    Parameters
    ----------
    cv : int, a cv generator instance, or None
        The input specifying which cv generator to use. It can be an
        integer, in which case it is the number of folds in a KFold,
        None, in which case 3 fold is used, or another object, that
        will then be used as a cv generator.
    X : array-like
        The data the cross-val object will be applied on.
    y : array-like
        The target variable for a supervised learning problem.
    classifier : boolean optional
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv: a cross-validation generator instance.
        The return value is guaranteed to be a cv generator instance, whatever
        the input type.
    """
    is_sparse = sp.issparse(X)
    if cv is None:
        cv = 3
    # An integer is turned into a concrete KFold/StratifiedKFold; any other
    # object is passed through unchanged and assumed to be a cv generator.
    if isinstance(cv, numbers.Integral):
        if classifier:
            # Stratification is only meaningful for discrete targets.
            if type_of_target(y) in ['binary', 'multiclass']:
                cv = StratifiedKFold(y, cv)
            else:
                cv = KFold(_num_samples(y), cv)
        else:
            # Sparse matrices do not support len(); use shape[0] instead.
            if not is_sparse:
                n_samples = len(X)
            else:
                n_samples = X.shape[0]
            cv = KFold(n_samples, cv)
    return cv
def permutation_test_score(estimator, X, y, cv=None,
                           n_permutations=100, n_jobs=1, labels=None,
                           random_state=0, verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like
        The target variable to try to predict in the case of
        supervised learning.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.
    n_permutations : integer, optional
        Number of times to permute ``y``.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation among groups of samples with
        a same label.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.
    verbose : integer, optional
        The verbosity level.

    Returns
    -------
    score : float
        The true score without permuting targets.
    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutations.
    pvalue : float
        The returned value equals p-value if `scoring` returns bigger
        numbers for better scores (e.g., accuracy_score). If `scoring` is
        rather a loss function (i.e. when lower is better such as with
        `mean_squared_error`) then this is actually the complement of the
        p-value: 1 - p-value.

    Notes
    -----
    This function implements Test 1 in:
    Ojala and Garriga. Permutation Tests for Studying Classifier
    Performance. The Journal of Machine Learning Research (2010)
    vol. 11
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, labels, random_state), cv,
            scorer)
        for _ in range(n_permutations))
    permutation_scores = np.array(permutation_scores)
    # The +1 in numerator and denominator counts the unpermuted score as a
    # member of the null distribution, so the p-value can never be 0.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue


permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets

    Quick utility that wraps input validation and
    ``next(iter(ShuffleSplit(n_samples)))`` and application to input
    data into a single call for splitting (and optionally subsampling)
    data in a oneliner.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    *arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
        Python lists or tuples occurring in arrays are converted to 1D numpy
        arrays.
    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    stratify : array-like or None (default is None)
        If not None, data is split in a stratified fashion, using this as
        the labels array.

    Returns
    -------
    splitting : list of arrays, length=2 * len(arrays)
        List containing train-test split of input array.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_validation import train_test_split
    >>> X, y = np.arange(10).reshape((5, 2)), range(5)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
           [8, 9]])
    >>> list(y)
    [0, 1, 2, 3, 4]
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.33, random_state=42)
    ...
    >>> X_train
    array([[4, 5],
           [0, 1],
           [6, 7]])
    >>> y_train
    [2, 0, 3]
    >>> X_test
    array([[2, 3],
           [8, 9]])
    >>> y_test
    [1, 4]
    """
    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")
    # All keyword options are popped so that any leftover key can be
    # reported as invalid below.
    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    dtype = options.pop('dtype', None)
    if dtype is not None:
        warnings.warn("dtype option is ignored and will be removed in 0.18.",
                      DeprecationWarning)
    allow_nd = options.pop('allow_nd', None)
    allow_lists = options.pop('allow_lists', None)
    stratify = options.pop('stratify', None)
    if allow_lists is not None:
        warnings.warn("The allow_lists option is deprecated and will be "
                      "assumed True in 0.18 and removed.", DeprecationWarning)
    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))
    if allow_nd is not None:
        warnings.warn("The allow_nd option is deprecated and will be "
                      "assumed True in 0.18 and removed.", DeprecationWarning)
    # Deprecated behaviour: explicitly disabling lists/nd forces every
    # array through check_array first (None entries are passed through).
    if allow_lists is False or allow_nd is False:
        arrays = [check_array(x, 'csr', allow_nd=allow_nd,
                              force_all_finite=False, ensure_2d=False)
                  if x is not None else x
                  for x in arrays]
    if test_size is None and train_size is None:
        test_size = 0.25
    arrays = indexable(*arrays)
    if stratify is not None:
        cv = StratifiedShuffleSplit(stratify, test_size=test_size,
                                    train_size=train_size,
                                    random_state=random_state)
    else:
        n_samples = _num_samples(arrays[0])
        cv = ShuffleSplit(n_samples, test_size=test_size,
                          train_size=train_size,
                          random_state=random_state)
    # Only the first split of the shuffler is used.
    train, test = next(iter(cv))
    # Interleave (train, test) pairs for each input array:
    # [a_train, a_test, b_train, b_test, ...]
    return list(chain.from_iterable((safe_indexing(a, train),
                                     safe_indexing(a, test)) for a in arrays))


train_test_split.__test__ = False  # to avoid a pb with nosetests
| bsd-3-clause |
krafczyk/spack | var/spack/repos/builtin/packages/py-elephant/package.py | 4 | 2375 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyElephant(PythonPackage):
    """Elephant is a package for analysis of electrophysiology data in Python
    """

    homepage = "http://neuralensemble.org/elephant"
    url = "https://pypi.io/packages/source/e/elephant/elephant-0.3.0.tar.gz"

    version('0.4.1', '0e6214c96cae6ce777e4b3cf29bbdaa9')
    version('0.3.0', '84e69e6628fd617af469780c30d2da6c')

    # BUG FIX: the variant was declared as 'doc' but the dependencies below
    # are gated on when='+docs', so the documentation dependencies could
    # never be activated.  The variant is renamed to 'docs' to match.
    variant('docs', default=False, description='Build the documentation')
    variant('pandas', default=True, description='Build with pandas')

    depends_on('py-setuptools', type='build')
    depends_on('py-neo@0.3.4:', type=('build', 'run'))  # > 0.3.3 ?
    depends_on('py-numpy@1.8.2:', type=('build', 'run'))
    depends_on('py-quantities@0.10.1:', type=('build', 'run'))
    depends_on('py-scipy@0.14.0:', type=('build', 'run'))
    depends_on('py-pandas@0.14.1:', type=('build', 'run'), when='+pandas')
    depends_on('py-numpydoc@0.5:', type=('build', 'run'), when='+docs')
    depends_on('py-sphinx@1.2.2:', type=('build', 'run'), when='+docs')
    depends_on('py-nose@1.3.3:', type='test')
| lgpl-2.1 |
ambimanus/appsim | stats.py | 1 | 9689 | import sys
import os
from datetime import timedelta
import numpy as np
import scenario_factory
# http://www.javascripter.net/faq/hextorgb.htm
PRIMA = (148/256, 164/256, 182/256)
PRIMB = (101/256, 129/256, 164/256)
PRIM = ( 31/256, 74/256, 125/256)
PRIMC = ( 41/256, 65/256, 94/256)
PRIMD = ( 10/256, 42/256, 81/256)
EC = (1, 1, 1, 0)
GRAY = (0.5, 0.5, 0.5)
WHITE = (1, 1, 1)
def obj(target, x):
    """Objective value: sum of absolute deviations between target and x."""
    deviation = np.abs(target - x)
    return np.sum(deviation)
def _f(target, x):
    """Deviation of x from target, normalized by the target's magnitude."""
    normalization = np.sum(np.abs(target))
    return obj(target, x) / normalization
def p(basedir, fn):
    """Shorthand: join filename *fn* onto directory *basedir*."""
    joined = os.path.join(basedir, fn)
    return joined
def resample(d, resolution):
    """Downsample the innermost axis by averaging groups of *resolution*.

    The innermost axis length must be divisible by *resolution*.
    """
    n_groups = int(d.shape[-1] / resolution)
    grouped = d.reshape(tuple(d.shape[:-1]) + (n_groups, resolution))
    return grouped.sum(-1) / resolution
def load(f):
    """Load every array from an .npz file, stacked in sorted key order."""
    with np.load(f) as npz:
        arrays = [npz[key] for key in sorted(npz.keys())]
    return np.array(arrays)
def stats(fn):
    """Load a simulation scenario (JSON path *fn*), resample its result
    arrays to hourly resolution and print/return performance and
    synchronism statistics.

    Returns a tuple (st, syncs): st is [scenario title, perf%...] for each
    target/data pairing, syncs the synchronism percentages at four times.
    """
    sc_file = fn
    bd = os.path.dirname(sc_file)
    sc = scenario_factory.Scenario()
    sc.load_JSON(sc_file)
    print(sc.title)
    # Raw simulation outputs (uncontrolled run, controlled block phase,
    # post phase) and the planned schedules.
    unctrl = load(p(bd, sc.run_unctrl_datafile))
    block = load(p(bd, sc.run_ctrl_datafile))
    post = load(p(bd, sc.run_post_datafile))
    sched = load(p(bd, sc.sched_file))
    # Concatenate block + post along the time axis into one ctrl array.
    ctrl = np.zeros(unctrl.shape)
    idx = 0
    for l in (block, post):
        ctrl[:,:,idx:idx + l.shape[-1]] = l
        idx += l.shape[-1]
    if sched.shape[-1] == unctrl.shape[-1] / 15:
        print('Extending schedules shape by factor 15')
        sched = sched.repeat(15, axis=1)
    t_start, b_start, b_end = sc.t_start, sc.t_block_start, sc.t_block_end
    # Infer the schedule's time resolution (minutes per sample).
    div = 1
    if (b_end - t_start).total_seconds() / 60 == sched.shape[-1] * 15:
        div = 15
    elif (b_end - t_start).total_seconds() / 60 == sched.shape[-1] * 60:
        div = 60
    b_s = (b_start - sc.t_start).total_seconds() / 60 / div
    b_e = (b_end - sc.t_start).total_seconds() / 60 / div
    # Masked schedule array: only the block window carries values.
    # NOTE(review): b_s/b_e are floats used as slice bounds — presumably
    # always whole numbers here; modern numpy requires ints. Verify.
    ctrl_sched = np.zeros((unctrl.shape[0], unctrl.shape[-1]))
    ctrl_sched = np.ma.array(ctrl_sched)
    ctrl_sched[:,:b_s] = np.ma.masked
    ctrl_sched[:,b_s:b_e] = sched[:,b_s:b_e]
    ctrl_sched[:,b_e:] = np.ma.masked
    # plot_each_device(sc, unctrl, ctrl, sched)
    minutes = (sc.t_end - sc.t_start).total_seconds() / 60
    assert unctrl.shape[-1] == ctrl.shape[-1] == ctrl_sched.shape[-1]
    shape = unctrl.shape[-1]
    # Resample everything to 60-minute resolution.
    if minutes == shape:
        print('data is 1-minute resolution, will be resampled by 60')
        res = 60
    elif minutes == shape * 15:
        print('data is 15-minute resolution, will be resampled by 4')
        res = 4
    elif minutes == shape * 60:
        print('data is 60-minute resolution, all fine')
        res = 1
    else:
        raise RuntimeError('unsupported data resolution: %.2f' % (minutes / shape))
    unctrl = resample(unctrl, res)
    ctrl = resample(ctrl, res)
    ctrl_sched = resample(ctrl_sched, res)
    # code above is from analyze
    ###########################################################################
    # code below calculates stats
    print('mean load: %.2f kW' % (unctrl[:,0,:].sum(0).mean() / 1000.0))
    # Skip the hours before midnight of the block day.
    t_day_start = sc.t_block_start - timedelta(hours=sc.t_block_start.hour,
                                               minutes=sc.t_block_start.minute)
    skip = (t_day_start - sc.t_start).total_seconds() / 60 / 60
    i_block_start = (sc.t_block_start - t_day_start).total_seconds() / 60 / 60
    i_block_end = (sc.t_block_end - t_day_start).total_seconds() / 60 / 60
    # Aggregate electrical power over all devices (axis 0); channel 0 is
    # power, channel 2 the storage temperature (per the indexing below).
    P_el_unctrl = unctrl[:,0,skip:].sum(0)
    P_el_ctrl = ctrl[:,0,skip:].sum(0)
    P_el_sched = ctrl_sched[:,skip:].sum(0)
    T_storage_ctrl = ctrl[:,2,skip:]
    # Stats
    target = np.ma.zeros((minutes / 60 - skip,))
    target[:i_block_start] = np.ma.masked
    target[i_block_start:i_block_end] = np.array(sc.block)
    target[i_block_end:] = np.ma.masked
    # print('target = %s' % target)
    pairs = [
        (target, P_el_sched, 'target', 'P_el_sched'),
        (target, P_el_ctrl, 'target', 'P_el_ctrl'),
        (target, P_el_unctrl, 'target', 'P_el_unctrl'),
        (P_el_sched, P_el_ctrl, 'P_el_sched', 'P_el_ctrl'),
        (P_el_sched, P_el_unctrl, 'P_el_sched', 'P_el_unctrl'),
        (P_el_unctrl, P_el_ctrl, 'P_el_unctrl', 'P_el_ctrl'),
    ]
    mask = target.mask
    st = [sc.title]
    # NOTE: the loop variable `target` shadows the outer `target`; the
    # original mask was saved above for that reason.
    for target, data, tname, dname in pairs:
        target = np.ma.array(target, mask=mask)
        data = np.ma.array(data, mask=mask)
        diff = obj(target, data)
        perf = max(0, 1 - _f(target, data))
        perf_abs = perf * 100.0
        # if perf_abs > 100.0:
        #     perf_abs = 100 - min(100, max(0, perf_abs - 100))
        print('obj(%s, %s) = %.2f kW (%.2f %%)' % (tname, dname, diff / 1000.0, perf_abs))
        st.append(perf_abs)
    # Synchronism
    syncs = []
    pairs = [
        (i_block_start, 'block_start'),
        (i_block_end, 'block_end'),
        (23, 'day_end'),
        (47, 'sim_end'),
    ]
    for timestamp, name in pairs:
        # -273: Kelvin to Celsius before histogramming the temperatures.
        s = sync(T_storage_ctrl[:,timestamp] - 273) * 100.0
        syncs.append(s)
        print('sync(%s) = %.2f' % (name, s))
    print()
    return st, syncs
def sync(data):
    """Degree of synchronism of *data*: fraction of elements that share
    the most populated histogram bin (one bin per element), in [0, 1]."""
    counts, _ = np.histogram(data, len(data))
    return (counts.max() - 1) / (len(data) - 1)
def autolabel(ax, rects):
    """Annotate each bar with its height as a percentage, placed just
    below the bar top."""
    for bar in rects:
        height = bar.get_height()
        center = bar.get_x() + bar.get_width() / 2.
        ax.text(center, 0.9 * height, '%.1f \\%%' % height, ha='center',
                va='bottom', color=PRIMD, fontsize=5)
def autolabel_sync(ax, rects):
    """Annotate each bar with its height as a percentage, placed just
    above the bar top (variant used by the sync plots)."""
    for bar in rects:
        height = bar.get_height()
        center = bar.get_x() + bar.get_width() / 2.
        ax.text(center, 1.05 * height, '%.1f \\%%' % height, ha='center',
                va='bottom', color=PRIM, fontsize=6)
def plot_stats(names, target_sched, target_ctrl, target_unctrl, sched_ctrl, sched_unctrl, unctrl_ctrl):
    """Bar chart of the target-vs-schedule performance per scenario.

    Only `names` and `target_sched` are plotted; the remaining parameters
    are kept for the (commented-out) second subplot. Returns the figure.
    """
    # Imported lazily so the module can be used without a display backend.
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FixedLocator
    fig = plt.figure(figsize=(6.39, 1.75))
    fig.subplots_adjust(bottom=0.3)
    x = np.arange(len(names))
    ax0 = fig.add_subplot(111)
    ax0.set_ylim(50, 100)
    ax0.set_ylabel(r"""Planguete [\%]""", fontsize='small')
    ax0.grid(False, which='major', axis='x')
    bars = ax0.bar(x, target_sched, align='center', width=0.5, facecolor=PRIM+(0.5,), edgecolor=EC)
    autolabel(ax0, bars)
    # ax1 = fig.add_subplot(212, sharex=ax0)
    # ax1.set_ylim(50, 100)
    # ax1.set_ylabel(r"""Erbringung [\%]""", fontsize='small')
    # ax1.grid(False, which='major', axis='x')
    # bars = ax1.bar(x, target_ctrl, align='center', width=0.5, facecolor=PRIM+(0.5,), edgecolor=EC)
    # autolabel(ax1, bars)
    # plt.setp(ax0.get_xticklabels(), visible=False)
    ax0.xaxis.set_major_locator(FixedLocator(x))
    ax0.set_xticklabels(names, fontsize='xx-small', rotation=45, rotation_mode='anchor', ha='right')
    ax0.set_xlim(-0.5, x[-1] + 0.5)
    return fig
def plot_syncs(names,
               sync_block_start, sync_block_end, sync_day_end, sync_sim_end):
    """One bar-chart row per scenario showing its synchronism at the four
    measured instants. Returns the figure."""
    # Imported lazily so the module can be used without a display backend.
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FixedLocator, MaxNLocator
    fig = plt.figure(figsize=(6.39, 8))
    fig.subplots_adjust(bottom=0.1, hspace=0.6)
    # Rows = scenarios, columns = the four time points.
    data = np.array(
        [sync_block_start, sync_block_end, sync_day_end, sync_sim_end]).T
    x = np.arange(data.shape[-1])
    for i in range(len(names)):
        ax = fig.add_subplot(len(names), 1, i + 1)
        ax.set_ylim(0, 50)
        ax.set_ylabel('$\mathit{sync}(t)$', fontsize='small')
        plt.text(0.5, 1.08, names[i], fontsize='x-small', color='#555555',
                 ha='center', transform=ax.transAxes)
        ax.grid(False, which='major', axis='x')
        bars = ax.bar(x, data[i], align='center', width=0.5, facecolor=PRIM+(0.5,), edgecolor=EC)
        autolabel_sync(ax, bars)
        # ax.yaxis.set_major_locator(MaxNLocator(nbins=3))
        plt.setp(ax.get_yticklabels(), fontsize='small')
        # Only the bottom subplot gets the time-point tick labels.
        if i < len(names) - 1:
            plt.setp(ax.get_xticklabels(), visible=False)
        else:
            xticks = [
                '$t^{\mathrm{block}}_{\mathrm{start}}$',
                '$t^{\mathrm{block}}_{\mathrm{end}}$',
                '$t^{\mathrm{trade}}_{\mathrm{end}}$',
                '$t^{\mathrm{sim}}_{\mathrm{end}}$',
            ]
            ax.xaxis.set_major_locator(FixedLocator(x))
            ax.set_xticklabels(xticks, fontsize='small')
        ax.set_xlim(-0.5, x[-1] + 0.5)
    return fig
if __name__ == '__main__':
    # Collect per-scenario statistics: one entry per directory given on
    # the command line.
    names = []
    target_sched, target_ctrl, target_unctrl = [], [], []
    sched_ctrl, sched_unctrl = [], []
    unctrl_ctrl = []
    sync_block_start, sync_block_end = [], []
    sync_day_end, sync_sim_end = [], []
    for dn in sys.argv[1:]:
        if os.path.isdir(dn):
            # Each scenario directory is expected to contain '0.json'.
            st, syncs = stats(p(dn, '0.json'))
            # Fan the stats/sync tuples out into the parallel lists above.
            for l, d in zip((names, target_sched, target_ctrl, target_unctrl,
                             sched_ctrl, sched_unctrl, unctrl_ctrl),
                            st):
                l.append(d)
            for l, d in zip((sync_block_start, sync_block_end,
                             sync_day_end, sync_sim_end),
                            syncs):
                l.append(d)
    # fig = plot_stats(names, target_sched, target_ctrl, target_unctrl,
    #                  sched_ctrl, sched_unctrl, unctrl_ctrl)
    # fig.savefig(p(os.path.split(dn)[0], 'stats.pdf'))
    # import matplotlib.pyplot as plt
    # plt.show()
    # fig = plot_syncs(names, sync_block_start, sync_block_end, sync_day_end,
    #                  sync_sim_end)
    # fig.savefig(p(os.path.split(dn)[0], 'sync.pdf'))
    # plt.show()
| mit |
Zhenxingzhang/kaggle-cdiscount-classification | src/data_preparation/dataset.py | 1 | 8215 | import pandas as pd
import numpy as np
from sklearn import preprocessing
from src.common import paths
import tensorflow as tf
from src.common import consts
from os import listdir
from os.path import isfile, join
from src.vgg_fine_tuning.vgg_preprocessing import _preprocess_for_train, _preprocess_for_val
def read_record_to_queue(tf_record_name, shapes, preproc_func=None, num_epochs=10, batch_size=32,
                         capacity=2000, min_after_dequeue=1000):
    """Build TF1 queue ops that yield shuffled (image, label) batches read
    from a ZLIB-compressed TFRecord file.

    Returns (images_batch, labels_batch) ops to be executed via sess.run.
    NOTE(review): num_epochs is accepted but not forwarded to the queue
    (the argument is commented out below) — confirm this is intentional.
    """
    filename_queue = tf.train.string_input_producer([tf_record_name])  # , num_epochs=num_epochs)

    def read_and_decode_single_example(filename_queue_):
        # first construct a queue containing a list of filenames.
        # this lets a user split up their dataset in multiple files to keep
        # size down
        # Unlike the TFRecordWriter, the TFRecordReader is symbolic
        opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
        reader = tf.TFRecordReader(options=opts)
        # One can read a single serialized example from a filename
        # serialized_example is a Tensor of type string.
        _, serialized_example = reader.read(filename_queue_)
        # The serialized example is converted back to actual values.
        # One needs to describe the format of the objects to be returned
        _features = tf.parse_single_example(
            serialized_example,
            features={
                # We know the length of both fields. If not the
                # tf.VarLenFeature could be used
                '_id': tf.FixedLenFeature([], tf.int64),
                consts.IMAGE_RAW_FIELD: tf.FixedLenFeature([], tf.string),
                # consts.INCEPTION_OUTPUT_FIELD: tf.FixedLenFeature([consts.INCEPTION_CLASSES_COUNT], tf.float32),
                consts.LABEL_ONE_HOT_FIELD: tf.FixedLenFeature([], tf.int64)
            })
        # NOTE(review): 'img_raw' is hard-coded here while the parse spec
        # uses consts.IMAGE_RAW_FIELD — verify both name the same field.
        image_raw = tf.decode_raw(_features['img_raw'], tf.uint8)
        _label = tf.cast(_features[consts.LABEL_ONE_HOT_FIELD], tf.int64)
        _image = tf.reshape(image_raw, shapes)
        # Optional caller-supplied preprocessing (e.g. augmentation).
        _preproc_image = preproc_func(_image) if preproc_func is not None else _image
        return _label, _preproc_image

    # returns symbolic label and image
    label, image = read_and_decode_single_example(filename_queue)
    # groups examples into batches randomly
    images_batch, labels_batch = tf.train.shuffle_batch([image, label], batch_size=batch_size,
                                                        capacity=capacity,
                                                        min_after_dequeue=min_after_dequeue)
    return images_batch, labels_batch
def validate_train_data_queue(tfrecords_filename, shapes):
    """Smoke-test the queue pipeline: pull three batches from the given
    TFRecord file and print their image shapes."""
    img_batch, label_batch = read_record_to_queue(tfrecords_filename, shapes)
    # The op for initializing the variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        # Queue runners must be started before any dequeue op can proceed.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Let's read off 3 batches just for example
        for i in range(3):
            imgs, labels = sess.run([img_batch, label_batch])
            print(imgs.shape)
        coord.request_stop()
        coord.join(threads)
def read_train_tf_record(record):
    """Parse one serialized training example carrying the precomputed
    Inception feature vector (no raw image) and its label."""
    _features = tf.parse_single_example(
        record,
        features={
            # We know the length of both fields. If not the
            # tf.VarLenFeature could be used
            '_id': tf.FixedLenFeature([], tf.int64),
            # consts.IMAGE_RAW_FIELD: tf.FixedLenFeature([], tf.string),
            consts.INCEPTION_OUTPUT_FIELD: tf.FixedLenFeature([consts.INCEPTION_CLASSES_COUNT], tf.float32),
            consts.LABEL_ONE_HOT_FIELD: tf.FixedLenFeature([], tf.int64)
        })
    return _features
def features_dataset():
    """Build a tf.data pipeline over Inception-feature TFRecords.

    Returns (dataset, filenames_placeholder); the placeholder is fed with
    the TFRecord paths when the iterator is initialized."""
    _file_names = tf.placeholder(tf.string)
    _ds = tf.contrib.data.TFRecordDataset(_file_names, compression_type='ZLIB').map(read_train_tf_record)
    return _ds, _file_names
def _decode_jpeg(image_bytes_):
    '''
    Read JPEG encoded bytes from file and decode to 3D float Tensor
    (RGB, values converted to float32 in [0, 1]).
    '''
    image_decoded = tf.image.decode_jpeg(image_bytes_, channels=3)
    return tf.image.convert_image_dtype(image_decoded, dtype=tf.float32)
from PIL import Image
def read_train_image_record(record):
    """Parse one serialized training example holding a raw JPEG and label,
    decode the JPEG, and attach the VGG-preprocessed image as '_image'.

    The 224x224 crop / 256 resize values match the VGG input pipeline
    imported at the top of the file.
    """
    _features = tf.parse_single_example(
        record,
        features={
            # We know the length of both fields. If not the
            # tf.VarLenFeature could be used
            '_id': tf.FixedLenFeature([], tf.int64),
            consts.IMAGE_RAW_FIELD: tf.FixedLenFeature([], tf.string),
            consts.LABEL_ONE_HOT_FIELD: tf.FixedLenFeature([], tf.int64)
        })
    _features["_image"], _ = _preprocess_for_val(_decode_jpeg(
        _features[consts.IMAGE_RAW_FIELD]), None, 224, 224, 256)
    return _features
def image_dataset():
    """Build a tf.data pipeline over raw-image training TFRecords.

    Returns (dataset, filenames_placeholder)."""
    _file_names = tf.placeholder(tf.string)
    _ds = tf.contrib.data.TFRecordDataset(_file_names, compression_type='ZLIB').map(read_train_image_record)
    return _ds, _file_names
def read_test_tf_record(record):
    """Parse one serialized test example: id + Inception features, no label."""
    return tf.parse_single_example(
        record,
        features={
            '_id': tf.FixedLenFeature([], tf.int64),
            # consts.IMAGE_RAW_FIELD: tf.FixedLenFeature([], tf.string),
            consts.INCEPTION_OUTPUT_FIELD: tf.FixedLenFeature([consts.INCEPTION_CLASSES_COUNT], tf.float32),
        })
def read_test_image_record(record):
    """Parse one serialized test example: id + raw image bytes, no label."""
    return tf.parse_single_example(
        record,
        features={
            '_id': tf.FixedLenFeature([], tf.int64),
            consts.IMAGE_RAW_FIELD: tf.FixedLenFeature([], tf.string)
            # consts.INCEPTION_OUTPUT_FIELD: tf.FixedLenFeature([consts.INCEPTION_CLASSES_COUNT], tf.float32),
        })
def test_features_dataset():
    """Build a tf.data pipeline over test-set Inception-feature TFRecords.

    Returns (dataset, filenames_placeholder)."""
    file_names_ = tf.placeholder(tf.string)
    ds_ = tf.contrib.data.TFRecordDataset(file_names_, compression_type='ZLIB') \
        .map(read_test_tf_record)
    return ds_, file_names_
def test_image_dataset():
    """Build a tf.data pipeline over test-set raw-image TFRecords.

    Returns (dataset, filenames_placeholder)."""
    file_names_ = tf.placeholder(tf.string)
    ds_ = tf.contrib.data.TFRecordDataset(file_names_, compression_type='ZLIB').map(read_test_image_record)
    return ds_, file_names_
def one_hot_label_encoder(csv_path=paths.CATEGORIES):
    """Build (encode, decode) closures over the category-id vocabulary.

    encode: list of category-id strings -> np.array of integer label indices.
    decode: one-hot rows -> original category-id strings.
    """
    # Fix: `np.str` was a deprecated alias of the builtin `str`
    # (removed in NumPy >= 1.24); use `str` directly.
    _category_labels = pd.read_csv(csv_path, dtype={'category_id': str})
    _lb = preprocessing.LabelBinarizer()
    _lb.fit(_category_labels['category_id'])
    def find_max_idx(lb_vec):
        # Index of the (first) maximum entry == position of the hot bit.
        _lb_vector = lb_vec.reshape(-1).tolist()
        return _lb_vector.index(max(_lb_vector))
    def encode(lbs_str):
        _lbs_vector = np.asarray(_lb.transform(lbs_str), dtype=np.float32)
        return np.apply_along_axis(find_max_idx, 1, _lbs_vector)
    def decode(one_hots):
        return _lb.inverse_transform(np.array(one_hots))
    return encode, decode
if __name__ == '__main__':
    # one_hot_encoder, _ = one_hot_label_encoder("data/category_names.csv")
    # lb_idx = one_hot_encoder(["1000012764"])
    #
    # print(lb_idx)
    # Smoke test: read one shuffled batch of 10 training records and print
    # the ids, labels and the shape of the precomputed Inception features.
    with tf.Graph().as_default() as g, tf.Session().as_default() as sess:
        ds, filenames = features_dataset()
        # Fixed seed so the sampled batch is reproducible across runs.
        ds_iter = ds.shuffle(buffer_size=1000, seed=1).batch(10).make_initializable_iterator()
        # ds_iter = ds.batch(10).make_initializable_iterator()
        next_record = ds_iter.get_next()
        train_data_dir = "/data/data/train/tf_records/"
        # Collect every .tfrecord file in the training directory.
        file_names = [join(train_data_dir, f) for f in listdir(train_data_dir) if isfile(join(train_data_dir, f)) and f.endswith(".tfrecord")]
        # file_names = ["/data/data/train_example.tfrecords"]
        sess.run(ds_iter.initializer, feed_dict={filenames: file_names})
        features = sess.run(next_record)
        # _, one_hot_decoder = one_hot_label_encoder()
        print(features['_id'])
        print(features[consts.LABEL_ONE_HOT_FIELD])
        print(features['inception_output'].shape)
    # validate_train_data("/data/data/train_example.tfrecords", np.asarray([180, 180, 3]))
| apache-2.0 |
swalter2/PersonalizationService | Service/feature.py | 1 | 14598 | # -*- coding: utf-8 -*-
from sklearn import svm
from sklearn.feature_extraction import DictVectorizer
from sklearn import cross_validation
import numpy as np
import pickle
import sys
# Lookup lists needed for the cross-feature computations below.
RESSORTS = ['Kultur','Bielefeld','Sport Bielefeld','Politik','Sport_Bund']
NORMALIZED_PAGES = ['1','2','3','4-5','6-7','8','9-16','17-24','25+']
NORMALIZED_AGES = ['bis30','30-35','35-40','40-45','45-50','50-60','60-70','groeßer70']
SEXES = ['m','f']
EDUCATIONS = ['Mittlere Reife','Hochschulabschluss','Abitur','Sonstiges']
def _page_bucket(page):
    """Return the NORMALIZED_PAGES label for a raw page number.

    Comparison chain mirrors the original bucket boundaries exactly.
    """
    if page == 1:
        return '1'
    if page == 2:
        return '2'
    if page == 3:
        return '3'
    if page < 6:
        return '4-5'
    if page < 8:
        return '6-7'
    if page == 8:
        return '8'
    if page < 17:
        return '9-16'
    if page < 25:
        return '17-24'
    return '25+'
def normalize_pages(page):
    """One-hot encode a page number over the normalized page ranges.

    Improvements over the original: no longer shadows the builtin `dict`,
    and the repeated if/elif dict writes are collapsed into a single
    bucket lookup (same boundaries, same keys).
    """
    one_hot = {label: 0 for label in
               ('1', '2', '3', '4-5', '6-7', '8', '9-16', '17-24', '25+')}
    one_hot[_page_bucket(page)] = 1
    return one_hot
def normalize_age(age):
    """One-hot encode an age into the normalized age buckets.

    Buckets (half-open on the right): <30, [30,35), [35,40), [40,45),
    [45,50), [50,60), [60,70), >=70.  Replaces ~70 lines of copy-pasted
    dict literals with a table-driven scan; the returned keys and values
    are identical to the original.
    """
    # (label, exclusive upper bound) in ascending order.
    bounds = (('bis30', 30), ('30-35', 35), ('35-40', 40), ('40-45', 45),
              ('45-50', 50), ('50-60', 60), ('60-70', 70))
    feature = {label: 0 for label, _ in bounds}
    feature['groeßer70'] = 0
    for label, upper in bounds:
        if age < upper:
            feature[label] = 1
            break
    else:
        # No bound matched: age >= 70.
        feature['groeßer70'] = 1
    return feature
# For the prior feature over the ressorts.
def normalize_article_ressort_to_dict(article_ressort):
    """One-hot encode the (already mapped) article ressort over RESSORTS.

    Comparison is case-insensitive, matching the original behaviour.
    """
    target = article_ressort.lower()
    return {ressort: (1 if ressort.lower() == target else 0)
            for ressort in RESSORTS}
# For the user-specific ressort features.
def normalize_user_ressort_ratings_to_dict(user_information):
    """Map the user's five ressort ratings (row columns 3..7) onto RESSORTS names."""
    return {RESSORTS[i]: user_information[i + 3] for i in range(5)}
def user_information_vector(user):
    """Demographic feature dict for a user row.

    Expected layout (from the DB row): user[0] = age, user[1] = sex
    ('m'/'f'), user[2] = education label.
    """
    features = {}
    # One-hot sex.
    is_male = (user[1] == 'm')
    features['gender_m'] = 1 if is_male else 0
    features['gender_f'] = 0 if is_male else 1
    # One-hot education.  Note: user[2] is used directly as the key, so an
    # unknown label would introduce an extra feature key (kept as-is).
    for education in ('Mittlere Reife', 'Hochschulabschluss', 'Abitur', 'Sonstiges'):
        features[education] = 0
    features[user[2]] = 1
    # One-hot age buckets.
    features.update(normalize_age(user[0]))
    return features
def train(database):
    """Build a balanced training set from DB annotations and cross-validate an SVM.

    Annotations with value 1 are the positive class, value 4 the negative
    class; all other annotation values are ignored.  The raw feature dicts
    and labels are pickled to resources/ before vectorisation, then a
    linear SVC is scored with 10-fold cross-validation.
    """
    annotations = database.getannotations()
    features = []
    features_0=[]
    features_1 = []
    bewertungen = []
    bewertungen_0 = []
    bewertungen_1 = []
    annotations_clean = []
    artikel_ids = set()
    user_ids = set()
    # Keep only the two annotation values used as class labels (1 and 4).
    for annotation in annotations:
        if annotation[2] == 1:
            annotations_clean.append(annotation)
        if annotation[2] == 4:
            annotations_clean.append(annotation)
    for annotation in annotations_clean:
        artikel_ids.add(annotation[1])
        user_ids.add((annotation[0]))
    user_informations = {}
    for id in user_ids:
        user_informations[id] = database.getuserinformations(id)
        user_informations[id].append(database.getuserinterests(id)) # the interests are stored as a dict
    article_informations = {}
    for id in artikel_ids:
        article_informations[id] = database.getannotatedarticleinformations(id)
    # NOTE(review): ressort_list is fetched but never used below.
    ressort_list = database.get_ressort_list()
    for annotation in annotations:
        if annotation[2] == 1 or annotation[2] == 4:
            artikel_id = annotation[1]
            user_id = annotation[0]
            feature = {}
            article = article_informations[artikel_id]
            user = user_informations[user_id]
            normalized_article_ressort = ressort_mapping(article[2])
            feature.update(normalize_article_ressort_to_dict(normalized_article_ressort))
            feature.update(normalize_pages(article[3]))
            feature.update(user_information_vector(user))
            user_interest_list = []
            for interest_id in user[8].keys(): # build the list of user interests for the text comparison
                user_interest_list.append(user[8][interest_id]['name'])
            feature.update(compare_string_to_interests(article[0] + " " + article[1], user_interest_list,
                                                       mode='user_specific_titel_and_text'))
            # cross-feature with ressort and normalized page
            # NOTE(review): article[3] is passed twice here; the last argument
            # is article_ressort, which everywhere else comes from article[2]
            # (see ressort_mapping(article[2]) above) -- looks like a bug, confirm.
            cf_age_ressort, cf_sex_ressort, cf_edu_ressort, cf_age_page, cf_sex_page, cf_edu_page \
                = compute_cross_features(user[0], user[1], user[2], article[3], article[3])
            feature.update(cf_age_ressort)
            feature.update(cf_sex_ressort)
            feature.update(cf_edu_ressort)
            feature.update(cf_age_page)
            feature.update(cf_sex_page)
            feature.update(cf_edu_page)
            normalized_ressort_dict_user = normalize_user_ressort_ratings_to_dict(user)
            ## user X likes ressort Y and article Z is from ressort Y (5 binary features)
            ressort_specific_dict = user_specific_ressort_ratings(normalized_ressort_dict_user,
                                                                  normalized_article_ressort)
            feature.update(ressort_specific_dict)
            # user X rates ressort Y with score Z and the article is from ressort Y (25 binary features, one of them = 1)
            ressort_specific_dict_with_ratings = user_specific_ressort_explicit_ratings(normalized_ressort_dict_user,
                                                                                        normalized_article_ressort)
            feature.update(ressort_specific_dict_with_ratings)
            if annotation[2] == 4:
                bewertungen_0.append(0)
                features_0.append(feature)
            else:
                bewertungen_1.append(1)
                features_1.append(feature)
    # Balance the classes by truncating both to the size of the smaller one,
    # interleaving positive and negative examples.
    number = 0
    if len(features_0) > len(features_1):
        number = len(features_1)
    else:
        number = len(features_0)
    for i in range(0,number):
        features.append(features_1[i])
        bewertungen.append(bewertungen_1[i])
        features.append(features_0[i])
        bewertungen.append(bewertungen_0[i])
    pickle.dump(features, open("resources/training_features.p", "wb"))
    pickle.dump(bewertungen, open("resources/training_features_annotations.p", "wb"))
    vec = DictVectorizer()
    feature_vectorized = vec.fit_transform(features)
    X = np.array(feature_vectorized.toarray())
    y = np.array(bewertungen)
    clf = svm.SVC(kernel='linear', C=0.1)
    scores = cross_validation.cross_val_score(clf, X, y, cv = 10)
    value = 0.0
    for s in scores:
        value+=s
    print(scores)
    print(value/len(scores))
def ressort_mapping(ressort):
    """Collapse the many local-edition ressorts into five coarse labels.

    Unknown ressorts map to the empty string (original behaviour).
    """
    local_editions = ('Gütersloh', 'Bünde', 'Warburg', 'Herford', 'Löhne',
                      'Lübbecke', 'Höxter', 'Paderborn', 'Enger-Spenge',
                      'Bad Oeynhausen', 'Bielefeld', 'Schloss Holte', 'Beilagen')
    local_sport = ('Sport Herford', 'Sport Bielefeld', 'Sport Bad Oeynhausen',
                   'Sport Paderborn', 'Sport Bünde', 'Sport Lübbecke',
                   'Sport Schloß Holte', 'Sport Höxter', 'Sport Gütersloh')
    if ressort in local_editions:
        return 'Bielefeld'
    if ressort in local_sport:
        return 'Sport Bielefeld'
    if ressort in ('Kultur', 'Politik', 'Sport_Bund'):
        return ressort
    return ''
# For comparing the interests with a title or text.
def compare_string_to_interests(string, interest_list, mode='prior_title'):
    """Binary feature: 1 iff any interest occurs as a substring of a word.

    :param string: the article text/title to scan
    :param interest_list: user interest strings (matched case-insensitively)
    :param mode: prefix of the returned feature key ('<mode>_interest')
    :return: one-entry dict {'<mode>_interest': 0 or 1}

    Fix: the original `break` only left the inner word loop, so the scan
    kept iterating the remaining interests after a match; we now return
    immediately (same result, less work).  Also drops a stray `;`.
    """
    feature_name = mode + '_interest'
    # TODO: smarter tokenisation than a plain whitespace split.
    words = [word.lower() for word in string.split()]
    for interest in interest_list:
        needle = interest.lower()
        if any(needle in word for word in words):
            return {feature_name: 1}
    return {feature_name: 0}
# For the cross features with ressort and page_normalized.
def _active_bucket(one_hot):
    """Return the key whose value is 1 in a one-hot dict (None if all zero)."""
    for key, value in one_hot.items():
        if value == 1:
            return key
    return None
def compute_cross_features(user_age, user_sex, user_education, article_page, article_ressort):
    """Cross features between user demographics and the article's ressort/page.

    Returns six dicts of binary features, one per (demographic x context)
    combination, in the original order.

    Bug fix: the original compared the one-hot *dicts* returned by
    normalize_age()/normalize_pages() against bucket label *strings*
    (`normalize_age(user_age) == age`), which is always False, so the
    matching cross features were never set to 1.  We now compare the
    active bucket label instead.
    """
    user_age_bucket = _active_bucket(normalize_age(user_age))
    article_page_bucket = _active_bucket(normalize_pages(article_page))
    mapped_ressort = ressort_mapping(article_ressort)
    cf_age_ressort = {}
    cf_sex_ressort = {}
    cf_edu_ressort = {}
    for ressort in RESSORTS:
        matches = (mapped_ressort == ressort)
        for age in NORMALIZED_AGES:
            cf_age_ressort['%s_%s' % (ressort, age)] = int(matches and user_age_bucket == age)
        for sex in SEXES:
            cf_sex_ressort['%s_%s' % (ressort, sex)] = int(matches and user_sex == sex)
        for edu in EDUCATIONS:
            cf_edu_ressort['%s_%s' % (ressort, edu)] = int(matches and user_education == edu)
    cf_age_page = {}
    cf_sex_page = {}
    cf_edu_page = {}
    for normalized_page in NORMALIZED_PAGES:
        matches = (article_page_bucket == normalized_page)
        for age in NORMALIZED_AGES:
            cf_age_page['%s_%s' % (normalized_page, age)] = int(matches and user_age_bucket == age)
        for sex in SEXES:
            cf_sex_page['%s_%s' % (normalized_page, sex)] = int(matches and user_sex == sex)
        for edu in EDUCATIONS:
            cf_edu_page['%s_%s' % (normalized_page, edu)] = int(matches and user_education == edu)
    return cf_age_ressort, cf_sex_ressort, cf_edu_ressort, cf_age_page, cf_sex_page, cf_edu_page
# User X likes ressort Y and article Z is from ressort Y.
def user_specific_ressort_ratings(ressort_ratings_user, ressort_artikel, threshold = 3):
    """Binary per-ressort feature: 1 iff the article's ressort is rated >= threshold."""
    result = {}
    for ressort, rating in ressort_ratings_user.items():
        liked_and_matching = (ressort == ressort_artikel and rating >= threshold)
        result['ressort_specific_%s' % ressort] = 1 if liked_and_matching else 0
    return result
# User X rates ressort Y with score Z and the article is from ressort Y.
def user_specific_ressort_explicit_ratings(ressort_ratings_user, ressort_artikel):
    """Binary features crossing each ressort with each explicit rating 1..5.

    For every user-rated ressort emits five features
    'user_specific_ressort_rating_<ressort>_<j>' (j = 1..5); a feature is 1
    iff the user's rating equals j AND the article is from that ressort.

    Bug fix: the original built the name with `feature_name += '%d' % j`
    inside the loop, so the digits accumulated and produced keys ending in
    '_1', '_12', '_123', '_1234', '_12345' instead of '_1' .. '_5'.
    """
    result = {}
    for ressort in ressort_ratings_user:
        for rating in range(1, 6):
            feature_name = 'user_specific_ressort_rating_%s_%d' % (ressort, rating)
            if ressort_ratings_user[ressort] == rating and ressort_artikel == ressort:
                result[feature_name] = 1
            else:
                result[feature_name] = 0
    return result
| mit |
CKPalk/MachineLearning | FinalProject/MachineLearning/KNN/knn.py | 1 | 1351 | ''' Work of Cameron Palk '''
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def main( argv ):
    """Train and evaluate a K-NN classifier on CSV train/test files.

    argv: [script, training_csv, testing_csv, output_path].  The last
    column of each CSV is the label, the rest are features.  output_path
    is accepted for interface compatibility but currently unused.
    """
    try:
        training_filename = argv[ 1 ]
        testing_filename = argv[ 2 ]
        output_filename = argv[ 3 ]
    except IndexError:
        print( "Error, usage: \"python3 {} <training> <testing> <output>\"".format( argv[ 0 ] ) )
        return
    # Fix: DataFrame.ix was removed in pandas 1.0; these slices are purely
    # positional, so .iloc is the drop-in replacement.
    Training_DataFrame = pd.read_csv( training_filename )
    X = Training_DataFrame.iloc[:,0:-1]
    Y = Training_DataFrame.iloc[:,-1]
    Testing_DataFrame = pd.read_csv( testing_filename )
    test_X = Testing_DataFrame.iloc[:,0:-1]
    test_Y = Testing_DataFrame.iloc[:,-1]
    '''
    K-Nearest Neighbors
    '''
    # (The original banner said "Perceptron", which was misleading.)
    from sklearn.neighbors import KNeighborsClassifier
    # Hyper Parameters:
    n_neighbors = 5
    # Fit Classifier
    KNN_classifier = KNeighborsClassifier(
            n_neighbors = n_neighbors
    )
    print( "{} Started training".format( str( datetime.now() ) ) )
    KNN_classifier.fit( X, Y )
    print( "{} Stopped training".format( str( datetime.now() ) ) )
    # Report results
    print( "{} Started testing".format( str( datetime.now() ) ) )
    score = KNN_classifier.score( test_X, test_Y )
    print( "{} Stopped testing".format( str( datetime.now() ) ) )
    print( "\nK-NN with {} cluster has Accuracy: {}%".format( n_neighbors, round( score * 100, 3 ) ) )
#
if __name__=='__main__':
main( sys.argv )
| mit |
yyjiang/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
jasonmccampbell/numpy-refactor-sprint | doc/sphinxext/plot_directive.py | 12 | 17831 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directoly where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
import sphinx
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
    """Sphinx extension entry point: register config values and the directive."""
    # Stash the app/config on the function object; run() and makefig()
    # read them back via the module-level `setup` name.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    app.add_config_value('plot_pre_code', '', True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_directive('plot', plot_directive, True, (0, 1, False),
                      **plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Thin docutils directive wrapper; all real work happens in run().
    return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    # NOTE(review): the module docstring documents the format option as
    # {'python', 'doctest'}, but 'lisp' is accepted here instead of
    # 'doctest'.  run() only checks for 'python' and treats any other
    # accepted value as doctest, so this still works -- confirm intent.
    return directives.choice(arg, ('python', 'lisp'))
def _option_align(arg):
    # Validate the image :align: option against docutils' allowed values.
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
# Option spec for the plot directive: mostly the standard image options
# plus include-source and format, validated by the helpers above.
plot_directive_options = {'alt': directives.unchanged,
                          'height': directives.length_or_unitless,
                          'width': directives.length_or_percentage_or_unitless,
                          'scale': directives.nonnegative_int,
                          'align': _option_align,
                          'class': directives.class_option,
                          'include-source': _option_boolean,
                          'format': _option_format,
                          }
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        """Render *template* with the given keyword context (Jinja2)."""
        return jinja2.Template(template).render(**kw)
except ImportError:
    import jinja
    def format_template(template, **kw):
        """Render *template* with the given keyword context (classic Jinja)."""
        return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_code %}
(`Source code <{{ source_link }}>`__)
.. admonition:: Output
:class: plot-output
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
(
{%- if not source_code -%}
`Source code <{{source_link}}>`__
{%- for fmt in img.formats -%}
, `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- else -%}
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endif -%}
)
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
    """One generated figure: a basename plus the output formats produced for it."""
    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        self.formats = []
    def filename(self, format):
        """Full path of this image rendered in *format*."""
        return os.path.join(self.dirname, self.basename + "." + format)
    def filenames(self):
        """Full paths for every format recorded so far, in insertion order."""
        return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
    """Execute the plot directive: build figures and splice reST into the doc.

    The code comes either from a file argument or from the directive body
    (mutually exclusive).  Figures are rendered into a build directory,
    referenced through the TEMPLATE, and finally copied to the builder's
    output directory.
    """
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")
    document = state_machine.document
    config = document.settings.env.config
    options.setdefault('include-source', config.plot_include_source)
    # determine input
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)
    if arguments:
        if not config.plot_basedir:
            source_file_name = os.path.join(rst_dir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        code = open(source_file_name, 'r').read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        # Per-document counter gives each inline plot a unique basename.
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')
    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if options.has_key('format'):
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True
    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]
    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext
    # make figures
    try:
        images = makefig(code, source_file_name, build_dir, output_base,
                         config)
    except PlotError, err:
        # Report plotting failures as a docutils system message instead of
        # aborting the whole build.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            3, "Exception occurred in plotting %s: %s" % (output_base, err),
            line=lineno)
        return [sm]
    # generate output restructuredtext
    if options['include-source']:
        if is_doctest:
            lines = ['']
            lines += [row.rstrip() for row in code.split('\n')]
        else:
            lines = ['.. code-block:: python', '']
            lines += ['    %s' % row.rstrip() for row in code.split('\n')]
        source_code = "\n".join(lines)
    else:
        source_code = ""
    opts = [':%s: %s' % (key, val) for key, val in options.items()
            if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
    if sphinx.__version__ >= "0.6":
        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
    else:
        only_html = ".. htmlonly::"
        only_latex = ".. latexonly::"
    result = format_template(
        TEMPLATE,
        dest_dir=dest_dir_link,
        build_dir=build_dir_link,
        source_link=source_link,
        only_html=only_html,
        only_latex=only_latex,
        options=opts,
        images=images,
        source_code=source_code)
    lines = result.split("\n")
    if len(lines):
        state_machine.insert_input(
            lines, state_machine.input_lines.source(0))
    # copy image files to builder's output directory
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    for img in images:
        for fn in img.filenames():
            shutil.copyfile(fn, os.path.join(dest_dir, os.path.basename(fn)))
    # copy script (if necessary)
    if source_file_name == rst_file:
        target_name = os.path.join(dest_dir, output_base + source_ext)
        f = open(target_name, 'w')
        f.write(unescape_doctest(code))
        f.close()
    return []
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
    """Return True if *text* looks like doctest input rather than plain Python."""
    try:
        # If it parses as-is, it is valid Python, not doctest.
        compile(text, '<string>', 'exec')
    except SyntaxError:
        pass
    else:
        return False
    return re.search(r'^\s*>>>', text, re.M) is not None
def unescape_doctest(text):
    """
    Extract runnable code from *text*, which contains either plain Python
    code or doctest-formatted input.
    """
    if not contains_doctest(text):
        return text
    prompt = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for line in text.split("\n"):
        match = prompt.match(line)
        if match:
            pieces.append(match.group(2) + "\n")
        elif line.strip():
            # Expected-output lines become comments.
            pieces.append("# " + line.strip() + "\n")
        else:
            pieces.append("\n")
    return "".join(pieces)
class PlotError(RuntimeError):
    """Raised when executing or rendering a plot script fails."""
    pass
def run_code(code, code_path):
    """Execute *code* (Python 2 `exec`) in a fresh namespace and return it.

    Temporarily switches cwd/sys.path to the script's directory, silences
    stdout and fakes sys.argv; everything is restored in the finally block.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.
    pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)
    # Redirect stdout
    stdout = sys.stdout
    sys.stdout = cStringIO.StringIO()
    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]
    try:
        try:
            code = unescape_doctest(code)
            ns = {}
            exec setup.config.plot_pre_code in ns
            exec code in ns
        except (Exception, SystemExit), err:
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
    """
    True if *derived* is missing or older than *original* (both full paths).
    """
    if not os.path.exists(derived):
        return True
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
def makefig(code, code_path, output_dir, output_base, config):
    """
    Run a pyplot script *code* and save the images under *output_dir*
    with file names derived from *output_base*
    """
    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
    formats = []
    for fmt in config.plot_formats:
        if isinstance(fmt, str):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt)==2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
    # -- Try to determine if all images already exist
    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)
    if all_exists:
        # Everything up to date -> skip re-running the script entirely.
        return [img]
    # Then look for multi-figure output files
    images = []
    all_exists = True
    for i in xrange(1000):
        img = ImageFile('%s_%02d' % (output_base, i), output_dir)
        for format, dpi in formats:
            if out_of_date(code_path, img.filename(format)):
                all_exists = False
                break
            img.formats.append(format)
        # assume that if we have one, we have them all
        if not all_exists:
            all_exists = (i > 0)
            break
        images.append(img)
    if all_exists:
        return images
    # -- We didn't find the files, so build them
    # Clear between runs
    plt.close('all')
    # Run code
    run_code(code, code_path)
    # Collect images
    images = []
    fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
    for i, figman in enumerate(fig_managers):
        # A single figure keeps the plain basename; multiple figures get
        # a _00, _01, ... suffix.
        if len(fig_managers) == 1:
            img = ImageFile(output_base, output_dir)
        else:
            img = ImageFile("%s_%02d" % (output_base, i), output_dir)
        images.append(img)
        for format, dpi in formats:
            try:
                figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
            except exceptions.BaseException, err:
                raise PlotError(traceback.format_exc())
            img.formats.append(format)
    return images
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
    from os.path import relpath
except ImportError:
    # Fallback for Python < 2.6 (note the Python-2-only `<>` and
    # `raise E, msg` syntax below -- this branch predates Python 3).
    def relpath(target, base=os.curdir):
        """
        Return a relative path to the target from either the current
        dir or an optional base dir. Base can be a directory
        specified either as absolute or relative to current dir.
        """
        if not os.path.exists(target):
            raise OSError, 'Target does not exist: '+target
        if not os.path.isdir(base):
            raise OSError, 'Base is not a directory or does not exist: '+base
        base_list = (os.path.abspath(base)).split(os.sep)
        target_list = (os.path.abspath(target)).split(os.sep)
        # On the windows platform the target may be on a completely
        # different drive from the base.
        if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
            raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
        # Starting from the filepath root, work out how much of the
        # filepath is shared by base and target.
        for i in range(min(len(base_list), len(target_list))):
            if base_list[i] <> target_list[i]: break
        else:
            # If we broke out of the loop, i is pointing to the first
            # differing path elements. If we didn't break out of the
            # loop, i is pointing to identical path elements.
            # Increment i so that in all cases it points to the first
            # differing path elements.
            i+=1
        rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
        return os.path.join(*rel_list)
| bsd-3-clause |
ywcui1990/nupic.research | projects/sequence_prediction/continuous_sequence/run_tm_model.py | 3 | 16411 | ## ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import importlib
from optparse import OptionParser
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
from nupic.frameworks.opf import metrics
# from htmresearch.frameworks.opf.clamodel_custom import CLAModel_custom
import nupic_output
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import yaml
from htmresearch.support.sequence_learning_utils import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams['pdf.fonttype'] = 42
plt.ion()
DATA_DIR = "./data"
MODEL_PARAMS_DIR = "./model_params"
def getMetricSpecs(predictedField, stepsAhead=5):
  """Build the pair of OPF metric specs used by this experiment.

  :param predictedField: name of the field whose predictions are scored
  :param stepsAhead: prediction horizon (number of steps) for both metrics
  :returns: tuple of two MetricSpec objects over multiStepBestPredictions,
      the first scoring negative log-likelihood, the second NRMSE,
      both over a 1000-record sliding window.
  """
  sharedParams = {'window': 1000, 'steps': stepsAhead}
  specs = []
  for errorMetric in ('negativeLogLikelihood', 'nrmse'):
    params = dict(sharedParams, errorMetric=errorMetric)
    specs.append(MetricSpec(field=predictedField, metric='multiStep',
                            inferenceElement='multiStepBestPredictions',
                            params=params))
  return tuple(specs)
def createModel(modelParams, fieldToPredict=None):
  """Create an OPF model and enable inference on one field.

  :param modelParams: model parameter dict as accepted by ModelFactory.create
  :param fieldToPredict: name of the field to predict. Defaults to the
      module-level ``predictedField`` (set in the ``__main__`` block) so
      existing single-argument callers keep working; passing it explicitly
      makes the function usable outside script execution.
  :returns: the configured OPF model
  """
  if fieldToPredict is None:
    # Backward-compatible fallback: the original implementation read this
    # module global directly (it is only defined when run as a script).
    fieldToPredict = predictedField
  model = ModelFactory.create(modelParams)
  model.enableInference({"predictedField": fieldToPredict})
  return model
def getModelParamsFromName(dataSet):
  """Load the model parameter dict for a known dataset name.

  :param dataSet: dataset name; only the nyc_taxi family is supported
  :returns: the parsed YAML model-parameter dict
  :raises Exception: if no model params exist for ``dataSet``
  """
  if dataSet in ("nyc_taxi", "nyc_taxi_perturb", "nyc_taxi_perturb_baseline"):
    # Use a context manager so the file handle is closed promptly instead
    # of being leaked (the original left the open() handle unclosed).
    with open('model_params/nyc_taxi_model_params.yaml') as paramsFile:
      importedModelParams = yaml.safe_load(paramsFile)
  else:
    raise Exception("No model params exist for {}".format(dataSet))
  return importedModelParams
def _getArgs():
  """Parse command-line options for this experiment.

  :returns: (options, remainder) pair from optparse; ``options`` carries
      dataSet, plot, stepsAhead and classifier settings.
  """
  parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
                              "\n\nCompare TM performance with trivial predictor using "
                              "model outputs in prediction directory "
                              "and outputting results to result directory.")
  parser.add_option("-d",
                    "--dataSet",
                    type=str,
                    default='nyc_taxi',
                    dest="dataSet",
                    help="DataSet Name, choose from rec-center-hourly, nyc_taxi")

  parser.add_option("-p",
                    "--plot",
                    default=False,
                    dest="plot",
                    help="Set to True to plot result")

  parser.add_option("--stepsAhead",
                    help="How many steps ahead to predict. [default: %default]",
                    default=5,
                    type=int)

  parser.add_option("-c",
                    "--classifier",
                    type=str,
                    default='SDRClassifierRegion',
                    dest="classifier",
                    help="Classifier Type: SDRClassifierRegion or CLAClassifierRegion")

  (options, remainder) = parser.parse_args()
  # print() with a single argument is valid on both Python 2 and Python 3;
  # the original `print options` statement is a syntax error under Python 3.
  print(options)

  return options, remainder
def getInputRecord(df, predictedField, i):
  """Build the model input record for row ``i`` of ``df``.

  :param df: tabular data supporting df[column][i] lookup (e.g. a DataFrame)
  :param predictedField: column name of the field being predicted
  :param i: row index
  :returns: dict mapping the predicted field plus "timeofday" and
      "dayofweek" to float values.
  """
  record = {predictedField: float(df[predictedField][i])}
  for fieldName in ("timeofday", "dayofweek"):
    record[fieldName] = float(df[fieldName][i])
  return record
def printTPRegionParams(tpregion):
  """
  Print the temporal-memory parameters of a TPRegion to stdout.

  Note: assumes we are using TemporalMemory/TPShim in the TPRegion,
  i.e. the region's self object exposes a ``_tfdr`` temporal memory.
  """
  tm = tpregion.getSelf()._tfdr
  # Single-argument print() calls are valid on both Python 2 and Python 3;
  # the original Python 2 print statements break under Python 3. The "%s"
  # formatting reproduces the original comma-separated print output.
  print("------------PY  TemporalMemory Parameters ------------------")
  print("numberOfCols = %s" % (tm.getColumnDimensions(),))
  print("cellsPerColumn = %s" % (tm.getCellsPerColumn(),))
  print("minThreshold = %s" % (tm.getMinThreshold(),))
  print("activationThreshold = %s" % (tm.getActivationThreshold(),))
  print("newSynapseCount = %s" % (tm.getMaxNewSynapseCount(),))
  print("initialPerm = %s" % (tm.getInitialPermanence(),))
  print("connectedPerm = %s" % (tm.getConnectedPermanence(),))
  print("permanenceInc = %s" % (tm.getPermanenceIncrement(),))
  print("permanenceDec = %s" % (tm.getPermanenceDecrement(),))
  print("predictedSegmentDecrement= %s" % (tm.getPredictedSegmentDecrement(),))
  print("")
def runMultiplePass(df, model, nMultiplePass, nTrain):
  """
  Run the full CLA model over data records [0, nTrain) for nMultiplePass
  passes, resetting the temporal memory between passes.

  :param df: training data supporting df[column][i] lookup
  :param model: an OPF model with learning enabled
  :param nMultiplePass: number of complete passes over the training prefix
  :param nTrain: number of leading records to train on
  :returns: the trained model
  """
  predictedField = model.getInferenceArgs()['predictedField']
  print("run TM through the train data multiple times")
  # range/print() keep this valid under Python 3 as well (xrange and the
  # print statement are Python 2 only); iteration behavior is unchanged.
  for nPass in range(nMultiplePass):
    for j in range(nTrain):
      inputRecord = getInputRecord(df, predictedField, j)
      # Return value is intentionally discarded; only the model's learned
      # state matters here.
      model.run(inputRecord)
      if j % 100 == 0:
        print(" pass %i, record %i" % (nPass, j))
    # Reset temporal memory between passes so sequence state does not
    # bleed from the end of one pass into the start of the next.
    model._getTPRegion().getSelf()._tfdr.reset()

  return model
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
  """
  Run only the sensor + spatial pooler stages of the CLA model over data
  records [0, nTrain) for nMultiplePass passes (no temporal memory, no
  classifier), to pre-train the SP.

  :param df: training data supporting df[column][i] lookup
  :param model: an OPF model exposing _sensorCompute/_spCompute
  :param nMultiplePass: number of complete passes over the training prefix
  :param nTrain: number of leading records to train on
  :returns: the model with a pre-trained spatial pooler
  """
  predictedField = model.getInferenceArgs()['predictedField']
  print("run TM through the train data multiple times")
  # range/print() keep this valid under Python 3 as well (xrange and the
  # print statement are Python 2 only); iteration behavior is unchanged.
  for nPass in range(nMultiplePass):
    for j in range(nTrain):
      inputRecord = getInputRecord(df, predictedField, j)
      model._sensorCompute(inputRecord)
      model._spCompute()
      if j % 400 == 0:
        print(" pass %i, record %i" % (nPass, j))

  return model
def movingAverage(a, n):
  """Return the trailing moving average of sequence ``a``.

  The window at index i covers a[max(0, i - n):i + 1], i.e. up to n + 1
  elements once i >= n (this preserves the original window semantics).

  :param a: sequence of numbers
  :param n: trailing-window offset
  :returns: list of float averages, one per element of ``a``
  """
  averaged = []
  # range() instead of xrange() so the function also runs under Python 3;
  # `averaged` no longer shadows the function's own name as the original
  # local `movingAverage` did.
  for i in range(len(a)):
    start = max(0, i - n)
    window = a[start:i + 1]
    averaged.append(sum(window) / float(len(window)))
  return averaged
if __name__ == "__main__":
  # --- Parse CLI options and resolve dataset-specific settings ---
  (_options, _args) = _getArgs()
  dataSet = _options.dataSet
  plot = _options.plot
  classifierType = _options.classifier

  if dataSet == "rec-center-hourly":
    DATE_FORMAT = "%m/%d/%y %H:%M"  # '7/2/10 0:00'
    predictedField = "kw_energy_consumption"
  elif dataSet == "nyc_taxi" or dataSet == "nyc_taxi_perturb" or dataSet == "nyc_taxi_perturb_baseline":
    DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
    predictedField = "passenger_count"
  else:
    raise RuntimeError("un recognized dataset")

  # --- Build the model: inject steps-ahead and classifier type into the
  # loaded parameter dict, then enable full learning ---
  modelParams = getModelParamsFromName(dataSet)
  modelParams['modelParams']['clParams']['steps'] = str(_options.stepsAhead)
  modelParams['modelParams']['clParams']['regionName'] = classifierType

  print "Creating model from %s..." % dataSet
  # use customized CLA model
  model = ModelFactory.create(modelParams)
  model.enableInference({"predictedField": predictedField})
  model.enableLearning()
  model._spLearningEnabled = True
  model._tpLearningEnabled = True

  printTPRegionParams(model._getTPRegion())

  inputData = "%s/%s.csv" % (DATA_DIR, dataSet.replace(" ", "_"))

  # The classifier input encoder lives on the sensor's disabledEncoder list
  # (first entry) when present; otherwise there is no classifier encoder.
  sensor = model._getSensorRegion()
  encoderList = sensor.getSelf().encoder.getEncoderList()
  if sensor.getSelf().disabledEncoder is not None:
    classifier_encoder = sensor.getSelf().disabledEncoder.getEncoderList()
    classifier_encoder = classifier_encoder[0]
  else:
    classifier_encoder = None

  # Metrics: negLL and NRMSE over a 1000-record window (see getMetricSpecs).
  _METRIC_SPECS = getMetricSpecs(predictedField, stepsAhead=_options.stepsAhead)
  metric = metrics.getModule(_METRIC_SPECS[0])
  metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
                                  model.getInferenceType())

  if plot:
    # Interactive live-plot window (figure 2 is drawn inside the main loop).
    plotCount = 1
    plotHeight = max(plotCount * 3, 6)
    fig = plt.figure(figsize=(14, plotHeight))
    gs = gridspec.GridSpec(plotCount, 1)
    plt.title(predictedField)
    plt.ylabel('Data')
    plt.xlabel('Timed')
    plt.tight_layout()
    plt.ion()

  print "Load dataset: ", dataSet
  # skiprows=[1, 2] drops the two OPF header rows (type/flag rows) after
  # the column-name row.
  df = pd.read_csv(inputData, header=0, skiprows=[1, 2])

  # Pre-train the spatial pooler alone on the first nTrain records, then
  # freeze SP learning for the main run.
  nMultiplePass = 5
  nTrain = 5000
  print " run SP through the first %i samples %i passes " %(nMultiplePass, nTrain)
  model = runMultiplePassSPonly(df, model, nMultiplePass, nTrain)
  model._spLearningEnabled = False

  # Number of distinct classifier buckets for this encoder.
  maxBucket = classifier_encoder.n - classifier_encoder.w + 1
  likelihoodsVecAll = np.zeros((maxBucket, len(df)))

  # Per-record tracking buffers.
  prediction_nstep = None
  time_step = []
  actual_data = []
  patternNZ_track = []
  predict_data = np.zeros((_options.stepsAhead, 0))
  predict_data_ML = []
  negLL_track = []

  activeCellNum = []
  predCellNum = []
  predSegmentNum = []
  predictedActiveColumnsNum = []
  trueBucketIndex = []
  sp = model._getSPRegion().getSelf()._sfdr
  spActiveCellsCount = np.zeros(sp.getColumnDimensions())

  output = nupic_output.NuPICFileOutput([dataSet])
  # --- Main loop: feed every record through the model, collecting
  # predictions, bucket likelihoods and TM/SP activity statistics ---
  for i in xrange(len(df)):
    inputRecord = getInputRecord(df, predictedField, i)
    tp = model._getTPRegion()
    tm = tp.getSelf()._tfdr
    # Snapshot the cells predicted BEFORE this record so we can later check
    # which of them became active (correct predictions).
    prePredictiveCells = tm.getPredictiveCells()
    # NOTE(review): integer division of cell index by cellsPerColumn yields
    # column indices under Python 2; under Python 3 `/` would be float
    # division — this file assumes Python 2.
    prePredictiveColumn = np.array(list(prePredictiveCells)) / tm.cellsPerColumn

    result = model.run(inputRecord)
    trueBucketIndex.append(model._getClassifierInputRecord(inputRecord).bucketIndex)
    predSegmentNum.append(len(tm.activeSegments))

    # Track how often each SP column has been active so far.
    sp = model._getSPRegion().getSelf()._sfdr
    spOutput = model._getSPRegion().getOutputData('bottomUpOut')
    spActiveCellsCount[spOutput.nonzero()[0]] += 1

    activeDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
    sp.getActiveDutyCycles(activeDutyCycle)
    overlapDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
    sp.getOverlapDutyCycles(overlapDutyCycle)

    if i % 100 == 0 and i > 0:
      # Periodic diagnostic histograms of SP duty cycles (figure 1).
      plt.figure(1)
      plt.clf()
      plt.subplot(2, 2, 1)
      plt.hist(overlapDutyCycle)
      plt.xlabel('overlapDutyCycle')

      plt.subplot(2, 2, 2)
      plt.hist(activeDutyCycle)
      plt.xlabel('activeDutyCycle-1000')

      plt.subplot(2, 2, 3)
      plt.hist(spActiveCellsCount)
      plt.xlabel('activeDutyCycle-Total')
      plt.draw()

    # Post-run TM state: active cells and cells predicted for the NEXT step.
    tp = model._getTPRegion()
    tm = tp.getSelf()._tfdr
    tpOutput = tm.infActiveState['t']

    predictiveCells = tm.getPredictiveCells()
    predCellNum.append(len(predictiveCells))
    predColumn = np.array(list(predictiveCells)) / tm.cellsPerColumn

    patternNZ = tpOutput.reshape(-1).nonzero()[0]
    activeColumn = patternNZ / tm.cellsPerColumn
    activeCellNum.append(len(patternNZ))

    # Columns that were predicted before this record AND became active.
    predictedActiveColumns = np.intersect1d(prePredictiveColumn, activeColumn)
    predictedActiveColumnsNum.append(len(predictedActiveColumns))

    result.metrics = metricsManager.update(result)
    negLL = result.metrics["multiStepBestPredictions:multiStep:"
                           "errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
                           "field=%s" % (_options.stepsAhead, predictedField)]
    if i % 100 == 0 and i > 0:
      # Periodic progress report with windowed metrics.
      negLL = result.metrics["multiStepBestPredictions:multiStep:"
                             "errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
                             "field=%s" % (_options.stepsAhead, predictedField)]
      nrmse = result.metrics["multiStepBestPredictions:multiStep:"
                             "errorMetric='nrmse':steps=%d:window=1000:"
                             "field=%s" % (_options.stepsAhead, predictedField)]

      numActiveCell = np.mean(activeCellNum[-100:])
      numPredictiveCells = np.mean(predCellNum[-100:])
      numCorrectPredicted = np.mean(predictedActiveColumnsNum[-100:])

      print "After %i records, %d-step negLL=%f nrmse=%f ActiveCell %f PredCol %f CorrectPredCol %f" % \
            (i, _options.stepsAhead, negLL, nrmse, numActiveCell,
             numPredictiveCells, numCorrectPredicted)

    last_prediction = prediction_nstep
    prediction_nstep = \
      result.inferences["multiStepBestPredictions"][_options.stepsAhead]
    output.write([i], [inputRecord[predictedField]], [float(prediction_nstep)])

    # Dense per-bucket likelihood vector for this record.
    bucketLL = \
      result.inferences['multiStepBucketLikelihoods'][_options.stepsAhead]
    likelihoodsVec = np.zeros((maxBucket,))
    if bucketLL is not None:
      for (k, v) in bucketLL.items():
        likelihoodsVec[k] = v

    time_step.append(i)
    actual_data.append(inputRecord[predictedField])
    predict_data_ML.append(
      result.inferences['multiStepBestPredictions'][_options.stepsAhead])
    negLL_track.append(negLL)

    likelihoodsVecAll[0:len(likelihoodsVec), i] = likelihoodsVec

    if plot and i > 500:
      # prepare data for display: show the last 500 records, aligning the
      # n-step-ahead predictions with the data they predicted.
      if i > 100:
        time_step_display = time_step[-500:-_options.stepsAhead]
        actual_data_display = actual_data[-500 + _options.stepsAhead:]
        predict_data_ML_display = predict_data_ML[-500:-_options.stepsAhead]
        likelihood_display = likelihoodsVecAll[:, i - 499:i - _options.stepsAhead + 1]
        xl = [(i) - 500, (i)]
      else:
        time_step_display = time_step
        actual_data_display = actual_data
        predict_data_ML_display = predict_data_ML
        likelihood_display = likelihoodsVecAll[:, :i + 1]
        xl = [0, (i)]

      plt.figure(2)
      plt.clf()
      plt.imshow(likelihood_display,
                 extent=(time_step_display[0], time_step_display[-1], 0, 40000),
                 interpolation='nearest', aspect='auto',
                 origin='lower', cmap='Reds')
      plt.colorbar()
      plt.plot(time_step_display, actual_data_display, 'k', label='Data')
      plt.plot(time_step_display, predict_data_ML_display, 'b', label='Best Prediction')
      plt.xlim(xl)
      plt.xlabel('Time')
      plt.ylabel('Prediction')
      # plt.title('TM, useTimeOfDay='+str(True)+' '+dataSet+' test neg LL = '+str(np.nanmean(negLL)))
      # NOTE(review): these hard-coded limits override the xl window above —
      # presumably tuned for one particular nyc_taxi run; confirm intent.
      plt.xlim([17020, 17300])
      plt.ylim([0, 30000])
      plt.clim([0, 1])
      plt.draw()

  # --- Post-run evaluation: shift predictions into alignment with the data
  # they predicted and score NRMSE over the test portion ---
  predData_TM_n_step = np.roll(np.array(predict_data_ML), _options.stepsAhead)
  nTest = len(actual_data) - nTrain - _options.stepsAhead
  NRMSE_TM = NRMSE(actual_data[nTrain:nTrain + nTest], predData_TM_n_step[nTrain:nTrain + nTest])
  print "NRMSE on test data: ", NRMSE_TM
  output.close()

  # calculate neg-likelihood
  predictions = np.transpose(likelihoodsVecAll)
  # NOTE(review): truth is rolled by a literal 5, not _options.stepsAhead —
  # only consistent when stepsAhead == 5; confirm.
  truth = np.roll(actual_data, -5)

  from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
  encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
  from plot import computeLikelihood, plotAccuracy

  # Manual per-record negLL computation (overwritten by computeLikelihood
  # below; this loop's result is discarded).
  bucketIndex2 = []
  negLL = []
  minProb = 0.0001
  for i in xrange(len(truth)):
    bucketIndex2.append(np.where(encoder.encode(truth[i]))[0])
    outOfBucketProb = 1 - sum(predictions[i, :])
    prob = predictions[i, bucketIndex2[i]]
    if prob == 0:
      prob = outOfBucketProb
    if prob < minProb:
      prob = minProb
    negLL.append(-np.log(prob))

  negLL = computeLikelihood(predictions, truth, encoder)
  # Blank out the training prefix so plots show test-phase accuracy only.
  negLL[:5000] = np.nan
  x = range(len(negLL))
  plt.figure()
  plotAccuracy((negLL, x), truth, window=480, errorType='negLL')

  np.save('./result/' + dataSet + classifierType + 'TMprediction.npy', predictions)
  np.save('./result/' + dataSet + classifierType + 'TMtruth.npy', truth)

  # Sparsity of TM activity over training (moving average of active cells).
  plt.figure()
  activeCellNumAvg = movingAverage(activeCellNum, 100)
  plt.plot(np.array(activeCellNumAvg) / tm.numberOfCells())
  plt.xlabel('data records')
  plt.ylabel('sparsity')
  plt.xlim([0, 5000])
  plt.savefig('result/sparsity_over_training.pdf')

  # Predicted-segment ("NMDA spike") vs active-cell counts over training.
  plt.figure()
  predCellNumAvg = movingAverage(predCellNum, 100)
  predSegmentNumAvg = movingAverage(predSegmentNum, 100)
  # plt.plot(np.array(predCellNumAvg))
  plt.plot(np.array(predSegmentNumAvg), 'r', label='NMDA spike')
  plt.plot(activeCellNumAvg, 'b', label='spikes')
  plt.xlabel('data records')
  plt.ylabel('NMDA spike #')
  plt.legend()
  plt.xlim([0, 5000])
  plt.ylim([0, 42])
  plt.savefig('result/nmda_spike_over_training.pdf')
oemof/reegis-hp | reegis_hp/waermetool/heat_demand.py | 3 | 4420 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 14:35:28 2016
@author: uwe
"""
import logging
import time
import os
import pandas as pd
import numpy as np
from oemof.tools import logger
logger.define_logging()
start = time.time()

# NOTE(review): hard-coded user-specific paths — presumably a personal
# workstation setup; confirm before reuse.
sync_path = '/home/uwe/chiba/RLI/data'
basic_path = os.path.join(os.path.expanduser('~'), '.reegis_hp', 'heat_demand')

# Create ~/.reegis_hp/heat_demand if it does not exist yet.
if not os.path.isdir(os.path.join(os.path.expanduser('~'), '.reegis_hp')):
    os.mkdir(os.path.join(os.path.expanduser('~'), '.reegis_hp'))
if not os.path.isdir(basic_path):
    os.mkdir(basic_path)

filepath = os.path.join(basic_path, "waermetool_berlin.hdf")

# Define names of standardised buildings
std_buildings = ['EFHv84', 'EFHn84', 'MFHv84', 'MFHn84', 'Platte']
# Fraction of each standardised building type that is refurbished
# ("saniert"), in the same order as std_buildings.
sanierungsquote = np.array([0.12, 0.03, 0.08, 0.01, 0.29])

logging.info("Datapath: {0}:".format(basic_path))

# Load the yearly demand of the standardised buildings
wt_demand = pd.read_csv(os.path.join(sync_path, 'waermetool_demand.csv'),
                        index_col=0)

# Load assignment of standardised building types to all area types
iwu4types = pd.read_csv(os.path.join(sync_path, 'iwu_typen.csv'), index_col=0)

# Load list of area types with full name and "typklar" name from db
blocktype = pd.read_csv(os.path.join(sync_path, 'blocktype.csv'), ';',
                        index_col=0)

# Load "stadtnutzung" from SenStadt extended by residents and population density
stadtnutzung = pd.read_csv(
    os.path.join(sync_path, 'stadtnutzung_erweitert.csv'), index_col=0)

# Merge "typklar" as blocktype to fraction of each iwu-type
iwu4block = iwu4types.merge(blocktype, left_index=True, right_index=True)

# Merge fraction of building types to all blocks
stadtnutzung_full = stadtnutzung.merge(iwu4block, right_on='blocktype',
                                       left_on='typklar')

# Determine the living area (residents * living area per resident)
stadtnutzung_full['living_area'] = (stadtnutzung_full.ew *
                                    stadtnutzung_full.wohnflaeche_pro_ew)

# Demand per block and building type for the UNrefurbished share:
# living area * type fraction * specific demand * (1 - refurbishment quota)
demand_by_type_unsaniert = pd.DataFrame(
    (stadtnutzung_full[[
        'EFHv84', 'EFHn84', 'MFHv84', 'MFHn84', 'Platte']].multiply(
            stadtnutzung_full.living_area, axis="index").values *
     wt_demand['unsaniert'].values * (1 - sanierungsquote)),
    columns=['EFHv84', 'EFHn84', 'MFHv84', 'MFHn84', 'Platte']).merge(
        stadtnutzung_full[['spatial_na', 'schluessel_planungsraum']],
        left_index=True, right_index=True)

# Same computation for the refurbished share (quota instead of 1 - quota).
demand_by_type_saniert = pd.DataFrame(
    (stadtnutzung_full[[
        'EFHv84', 'EFHn84', 'MFHv84', 'MFHn84', 'Platte']].multiply(
            stadtnutzung_full.living_area, axis="index").values *
     wt_demand['saniert'].values * sanierungsquote),
    columns=['EFHv84', 'EFHn84', 'MFHv84', 'MFHn84', 'Platte']).merge(
        stadtnutzung_full[['spatial_na', 'schluessel_planungsraum']],
        left_index=True, right_index=True)

total_demand_wt = (demand_by_type_saniert[std_buildings].sum().sum() +
                   demand_by_type_unsaniert[std_buildings].sum().sum())

# Combine both shares into one frame and sum them per building type.
demand_by_type = demand_by_type_unsaniert.merge(
    demand_by_type_saniert, left_index=True, right_index=True,
    suffixes=('_unsaniert', '_saniert'))

for typ in std_buildings:
    demand_by_type[typ] = (demand_by_type[typ + '_unsaniert'] +
                           demand_by_type[typ + '_saniert'])

demand_by_type.rename(columns={
    'schluessel_planungsraum_saniert': 'schluessel_planungsraum',
    'schluessel_planungsraum_unsaniert': 'plr_key',
    'spatial_na_saniert': 'spatial_na'}, inplace=True)
# Positional `1` is the axis argument (drop a column).
demand_by_type.drop('spatial_na_unsaniert', 1, inplace=True)
demand_by_type['total'] = 0
# Normalise the planning-area key to a zero-padded 8-character string.
demand_by_type['plr_key'].fillna(0, inplace=True)
demand_by_type['plr_key'] = demand_by_type['plr_key'].astype(int)
demand_by_type['plr_key'] = demand_by_type['plr_key'].apply('{:0>8}'.format)

for std_bld in std_buildings:
    demand_by_type['total'] += demand_by_type[std_bld]

# Store results to hdf5 file
logging.info("Store results to {0}".format(filepath))
store = pd.HDFStore(filepath)
store['wt'] = demand_by_type
store['sanierungsquote'] = pd.DataFrame(
    sanierungsquote, index=std_buildings, columns=['anteil_saniert'])
store.close()
demand_by_type.to_csv("/home/uwe/waermetool.csv")

# Aggregate total demand per planning area and store it separately.
wt_plr = pd.DataFrame(
    demand_by_type.groupby('schluessel_planungsraum')['total'].sum())
wt_plr.to_hdf('/home/uwe/demand_plr', 'wt')

print(time.time() - start)
| gpl-3.0 |
bthirion/scikit-learn | examples/linear_model/plot_ransac.py | 103 | 1797 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt

from sklearn import linear_model, datasets

n_samples = 1000
n_outliers = 50

# Synthetic 1-D regression problem; coef=True also returns the true slope.
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
                                      n_informative=1, noise=10,
                                      coef=True, random_state=0)

# Add outlier data: overwrite the first n_outliers points with values far
# from the true line.
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)

# Fit line using all data (ordinary least squares, outliers included)
lr = linear_model.LinearRegression()
lr.fit(X, y)

# Robustly fit linear model with RANSAC algorithm
ransac = linear_model.RANSACRegressor()
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)

# Predict data of estimated models over the observed x-range
line_X = np.arange(X.min(), X.max())[:, np.newaxis]
line_y = lr.predict(line_X)
line_y_ransac = ransac.predict(line_X)

# Compare estimated coefficients
print("Estimated coefficients (true, linear regression, RANSAC):")
print(coef, lr.coef_, ransac.estimator_.coef_)

# Plot inliers/outliers and both fitted lines.
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
            label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
            label='Outliers')
plt.plot(line_X, line_y, color='navy', linewidth=lw, label='Linear regressor')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linewidth=lw,
         label='RANSAC regressor')
plt.legend(loc='lower right')
plt.xlabel("Input")
plt.ylabel("Response")
plt.show()
| bsd-3-clause |
josenavas/qiime | scripts/identify_paired_differences.py | 15 | 9191 | #!/usr/bin/env python
# File created on 19 Jun 2013
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2013, The QIIME project"
__credits__ = ["Greg Caporaso", "Jose Carlos Clemente Litran"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from biom import load_table
from qiime.group import (
extract_per_individual_state_metadata_from_sample_metadata,
extract_per_individual_state_metadata_from_sample_metadata_and_biom)
from qiime.parse import parse_mapping_file_to_dict
from qiime.util import (parse_command_line_parameters,
make_option)
from qiime.filter import sample_ids_from_metadata_description
from qiime.stats import paired_difference_analyses
# script_info drives QIIME's command-line parsing and auto-generated usage
# documentation (consumed by qiime.util.parse_command_line_parameters).
script_info = {}
script_info[
    'brief_description'] = "Generate plots and stats to test for change in some data point(s) with a state change on a per-individual basis."
script_info[
    'script_description'] = "This script provides a framework for paired-difference testing (i.e., analysis of data generated under a pre/post experimental design). In a pre/post experimental design, individuals are sampled before and after some 'treatment'. This code plots differences in values in the sample metadata (i.e., the mapping file) or observation counts in a BIOM table, and runs a (Bonferroni-corrected) one sample t-test on each sample metadata category or BIOM observation to determine if the mean of each distribution of pre/post differences differs from zero. If 'None' appears for the t score and p-values, this often means that the distribution of differences contained no variance, so the t-test could not be run. This can happen, for example, if the value passed for --valid_states is so restrictive that only a single sample is retained for analysis."
# Each usage entry is a (description, blank, example-command) triple.
script_info['script_usage'] = []
script_info['script_usage'].append(
    ("Generate plots and stats for one category from the mapping file where the y-axis should be consistent across plots and the lines in the plots should be light blue.",
     "",
     "%prog -m map.txt --metadata_categories 'Streptococcus Abundance' --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o taxa_results --ymin 0 --ymax 60 --line_color '#eeefff'"))
script_info['script_usage'].append(
    ("Generate plots and stats for three categories from the mapping file.",
     "",
     "%prog -m map.txt --metadata_categories 'Streptococcus Abundance,Phylogenetic Diversity,Observed OTUs' --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o taxa_and_alpha_results"))
script_info['script_usage'].append(
    ("Generate plots for all observations in a biom file",
     "",
     "%prog -m map.txt -b otu_table.biom --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o otu_results"))
script_info['script_usage'].append(
    ("Generate plots for all observations in a biom file, but only including samples from individuals whose 'TreatmentResponse' was 'Improved' (as defined in the mapping file).",
     "",
     "%prog -m map.txt -b otu_table.biom --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o otu_results_improved_only --valid_states TreatmentResponse:Improved"))
script_info[
    'output_description'] = "The output of this script is plots of pre/post differences and associated statistics."
# Options the user must always supply.
script_info['required_options'] = [
    make_option(
        '-m',
        '--mapping_fp',
        type="existing_filepath",
        help='the input metadata map filepath'),
    make_option(
        '-o',
        '--output_dir',
        type="new_filepath",
        help='directory where output files should be saved'),
    make_option(
        '-t',
        '--state_category',
        help='the mapping file column name to plot change over (usually has values like "pre-treatment" and "post-treatment")'),
    make_option(
        '-x',
        '--state_values',
        help='ordered list of state values to test change over (defines direction of graphs, generally something like "pre-treatment,post-treatment"). currently limited to two states.'),
    make_option(
        '-c',
        '--individual_id_category',
        help='the mapping file column name containing each individual\'s identifier (usually something like "personal_identifier")'),
]
# Optional options; note --metadata_categories and --biom_table_fp are
# mutually exclusive (enforced in main()).
script_info['optional_options'] = [
    make_option(
        '--ymin',
        default=None,
        type='float',
        help='set the minimum y-value across plots [default: determined on a per-plot basis]'),
    make_option(
        '--ymax',
        default=None,
        type='float',
        help='set the maximum y-value across plots [default: determined on a per-plot basis]'),
    make_option(
        '--metadata_categories',
        help='ordered list of the mapping file column names to test for paired differences (usually something like "StreptococcusAbundance,Phylogenetic Diversity") [default: %default]',
        default=None),
    make_option(
        '--observation_ids',
        help='ordered list of the observation ids to test for paired differences if a biom table is provided (usually something like "otu1,otu2") [default: compute paired differences for all observation ids]',
        default=None),
    make_option(
        '-b',
        '--biom_table_fp',
        help='path to biom table to use for computing paired differences [default: %default]',
        type='existing_filepath',
        default=None),
    make_option(
        '-s',
        '--valid_states',
        help="string describing samples that should be included based on their metadata (e.g. 'TreatmentResponse:Improved') [default: all samples are included in analysis]",
        default=None),
    make_option(
        '--line_color',
        help="color of lines in plots, useful if generating multiple plots in different runs of this script to overlay on top of one another. these can be specified as matplotlib color names, or as html hex strings [default: %default]",
        default="black"),
]
script_info['version'] = __version__
def main():
    """Parse command-line options and run the paired-difference analysis.

    Validates that exactly one of --metadata_categories / --biom_table_fp
    was supplied, optionally filters samples by --valid_states, extracts
    per-individual pre/post values, and hands them to
    paired_difference_analyses for plotting and statistics.
    """
    option_parser, opts, args =\
        parse_command_line_parameters(**script_info)

    mapping_fp = opts.mapping_fp
    state_values = opts.state_values.split(',')
    metadata_categories = opts.metadata_categories
    state_category = opts.state_category
    individual_id_category = opts.individual_id_category
    output_dir = opts.output_dir
    biom_table_fp = opts.biom_table_fp
    observation_ids = opts.observation_ids
    if not observation_ids is None:
        observation_ids = observation_ids.split(',')
    valid_states = opts.valid_states
    ymin = opts.ymin
    ymax = opts.ymax
    line_color = opts.line_color

    # validate the input - currently only supports either biom data
    # or mapping file data. if useful in the future it shouldn't be too
    # hard to allow the user to provide both.
    if metadata_categories and biom_table_fp:
        option_parser.error(
            "Can only pass --metadata_categories or --biom_table_fp, not both.")
    elif not (metadata_categories or biom_table_fp):
        option_parser.error(
            "Must pass either --metadata_categories or --biom_table_fp.")
    else:
        pass

    # parse the mapping file to a dict of sample_id -> metadata dict
    mapping_data = parse_mapping_file_to_dict(open(mapping_fp, 'U'))[0]

    # currently only support for pre/post (ie, two-state) tests
    if len(state_values) != 2:
        option_parser.error(
            "Exactly two state_values must be passed separated by a comma.")

    # filter mapping_data, if requested: drop every sample whose metadata
    # does not match the --valid_states description
    if valid_states:
        sample_ids_to_keep = sample_ids_from_metadata_description(
            open(mapping_fp, 'U'), valid_states)
        for sid in mapping_data.keys():
            if sid not in sample_ids_to_keep:
                del mapping_data[sid]

    if biom_table_fp:
        # BIOM mode: analyze observation counts (all observations unless
        # --observation_ids restricted the set)
        biom_table = load_table(biom_table_fp)
        analysis_categories = observation_ids or biom_table.ids(axis='observation')
        personal_ids_to_state_values = \
            extract_per_individual_state_metadata_from_sample_metadata_and_biom(
                mapping_data,
                biom_table,
                state_category,
                state_values,
                individual_id_category,
                observation_ids=analysis_categories)
    else:
        # Mapping-file mode: analyze the listed metadata columns
        analysis_categories = metadata_categories.split(',')
        personal_ids_to_state_values = \
            extract_per_individual_state_metadata_from_sample_metadata(
                mapping_data,
                state_category,
                state_values,
                individual_id_category,
                analysis_categories)

    paired_difference_analyses(personal_ids_to_state_values,
                               analysis_categories,
                               state_values,
                               output_dir,
                               line_color=line_color,
                               ymin=ymin,
                               ymax=ymax)


if __name__ == "__main__":
    main()
| gpl-2.0 |
AISpace2/AISpace2 | aipython/cspSLSPlot.py | 1 | 12720 | # cspSLS.py - Stochastic Local Search for Solving CSPs
# AIFCA Python3 code Version 0.7.1 Documentation at http://aipython.org
# Artificial Intelligence: Foundations of Computational Agents
# http://artint.info
# Copyright David L Poole and Alan K Mackworth 2017.
# This work is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en
import heapq
import random
import matplotlib.pyplot as plt
from aipython.cspProblem import CSP, Constraint
from aipython.searchProblem import Arc, Search_problem
from aipython.utilities import Displayable
class SLSearcher(Displayable):
    """Stochastic local search solver working directly on a CSP.

    The search state is a total assignment, kept as a variable:value
    dictionary in ``current_assignment``; ``conflicts`` is maintained
    incrementally as the set of currently violated constraints.
    """

    def __init__(self, csp):
        """Set up the searcher for ``csp`` (a cspProblem.CSP)."""
        self.csp = csp
        # Only variables with more than one domain value can ever be
        # reassigned, so only they are candidates for random selection.
        self.variables_to_select = {var for var in self.csp.variables
                                    if len(self.csp.domains[var]) > 1}
        # Create assignment and conflicts set
        self.current_assignment = None  # this will trigger a random restart
        self.number_of_steps = 1  # number of steps after the initialization
        super().__init__()

    def restart(self):
        """creates a new total assignment and the conflict set
        """
        # Assign every variable a random value from its domain.
        self.current_assignment = {var: random_sample(dom) for (var, dom) in self.csp.domains.items()}
        self.display(2, "Initial assignment", self.current_assignment)
        # Seed the conflict set with every constraint the fresh random
        # assignment violates; it is updated incrementally from here on.
        self.conflicts = set()
        for con in self.csp.constraints:
            if not con.holds(self.current_assignment):
                self.conflicts.add(con)
        self.display(2, "Conflicts:", self.conflicts)
        # Invalidate the variable priority queue; rebuilt on demand.
        self.variable_pq = None

    def search(self, max_steps=500, prob_best=1.0, prob_anycon=1.0):
        """
        returns the number of steps or None if there is no solution found.
        If there is a solution, it can be found in self.current_assignment.

        :param max_steps: maximum number of local-search steps
        :param prob_best: probability of picking the variable in the most
            conflicts (requires maintaining the variable priority queue)
        :param prob_anycon: probability of picking a variable from some
            conflicting constraint, given the best variable was not picked
        """
        if self.current_assignment is None:
            self.restart()
            self.number_of_steps += 1
            if not self.conflicts:
                # the random restart happened to satisfy all constraints
                return self.number_of_steps
        if prob_best > 0:  # we need to maintain a variable priority queue
            return self.search_with_var_pq(max_steps, prob_best, prob_anycon)
        else:
            return self.search_with_any_conflict(max_steps, prob_anycon)

    def search_with_any_conflict(self, max_steps, prob_anycon=1.0):
        """Searches with the any_conflict heuristic.
        This relies on just maintaining the set of conflicts;
        it does not maintain a priority queue
        """
        self.variable_pq = None  # we are not maintaining the priority queue.
        # This ensures it is regenerated if needed.
        for i in range(max_steps):
            self.number_of_steps += 1
            if random.random() < prob_anycon:
                con = random_sample(self.conflicts)  # pick random conflict
                var = random_sample(con.scope)  # pick variable in conflict
            else:
                var = random_sample(self.variables_to_select)
            if len(self.csp.domains[var]) > 1:
                # Reassign var to a different random value and update the
                # conflict set for every constraint var participates in.
                val = random_sample(self.csp.domains[var] -
                                    {self.current_assignment[var]})
                self.display(2, "Assigning", var, "=", val)
                self.current_assignment[var] = val
                for varcon in self.csp.var_to_const[var]:
                    if varcon.holds(self.current_assignment):
                        if varcon in self.conflicts:
                            self.conflicts.remove(varcon)
                            self.display(3, "Became consistent", varcon)
                        else:
                            self.display(3, "Still consistent", varcon)
                    else:
                        if varcon not in self.conflicts:
                            self.conflicts.add(varcon)
                            self.display(3, "Became inconsistent", varcon)
                        else:
                            self.display(3, "Still inconsistent", varcon)
                self.display(2, "Conflicts:", self.conflicts)
            if not self.conflicts:
                self.display(1, "Solution found", self.current_assignment,
                             "in", self.number_of_steps, "steps")
                return self.number_of_steps
        self.display(1, "No solution in", self.number_of_steps, "steps",
                     len(self.conflicts), "conflicts remain")
        return None

    def search_with_var_pq(self, max_steps, prob_best=1.0, prob_anycon=1.0):
        """search with a priority queue of variables.
        This is used to select a variable with the most conflicts.
        """
        if not self.variable_pq:
            self.create_pq()
        pick_best_or_con = prob_best + prob_anycon
        for i in range(max_steps):
            self.number_of_steps += 1
            randnum = random.random()
            # Pick a variable
            if randnum < prob_best:  # pick best variable
                var, oldval = self.variable_pq.top()
            elif randnum < pick_best_or_con:  # pick a variable in a conflict
                con = random_sample(self.conflicts)
                var = random_sample(con.scope)
            else:  # pick any variable that can be selected
                var = random_sample(self.variables_to_select)
            if len(self.csp.domains[var]) > 1:  # var has other values
                # Pick a value
                val = random_sample(self.csp.domains[var] - {self.current_assignment[var]})
                self.display(2, "Assigning", var, "=", val)
                # var_differential accumulates, per variable, the net change
                # in the number of conflicts it appears in; the priority
                # queue is updated in one batch below.
                var_differential = {}
                self.current_assignment[var] = val
                for varcon in self.csp.var_to_const[var]:
                    self.display(3, "Checking", varcon)
                    if varcon.holds(self.current_assignment):
                        if varcon in self.conflicts:  # was incons, now consis
                            self.display(3, "Became consistent", varcon)
                            self.conflicts.remove(varcon)
                            for v in varcon.scope:  # v is in one fewer conflicts
                                var_differential[v] = var_differential.get(v, 0) - 1
                        else:
                            self.display(3, "Still consistent", varcon)
                    else:
                        if varcon not in self.conflicts:  # was consis, not now
                            self.display(3, "Became inconsistent", varcon)
                            self.conflicts.add(varcon)
                            for v in varcon.scope:  # v is in one more conflicts
                                var_differential[v] = var_differential.get(v, 0) + 1
                        else:
                            self.display(3, "Still inconsistent", varcon)
                self.variable_pq.update_each_priority(var_differential)
                self.display(2, "Conflicts:", self.conflicts)
            if not self.conflicts:  # no conflicts, so solution found
                self.display(1, "Solution found", self.current_assignment, "in", self.number_of_steps, "steps")
                return self.number_of_steps
        self.display(1, "No solution in", self.number_of_steps, "steps", len(self.conflicts), "conflicts remain")
        return None

    def create_pq(self):
        """Create the variable to number-of-conflicts priority queue.
        This is needed to select the variable in the most conflicts.

        The value of a variable in the priority queue is the negative of the
        number of conflicts the variable appears in (so the min-priority
        element is the variable in the most conflicts).
        """
        self.variable_pq = Updatable_priority_queue()
        var_to_number_conflicts = {}
        for con in self.conflicts:
            for var in con.scope:
                var_to_number_conflicts[var] = var_to_number_conflicts.get(var, 0) + 1
        for var, num in var_to_number_conflicts.items():
            if num > 0:
                self.variable_pq.add(var, -num)
def random_sample(st):
    """Return a uniformly random element of the set *st*.

    ``random.sample(st, 1)`` on a set was deprecated in Python 3.9 and
    raises TypeError from Python 3.11, so the set is materialized into a
    tuple and ``random.choice`` is used instead.
    """
    return random.choice(tuple(st))
class Updatable_priority_queue(object):
    """A priority queue where the values can be updated.
    Elements with the same value are ordered randomly.

    Removal is lazy: removed entries stay in the heap, marked with the
    REMOVED sentinel, and are skipped over when popped.
    This code is based on the ideas described in
    http://docs.python.org/3.3/library/heapq.html
    It could probably be done more efficiently by
    shuffling the modified element in the heap.
    """
    def __init__(self):
        self.pq = [] # priority queue of [val,rand,elt] triples
        self.elt_map = {} # map from elt to [val,rand,elt] triple in pq
        self.REMOVED = "*removed*" # a string that won't be a legal element
        self.max_size = 0  # statistic: largest heap size observed
    def add(self, elt, val):
        """adds elt to the priority queue with priority=val.

        val must be <= 0 and elt must not already be queued.  The random
        middle component breaks ties between equal priorities.
        """
        assert val <= 0, val
        assert elt not in self.elt_map, elt
        new_triple = [val, random.random(), elt]
        heapq.heappush(self.pq, new_triple)
        self.elt_map[elt] = new_triple
    def remove(self, elt):
        """remove the element from the priority queue"""
        # Lazy removal: overwrite the element slot in the heap triple so
        # pop/top know to skip it; the triple itself stays in the heap.
        if elt in self.elt_map:
            self.elt_map[elt][2] = self.REMOVED
            del self.elt_map[elt]
    def update_each_priority(self, update_dict):
        """update values in the priority queue by subtracting the values in
        update_dict from the priority of those elements in priority queue.
        """
        for elt, incr in update_dict.items():
            if incr != 0:
                # [0] default means an element not currently queued is
                # treated as having priority 0 (no conflicts).
                newval = self.elt_map.get(elt, [0])[0] - incr
                assert newval <= 0, str(elt) + ":" + str(newval + incr) + "-" + str(incr)
                self.remove(elt)
                # An element whose priority reaches 0 is dropped entirely.
                if newval != 0:
                    self.add(elt, newval)
    def pop(self):
        """Removes and returns the (elt,value) pair with minimal value.
        If the priority queue is empty, IndexError is raised.
        """
        self.max_size = max(self.max_size, len(self.pq)) # keep statistics
        triple = heapq.heappop(self.pq)
        # Discard lazily-removed entries until a live one is found.
        while triple[2] == self.REMOVED:
            triple = heapq.heappop(self.pq)
        del self.elt_map[triple[2]]
        return triple[2], triple[0] # elt, value
    def top(self):
        """Returns the (elt,value) pair with minimal value, without removing it.
        If the priority queue is empty, IndexError is raised.
        """
        self.max_size = max(self.max_size, len(self.pq)) # keep statistics
        triple = self.pq[0]
        # Permanently discard removed entries sitting at the front so the
        # live minimum is exposed (this does mutate the heap).
        while triple[2] == self.REMOVED:
            heapq.heappop(self.pq)
            triple = self.pq[0]
        return triple[2], triple[0] # elt, value
    def empty(self):
        """returns True iff the priority queue is empty"""
        return all(triple[2] == self.REMOVED for triple in self.pq)
class Runtime_distribution(object):
    """Plots cumulative run-length distributions of SLSearcher runs."""
    def __init__(self, csp, xscale='log'):
        """Sets up plotting for csp
        xscale is either 'linear' or 'log'
        """
        self.csp = csp
        plt.ion()
        plt.xlabel("Number of Steps")
        plt.ylabel("Cumulative Number of Runs")
        plt.xscale(xscale) # Makes a 'log' or 'linear' scale
    def plot_run(self, num_runs=100, max_steps=1000, prob_best=1.0, prob_anycon=1.0):
        """Run the searcher num_runs times and plot sorted step counts.

        Only successful runs (search returned a step count) are recorded;
        failed runs are silently dropped from the plot.
        """
        stats = []
        # Temporarily silence all searchers by zeroing the *class*
        # attribute; the old value is kept in temp_mdl for restoration.
        SLSearcher.max_display_level, temp_mdl = 0, SLSearcher.max_display_level # no display
        for i in range(num_runs):
            searcher = SLSearcher(self.csp)
            num_steps = searcher.search(max_steps, prob_best, prob_anycon)
            if num_steps:
                stats.append(num_steps)
        # NOTE(review): this sets an *instance* attribute on the last
        # searcher only; the class attribute is restored after the plot.
        searcher.max_display_level = temp_mdl # restore display
        stats.sort()
        if prob_best >= 1.0:
            label = "P(best)=1.0"
        else:
            p_ac = min(prob_anycon, 1 - prob_best)
            label = "P(best)=%.2f, P(ac)=%.2f" % (prob_best, p_ac)
        plt.plot(stats, range(len(stats)), label=label)
        plt.legend(loc="upper left")
        # plt.draw()
        SLSearcher.max_display_level = temp_mdl # restore display
def sls_solver(csp, prob_best=0.7):
    """Solve csp by stochastic local search; return the final assignment."""
    searcher = SLSearcher(csp)
    searcher.search(1000, prob_best)
    return searcher.current_assignment
def any_conflict_solver(csp):
    """stochastic local searcher (any-conflict)"""
    # prob_best=0 means the best-variable heuristic is never used; the
    # variable is always chosen from a random conflict.
    return sls_solver(csp, 0)
if __name__ == "__main__":
    # Exercise both solver variants with the shared CSP test harness.
    test(sls_solver)
    test(any_conflict_solver)
# Test
#p = Runtime_distribution(extended_csp)
# p.plot_run(100,1000,0)
# p.plot_run(100,1000,1.0)
# p.plot_run(100,1000,0.7)
| gpl-3.0 |
manifoldai/merf | setup.py | 1 | 1024 | from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md for use as the long description."""
    # Explicit UTF-8 avoids UnicodeDecodeError on platforms whose locale
    # default is not UTF-8 (e.g. Windows cp1252).
    with open("README.md", encoding="utf-8") as f:
        return f.read()
def read_version(filename='VERSION'):
    """Return the version string from *filename*.

    BUG FIX: the previous implementation returned ``f.readline()``
    verbatim, so the version passed to setuptools carried a trailing
    newline; strip surrounding whitespace instead.
    """
    with open(filename, 'r', encoding="utf-8") as f:
        return f.readline().strip()
# Package metadata and build configuration for setuptools.
setup(
    name="merf",
    version=read_version(),
    description="Mixed Effects Random Forest",
    long_description=readme(),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    keywords="random forest machine learning mixed effects",
    url="https://github.com/manifoldai/merf",
    author="Manifold, Inc.",
    author_email="sdey@manifold.ai",
    license="MIT",
    python_requires='>=3.6',
    packages=find_packages(),
    install_requires=["pandas>=1.0", "numpy", "scikit-learn", "matplotlib>=3.0"],
    include_package_data=True,
    zip_safe=False,
)
| mit |
amsjavan/nazarkav | nazarkav/tests/test_nazarkav.py | 1 | 4116 | import os.path as op
import numpy as np
import pandas as pd
import numpy.testing as npt
import nazarkav as sb
data_path = op.join(sb.__path__[0], 'data')
def test_transform_data():
    """
    Testing the transformation of the data from raw data to functions
    used for fitting a function.
    """
    # We start with actual data. We test here just that reading the data in
    # different ways ultimately generates the same arrays.
    # NOTE: matplotlib.mlab.csv2rec was removed from matplotlib (3.1+);
    # load with pandas instead -- transform_data accepts a DataFrame, as
    # exercised below.  The unused load of 'para.csv' was also dropped.
    ortho = pd.read_csv(op.join(data_path, 'ortho.csv'))
    x1, y1, n1 = sb.transform_data(ortho)
    x2, y2, n2 = sb.transform_data(op.join(data_path, 'ortho.csv'))
    npt.assert_equal(x1, x2)
    npt.assert_equal(y1, y2)
    # We can also be a bit more critical, by testing with data that we
    # generate, and should produce a particular answer:
    my_data = pd.DataFrame(
        np.array([[0.1, 2], [0.1, 1], [0.2, 2], [0.2, 2], [0.3, 1],
                  [0.3, 1]]),
        columns=['contrast1', 'answer'])
    my_x, my_y, my_n = sb.transform_data(my_data)
    npt.assert_equal(my_x, np.array([0.1, 0.2, 0.3]))
    npt.assert_equal(my_y, np.array([0.5, 0, 1.0]))
    npt.assert_equal(my_n, np.array([2, 2, 2]))
def test_cum_gauss():
    mu, sigma = 0, 1
    x = np.linspace(-1, 1, 12)
    y = sb.cumgauss(x, mu, sigma)
    # Output shape must match the input shape.
    npt.assert_equal(y.shape, x.shape)
    # The cumulative Gaussian is symmetric about mu, so values at points
    # mirrored around mu must sum to one.
    npt.assert_equal(y[0], 1 - y[-1])
    # Roughly 68% of the distribution's mass lies within mu +/- sigma,
    # so the CDF evaluated at mu - sigma is close to (1 - 0.68) / 2.
    # Note the deliberately low precision.
    npt.assert_almost_equal(y[0], (1 - 0.68) / 2, decimal=2)
def test_opt_err_func():
    # A degenerate model that echoes its input and ignores its parameters:
    def echo_model(x, first_ignored_param, second_ignored_param):
        return x

    params = [1, 10]
    x_vals = np.linspace(-1, 1, 12)
    y_vals = x_vals
    residuals = sb.opt_err_func(params, x_vals, y_vals, echo_model)
    # Since x and y are identical, every residual must be zero:
    npt.assert_equal(residuals, np.zeros(x_vals.shape[0]))

    # A slightly more realistic linear model, checked against an
    # explicitly computed response:
    def linear_model(x, a, b):
        return x * a + b

    params = [1, 10]
    x_vals = np.linspace(-1, 1, 12)
    y_vals = x_vals * params[0] + params[1]
    residuals = sb.opt_err_func(params, x_vals, y_vals, linear_model)
    # The model reproduces y exactly, so the residuals are all zero:
    npt.assert_equal(residuals, np.zeros(x_vals.shape[0]))
def test_Model():
    """Fitting data generated by cumgauss with known parameters must
    reproduce that data exactly through Fit.predict."""
    M = sb.Model()
    x = np.linspace(0.1, 0.9, 22)
    target_mu = 0.5
    target_sigma = 1
    target_y = sb.cumgauss(x, target_mu, target_sigma)
    # Starting the optimizer at the true parameters should converge there.
    F = M.fit(x, target_y, initial=[target_mu, target_sigma])
    npt.assert_equal(F.predict(x), target_y)
def test_params_regression():
    """
    Test for regressions in model parameter values from provided data
    """
    model = sb.Model()
    ortho_x, ortho_y, ortho_n = sb.transform_data(op.join(data_path,
                                                          'ortho.csv'))
    para_x, para_y, para_n = sb.transform_data(op.join(data_path,
                                                       'para.csv'))
    ortho_fit = model.fit(ortho_x, ortho_y)
    para_fit = model.fit(para_x, para_y)
    # Golden values recorded from a known-good run; any drift in the
    # fitting code or data files will trip these.
    npt.assert_almost_equal(ortho_fit.params[0], 0.46438638)
    npt.assert_almost_equal(ortho_fit.params[1], 0.13845926)
    npt.assert_almost_equal(para_fit.params[0], 0.57456788)
    npt.assert_almost_equal(para_fit.params[1], 0.13684096)
| mit |
AtsushiSakai/PythonRobotics | ArmNavigation/rrt_star_seven_joint_arm_control/rrt_star_seven_joint_arm_control.py | 1 | 14158 | """
RRT* path planner for a seven joint arm
Author: Mahyar Abdeetedal (mahyaret)
"""
import math
import os
import sys
import random
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../n_joint_arm_3d/")
try:
from NLinkArm3d import NLinkArm
except ImportError:
raise
show_animation = True
verbose = False
class RobotArm(NLinkArm):
    """NLinkArm extended with extraction of every joint's 3-D position."""

    def get_points(self, joint_angle_list):
        """Set the joint angles, then return the x, y, z coordinate lists
        of the base and each successive joint via forward kinematics."""
        self.set_joint_angles(joint_angle_list)
        transform = np.identity(4)
        xs = [transform[0, 3]]
        ys = [transform[1, 3]]
        zs = [transform[2, 3]]
        for link in self.link_list:
            transform = transform @ link.transformation_matrix()
            xs.append(transform[0, 3])
            ys.append(transform[1, 3])
            zs.append(transform[2, 3])
        return xs, ys, zs
class RRTStar:
    """
    Class for RRT Star planning in an n-dimensional joint space.
    """
    class Node:
        """A configuration-space node holding a joint vector, a parent
        link, and the accumulated path cost from the start."""
        def __init__(self, x):
            self.x = x
            self.parent = None
            self.cost = 0.0
    def __init__(self, start, goal, robot, obstacle_list, rand_area,
                 expand_dis=.30,
                 path_resolution=.1,
                 goal_sample_rate=20,
                 max_iter=300,
                 connect_circle_dist=50.0
                 ):
        """
        Setting Parameter
        start:Start Position [q1,...,qn]
        goal:Goal Position [q1,...,qn]
        obstacleList:obstacle Positions [[x,y,z,size],...]
        randArea:Random Sampling Area [min,max]
        """
        self.start = self.Node(start)
        self.end = self.Node(goal)
        self.dimension = len(start)
        self.min_rand = rand_area[0]
        self.max_rand = rand_area[1]
        self.expand_dis = expand_dis
        self.path_resolution = path_resolution
        self.goal_sample_rate = goal_sample_rate
        self.max_iter = max_iter
        self.robot = robot
        self.obstacle_list = obstacle_list
        self.connect_circle_dist = connect_circle_dist
        self.goal_node = self.Node(goal)
        self.ax = plt.axes(projection='3d')
        self.node_list = []
    def planning(self, animation=False, search_until_max_iter=False):
        """
        rrt star path planning
        animation: flag for animation on or off
        search_until_max_iter: search until max iteration for path
        improving or not

        Returns the start-to-goal path (list of joint vectors) or None.
        """
        self.node_list = [self.start]
        for i in range(self.max_iter):
            if verbose:
                print("Iter:", i, ", number of nodes:", len(self.node_list))
            rnd = self.get_random_node()
            nearest_ind = self.get_nearest_node_index(self.node_list, rnd)
            new_node = self.steer(self.node_list[nearest_ind],
                                  rnd,
                                  self.expand_dis)
            if self.check_collision(new_node, self.robot, self.obstacle_list):
                near_inds = self.find_near_nodes(new_node)
                new_node = self.choose_parent(new_node, near_inds)
                if new_node:
                    self.node_list.append(new_node)
                    self.rewire(new_node, near_inds)
            # Animation only makes sense for 3-D configuration spaces.
            if animation and i % 5 == 0 and self.dimension <= 3:
                self.draw_graph(rnd)
            if (not search_until_max_iter) and new_node:
                last_index = self.search_best_goal_node()
                if last_index is not None:
                    return self.generate_final_course(last_index)
        if verbose:
            print("reached max iteration")
        last_index = self.search_best_goal_node()
        if last_index is not None:
            return self.generate_final_course(last_index)
        return None
    def choose_parent(self, new_node, near_inds):
        """Re-steer new_node from the cheapest collision-free nearby
        node; returns the re-parented node or None."""
        if not near_inds:
            return None
        # search nearest cost in near_inds
        costs = []
        for i in near_inds:
            near_node = self.node_list[i]
            t_node = self.steer(near_node, new_node)
            if t_node and self.check_collision(t_node,
                                               self.robot,
                                               self.obstacle_list):
                costs.append(self.calc_new_cost(near_node, new_node))
            else:
                costs.append(float("inf"))  # the cost of collision node
        min_cost = min(costs)
        if min_cost == float("inf"):
            print("There is no good path.(min_cost is inf)")
            return None
        min_ind = near_inds[costs.index(min_cost)]
        new_node = self.steer(self.node_list[min_ind], new_node)
        new_node.parent = self.node_list[min_ind]
        new_node.cost = min_cost
        return new_node
    def search_best_goal_node(self):
        """Return the index of the cheapest node that can reach the goal
        without collision, or None."""
        dist_to_goal_list = [self.calc_dist_to_goal(n.x)
                             for n in self.node_list]
        goal_inds = [dist_to_goal_list.index(i)
                     for i in dist_to_goal_list if i <= self.expand_dis]
        safe_goal_inds = []
        for goal_ind in goal_inds:
            t_node = self.steer(self.node_list[goal_ind], self.goal_node)
            if self.check_collision(t_node, self.robot, self.obstacle_list):
                safe_goal_inds.append(goal_ind)
        if not safe_goal_inds:
            return None
        min_cost = min([self.node_list[i].cost for i in safe_goal_inds])
        for i in safe_goal_inds:
            if self.node_list[i].cost == min_cost:
                return i
        return None
    def find_near_nodes(self, new_node):
        """Return indices of nodes within the shrinking RRT* rewiring
        radius of new_node."""
        nnode = len(self.node_list) + 1
        r = self.connect_circle_dist * math.sqrt((math.log(nnode) / nnode))
        # if expand_dis exists, search vertices in
        # a range no more than expand_dis
        if hasattr(self, 'expand_dis'):
            r = min(r, self.expand_dis)
        dist_list = [np.sum((np.array(node.x) - np.array(new_node.x)) ** 2)
                     for node in self.node_list]
        near_inds = [dist_list.index(i) for i in dist_list if i <= r ** 2]
        return near_inds
    def rewire(self, new_node, near_inds):
        """Re-parent nearby nodes through new_node when that is both
        collision-free and cheaper, then push costs to descendants."""
        for i in near_inds:
            near_node = self.node_list[i]
            edge_node = self.steer(new_node, near_node)
            if not edge_node:
                continue
            edge_node.cost = self.calc_new_cost(new_node, near_node)
            no_collision = self.check_collision(edge_node,
                                                self.robot,
                                                self.obstacle_list)
            improved_cost = near_node.cost > edge_node.cost
            if no_collision and improved_cost:
                self.node_list[i] = edge_node
                self.propagate_cost_to_leaves(new_node)
    def calc_new_cost(self, from_node, to_node):
        """Cost of to_node if reached via from_node."""
        d, _, _ = self.calc_distance_and_angle(from_node, to_node)
        return from_node.cost + d
    def propagate_cost_to_leaves(self, parent_node):
        """Recursively refresh costs of all descendants of parent_node."""
        for node in self.node_list:
            if node.parent == parent_node:
                node.cost = self.calc_new_cost(parent_node, node)
                self.propagate_cost_to_leaves(node)
    def generate_final_course(self, goal_ind):
        """Return the list of configurations from start to goal.

        Walks parent links from the node at goal_ind back to the root,
        then reverses the collected path so it runs start -> goal.
        """
        path = [self.end.x]
        node = self.node_list[goal_ind]
        while node.parent is not None:
            path.append(node.x)
            node = node.parent
        path.append(node.x)
        # BUG FIX: the original called ``reversed(path)`` and discarded
        # the iterator, so the path was returned goal -> start (and the
        # caller drew the wrong pose as the final configuration).
        # Reverse in place instead.
        path.reverse()
        return path
    def calc_dist_to_goal(self, x):
        """Euclidean distance from configuration x to the goal."""
        distance = np.linalg.norm(np.array(x) - np.array(self.end.x))
        return distance
    def get_random_node(self):
        """Sample a random configuration; with goal_sample_rate percent
        probability return the goal itself."""
        if random.randint(0, 100) > self.goal_sample_rate:
            rnd = self.Node(np.random.uniform(self.min_rand,
                                              self.max_rand,
                                              self.dimension))
        else:  # goal point sampling
            rnd = self.Node(self.end.x)
        return rnd
    def steer(self, from_node, to_node, extend_length=float("inf")):
        """Grow from from_node toward to_node in path_resolution steps,
        up to extend_length; record intermediate points in path_x."""
        new_node = self.Node(list(from_node.x))
        d, phi, theta = self.calc_distance_and_angle(new_node, to_node)
        new_node.path_x = [list(new_node.x)]
        if extend_length > d:
            extend_length = d
        n_expand = math.floor(extend_length / self.path_resolution)
        start, end = np.array(from_node.x), np.array(to_node.x)
        v = end - start
        u = v / (np.sqrt(np.sum(v ** 2)))  # unit direction vector
        for _ in range(n_expand):
            new_node.x += u * self.path_resolution
            new_node.path_x.append(list(new_node.x))
        d, _, _ = self.calc_distance_and_angle(new_node, to_node)
        if d <= self.path_resolution:
            new_node.path_x.append(list(to_node.x))
        new_node.parent = from_node
        return new_node
    def draw_graph(self, rnd=None):
        """Redraw obstacles, tree edges, and endpoints on the 3-D axes."""
        plt.cla()
        self.ax.axis([-1, 1, -1, 1])
        self.ax.set_zlim(0, 1)
        self.ax.grid(True)
        for (ox, oy, oz, size) in self.obstacle_list:
            self.plot_sphere(self.ax, ox, oy, oz, size=size)
        # Tree edges can only be drawn for 3-D configuration spaces.
        if self.dimension > 3:
            return self.ax
        if rnd is not None:
            self.ax.plot([rnd.x[0]], [rnd.x[1]], [rnd.x[2]], "^k")
        for node in self.node_list:
            if node.parent:
                path = np.array(node.path_x)
                plt.plot(path[:, 0], path[:, 1], path[:, 2], "-g")
        self.ax.plot([self.start.x[0]], [self.start.x[1]],
                     [self.start.x[2]], "xr")
        self.ax.plot([self.end.x[0]], [self.end.x[1]], [self.end.x[2]], "xr")
        plt.pause(0.01)
        return self.ax
    @staticmethod
    def get_nearest_node_index(node_list, rnd_node):
        """Index of the node in node_list closest to rnd_node."""
        dlist = [np.sum((np.array(node.x) - np.array(rnd_node.x))**2)
                 for node in node_list]
        minind = dlist.index(min(dlist))
        return minind
    @staticmethod
    def plot_sphere(ax, x, y, z, size=1, color="k"):
        """Draw a wireframe sphere of radius size centred at (x, y, z)."""
        u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
        xl = x+size*np.cos(u)*np.sin(v)
        yl = y+size*np.sin(u)*np.sin(v)
        zl = z+size*np.cos(v)
        ax.plot_wireframe(xl, yl, zl, color=color)
    @staticmethod
    def calc_distance_and_angle(from_node, to_node):
        """Return (full n-D Euclidean distance, azimuth phi, polar theta);
        the angles use only the first three components."""
        dx = to_node.x[0] - from_node.x[0]
        dy = to_node.x[1] - from_node.x[1]
        dz = to_node.x[2] - from_node.x[2]
        d = np.sqrt(np.sum((np.array(to_node.x) - np.array(from_node.x))**2))
        phi = math.atan2(dy, dx)
        theta = math.atan2(math.hypot(dx, dy), dz)
        return d, phi, theta
    @staticmethod
    def calc_distance_and_angle2(from_node, to_node):
        """3-D-only variant of calc_distance_and_angle.

        NOTE(review): unused duplicate (distance ignores dimensions
        beyond the third); kept only for backward compatibility.
        """
        dx = to_node.x[0] - from_node.x[0]
        dy = to_node.x[1] - from_node.x[1]
        dz = to_node.x[2] - from_node.x[2]
        d = math.sqrt(dx**2 + dy**2 + dz**2)
        phi = math.atan2(dy, dx)
        theta = math.atan2(math.hypot(dx, dy), dz)
        return d, phi, theta
    @staticmethod
    def check_collision(node, robot, obstacleList):
        """True when every intermediate configuration in node.path_x keeps
        all robot joints outside every spherical obstacle."""
        if node is None:
            return False
        for (ox, oy, oz, size) in obstacleList:
            for x in node.path_x:
                x_list, y_list, z_list = robot.get_points(x)
                dx_list = [ox - x_point for x_point in x_list]
                dy_list = [oy - y_point for y_point in y_list]
                dz_list = [oz - z_point for z_point in z_list]
                d_list = [dx * dx + dy * dy + dz * dz
                          for (dx, dy, dz) in zip(dx_list,
                                                  dy_list,
                                                  dz_list)]
                if min(d_list) <= size ** 2:
                    return False  # collision
        return True  # safe
def main():
    """Plan a collision-free path for a 7-DOF arm with RRT* and plot it."""
    print("Start " + __file__)
    # init NLinkArm with Denavit-Hartenberg parameters of panda
    # https://frankaemika.github.io/docs/control_parameters.html
    # [theta, alpha, a, d]
    seven_joint_arm = RobotArm([[0., math.pi/2., 0., .333],
                                [0., -math.pi/2., 0., 0.],
                                [0., math.pi/2., 0.0825, 0.3160],
                                [0., -math.pi/2., -0.0825, 0.],
                                [0., math.pi/2., 0., 0.3840],
                                [0., math.pi/2., 0.088, 0.],
                                [0., 0., 0., 0.107]])
    # ====Search Path with RRT====
    obstacle_list = [
        (-.3, -.3, .7, .1),
        (.0, -.3, .7, .1),
        (.2, -.1, .3, .15),
    ]  # [x, y, z, size(radius)]
    start = [0 for _ in range(len(seven_joint_arm.link_list))]
    end = [1.5 for _ in range(len(seven_joint_arm.link_list))]
    # Set Initial parameters
    rrt_star = RRTStar(start=start,
                       goal=end,
                       rand_area=[0, 2],
                       max_iter=200,
                       robot=seven_joint_arm,
                       obstacle_list=obstacle_list)
    path = rrt_star.planning(animation=show_animation,
                             search_until_max_iter=False)
    if path is None:
        print("Cannot find path")
    else:
        print("found path!!")
        # Draw final path
        if show_animation:
            ax = rrt_star.draw_graph()
            # Plot final configuration
            x_points, y_points, z_points = seven_joint_arm.get_points(path[-1])
            ax.plot([x for x in x_points],
                    [y for y in y_points],
                    [z for z in z_points],
                    "o-", color="red", ms=5, mew=0.5)
            # Plot every intermediate configuration along the path in grey.
            for i, q in enumerate(path):
                x_points, y_points, z_points = seven_joint_arm.get_points(q)
                ax.plot([x for x in x_points],
                        [y for y in y_points],
                        [z for z in z_points],
                        "o-", color="grey", ms=4, mew=0.5)
                plt.pause(0.01)
            plt.show()
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
| mit |
andrewnc/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes for the learning-rate schedules / penalty types understood
# by the compiled sgd_fast routines.
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
# Default value of ``epsilon`` parameter.
DEFAULT_EPSILON = 0.1
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression."""
    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False, average=False):
        # Hyper-parameters are stored verbatim; _validate_params below
        # raises on inconsistent combinations.
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        self.average = average
        self._validate_params()
        self.coef_ = None
        if self.average > 0:
            self.standard_coef_ = None
            self.average_coef_ = None
        # iteration count for learning rate schedule
        # must not be int (e.g. if ``learning_rate=='optimal'``)
        self.t_ = None
    def set_params(self, *args, **kwargs):
        """Set estimator parameters, then re-validate the combination."""
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params()
        return self
    @abstractmethod
    def fit(self, X, y):
        """Fit model."""
    def _validate_params(self):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if self.n_iter <= 0:
            raise ValueError("n_iter must be > zero")
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.learning_rate in ("constant", "invscaling"):
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")
        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)
        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)
    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            # Epsilon-parameterized losses use the estimator's epsilon,
            # overriding the DEFAULT_EPSILON stored in loss_functions.
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)
    def _get_learning_rate_type(self, learning_rate):
        """Map the learning-rate name to its integer code."""
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)
    def _get_penalty_type(self, penalty):
        """Map the penalty name (case-insensitive) to its integer code."""
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)
    def _validate_sample_weight(self, sample_weight, n_samples):
        """Set the sample weight array."""
        if sample_weight is None:
            # uniform sample weights
            sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
        else:
            # user-provided array
            sample_weight = np.asarray(sample_weight, dtype=np.float64,
                                       order="C")
        if sample_weight.shape[0] != n_samples:
            raise ValueError("Shapes of X and sample_weight do not match.")
        return sample_weight
    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class (one row per class, OvA)
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")
            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem (single weight vector)
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")
            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
        # initialize average parameters (averaged-SGD keeps both the
        # running ("standard") weights and their running average)
        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")
def _prepare_fit_binary(est, y, i):
    """Initialization for fit_binary.
    Returns y, coef, intercept.

    Builds the +1/-1 target for "class i vs rest" and selects the weight
    buffers to update: the single binary row for two-class problems,
    otherwise the i-th OvA row; averaged-SGD additionally exposes the
    running-average buffers.
    """
    y_i = np.ones(y.shape, dtype=np.float64, order="C")
    y_i[y != est.classes_[i]] = -1.0
    average_intercept = 0
    average_coef = None
    if len(est.classes_) == 2:
        if not est.average:
            coef = est.coef_.ravel()
            intercept = est.intercept_[0]
        else:
            coef = est.standard_coef_.ravel()
            intercept = est.standard_intercept_[0]
            average_coef = est.average_coef_.ravel()
            average_intercept = est.average_intercept_[0]
    else:
        if not est.average:
            coef = est.coef_[i]
            intercept = est.intercept_[i]
        else:
            coef = est.standard_coef_[i]
            intercept = est.standard_intercept_[i]
            average_coef = est.average_coef_[i]
            average_intercept = est.average_intercept_[i]
    return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.
    The i'th class is considered the "positive" class.

    Dispatches to the compiled ``plain_sgd`` or ``average_sgd`` routine
    and returns the (coef, intercept) pair it produced; with averaging,
    the averaged intercept is also written back onto ``est``.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)
    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)
    if not est.average:
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)
    else:
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose), int(est.shuffle),
                                            seed, pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept
        return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):
    """Abstract base for SGD-trained linear classifiers (OvA for multiclass)."""
    # Maps the public ``loss`` string to (LossFunction class, extra ctor
    # args).  DEFAULT_EPSILON here is replaced by ``self.epsilon`` in
    # BaseSGD._get_loss_function for epsilon-parameterized losses.
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        """Forward shared hyper-parameters to BaseSGD and store the
        classifier-specific ones (class_weight, n_jobs)."""
        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                n_iter=n_iter, shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
                                                random_state=random_state,
                                                learning_rate=learning_rate,
                                                eta0=eta0, power_t=power_t,
                                                warm_start=warm_start,
                                                average=average)
        self.class_weight = class_weight
        self.classes_ = None
        self.n_jobs = int(n_jobs)
    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, n_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        """Validate inputs, (re)allocate parameters if needed, and run one
        round of SGD training, dispatching on the number of classes."""
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape
        self._validate_params()
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]
        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)
        if self.coef_ is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))
        self.loss_function = self._get_loss_function(loss)
        if self.t_ is None:
            self.t_ = 1.0
        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight, n_iter=n_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight, n_iter=n_iter)
        else:
            raise ValueError("The number of class labels must be "
                             "greater than one.")
        return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
         intercept_init=None, sample_weight=None):
    """Full (re)fit: reset state learned by previous calls, then run
    ``self.n_iter`` epochs through ``_partial_fit``.
    """
    if hasattr(self, "classes_"):
        # forget labels from a previous fit so _partial_fit treats this
        # as a first call
        self.classes_ = None

    X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
    n_samples, n_features = X.shape

    # labels can be encoded as float, int, or string literals
    # np.unique sorts in asc order; largest class id is positive class
    classes = np.unique(y)

    if self.warm_start and self.coef_ is not None:
        # warm start: reuse the previous solution unless explicit
        # initial values were supplied by the caller
        if coef_init is None:
            coef_init = self.coef_
        if intercept_init is None:
            intercept_init = self.intercept_
    else:
        self.coef_ = None
        self.intercept_ = None

    if self.average > 0:
        # averaged SGD tracks both the running ("standard") and the
        # averaged parameter sets
        self.standard_coef_ = self.coef_
        self.standard_intercept_ = self.intercept_
        self.average_coef_ = None
        self.average_intercept_ = None

    # Clear iteration count for multiple call to fit.
    self.t_ = None

    self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
                      classes, sample_weight, coef_init, intercept_init)

    return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
                learning_rate, n_iter):
    """Fit a binary classifier on X and y."""
    # class 1 is trained as the positive class; the expanded class
    # weights supply the per-class importance factors
    coef, intercept = fit_binary(self, 1, X, y, alpha, C,
                                 learning_rate, n_iter,
                                 self._expanded_class_weight[1],
                                 self._expanded_class_weight[0],
                                 sample_weight)

    # advance the global update counter by the number of samples seen
    self.t_ += n_iter * X.shape[0]

    # need to be 2d
    if self.average > 0:
        # expose the averaged parameters once averaging has started
        # (i.e. once at least `average` updates have been performed)
        if self.average <= self.t_ - 1:
            self.coef_ = self.average_coef_.reshape(1, -1)
            self.intercept_ = self.average_intercept_
        else:
            self.coef_ = self.standard_coef_.reshape(1, -1)
            self.standard_intercept_ = np.atleast_1d(intercept)
            self.intercept_ = self.standard_intercept_
    else:
        self.coef_ = coef.reshape(1, -1)
        # intercept is a float, need to convert it to an array of length 1
        self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                    sample_weight, n_iter):
    """Fit a multi-class classifier by combining binary classifiers

    Each binary classifier predicts one class versus all others. This
    strategy is called OVA: One Versus All.
    """
    # Use joblib to fit OvA in parallel.
    result = Parallel(n_jobs=self.n_jobs, backend="threading",
                      verbose=self.verbose)(
        delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                            n_iter, self._expanded_class_weight[i], 1.,
                            sample_weight)
        for i in range(len(self.classes_)))

    # only the intercepts are copied from the worker results;
    # NOTE(review): coef_ is presumably updated in place by fit_binary
    # via the shared-memory "threading" backend -- confirm against
    # fit_binary's implementation.
    for i, (_, intercept) in enumerate(result):
        self.intercept_[i] = intercept

    self.t_ += n_iter * X.shape[0]

    if self.average > 0:
        if self.average <= self.t_ - 1.0:
            self.coef_ = self.average_coef_
            self.intercept_ = self.average_intercept_
        else:
            self.coef_ = self.standard_coef_
            # NOTE(review): `intercept` here is the loop variable left
            # over from the enumeration above, i.e. the *last* class's
            # intercept only -- looks wrong for the multiclass case;
            # verify against upstream before relying on averaged
            # multiclass intercepts.
            self.standard_intercept_ = np.atleast_1d(intercept)
            self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
    """Fit linear model with Stochastic Gradient Descent.

    Performs a single epoch (``n_iter=1``) over the given subset of the
    data; callers drive the outer loop across mini-batches.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Subset of the training data

    y : numpy array, shape (n_samples,)
        Subset of the target values

    classes : array, shape (n_classes,)
        Classes across all calls to partial_fit.
        Can be obtained by via `np.unique(y_all)`, where y_all is the
        target vector of the entire dataset.
        This argument is required for the first call to partial_fit
        and can be omitted in the subsequent calls.
        Note that y doesn't need to contain all labels in `classes`.

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples.
        If not provided, uniform weights are assumed.

    Returns
    -------
    self : returns an instance of self.
    """
    # 'balanced'/'auto' would need the full label distribution, which is
    # unknowable from a partial batch; reject it with guidance.
    if self.class_weight in ['balanced', 'auto']:
        # fixed typo: "you can us" -> "you can use"
        raise ValueError("class_weight '{0}' is not supported for "
                         "partial_fit. In order to use 'balanced' weights, "
                         "use compute_class_weight('{0}', classes, y). "
                         "In place of y you can use a large enough sample "
                         "of the full training set target to properly "
                         "estimate the class frequency distributions. "
                         "Pass the resulting weights as the class_weight "
                         "parameter.".format(self.class_weight))
    return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                             learning_rate=self.learning_rate, n_iter=1,
                             classes=classes, sample_weight=sample_weight,
                             coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
    """Fit linear model with Stochastic Gradient Descent.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values

    coef_init : array, shape (n_classes, n_features)
        The initial coefficients to warm-start the optimization.

    intercept_init : array, shape (n_classes,)
        The initial intercept to warm-start the optimization.

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples.
        If not provided, uniform weights are assumed. These weights will
        be multiplied with class_weight (passed through the
        constructor) if class_weight is specified

    Returns
    -------
    self : returns an instance of self.
    """
    # gather the hyper-parameters held on the estimator, then delegate
    # the actual optimization to _fit
    sgd_params = dict(alpha=self.alpha, C=1.0, loss=self.loss,
                      learning_rate=self.learning_rate)
    return self._fit(X, y, coef_init=coef_init,
                     intercept_init=intercept_init,
                     sample_weight=sample_weight, **sgd_params)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.

    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.

    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
                'perceptron', or a regression loss: 'squared_loss', 'huber',\
                'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.
        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    n_jobs : integer, optional
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation. -1 means 'all CPUs'. Defaults
        to 1.

    learning_rate : string, optional
        The learning rate schedule:
        constant: eta = eta0
        optimal: eta = 1.0 / (t + t0) [default]
        invscaling: eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.

    eta0 : double
        The initial learning rate for the 'constant' or 'invscaling'
        schedules. The default value is 0.0 as eta0 is not used by the
        default schedule 'optimal'.

    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].

    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.
        Weights associated with classes. If not given, all classes
        are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.

    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
            n_features)
        Weights assigned to the features.

    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier()
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
            eta0=0.0, fit_intercept=True, l1_ratio=0.15,
            learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
            penalty='l2', power_t=0.5, random_state=None, shuffle=True,
            verbose=0, warm_start=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    LinearSVC, LogisticRegression, Perceptron
    """

    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        super(SGDClassifier, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
            verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, class_weight=class_weight, warm_start=warm_start,
            average=average)

    def _check_proba(self):
        # probability estimates only exist for probabilistic losses, and
        # only after the model has been fitted (t_ is set by fit)
        check_is_fitted(self, "t_")

        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)

    @property
    def predict_proba(self):
        """Probability estimates.

        This method is only available for log loss and modified Huber loss.

        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.

        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.

        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf

        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # property so that accessing .predict_proba on an unsupported loss
        # raises AttributeError (duck-typing friendly), not at call time
        self._check_proba()
        return self._predict_proba

    def _predict_proba(self, X):
        if self.loss == "log":
            return self._predict_proba_lr(X)

        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)

            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                # prob is a view on the positive-class column of prob2,
                # so the in-place ops below fill prob2 as well
                prob = prob2[:, 1]
            else:
                prob = scores

            # (clip(scores, -1, 1) + 1) / 2, computed in place
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.

            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)

                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))

            return prob

        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)

    @property
    def predict_log_proba(self):
        """Log of probability estimates.

        This method is only available for log loss and modified Huber loss.

        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.

        See ``predict_proba`` for details.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        self._check_proba()
        return self._predict_log_proba

    def _predict_log_proba(self, X):
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Abstract base for SGD-trained linear regressors."""

    # mapping: loss name -> (loss class, extra constructor args)
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                               alpha=alpha, l1_ratio=l1_ratio,
                                               fit_intercept=fit_intercept,
                                               n_iter=n_iter, shuffle=shuffle,
                                               verbose=verbose,
                                               epsilon=epsilon,
                                               random_state=random_state,
                                               learning_rate=learning_rate,
                                               eta0=eta0, power_t=power_t,
                                               warm_start=warm_start,
                                               average=average)

    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     n_iter, sample_weight,
                     coef_init, intercept_init):
        """Validate input, allocate parameters if needed, then run the
        regression SGD loop via ``_fit_regressor``.
        """
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
        y = astype(y, np.float64, copy=False)

        n_samples, n_features = X.shape

        self._validate_params()

        # Allocate datastructures from input arguments
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        if self.coef_ is None:
            # regression has a single output, hence n_classes=1
            self._allocate_parameter_mem(1, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))

        if self.average > 0 and self.average_coef_ is None:
            # lazily allocate averaged-parameter buffers on first use
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1,
                                               dtype=np.float64,
                                               order="C")

        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, n_iter)

        return self

    def partial_fit(self, X, y, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data

        y : numpy array of shape (n_samples,)
            Subset of target values

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        # a single epoch (n_iter=1) per call; callers drive the outer loop
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full (re)fit: reset learned state, then run ``self.n_iter``
        epochs through ``_partial_fit``.
        """
        if self.warm_start and self.coef_ is not None:
            # warm start: reuse the previous solution unless explicit
            # initial values were supplied
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            # averaged SGD tracks both the running ("standard") and the
            # averaged parameter sets
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = None

        return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                                 self.n_iter, sample_weight,
                                 coef_init, intercept_init)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)

    @deprecated(" and will be removed in 0.19.")
    def decision_function(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)

    def _decision_function(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)

        X = check_array(X, accept_sparse='csr')

        # X @ coef + intercept, kept dense even for sparse X
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()

    def predict(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)

    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        """Run the Cython SGD loop (plain or averaged) and publish the
        learned parameters on the estimator.
        """
        dataset, intercept_decay = make_dataset(X, y, sample_weight)

        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)

        if self.t_ is None:
            self.t_ = 1.0

        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)

        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            n_iter,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)

            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += n_iter * X.shape[0]

            # expose averaged or plain parameters depending on whether
            # averaging has kicked in yet
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_

        else:
            self.coef_, self.intercept_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          n_iter,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)

            self.t_ += n_iter * X.shape[0]
            # intercept comes back as a float; normalize to a length-1 array
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
    """Linear model fitted by minimizing a regularized empirical loss with SGD

    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
                or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which refers
        to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
        focus less on getting outliers correct by switching from squared to
        linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
        errors less than epsilon and is linear past that; this is the loss
        function used in SVR. 'squared_epsilon_insensitive' is the same but
        becomes squared loss past a tolerance of epsilon.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level.

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0/(alpha * t)
        invscaling: eta = eta0 / pow(t, power_t) [default]

    eta0 : double, optional
        The initial learning rate [default 0.01].

    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.

    intercept_ : array, shape (1,)
        The intercept term.

    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.

    average_intercept_ : array, shape (1,)
        The averaged intercept term.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, verbose=0, warm_start=False)

    See also
    --------
    Ridge, ElasticNet, Lasso, SVR
    """

    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # concrete subclass: simply forwards everything to the base class
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
| bsd-3-clause |
zorroblue/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)

# Create the RFE object and compute a cross-validated score.
# A linear kernel is required so the SVM exposes per-feature weights
# that RFE can rank.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
              scoring='accuracy')
rfecv.fit(X, y)

print("Optimal number of features : %d" % rfecv.n_features_)

# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
# grid_scores_ holds one cross-validated score per feature count tried
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
FowlerLab/Enrich2 | enrich2/barcodeid.py | 1 | 5459 | import logging
from .seqlib import SeqLib
from .barcode import BarcodeSeqLib
from .barcodemap import BarcodeMap
import pandas as pd
from .plots import barcodemap_plot
from matplotlib.backends.backend_pdf import PdfPages
import os.path
class BcidSeqLib(BarcodeSeqLib):
    """
    Class for counting data from barcoded sequencing libraries with non-variant
    identifiers.

    Creating a :py:class:`BcidSeqLib` requires a valid *config*
    object with an ``'barcodes'`` entry and information.

    The ``barcode_map`` keyword argument can be used to pass an existing
    :py:class:`~seqlib.barcodemap.BarcodeMap`. Ensuring this is the
    right :py:class:`~seqlib.barcodemap.BarcodeMap` is the responsibility
    of the caller.
    """

    treeview_class_name = "Barcoded ID SeqLib"

    def __init__(self):
        BarcodeSeqLib.__init__(self)
        self.barcode_map = None  # BarcodeMap: barcode -> identifier
        self.identifier_min_count = None  # minimum count to keep an identifier
        self.add_label("identifiers")
        # Use the class *name* in the logger id, consistent with configure()
        # (the original interpolated the class object itself here, yielding
        # "<class '...'>"-style logger names).
        self.logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__)
        )

    def configure(self, cfg, barcode_map=None):
        """
        Set up the object using the config object *cfg*, usually derived from
        a ``.json`` file.

        Raises KeyError if a required config value is missing, and ValueError
        if an externally supplied *barcode_map* was built from a different
        map file than the one named in *cfg*.
        """
        BarcodeSeqLib.configure(self, cfg)
        self.logger = logging.getLogger(
            "{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
        )
        try:
            if "min count" in cfg["identifiers"]:
                self.identifier_min_count = int(cfg["identifiers"]["min count"])
            else:
                self.identifier_min_count = 0

            if barcode_map is not None:
                # reuse a shared map only if it was built from the same file
                if barcode_map.filename == cfg["barcodes"]["map file"]:
                    self.barcode_map = barcode_map
                else:
                    raise ValueError(
                        "Attempted to assign non-matching barcode map [{}]".format(
                            self.name
                        )
                    )
            else:
                self.barcode_map = BarcodeMap(
                    cfg["barcodes"]["map file"], is_variant=False
                )
        except KeyError as key:
            raise KeyError(
                "Missing required config value {key} [{name}]".format(
                    key=key, name=self.name
                )
            )

    def serialize(self):
        """
        Format this object (and its children) as a config object suitable for dumping to a config file.
        """
        cfg = BarcodeSeqLib.serialize(self)

        cfg["identifiers"] = dict()
        if self.identifier_min_count > 0:
            cfg["identifiers"]["min count"] = self.identifier_min_count

        if self.barcode_map is not None:  # required for creating new objects in GUI
            cfg["barcodes"]["map file"] = self.barcode_map.filename

        return cfg

    def calculate(self):
        """
        Counts the barcodes using :py:meth:`BarcodeSeqLib.count` and combines them into
        identifier counts using the :py:class:`BarcodeMap`.
        """
        if not self.check_store("/main/identifiers/counts"):
            BarcodeSeqLib.calculate(self)  # count the barcodes
            df_dict = dict()
            barcode_identifiers = dict()

            self.logger.info("Converting barcodes to identifiers")
            # store mapped barcodes
            self.save_filtered_counts(
                "barcodes",
                "index in self.barcode_map.keys() & count >= self.barcode_min_count",
            )

            # count identifiers associated with the barcodes
            for bc, count in self.store["/main/barcodes/counts"].iterrows():
                count = count["count"]
                identifier = self.barcode_map[bc]
                try:
                    df_dict[identifier] += count
                except KeyError:
                    df_dict[identifier] = count
                barcode_identifiers[bc] = identifier

            # save counts, filtering based on the min count
            # (dict.items() -- .iteritems() was a Python 2 API and does not
            # exist on Python 3 dictionaries)
            self.save_counts(
                "identifiers",
                {
                    k: v
                    for k, v in df_dict.items()
                    if v >= self.identifier_min_count
                },
                raw=False,
            )
            del df_dict

            # write the active subset of the BarcodeMap to the store
            # (materialize the keys view so the order used for the values
            # list and the index is fixed)
            barcodes = list(barcode_identifiers.keys())
            barcode_identifiers = pd.DataFrame(
                {"value": [barcode_identifiers[bc] for bc in barcodes]}, index=barcodes
            )
            del barcodes
            barcode_identifiers.sort_values("value", inplace=True)
            self.store.put(
                "/raw/barcodemap",
                barcode_identifiers,
                data_columns=barcode_identifiers.columns,
                format="table",
            )
            del barcode_identifiers

            # self.report_filter_stats()
            self.save_filter_stats()

    def make_plots(self):
        """
        Make plots for :py:class:`~seqlib.seqlib.BcidSeqLib` objects.

        Creates plot of the number of barcodes mapping to each identifier.
        """
        if self.plots_requested:
            SeqLib.make_plots(self)
            # open the PDF file
            pdf = PdfPages(os.path.join(self.plot_dir, "barcodes_per_identifier.pdf"))
            barcodemap_plot(self, pdf)
            pdf.close()
| bsd-3-clause |
samzhang111/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
# Generate a 300x300 checkerboard dataset with 4 row clusters and 3 column
# clusters, unshuffled so the ground-truth structure can be plotted first.
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
    shape=(300, 300), n_clusters=n_clusters, noise=10,
    shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
# NOTE(review): ``sg._shuffle`` is a private scikit-learn helper; the
# unpacking shows it returns the shuffled matrix plus the row/column
# permutation indices reused below to align the ground-truth labels.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
                             random_state=0)
model.fit(data)
# Compare found biclusters against the (permuted) ground truth; 1.0 is a
# perfect match.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
# Reorder rows and columns by their assigned labels to make the recovered
# checkerboard visible.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
# Outer product of the sorted label vectors gives an idealized view of the
# checkerboard block structure.
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
                     np.sort(model.column_labels_) + 1),
            cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
louispotok/pandas | pandas/io/date_converters.py | 11 | 1901 | """This module is designed for community supported date conversion functions"""
from pandas.compat import range, map
import numpy as np
from pandas._libs.tslibs import parsing
def parse_date_time(date_col, time_col):
    """Combine a date column and a time column into datetime objects.

    Both columns are coerced to object dtype (see ``_maybe_cast``) before
    being handed to the C-accelerated pandas parser.
    """
    date_col = _maybe_cast(date_col)
    time_col = _maybe_cast(time_col)
    return parsing.try_parse_date_and_time(date_col, time_col)
def parse_date_fields(year_col, month_col, day_col):
    """Combine separate year, month and day columns into dates.

    Each column is coerced to object dtype (see ``_maybe_cast``) and the
    combination is delegated to ``parsing.try_parse_year_month_day``.
    """
    year_col = _maybe_cast(year_col)
    month_col = _maybe_cast(month_col)
    day_col = _maybe_cast(day_col)
    return parsing.try_parse_year_month_day(year_col, month_col, day_col)
def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
                     second_col):
    """Combine year/month/day and hour/minute/second columns into datetimes.

    All six columns are coerced to object dtype (see ``_maybe_cast``) and
    delegated to ``parsing.try_parse_datetime_components``.
    """
    year_col = _maybe_cast(year_col)
    month_col = _maybe_cast(month_col)
    day_col = _maybe_cast(day_col)
    hour_col = _maybe_cast(hour_col)
    minute_col = _maybe_cast(minute_col)
    second_col = _maybe_cast(second_col)
    return parsing.try_parse_datetime_components(year_col, month_col, day_col,
                                                 hour_col, minute_col,
                                                 second_col)
def generic_parser(parse_func, *cols):
    """Apply *parse_func* row-wise across the given columns.

    Parameters
    ----------
    parse_func : callable
        Called once per row with one positional argument per column.
    *cols : sequences
        Columns of equal length (validated by ``_check_columns``).

    Returns
    -------
    numpy.ndarray
        Object-dtype array holding one ``parse_func`` result per row.
    """
    n_rows = _check_columns(cols)
    parsed = np.empty(n_rows, dtype=object)
    # zip(*cols) walks the columns in lockstep, yielding one row at a time.
    for row_idx, row_values in enumerate(zip(*cols)):
        parsed[row_idx] = parse_func(*row_values)
    return parsed
def _maybe_cast(arr):
if not arr.dtype.type == np.object_:
arr = np.array(arr, dtype=object)
return arr
def _check_columns(cols):
if not len(cols):
raise AssertionError("There must be at least 1 column")
head, tail = cols[0], cols[1:]
N = len(head)
for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError('All columns must have the same length: {0}; '
'column {1} has length {2}'.format(N, i, n))
return N
| bsd-3-clause |
pompiduskus/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data: 10k points around three blob centers.
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The bandwidth is detected automatically from a 500-point subsample via
# estimate_bandwidth().
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
# The number of clusters is whatever MeanShift discovered, i.e. the number
# of distinct labels.
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result: one color per cluster, centers drawn as large circles.
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
# cycle() repeats the color sequence if there are more clusters than colors.
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
cauchycui/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
    """Feature selector that removes all low-variance features.
    This feature selection algorithm looks only at the features (X), not the
    desired outputs (y), and can thus be used for unsupervised learning.
    Read more in the :ref:`User Guide <variance_threshold>`.
    Parameters
    ----------
    threshold : float, optional
        Features with a training-set variance lower than this threshold will
        be removed. The default is to keep all features with non-zero variance,
        i.e. remove the features that have the same value in all samples.
    Attributes
    ----------
    variances_ : array, shape (n_features,)
        Variances of individual features.
    Examples
    --------
    The following dataset has integer features, two of which are the same
    in every sample. These are removed with the default setting for threshold::
        >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
        >>> selector = VarianceThreshold()
        >>> selector.fit_transform(X)
        array([[2, 0],
               [1, 4],
               [1, 1]])
    """
    def __init__(self, threshold=0.):
        # Stored unmodified per the scikit-learn estimator contract; no
        # validation happens until fit().
        self.threshold = threshold
    def fit(self, X, y=None):
        """Learn empirical variances from X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Sample vectors from which to compute variances.
        y : any
            Ignored. This parameter exists only for compatibility with
            sklearn.pipeline.Pipeline.
        Returns
        -------
        self
        """
        # Accept CSR/CSC sparse input; anything else is densified/validated.
        X = check_array(X, ('csr', 'csc'), dtype=np.float64)
        if hasattr(X, "toarray"):   # sparse matrix
            # mean_variance_axis computes variances without densifying.
            _, self.variances_ = mean_variance_axis(X, axis=0)
        else:
            self.variances_ = np.var(X, axis=0)
        # Fail loudly when no feature would survive selection; silently
        # returning an empty feature set would break downstream transforms.
        if np.all(self.variances_ <= self.threshold):
            msg = "No feature in X meets the variance threshold {0:.5f}"
            if X.shape[0] == 1:
                msg += " (X contains only one sample)"
            raise ValueError(msg.format(self.threshold))
        return self
    def _get_support_mask(self):
        # Boolean mask consumed by SelectorMixin.transform: keep features
        # whose variance is strictly greater than the threshold.
        check_is_fitted(self, 'variances_')
        return self.variances_ > self.threshold
| bsd-3-clause |
nilmtk/nilmtk | docs/source/conf.py | 7 | 12943 | # -*- coding: utf-8 -*-
#
# NILMTK documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 10 09:22:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'numpydoc',
# 'inheritance_diagram',
'sphinx.ext.autosummary'
]
# 'matplotlib.sphinxext.plot_directive']
# 'matplotlib.sphinxext.ipython_directive']
# The line below is necessary to stop thousands of warnings
# see http://stackoverflow.com/a/15210813/732596
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NILMTK'
copyright = u'2014, NILMTK Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
html_theme = 'bootstrap'
#html_theme = 'linfiniti-sphinx-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
# 'navbar_title': "Demo",
# Tab name for entire site. (Default: "Site")
# 'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("github", "https://github.com/nilmtk/nilmtk", True),
],
# Render the next and previous page links in navbar. (Default: true)
# 'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
# 'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
# 'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
# 'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
# 'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
# 'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
# 'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo" or "united".
# 'bootswatch_theme': "cosmo",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
# 'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
#html_theme_path = [".."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NILMTKdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NILMTK.tex', u'NILMTK Documentation',
u'NILMTK Authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nilmtk', u'NILMTK Documentation',
[u'NILMTK Authors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NILMTK', u'NILMTK Documentation',
u'NILMTK Authors', 'NILMTK', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'NILMTK'
epub_author = u'NILMTK Authors'
epub_publisher = u'NILMTK Authors'
epub_copyright = u'2014, NILMTK Authors'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'NILMTK'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| apache-2.0 |
m3drano/power-simulation | tools/plot_histogram.py | 1 | 3433 | #!/usr/bin/env python3
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plots the histogram of one of the trace keys."""
import argparse
import logging
import math
import sys
import matplotlib
matplotlib.use('Agg')
# pylint: disable=wrong-import-position
import matplotlib.mlab
import matplotlib.pyplot
import numpy
import scipy.stats
from parse_trace import parse_trace
def plot_histogram(trace, key, nbins, distribution_name, xmax):
    """Plot a normalized histogram of ``trace[*][key]`` values with a fitted
    distribution overlaid, and save it to ``histogram.png``.

    :param trace: mapping whose values are per-item mappings containing
        ``key`` (iterable of numbers) — presumably parse_trace() output;
        TODO confirm against parse_trace.
    :param key: which per-item series to histogram.
    :param nbins: number of histogram bins, or None to auto-estimate.
    :param distribution_name: any scipy.stats distribution name (e.g. 'norm').
    :param xmax: optional upper x-axis limit for the plot.
    """
    # Flatten the series for ``key`` across all trace entries.
    all_items = numpy.asarray([
        i for pc in trace.values() for i in pc[key]])
    distribution = getattr(scipy.stats, distribution_name)
    if nbins is None:
        # Use the Freedman-Diaconis estimate: bin width = 2*IQR*n^(-1/3).
        nbins = int((numpy.max(all_items) - numpy.min(all_items))
                    / (2 * scipy.stats.iqr(all_items)
                       * math.pow(len(all_items), -1/3)))
        logging.warning('Using %d bins as default for %d samples.',
                        nbins, len(all_items))
    # Maximum-likelihood fit of the chosen distribution; *fit returns the
    # frozen distribution's shape/loc/scale parameters.
    fit = distribution(*distribution.fit(all_items))
    matplotlib.pyplot.style.use('bmh')
    _, axis = matplotlib.pyplot.subplots(1, 1)
    data, bins, _ = axis.hist(all_items, nbins, density=True,
                              label='Histogram for key "%s"' % key)
    # Overlay the fitted PDF evaluated at the bin edges.
    axis.plot(bins, fit.pdf(bins), 'r--', linewidth=1,
              label='Best %s fit for key "%s"' % (distribution_name, key))
    axis.set_ylim([0.0, max(data)])
    if xmax is not None:
        axis.set_xlim([0.0, xmax])
    matplotlib.pyplot.xlabel('Interval duration (s)')
    matplotlib.pyplot.ylabel('Probability')
    axis.grid(True, which='both')
    axis.legend(loc='best', frameon=False)
    matplotlib.pyplot.savefig('histogram.png')
def main():
    """Parse command-line arguments and plot the requested histogram.

    Returns 1 (suitable for ``sys.exit``) when the requested key is not
    present in the trace; implicitly returns None (exit status 0) on
    success.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--trace', required=True,
                        dest='trace_file',
                        help='path to the trace file to analyse')
    parser.add_argument('--key', required=True,
                        dest='key',
                        help='key in the trace to process')
    parser.add_argument('--bins',
                        dest='nbins', type=int, default=None,
                        # typo fixed: "hins" -> "bins"
                        help='number of histogram bins to have')
    parser.add_argument('--distribution_to_fit',
                        dest='distribution_to_fit', default='norm',
                        help='which distribution to fit')
    parser.add_argument('--xmax',
                        dest='xmax', type=int, default=None,
                        help='max for the x axis on the plots')
    args = parser.parse_args()
    try:
        plot_histogram(parse_trace(args.trace_file), args.key, args.nbins,
                       args.distribution_to_fit, args.xmax)
    except KeyError:
        # plot_histogram indexes the trace by args.key; a missing key is the
        # one user error we translate into a friendly message + exit code.
        print('Invalid key: %s' % args.key)
        return 1
| apache-2.0 |
abhitopia/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 60 | 2190 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
learn = tf.contrib.learn
def main(unused_argv):
  """Train and evaluate a DNN on Iris, round-tripping the data through HDF5.

  NOTE(review): relies on deprecated APIs (sklearn.cross_validation,
  tf.contrib.learn) — only runs against the vintage library versions this
  example was written for.
  """
  # Load dataset.
  iris = learn.datasets.load_dataset('iris')
  x_train, x_test, y_train, y_test = cross_validation.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we save and then reload the iris data in HDF5 format purely
  # as a demonstration of h5py round-tripping.
  h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
  h5f.create_dataset('X_train', data=x_train)
  h5f.create_dataset('X_test', data=x_test)
  h5f.create_dataset('y_train', data=y_train)
  h5f.create_dataset('y_test', data=y_test)
  h5f.close()
  h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
  x_train = np.array(h5f['X_train'])
  x_test = np.array(h5f['X_test'])
  y_train = np.array(h5f['y_train'])
  y_test = np.array(h5f['y_test'])
  # Build 3 layer DNN with 10, 20, 10 units respectively.
  feature_columns = learn.infer_real_valued_columns_from_input(x_train)
  classifier = learn.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
  # Fit and predict.
  classifier.fit(x_train, y_train, steps=200)
  score = metrics.accuracy_score(y_test, classifier.predict(x_test))
  print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
chenyyx/scikit-learn-doc-zh | examples/en/linear_model/plot_multi_task_lasso_support.py | 77 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows to fit multiple regression problems
jointly enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are select for all time
point. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
# Only the first n_relevant_features columns carry signal; the rest stay 0.
for k in range(n_relevant_features):
    coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
# Fit one independent Lasso per task vs. a single MultiTaskLasso across all
# tasks, so their selected supports can be compared.
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
# #############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
# spy() marks the non-zero coefficients, i.e. the selected support.
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
# Trace one relevant feature's coefficient over tasks for each estimator.
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
         label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
         label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
         label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| gpl-3.0 |
cBeaird/SemEval_Character-Identification-on-Multiparty-Dialogues | vcu_cmsc_516_semeval4_nn.py | 1 | 9181 | #!/usr/bin/env python
"""
Application start for the SemEval 2018 task 4 application: conference resolution for SemEval 2018
This file is the main starting point for the application. This files allow for the application to
be called with a series of input parameters to perform the different functions required by the
application. The core functionality is in the semEval_core_functions python file while the core
model data is in the semEval_core_model file additional information can be found in these files.
"""
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import argparse
import tensorflow
import semEval_core_model as sEcm
import semEval_core_functions as sEcf
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.contrib.learn as learn
import numpy as np
__author__ = 'Casey Beaird'
__credits__ = ['Casey Beaird', 'Chase Greco', 'Brandon Watts']
__license__ = 'MIT'
__version__ = '0.1'
PATTERN_FOR_DOC_ID = '(?:\/.*-s)([0-9]*)(?:[a-z])([0-9]*)'
MENTION_FINDER = '(?:^#)(?:.*)([0-9)]$)'
# build the command line parser and add the options
pars = argparse.ArgumentParser(usage='Main python file for project package \'VCU CMSC516 SemEval Task4\'',
formatter_class=argparse.RawTextHelpFormatter,
description='''
Main python file for VCU CMSC 516 SemEval 2018 project task 4:
Character Identification on Multiparty Dialogues
This is the conference resolution problem, identify reference in corpora given the
speaker.
The application expects an entity map and a training file.
example: python vcu_cmsc516_semeval4.py -m ./entityMap.txt -t ./trainingData.conll
optionally a list of column headings can be supplied that describe your conll data
files. The list options are specified here in this help.''',
version='0.1')
# model name this is the name of the model to create with new map or add to or to use with an evaluation
pars.add_argument('-m', '--model',
help='Model to be used/created/extended by the application this item should be specified always.',
dest='model',
required=True)
# Specifying training is requested (as opposed to testing)
pars.add_argument('-t', '--train',
help='Train/extend the model {m} using the {data_file} provided.',
action='store_true')
# Specifying training is requested (as opposed to testing)
pars.add_argument('-e', '--evaluate',
help='Evaluate the {data_file} using the training model {m} provided.',
action='store_true')
# map file this is the entity map file for the named entities
pars.add_argument('-mf', '--mapFile',
help='name of entity map the file that contains the entity names and their entity IDs',
type=file,
dest='map_file')
# training data file
pars.add_argument('-df', '--dataFile',
help='Input file for either training or evaluating',
type=file,
dest='data_file')
# column headers for the conll file if this is not specified then the headings file is assumed to
# be in the format from the SemEval 2018 Task 4 format that is specified in the core model file.
pars.add_argument('-d', '--headers',
help='column type list for the order of columns in you training file',
dest='columns',
choices=list(sEcm.DEFAULT_HEADINGS),
default=sEcm.DEFAULT_HEADINGS,
nargs='*')
arguments = pars.parse_args()
d = vars(arguments)
# check args for problems
# todo parse the command line arguments this will create the namespace class that gives access to the
# arguments passed in the command line. This will need to be broken out to deal with all the disjoint
# sets of operations we want:
# 1: train and pickle training data
# 2: init the model
# 3: evaluate from the current model
if d['train'] and d['evaluate']:
print('cannot train and evaluate at the same time on the same data file!\n')
exit(0)
if d['data_file'] is None:
print('need a data file to evaluate or train on')
exit(0)
# columns object will always exist because there is a default list of columns so we can set the columns
# the training data uses the Default_headings in the model python file so we dont need to care about dealing
# with in in a specific way
if d['columns'] != sEcm.DEFAULT_HEADINGS:
sEcm.DEFAULT_HEADINGS = d['columns']
# will need a way to save models
sEcm.nn_model = dict()
# start the NN training
if d['train']:
if d['map_file'] is not None:
sEcm.nn_model[sEcm.MODEL_ENTITY_MAP] = sEcf.add_map_file_to_nn_model(d['map_file'])
if d['data_file'] is not None:
nn_data = sEcf.train_nn_model(d['data_file'])
train, evaluate = train_test_split(nn_data[0][1:])
with open('train_data.csv', 'w') as parsed_data_file:
# parsed_data_file.write(','.join(str(h) for h in nn_data[0][0]) + '\n')
for instance in train:
parsed_data_file.write(','.join(str(c) for c in instance) + '\n')
with open('evaluate_data.csv', 'w') as parsed_data_file:
# parsed_data_file.write(','.join(str(h) for h in nn_data[0][0]) + '\n')
for instance in evaluate:
parsed_data_file.write(','.join(str(c) for c in instance) + '\n')
# Tensorflow part
training_set = learn.datasets.base.load_csv_without_header(filename='train_data.csv',
target_dtype=np.int,
features_dtype=np.int)
evaluate_set = learn.datasets.base.load_csv_without_header(filename='evaluate_data.csv',
target_dtype=np.int,
features_dtype=np.int)
# 'season', 'episode', 'word', 'pos', 'lemma', 'speaker', 'class'
train_array = np.array(training_set.data)
season = tf.feature_column.numeric_column('season')
season_v = train_array[:, 0]
episode = tf.feature_column.numeric_column('episode')
episode_v = train_array[:, 1]
word = tf.feature_column.numeric_column('word')
word_v = train_array[:, 2]
pos = tf.feature_column.numeric_column('pos')
pos_v = train_array[:, 3]
lemma = tf.feature_column.numeric_column('lemma')
lemma_v = train_array[:, 4]
speaker = tf.feature_column.numeric_column('speaker')
speaker_v = train_array[:, 5]
feature_columns = [season, episode, word, pos, lemma, speaker]
# feature_columns = [tf.feature_column.numeric_column('instance', [6])]
classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
hidden_units=[1000, 500, 401],
n_classes=len(sEcm.nn_model[sEcm.MODEL_ENTITY_MAP]),
model_dir='simple_nn_model')
train_function = tf.estimator.inputs.numpy_input_fn(x={'season': season_v, 'episode': episode_v,
'word': word_v, 'pos': pos_v,
'lemma': lemma_v, 'speaker': speaker_v},
# {'instance': np.array(training_set.data)},
y=np.array(training_set.target),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_function, steps=5000)
eval_array = np.array(evaluate_set.data)
season_ev = eval_array[:, 0]
episode_ev = eval_array[:, 1]
word_ev = eval_array[:, 2]
pos_ev = eval_array[:, 3]
lemma_ev = eval_array[:, 4]
speaker_ev = eval_array[:, 5]
evaluate_function = tf.estimator.inputs.numpy_input_fn(x={'season': season_ev, 'episode': episode_ev,
'word': word_ev, 'pos': pos_ev,
'lemma': lemma_ev, 'speaker': speaker_ev},
# {'instance': np.array(evaluate_set.data)},
y=np.array(evaluate_set.target),
num_epochs=1,
shuffle=False)
accuracy = classifier.evaluate(input_fn=evaluate_function)['accuracy']
print(accuracy)
else:
print('Nothing was asked of me!\nThank you and have a good day!')
exit(0)
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/tests/test_triangulation.py | 9 | 39659 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal,\
assert_array_less
import numpy.ma.testutils as matest
from matplotlib.testing.decorators import image_comparison
import matplotlib.cm as cm
from matplotlib.path import Path
def test_delaunay():
    """Delaunay triangulation of a duplicate-free regular grid has the
    expected point/triangle/edge counts, consistent neighbors, and uses
    every input point."""
    n_cols = 5
    n_rows = 4
    grid_x, grid_y = np.meshgrid(np.linspace(0.0, 1.0, n_cols),
                                 np.linspace(0.0, 1.0, n_rows))
    grid_x = grid_x.ravel()
    grid_y = grid_y.ravel()
    n_points = n_cols * n_rows
    n_triangles = 2 * (n_cols - 1) * (n_rows - 1)
    n_edges = 3 * n_cols * n_rows - 2 * n_cols - 2 * n_rows + 1
    triang = mtri.Triangulation(grid_x, grid_y)
    # These checks should hold for any triangulation without duplicates.
    # Point coordinates survive unchanged (floating point).
    assert_array_almost_equal(triang.x, grid_x)
    assert_array_almost_equal(triang.y, grid_y)
    # Triangle indices are integers in the expected range.
    assert_equal(len(triang.triangles), n_triangles)
    assert_equal(np.min(triang.triangles), 0)
    assert_equal(np.max(triang.triangles), n_points - 1)
    # Edge indices likewise.
    assert_equal(len(triang.edges), n_edges)
    assert_equal(np.min(triang.edges), 0)
    assert_equal(np.max(triang.edges), n_points - 1)
    # Force recalculation of neighbors via the C++ triangulation class and
    # check it agrees with the values the delaunay routine returned.
    cached_neighbors = triang.neighbors
    triang._neighbors = None
    assert_array_equal(triang.neighbors, cached_neighbors)
    # Every point participates in at least one triangle.
    assert_array_equal(np.unique(triang.triangles), np.arange(n_points))
def test_delaunay_duplicate_points():
    """A point duplicating another must be ignored by the triangulation."""
    npoints = 10
    duplicate = 7
    duplicate_of = 3
    np.random.seed(23)
    # Draw x first, then y, to keep the RNG stream identical.
    px = np.random.random((npoints))
    py = np.random.random((npoints))
    # Make point `duplicate` an exact copy of point `duplicate_of`.
    px[duplicate] = px[duplicate_of]
    py[duplicate] = py[duplicate_of]
    triang = mtri.Triangulation(px, py)
    # The duplicate's index must not appear in any triangle.
    expected_indices = np.delete(np.arange(npoints), duplicate)
    assert_array_equal(np.unique(triang.triangles), expected_indices)
def test_delaunay_points_in_line():
    """Fully colinear input must fail gracefully; one extra off-line point
    makes the triangulation succeed."""
    line_x = np.linspace(0.0, 10.0, 11)
    line_y = np.linspace(0.0, 10.0, 11)
    assert_raises(RuntimeError, mtri.Triangulation, line_x, line_y)
    # A single point off the line is enough for a valid triangulation;
    # success here simply means "does not raise".
    line_x = np.append(line_x, 2.0)
    line_y = np.append(line_y, 8.0)
    mtri.Triangulation(line_x, line_y)
def test_delaunay_insufficient_points():
    """Triangulation requires at least 3 *unique* points."""
    # Fewer than 3 points supplied at all.
    for xs, ys in (([], []), ([1], [5]), ([1, 2], [5, 6])):
        assert_raises(ValueError, mtri.Triangulation, xs, ys)
    # 3 or more points supplied, but duplicates leave fewer than 3 unique.
    for xs, ys in (([1, 2, 1], [5, 6, 5]),
                   ([1, 2, 2], [5, 6, 6]),
                   ([1, 1, 1, 2, 1, 2], [5, 5, 5, 6, 5, 6])):
        assert_raises(ValueError, mtri.Triangulation, xs, ys)
def test_delaunay_robust():
    """Delaunay triangulation of a near-degenerate point set must be valid.

    Each interior test point must lie in exactly one triangle, i.e. the
    triangulation contains no overlapping triangles.
    """
    # Fails when mtri.Triangulation uses matplotlib.delaunay, works when using
    # qhull.
    tri_points = np.array([
        [0.8660254037844384, -0.5000000000000004],
        [0.7577722283113836, -0.5000000000000004],
        [0.6495190528383288, -0.5000000000000003],
        [0.5412658773652739, -0.5000000000000003],
        [0.811898816047911, -0.40625000000000044],
        [0.7036456405748561, -0.4062500000000004],
        [0.5953924651018013, -0.40625000000000033]])
    test_points = np.asarray([
        [0.58, -0.46],
        [0.65, -0.46],
        [0.65, -0.42],
        [0.7, -0.48],
        [0.7, -0.44],
        [0.75, -0.44],
        [0.8, -0.48]])
    # Utility function that indicates if a triangle defined by 3 points
    # (xtri, ytri) contains the test point xy. Avoid calling with a point that
    # lies on or very near to an edge of the triangle.
    def tri_contains_point(xtri, ytri, xy):
        tri_points = np.vstack((xtri, ytri)).T
        return Path(tri_points).contains_point(xy)
    # Utility function that returns how many triangles of the specified
    # triangulation contain the test point xy. Avoid calling with a point that
    # lies on or very near to an edge of any triangle in the triangulation.
    def tris_contain_point(triang, xy):
        count = 0
        for tri in triang.triangles:
            if tri_contains_point(triang.x[tri], triang.y[tri], xy):
                count += 1
        return count
    # Using matplotlib.delaunay, an invalid triangulation is created with
    # overlapping triangles; qhull is OK.
    triang = mtri.Triangulation(tri_points[:, 0], tri_points[:, 1])
    for test_point in test_points:
        assert_equal(tris_contain_point(triang, test_point), 1)
    # If ignore the first point of tri_points, matplotlib.delaunay throws a
    # KeyError when calculating the convex hull; qhull is OK.
    # The result is deliberately unused: success means "does not raise".
    triang = mtri.Triangulation(tri_points[1:, 0], tri_points[1:, 1])
@image_comparison(baseline_images=['tripcolor1'], extensions=['png'])
def test_tripcolor():
    """Image comparison of tripcolor shading: per-point colors (left
    subplot) versus per-face colors (right subplot)."""
    x = np.asarray([0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1, 0.75])
    y = np.asarray([0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1, 0.75])
    triangles = np.asarray([
        [0, 1, 3], [1, 4, 3],
        [1, 2, 4], [2, 5, 4],
        [3, 4, 6], [4, 7, 6],
        [4, 5, 9], [7, 4, 9], [8, 7, 9], [5, 8, 9]])
    # Triangulation with same number of points and triangles.
    triang = mtri.Triangulation(x, y, triangles)
    # One color value per point, and one per face (triangle barycenter).
    Cpoints = x + 0.5*y
    xmid = x[triang.triangles].mean(axis=1)
    ymid = y[triang.triangles].mean(axis=1)
    Cfaces = 0.5*xmid + ymid
    plt.subplot(121)
    plt.tripcolor(triang, Cpoints, edgecolors='k')
    plt.title('point colors')
    plt.subplot(122)
    plt.tripcolor(triang, facecolors=Cfaces, edgecolors='k')
    plt.title('facecolors')
def test_no_modify():
    """Triangulation must not mutate the triangles array passed to it."""
    supplied = np.array([[3, 2, 0], [3, 1, 0]], dtype=np.int32)
    pts = np.array([(0, 0), (0, 1.1), (1, 0), (1, 1)])
    snapshot = supplied.copy()
    tri = mtri.Triangulation(pts[:, 0], pts[:, 1], supplied)
    # Accessing .edges triggers internal computation, which must not
    # touch the caller's array.
    tri.edges
    assert_array_equal(snapshot, supplied)
def test_trifinder():
    """TriFinder maps (x, y) query points to triangle indices, returning
    -1 for points outside the triangulation or inside masked triangles;
    it copes with colinear-point degeneracies and is reinitialised when
    the triangulation's mask changes.
    """
    # Test points within triangles of masked triangulation.
    x, y = np.meshgrid(np.arange(4), np.arange(4))
    x = x.ravel()
    y = y.ravel()
    triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
                 [3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
                 [6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
                 [10, 14, 13], [10, 11, 14], [11, 15, 14]]
    mask = np.zeros(len(triangles))
    mask[8:10] = 1
    triang = mtri.Triangulation(x, y, triangles, mask)
    trifinder = triang.get_trifinder()
    xs = [0.25, 1.25, 2.25, 3.25]
    ys = [0.25, 1.25, 2.25, 3.25]
    xs, ys = np.meshgrid(xs, ys)
    xs = xs.ravel()
    ys = ys.ravel()
    tris = trifinder(xs, ys)
    # -1 marks points outside the grid or inside the two masked triangles.
    assert_array_equal(tris, [0, 2, 4, -1, 6, -1, 10, -1,
                              12, 14, 16, -1, -1, -1, -1, -1])
    tris = trifinder(xs-0.5, ys-0.5)
    assert_array_equal(tris, [-1, -1, -1, -1, -1, 1, 3, 5,
                              -1, 7, -1, 11, -1, 13, 15, 17])
    # Test points exactly on boundary edges of masked triangulation.
    xs = [0.5, 1.5, 2.5, 0.5, 1.5, 2.5, 1.5, 1.5, 0.0, 1.0, 2.0, 3.0]
    ys = [0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.5, 1.5, 1.5, 1.5]
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [0, 2, 4, 13, 15, 17, 3, 14, 6, 7, 10, 11])
    # Test points exactly on boundary corners of masked triangulation.
    xs = [0.0, 3.0]
    ys = [0.0, 3.0]
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [0, 17])
    # Test triangles with horizontal colinear points. These are not valid
    # triangulations, but we try to deal with the simplest violations.
    delta = 0.0  # If +ve, triangulation is OK, if -ve triangulation invalid,
                 # if zero have colinear points but should pass tests anyway.
    x = [1.5, 0, 1, 2, 3, 1.5, 1.5]
    y = [-1, 0, 0, 0, 0, delta, 1]
    triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
                 [3, 4, 5], [1, 5, 6], [4, 6, 5]]
    triang = mtri.Triangulation(x, y, triangles)
    trifinder = triang.get_trifinder()
    xs = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
    ys = [-0.1, 0.1]
    xs, ys = np.meshgrid(xs, ys)
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [[-1, 0, 0, 1, 1, 2, -1],
                              [-1, 6, 6, 6, 7, 7, -1]])
    # Test triangles with vertical colinear points. These are not valid
    # triangulations, but we try to deal with the simplest violations.
    delta = 0.0  # If +ve, triangulation is OK, if -ve triangulation invalid,
                 # if zero have colinear points but should pass tests anyway.
    x = [-1, -delta, 0, 0, 0, 0, 1]
    y = [1.5, 1.5, 0, 1, 2, 3, 1.5]
    triangles = [[0, 1, 2], [0, 1, 5], [1, 2, 3], [1, 3, 4], [1, 4, 5],
                 [2, 6, 3], [3, 6, 4], [4, 6, 5]]
    triang = mtri.Triangulation(x, y, triangles)
    trifinder = triang.get_trifinder()
    xs = [-0.1, 0.1]
    ys = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
    xs, ys = np.meshgrid(xs, ys)
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [[-1, -1], [0, 5], [0, 5], [0, 6], [1, 6], [1, 7],
                              [-1, -1]])
    # Test that changing triangulation by setting a mask causes the trifinder
    # to be reinitialised.
    x = [0, 1, 0, 1]
    y = [0, 0, 1, 1]
    triangles = [[0, 1, 2], [1, 3, 2]]
    triang = mtri.Triangulation(x, y, triangles)
    trifinder = triang.get_trifinder()
    xs = [-0.2, 0.2, 0.8, 1.2]
    ys = [ 0.5, 0.5, 0.5, 0.5]
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [-1, 0, 1, -1])
    triang.set_mask([1, 0])
    # Same trifinder object is returned, but it must reflect the new mask.
    assert_equal(trifinder, triang.get_trifinder())
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [-1, -1, 1, -1])
def test_triinterp():
    """Linear and cubic triangular-grid interpolators.

    Checks that:
    - a linear field is reproduced exactly inside the triangulation,
    - points outside the triangulation (or in masked triangles) come
      back masked, for *all three* interpolator kinds,
    - a quadratic field passes a 2nd-order patch test with kind='user',
    - cubic interpolation clearly beats linear on a dense mesh.
    """
    # Test points within triangles of masked triangulation.
    x, y = np.meshgrid(np.arange(4), np.arange(4))
    x = x.ravel()
    y = y.ravel()
    z = 1.23*x - 4.79*y
    triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
                 [3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
                 [6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
                 [10, 14, 13], [10, 11, 14], [11, 15, 14]]
    mask = np.zeros(len(triangles))
    mask[8:10] = 1
    triang = mtri.Triangulation(x, y, triangles, mask)
    linear_interp = mtri.LinearTriInterpolator(triang, z)
    cubic_min_E = mtri.CubicTriInterpolator(triang, z)
    cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
    xs = np.linspace(0.25, 2.75, 6)
    ys = [0.25, 0.75, 2.25, 2.75]
    xs, ys = np.meshgrid(xs, ys)  # Testing arrays with array.ndim = 2
    for interp in (linear_interp, cubic_min_E, cubic_geom):
        zs = interp(xs, ys)
        assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
    # Test points outside triangulation.
    xs = [-0.25, 1.25, 1.75, 3.25]
    ys = xs
    xs, ys = np.meshgrid(xs, ys)
    for interp in (linear_interp, cubic_min_E, cubic_geom):
        # BUGFIX: originally called linear_interp() here, so the cubic
        # interpolators' outside-domain masking was never exercised.
        zs = interp(xs, ys)
        assert_array_equal(zs.mask, [[True]*4]*4)
    # Test mixed configuration (outside / inside).
    xs = np.linspace(0.25, 1.75, 6)
    ys = [0.25, 0.75, 1.25, 1.75]
    xs, ys = np.meshgrid(xs, ys)
    for interp in (linear_interp, cubic_min_E, cubic_geom):
        zs = interp(xs, ys)
        matest.assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
        mask = (xs >= 1) * (xs <= 2) * (ys >= 1) * (ys <= 2)
        assert_array_equal(zs.mask, mask)
    # 2nd order patch test: on a grid with an 'arbitrary shaped' triangle,
    # patch test shall be exact for quadratic functions and cubic
    # interpolator if *kind* = user
    (a, b, c) = (1.23, -4.79, 0.6)
    def quad(x, y):
        return a*(x-0.5)**2 + b*(y-0.5)**2 + c*x*y
    def gradient_quad(x, y):
        return (2*a*(x-0.5) + c*y, 2*b*(y-0.5) + c*x)
    x = np.array([0.2, 0.33367, 0.669, 0., 1., 1., 0.])
    y = np.array([0.3, 0.80755, 0.4335, 0., 0., 1., 1.])
    triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
                          [1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
    triang = mtri.Triangulation(x, y, triangles)
    z = quad(x, y)
    dz = gradient_quad(x, y)
    # test points for 2nd order patch test
    xs = np.linspace(0., 1., 5)
    ys = np.linspace(0., 1., 5)
    xs, ys = np.meshgrid(xs, ys)
    cubic_user = mtri.CubicTriInterpolator(triang, z, kind='user', dz=dz)
    interp_zs = cubic_user(xs, ys)
    assert_array_almost_equal(interp_zs, quad(xs, ys))
    (interp_dzsdx, interp_dzsdy) = cubic_user.gradient(x, y)
    (dzsdx, dzsdy) = gradient_quad(x, y)
    assert_array_almost_equal(interp_dzsdx, dzsdx)
    assert_array_almost_equal(interp_dzsdy, dzsdy)
    # Cubic improvement: cubic interpolation shall perform better than linear
    # on a sufficiently dense mesh for a quadratic function.
    n = 11
    x, y = np.meshgrid(np.linspace(0., 1., n+1), np.linspace(0., 1., n+1))
    x = x.ravel()
    y = y.ravel()
    z = quad(x, y)
    triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
    xs, ys = np.meshgrid(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5))
    xs = xs.ravel()
    ys = ys.ravel()
    linear_interp = mtri.LinearTriInterpolator(triang, z)
    cubic_min_E = mtri.CubicTriInterpolator(triang, z)
    cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
    zs = quad(xs, ys)
    diff_lin = np.abs(linear_interp(xs, ys) - zs)
    for interp in (cubic_min_E, cubic_geom):
        diff_cubic = np.abs(interp(xs, ys) - zs)
        # Cubic should be at least an order of magnitude more accurate.
        assert(np.max(diff_lin) >= 10.*np.max(diff_cubic))
        assert(np.dot(diff_lin, diff_lin) >=
               100.*np.dot(diff_cubic, diff_cubic))
def test_triinterpcubic_C1_continuity():
    """C1 (value and gradient) continuity of CubicTriInterpolator's shape
    functions on an arbitrary triangle, tested for each of the 9 canonical
    degrees of freedom."""
    # Below the 4 tests which demonstrate C1 continuity of the
    # TriCubicInterpolator (testing the cubic shape functions on arbitrary
    # triangle):
    #
    # 1) Testing continuity of function & derivatives at corner for all 9
    # shape functions. Testing also function values at same location.
    # 2) Testing C1 continuity along each edge (as gradient is polynomial of
    # 2nd order, it is sufficient to test at the middle).
    # 3) Testing C1 continuity at triangle barycenter (where the 3 subtriangles
    # meet)
    # 4) Testing C1 continuity at median 1/3 points (midside between 2
    # subtriangles)
    # Utility test function check_continuity
    def check_continuity(interpolator, loc, values=None):
        """
        Checks the continuity of interpolator (and its derivatives) near
        location loc. Can check the value at loc itself if *values* is
        provided.

        *interpolator* TriInterpolator
        *loc* location to test (x0, y0)
        *values* (optional) array [z0, dzx0, dzy0] to check the value at *loc*
        """
        n_star = 24  # Number of continuity points in a boundary of loc
        epsilon = 1.e-10  # Distance for loc boundary
        k = 100.  # Continuity coefficient
        (loc_x, loc_y) = loc
        # Ring of points at distance epsilon around loc.
        star_x = loc_x + epsilon*np.cos(np.linspace(0., 2*np.pi, n_star))
        star_y = loc_y + epsilon*np.sin(np.linspace(0., 2*np.pi, n_star))
        z = interpolator([loc_x], [loc_y])[0]
        (dzx, dzy) = interpolator.gradient([loc_x], [loc_y])
        if values is not None:
            assert_array_almost_equal(z, values[0])
            assert_array_almost_equal(dzx[0], values[1])
            assert_array_almost_equal(dzy[0], values[2])
        # Value and gradient on the ring must stay within k*epsilon of
        # the values at loc itself.
        diff_z = interpolator(star_x, star_y) - z
        (tab_dzx, tab_dzy) = interpolator.gradient(star_x, star_y)
        diff_dzx = tab_dzx - dzx
        diff_dzy = tab_dzy - dzy
        assert_array_less(diff_z, epsilon*k)
        assert_array_less(diff_dzx, epsilon*k)
        assert_array_less(diff_dzy, epsilon*k)
    # Drawing arbitrary triangle (a, b, c) inside a unit square.
    (ax, ay) = (0.2, 0.3)
    (bx, by) = (0.33367, 0.80755)
    (cx, cy) = (0.669, 0.4335)
    x = np.array([ax, bx, cx, 0., 1., 1., 0.])
    y = np.array([ay, by, cy, 0., 0., 1., 1.])
    triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
                          [1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
    triang = mtri.Triangulation(x, y, triangles)
    for idof in range(9):
        # Build the idof-th canonical degree of freedom: a single 1.0 in
        # either the value (case 0), d/dx (case 1) or d/dy (case 2) slot.
        z = np.zeros(7, dtype=np.float64)
        dzx = np.zeros(7, dtype=np.float64)
        dzy = np.zeros(7, dtype=np.float64)
        values = np.zeros([3, 3], dtype=np.float64)
        case = idof//3
        values[case, idof % 3] = 1.0
        if case == 0:
            z[idof] = 1.0
        elif case == 1:
            dzx[idof % 3] = 1.0
        elif case == 2:
            dzy[idof % 3] = 1.0
        interp = mtri.CubicTriInterpolator(triang, z, kind='user',
                                           dz=(dzx, dzy))
        # Test 1) Checking values and continuity at nodes
        check_continuity(interp, (ax, ay), values[:, 0])
        check_continuity(interp, (bx, by), values[:, 1])
        check_continuity(interp, (cx, cy), values[:, 2])
        # Test 2) Checking continuity at midside nodes
        check_continuity(interp, ((ax+bx)*0.5, (ay+by)*0.5))
        check_continuity(interp, ((ax+cx)*0.5, (ay+cy)*0.5))
        check_continuity(interp, ((cx+bx)*0.5, (cy+by)*0.5))
        # Test 3) Checking continuity at barycenter
        check_continuity(interp, ((ax+bx+cx)/3., (ay+by+cy)/3.))
        # Test 4) Checking continuity at median 1/3-point
        check_continuity(interp, ((4.*ax+bx+cx)/6., (4.*ay+by+cy)/6.))
        check_continuity(interp, ((ax+4.*bx+cx)/6., (ay+4.*by+cy)/6.))
        check_continuity(interp, ((ax+bx+4.*cx)/6., (ay+by+4.*cy)/6.))
def test_triinterpcubic_cg_solver():
    """Tests of the private sparse conjugate-gradient solver used by
    CubicTriInterpolator with kind='min_E': solves against a 2-D Poisson
    matrix, against the same matrix with null-diagonal rows/cols inserted,
    and checks duplicate-entry summation in compress_csc."""
    # Now 3 basic tests of the Sparse CG solver, used for
    # TriCubicInterpolator with *kind* = 'min_E'
    # 1) A commonly used test involves a 2d Poisson matrix.
    def poisson_sparse_matrix(n, m):
        """
        Sparse Poisson matrix.

        Returns the sparse matrix in coo format resulting from the
        discretisation of the 2-dimensional Poisson equation according to a
        finite difference numerical scheme on a uniform (n, m) grid.
        Size of the matrix: (n*m, n*m)
        """
        l = m*n
        rows = np.concatenate([
            np.arange(l, dtype=np.int32),
            np.arange(l-1, dtype=np.int32), np.arange(1, l, dtype=np.int32),
            np.arange(l-n, dtype=np.int32), np.arange(n, l, dtype=np.int32)])
        cols = np.concatenate([
            np.arange(l, dtype=np.int32),
            np.arange(1, l, dtype=np.int32), np.arange(l-1, dtype=np.int32),
            np.arange(n, l, dtype=np.int32), np.arange(l-n, dtype=np.int32)])
        vals = np.concatenate([
            4*np.ones(l, dtype=np.float64),
            -np.ones(l-1, dtype=np.float64), -np.ones(l-1, dtype=np.float64),
            -np.ones(l-n, dtype=np.float64), -np.ones(l-n, dtype=np.float64)])
        # In fact +1 and -1 diags have some zeros
        vals[l:2*l-1][m-1::m] = 0.
        vals[2*l-1:3*l-2][m-1::m] = 0.
        return vals, rows, cols, (n*m, n*m)
    # Instantiating a sparse Poisson matrix of size 48 x 48:
    (n, m) = (12, 4)
    mat = mtri.triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m))
    mat.compress_csc()
    mat_dense = mat.to_dense()
    # Testing a sparse solve for all 48 basis vector
    for itest in range(n*m):
        b = np.zeros(n*m, dtype=np.float64)
        b[itest] = 1.
        x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m),
                                       tol=1.e-10)
        # A @ x must reproduce the basis vector b.
        assert_array_almost_equal(np.dot(mat_dense, x), b)
    # 2) Same matrix with inserting 2 rows - cols with null diag terms
    # (but still linked with the rest of the matrix by extra-diag terms)
    (i_zero, j_zero) = (12, 49)
    vals, rows, cols, _ = poisson_sparse_matrix(n, m)
    # Shift indices to make room for the two inserted rows/cols.
    rows = rows + 1*(rows >= i_zero) + 1*(rows >= j_zero)
    cols = cols + 1*(cols >= i_zero) + 1*(cols >= j_zero)
    # adding extra-diag terms
    rows = np.concatenate([rows, [i_zero, i_zero-1, j_zero, j_zero-1]])
    cols = np.concatenate([cols, [i_zero-1, i_zero, j_zero-1, j_zero]])
    vals = np.concatenate([vals, [1., 1., 1., 1.]])
    mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols,
                                                 (n*m + 2, n*m + 2))
    mat.compress_csc()
    mat_dense = mat.to_dense()
    # Testing a sparse solve for all 50 basis vec
    for itest in range(n*m + 2):
        b = np.zeros(n*m + 2, dtype=np.float64)
        b[itest] = 1.
        x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.ones(n*m + 2),
                                       tol=1.e-10)
        assert_array_almost_equal(np.dot(mat_dense, x), b)
    # 3) Now a simple test that summation of duplicate (i.e. with same rows,
    # same cols) entries occurs when compressed.
    vals = np.ones(17, dtype=np.float64)
    rows = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],
                    dtype=np.int32)
    cols = np.array([0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
                    dtype=np.int32)
    dim = (3, 3)
    mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim)
    mat.compress_csc()
    mat_dense = mat.to_dense()
    assert_array_almost_equal(mat_dense, np.array([
        [1., 2., 0.], [2., 1., 5.], [0., 5., 1.]], dtype=np.float64))
def test_triinterpcubic_geom_weights():
    """Per-triangle weight sums from _DOF_estimator_geom must equal 1
    (all angles acute) or 2*w_i for an obtuse apex i, regardless of the
    figure's orientation."""
    # Tests to check computation of weights for _DOF_estimator_geom:
    # The weight sum per triangle can be 1. (in case all angles < 90 degrees)
    # or (2*w_i) where w_i = 1-alpha_i/np.pi is the weight of apex i ; alpha_i
    # is the apex angle > 90 degrees.
    (ax, ay) = (0., 1.687)
    x = np.array([ax, 0.5*ax, 0., 1.])
    y = np.array([ay, -ay, 0., 0.])
    z = np.zeros(4, dtype=np.float64)
    triangles = [[0, 2, 3], [1, 3, 2]]
    sum_w = np.zeros([4, 2])  # 4 possibilities ; 2 triangles
    for theta in np.linspace(0., 2*np.pi, 14):  # rotating the figure...
        x_rot = np.cos(theta)*x + np.sin(theta)*y
        y_rot = -np.sin(theta)*x + np.cos(theta)*y
        triang = mtri.Triangulation(x_rot, y_rot, triangles)
        cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
        dof_estimator = mtri.triinterpolate._DOF_estimator_geom(cubic_geom)
        weights = dof_estimator.compute_geom_weights()
        # Testing for the 4 possibilities...
        sum_w[0, :] = np.sum(weights, 1) - 1
        for itri in range(3):
            sum_w[itri+1, :] = np.sum(weights, 1) - 2*weights[:, itri]
        # For each triangle one of the 4 candidate sums must vanish.
        assert_array_almost_equal(np.min(np.abs(sum_w), axis=0),
                                  np.array([0., 0.], dtype=np.float64))
def test_triinterp_colinear():
    """Interpolation on triangulations containing colinear points (and a
    flat triangle) must still pass a linear patch test, for several
    integer affine transformations of the same figure."""
    # Tests interpolating inside a triangulation with horizontal colinear
    # points (refer also to the tests :func:`test_trifinder` ).
    #
    # These are not valid triangulations, but we try to deal with the
    # simplest violations (i. e. those handled by default TriFinder).
    #
    # Note that the LinearTriInterpolator and the CubicTriInterpolator with
    # kind='min_E' or 'geom' still pass a linear patch test.
    # We also test interpolation inside a flat triangle, by forcing
    # *tri_index* in a call to :meth:`_interpolate_multikeys`.
    delta = 0.  # If +ve, triangulation is OK, if -ve triangulation invalid,
                # if zero have colinear points but should pass tests anyway.
    x0 = np.array([1.5, 0, 1, 2, 3, 1.5, 1.5])
    y0 = np.array([-1, 0, 0, 0, 0, delta, 1])
    # We test different affine transformations of the initial figure ; to
    # avoid issues related to round-off errors we only use integer
    # coefficients (otherwise the Triangulation might become invalid even with
    # delta == 0).
    transformations = [[1, 0], [0, 1], [1, 1], [1, 2], [-2, -1], [-2, 1]]
    for transformation in transformations:
        x_rot = transformation[0]*x0 + transformation[1]*y0
        y_rot = -transformation[1]*x0 + transformation[0]*y0
        (x, y) = (x_rot, y_rot)
        z = 1.23*x - 4.79*y
        triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
                     [3, 4, 5], [1, 5, 6], [4, 6, 5]]
        triang = mtri.Triangulation(x, y, triangles)
        xs = np.linspace(np.min(triang.x), np.max(triang.x), 20)
        ys = np.linspace(np.min(triang.y), np.max(triang.y), 20)
        xs, ys = np.meshgrid(xs, ys)
        xs = xs.ravel()
        ys = ys.ravel()
        # Points outside the triangulation must come back masked.
        mask_out = (triang.get_trifinder()(xs, ys) == -1)
        zs_target = np.ma.array(1.23*xs - 4.79*ys, mask=mask_out)
        linear_interp = mtri.LinearTriInterpolator(triang, z)
        cubic_min_E = mtri.CubicTriInterpolator(triang, z)
        cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
        for interp in (linear_interp, cubic_min_E, cubic_geom):
            zs = interp(xs, ys)
            assert_array_almost_equal(zs_target, zs)
        # Testing interpolation inside the flat triangle number 4: [2, 3, 5]
        # by imposing *tri_index* in a call to :meth:`_interpolate_multikeys`
        itri = 4
        pt1 = triang.triangles[itri, 0]
        pt2 = triang.triangles[itri, 1]
        xs = np.linspace(triang.x[pt1], triang.x[pt2], 10)
        ys = np.linspace(triang.y[pt1], triang.y[pt2], 10)
        zs_target = 1.23*xs - 4.79*ys
        for interp in (linear_interp, cubic_min_E, cubic_geom):
            zs, = interp._interpolate_multikeys(
                xs, ys, tri_index=itri*np.ones(10, dtype=np.int32))
            assert_array_almost_equal(zs_target, zs)
def test_triinterp_transformations():
    """Interpolation must be invariant under rotation of the whole figure
    and under a large expansion along one axis."""
    # 1) Testing that the interpolation scheme is invariant by rotation of the
    # whole figure.
    # Note: This test is non-trivial for a CubicTriInterpolator with
    # kind='min_E'. It does fail for a non-isotropic stiffness matrix E of
    # :class:`_ReducedHCT_Element` (tested with E=np.diag([1., 1., 1.])), and
    # provides a good test for :meth:`get_Kff_and_Ff`of the same class.
    #
    # 2) Also testing that the interpolation scheme is invariant by expansion
    # of the whole figure along one axis.
    n_angles = 20
    n_radii = 10
    min_radius = 0.15
    def z(x, y):
        r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2)
        theta1 = np.arctan2(0.5-x, 0.5-y)
        r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2)
        theta2 = np.arctan2(-x-0.2, -y-0.2)
        z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
              (np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
              0.7*(x**2 + y**2))
        return (np.max(z)-z)/(np.max(z)-np.min(z))
    # First create the x and y coordinates of the points.
    radii = np.linspace(min_radius, 0.95, n_radii)
    angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
                         n_angles, endpoint=False)
    angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
    angles[:, 1::2] += np.pi/n_angles
    x0 = (radii*np.cos(angles)).flatten()
    y0 = (radii*np.sin(angles)).flatten()
    triang0 = mtri.Triangulation(x0, y0)  # Delaunay triangulation
    z0 = z(x0, y0)
    # Then create the test points
    xs0 = np.linspace(-1., 1., 23)
    ys0 = np.linspace(-1., 1., 23)
    xs0, ys0 = np.meshgrid(xs0, ys0)
    xs0 = xs0.ravel()
    ys0 = ys0.ravel()
    interp_z0 = {}
    for i_angle in range(2):
        # Rotating everything
        theta = 2*np.pi / n_angles * i_angle
        x = np.cos(theta)*x0 + np.sin(theta)*y0
        y = -np.sin(theta)*x0 + np.cos(theta)*y0
        xs = np.cos(theta)*xs0 + np.sin(theta)*ys0
        ys = -np.sin(theta)*xs0 + np.cos(theta)*ys0
        triang = mtri.Triangulation(x, y, triang0.triangles)
        linear_interp = mtri.LinearTriInterpolator(triang, z0)
        cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
        cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
        dic_interp = {'lin': linear_interp,
                      'min_E': cubic_min_E,
                      'geom': cubic_geom}
        # Testing that the interpolation is invariant by rotation...
        for interp_key in ['lin', 'min_E', 'geom']:
            interp = dic_interp[interp_key]
            if i_angle == 0:
                interp_z0[interp_key] = interp(xs0, ys0)  # storage
            else:
                interpz = interp(xs, ys)
                matest.assert_array_almost_equal(interpz,
                                                 interp_z0[interp_key])
    scale_factor = 987654.3210
    for scaled_axis in ('x', 'y'):
        # Scaling everything (expansion along scaled_axis)
        if scaled_axis == 'x':
            x = scale_factor * x0
            y = y0
            xs = scale_factor * xs0
            ys = ys0
        else:
            x = x0
            y = scale_factor * y0
            xs = xs0
            ys = scale_factor * ys0
        triang = mtri.Triangulation(x, y, triang0.triangles)
        linear_interp = mtri.LinearTriInterpolator(triang, z0)
        cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
        cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
        dic_interp = {'lin': linear_interp,
                      'min_E': cubic_min_E,
                      'geom': cubic_geom}
        # Testing that the interpolation is invariant by expansion along
        # 1 axis...
        for interp_key in ['lin', 'min_E', 'geom']:
            interpz = dic_interp[interp_key](xs, ys)
            matest.assert_array_almost_equal(interpz, interp_z0[interp_key])
@image_comparison(baseline_images=['tri_smooth_contouring'],
                  extensions=['png'], remove_text=True)
def test_tri_smooth_contouring():
    """Image comparison based on example tricontour_smooth_user: refined
    tricontour of a smooth field on a masked annulus-like mesh."""
    n_angles = 20
    n_radii = 10
    min_radius = 0.15
    def z(x, y):
        r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2)
        theta1 = np.arctan2(0.5-x, 0.5-y)
        r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2)
        theta2 = np.arctan2(-x-0.2, -y-0.2)
        z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
              (np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
              0.7*(x**2 + y**2))
        return (np.max(z)-z)/(np.max(z)-np.min(z))
    # First create the x and y coordinates of the points.
    radii = np.linspace(min_radius, 0.95, n_radii)
    angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
                         n_angles, endpoint=False)
    angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
    angles[:, 1::2] += np.pi/n_angles
    x0 = (radii*np.cos(angles)).flatten()
    y0 = (radii*np.sin(angles)).flatten()
    triang0 = mtri.Triangulation(x0, y0)  # Delaunay triangulation
    z0 = z(x0, y0)
    # Mask off triangles whose barycenter lies inside the inner hole.
    xmid = x0[triang0.triangles].mean(axis=1)
    ymid = y0[triang0.triangles].mean(axis=1)
    mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
    triang0.set_mask(mask)
    # Then the plot
    refiner = mtri.UniformTriRefiner(triang0)
    tri_refi, z_test_refi = refiner.refine_field(z0, subdiv=4)
    levels = np.arange(0., 1., 0.025)
    plt.triplot(triang0, lw=0.5, color='0.5')
    plt.tricontour(tri_refi, z_test_refi, levels=levels, colors="black")
@image_comparison(baseline_images=['tri_smooth_gradient'],
                  extensions=['png'], remove_text=True)
def test_tri_smooth_gradient():
    """Image comparison based on example trigradient_demo: potential
    iso-contours plus the gradient vector field of a dipole potential."""
    def dipole_potential(x, y):
        """ An electric dipole potential V """
        r_sq = x**2 + y**2
        theta = np.arctan2(y, x)
        z = np.cos(theta)/r_sq
        return (np.max(z)-z) / (np.max(z)-np.min(z))
    # Creating a Triangulation
    n_angles = 30
    n_radii = 10
    min_radius = 0.2
    radii = np.linspace(min_radius, 0.95, n_radii)
    angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
    angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
    angles[:, 1::2] += np.pi/n_angles
    x = (radii*np.cos(angles)).flatten()
    y = (radii*np.sin(angles)).flatten()
    V = dipole_potential(x, y)
    triang = mtri.Triangulation(x, y)
    # Mask off triangles whose barycenter lies inside the inner hole.
    xmid = x[triang.triangles].mean(axis=1)
    ymid = y[triang.triangles].mean(axis=1)
    mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
    triang.set_mask(mask)
    # Refine data - interpolates the electrical potential V
    refiner = mtri.UniformTriRefiner(triang)
    tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)
    # Computes the electrical field (Ex, Ey) as gradient of -V
    tci = mtri.CubicTriInterpolator(triang, -V)
    (Ex, Ey) = tci.gradient(triang.x, triang.y)
    E_norm = np.sqrt(Ex**2 + Ey**2)
    # Plot the triangulation, the potential iso-contours and the vector field
    plt.figure()
    plt.gca().set_aspect('equal')
    plt.triplot(triang, color='0.8')
    levels = np.arange(0., 1., 0.01)
    cmap = cm.get_cmap(name='hot', lut=None)
    plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
                   linewidths=[2.0, 1.0, 1.0, 1.0])
    # Plots direction of the electrical vector field
    plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
               units='xy', scale=10., zorder=3, color='blue',
               width=0.007, headwidth=3., headlength=4.)
def test_tritools():
    """TriAnalyzer sanity checks.

    Covers scale_factors and circle_ratios on a masked triangulation
    (equilateral and right-angled triangles), the circle ratio of a
    degenerate (flat) triangle, and get_flat_tri_mask: flat triangles on
    the border are eliminated, interior ones kept unless connected to a
    masked hole.
    """
    # Tests TriAnalyzer.scale_factors on masked triangulation
    # Tests circle_ratios on equilateral and right-angled triangle.
    x = np.array([0., 1., 0.5, 0., 2.])
    y = np.array([0., 0., 0.5*np.sqrt(3.), -1., 1.])
    triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 4]], dtype=np.int32)
    # FIX: `np.bool` was a deprecated alias of the builtin `bool` and was
    # removed in NumPy 1.24 -- use the builtin directly.
    mask = np.array([False, False, True], dtype=bool)
    triang = mtri.Triangulation(x, y, triangles, mask=mask)
    analyser = mtri.TriAnalyzer(triang)
    assert_array_almost_equal(analyser.scale_factors,
                              np.array([1., 1./(1.+0.5*np.sqrt(3.))]))
    assert_array_almost_equal(
        analyser.circle_ratios(rescale=False),
        np.ma.masked_array([0.5, 1./(1.+np.sqrt(2.)), np.nan], mask))
    # Tests circle ratio of a flat triangle
    x = np.array([0., 1., 2.])
    y = np.array([1., 1.+3., 1.+6.])
    triangles = np.array([[0, 1, 2]], dtype=np.int32)
    triang = mtri.Triangulation(x, y, triangles)
    analyser = mtri.TriAnalyzer(triang)
    assert_array_almost_equal(analyser.circle_ratios(), np.array([0.]))
    # Tests TriAnalyzer.get_flat_tri_mask
    # Creates a triangulation of [-1, 1] x [-1, 1] with contiguous groups of
    # 'flat' triangles at the 4 corners and at the center. Checks that only
    # those at the borders are eliminated by TriAnalyzer.get_flat_tri_mask
    n = 9
    def power(x, a):
        return np.abs(x)**a*np.sign(x)
    x = np.linspace(-1., 1., n+1)
    x, y = np.meshgrid(power(x, 2.), power(x, 0.25))
    x = x.ravel()
    y = y.ravel()
    triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
    analyser = mtri.TriAnalyzer(triang)
    mask_flat = analyser.get_flat_tri_mask(0.2)
    verif_mask = np.zeros(162, dtype=bool)
    corners_index = [0, 1, 2, 3, 14, 15, 16, 17, 18, 19, 34, 35, 126, 127,
                     142, 143, 144, 145, 146, 147, 158, 159, 160, 161]
    verif_mask[corners_index] = True
    assert_array_equal(mask_flat, verif_mask)
    # Now including a hole (masked triangle) at the center. The center also
    # shall be eliminated by get_flat_tri_mask.
    mask = np.zeros(162, dtype=bool)
    mask[80] = True
    triang.set_mask(mask)
    mask_flat = analyser.get_flat_tri_mask(0.2)
    center_index = [44, 45, 62, 63, 78, 79, 80, 81, 82, 83, 98, 99, 116, 117]
    verif_mask[center_index] = True
    assert_array_equal(mask_flat, verif_mask)
def test_trirefine():
    """Test UniformTriRefiner: point positions, mask propagation, and
    invariance of the refined field under triangle renumbering."""
    # Testing subdiv=2 refinement
    n = 3
    subdiv = 2
    x = np.linspace(-1., 1., n+1)
    x, y = np.meshgrid(x, x)
    x = x.ravel()
    y = y.ravel()
    # Mask out the second half of the triangles.
    # NOTE(review): np.bool is deprecated (removed in NumPy >= 1.24); plain
    # `bool` would be the modern spelling.
    mask = np.zeros(2*n**2, dtype=np.bool)
    mask[n**2:] = True
    triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1),
                                mask=mask)
    refiner = mtri.UniformTriRefiner(triang)
    refi_triang = refiner.refine_triangulation(subdiv=subdiv)
    x_refi = refi_triang.x
    y_refi = refi_triang.y
    # Refined grid should coincide with a regular grid of n*subdiv**2 cells.
    n_refi = n * subdiv**2
    x_verif = np.linspace(-1., 1., n_refi+1)
    x_verif, y_verif = np.meshgrid(x_verif, x_verif)
    x_verif = x_verif.ravel()
    y_verif = y_verif.ravel()
    # Compare point sets through a hashed scalar (rounded to avoid
    # floating-point noise) rather than by ordering.
    ind1d = np.in1d(np.around(x_verif*(2.5+y_verif), 8),
                    np.around(x_refi*(2.5+y_refi), 8))
    assert_array_equal(ind1d, True)
    # Testing the mask of the refined triangulation: each refined triangle
    # must inherit the mask of the coarse triangle containing its barycenter.
    refi_mask = refi_triang.mask
    refi_tri_barycenter_x = np.sum(refi_triang.x[refi_triang.triangles],
                                   axis=1) / 3.
    refi_tri_barycenter_y = np.sum(refi_triang.y[refi_triang.triangles],
                                   axis=1) / 3.
    tri_finder = triang.get_trifinder()
    refi_tri_indices = tri_finder(refi_tri_barycenter_x,
                                  refi_tri_barycenter_y)
    refi_tri_mask = triang.mask[refi_tri_indices]
    assert_array_equal(refi_mask, refi_tri_mask)
    # Testing that the numbering of triangles does not change the
    # interpolation result.
    x = np.asarray([0.0, 1.0, 0.0, 1.0])
    y = np.asarray([0.0, 0.0, 1.0, 1.0])
    triang = [mtri.Triangulation(x, y, [[0, 1, 3], [3, 2, 0]]),
              mtri.Triangulation(x, y, [[0, 1, 3], [2, 0, 3]])]
    z = np.sqrt((x-0.3)*(x-0.3) + (y-0.4)*(y-0.4))
    # Refining the 2 triangulations and reordering the points so that the
    # two refined fields can be compared point-by-point.
    xyz_data = []
    for i in range(2):
        refiner = mtri.UniformTriRefiner(triang[i])
        refined_triang, refined_z = refiner.refine_field(z, subdiv=1)
        xyz = np.dstack((refined_triang.x, refined_triang.y, refined_z))[0]
        xyz = xyz[np.lexsort((xyz[:, 1], xyz[:, 0]))]
        xyz_data += [xyz]
    assert_array_almost_equal(xyz_data[0], xyz_data[1])
def meshgrid_triangles(n):
    """Return the triangles meshing an n x n np.meshgrid of points.

    Each grid cell is split into two triangles; the returned indices refer
    to the raveled (row-major) grid of points.
    """
    cells = []
    for col in range(n - 1):
        for row in range(n - 1):
            # Corner indices of the current grid cell in the raveled grid.
            sw = col + row * n
            se = sw + 1
            nw = sw + n
            ne = nw + 1
            cells.append([sw, se, ne])
            cells.append([sw, ne, nw])
    return np.array(cells, dtype=np.int32)
def test_triplot_return():
    """Verify that Axes.triplot returns the artists it adds."""
    from matplotlib.figure import Figure
    axes = Figure().add_axes([0.1, 0.1, 0.7, 0.7])
    xs = [0.0, 1.0, 0.0, 1.0]
    ys = [0.0, 0.0, 1.0, 1.0]
    tris = [[0, 1, 3], [3, 2, 0]]
    triangulation = mtri.Triangulation(xs, ys, triangles=tris)
    result = axes.triplot(triangulation, "b-")
    if result is None:
        raise AssertionError("triplot should return the artist it adds")
# When executed as a script, run this module's tests (including doctests)
# through the nose test runner without exiting the interpreter.
if __name__ == '__main__':
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
kirangonella/BuildingMachineLearningSystemsWithPython | ch09/01_fft_based_classifier.py | 24 | 3740 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_pr, plot_roc, plot_confusion_matrix, GENRE_LIST
from fft import read_fft
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
    """Train and evaluate a classifier with shuffle-split cross-validation.

    For every split, fits a fresh classifier, records train/test error and
    the confusion matrix, and computes one-vs-rest precision/recall and ROC
    curves for each class label.

    Parameters
    ----------
    clf_factory : callable
        Zero-argument factory returning a fresh, unfitted classifier
        supporting fit/score/predict/predict_proba.
    X, Y : arrays
        Feature matrix and integer class labels (labels index genre_list).
    name : str
        Name used in plot descriptions.
    plot : bool
        If True, save PR and ROC plots for the median-scoring split.

    Returns
    -------
    (mean train error, mean test error, array of confusion matrices)
    """
    labels = np.unique(Y)

    cv = ShuffleSplit(
        n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)

    train_errors = []
    test_errors = []

    scores = []
    pr_scores = defaultdict(list)
    precisions, recalls, thresholds = defaultdict(
        list), defaultdict(list), defaultdict(list)

    roc_scores = defaultdict(list)
    tprs = defaultdict(list)
    fprs = defaultdict(list)

    clfs = []  # just to later get the median
    cms = []

    for train, test in cv:
        X_train, y_train = X[train], Y[train]
        X_test, y_test = X[test], Y[test]

        clf = clf_factory()
        clf.fit(X_train, y_train)
        clfs.append(clf)

        train_score = clf.score(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        scores.append(test_score)

        train_errors.append(1 - train_score)
        test_errors.append(1 - test_score)

        y_pred = clf.predict(X_test)
        cm = confusion_matrix(y_test, y_pred)
        cms.append(cm)

        # One-vs-rest PR and ROC curves for every class label.
        for label in labels:
            y_label_test = np.asarray(y_test == label, dtype=int)
            proba = clf.predict_proba(X_test)
            proba_label = proba[:, label]

            precision, recall, pr_thresholds = precision_recall_curve(
                y_label_test, proba_label)
            pr_scores[label].append(auc(recall, precision))
            precisions[label].append(precision)
            recalls[label].append(recall)
            thresholds[label].append(pr_thresholds)

            fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
            roc_scores[label].append(auc(fpr, tpr))
            tprs[label].append(tpr)
            fprs[label].append(fpr)

    if plot:
        for label in labels:
            print("Plotting %s" % genre_list[label])
            scores_to_sort = roc_scores[label]
            # BUGFIX: use floor division -- `len(...) / 2` yields a float
            # under Python 3, which is not a valid array index.
            median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]

            desc = "%s %s" % (name, genre_list[label])
            plot_pr(pr_scores[label][median], desc, precisions[label][median],
                    recalls[label][median], label='%s vs rest' % genre_list[label])
            plot_roc(roc_scores[label][median], desc, tprs[label][median],
                     fprs[label][median], label='%s vs rest' % genre_list[label])

    # BUGFIX: under Python 3, dict.values() is a view object; materialize it
    # so np.asarray builds a numeric array instead of an object array.
    all_pr_scores = np.asarray(list(pr_scores.values())).flatten()
    summary = (np.mean(scores), np.std(scores),
               np.mean(all_pr_scores), np.std(all_pr_scores))
    print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)

    return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
    """Build a fresh, unfitted logistic-regression classifier."""
    from sklearn.linear_model.logistic import LogisticRegression
    return LogisticRegression()
if __name__ == "__main__":
X, y = read_fft(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg FFT", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "fft",
"Confusion matrix of an FFT based classifier")
| mit |
Myasuka/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)

# Authors:  Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
#           Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering

###############################################################################
# Build a synthetic l x l image containing four discs of different sizes.
l = 100
x, y = np.indices((l, l))

center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)

radius1, radius2, radius3, radius4 = 16, 14, 15, 14

# Boolean masks: True inside each circle.
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2

###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)  # add Gaussian noise

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)

# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
# partition.
graph.data = np.exp(-graph.data / graph.data.std())

# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)  # -1 marks pixels outside the mask
label_im[mask] = labels

plt.matshow(img)
plt.matshow(label_im)

###############################################################################
# 2 circles: with only the two similarly-sized circles the segmentation
# succeeds (see the module docstring for why unequal sizes fail).
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)

graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())

labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels

plt.matshow(img)
plt.matshow(label_im)

plt.show()
| bsd-3-clause |
PredictiveScienceLab/pysmc | examples/reaction_kinetics_run_nompi.py | 2 | 1378 | """
Solve the reaction kinetics inverse problem.
"""
import reaction_kinetics_model
import sys
import os
import pymc
sys.path.insert(0, os.path.abspath('..'))
import pysmc
import matplotlib.pyplot as plt
import pickle
if __name__ == '__main__':
    # BUGFIX: NumPy is used below (np.array / np.zeros) but was never
    # imported by this script, causing a NameError at runtime.
    import numpy as np

    model = reaction_kinetics_model.make_model()
    # Construct the SMC sampler
    mcmc = pymc.MCMC(model)
    mcmc.use_step_method(pysmc.LognormalRandomWalk, model['k1'])
    mcmc.use_step_method(pysmc.LognormalRandomWalk, model['k2'])
    mcmc.use_step_method(pysmc.LognormalRandomWalk, model['sigma'])
    smc_sampler = pysmc.SMC(mcmc, num_particles=100,
                            num_mcmc=1, verbose=1,
                            gamma_is_an_exponent=True)
    # Initialize SMC at gamma = 0.0
    smc_sampler.initialize(0.0)
    # Move the particles to gamma = 1.0
    smc_sampler.move_to(1.)
    # Get a particle approximation
    p = smc_sampler.get_particle_approximation()
    print(p.mean)
    print(p.variance)
    # Scatter the k1 particle samples along the x-axis and histogram them.
    data = [p.particles[i]['stochastics']['k1'] for i in range(p.num_particles)]
    data = np.array(data)
    plt.plot(data, np.zeros(data.shape), 'ro', markersize=10)
    pysmc.hist(p, 'mixture')
    # Same for the k2 samples.
    data = [p.particles[i]['stochastics']['k2'] for i in range(p.num_particles)]
    data = np.array(data)
    plt.plot(data, np.zeros(data.shape), 'bo', markersize=10)
    pysmc.hist(p, 'mixture')
| lgpl-3.0 |
deot95/Tesis | Proyecto de Grado Ingeniería Electrónica/Workspace/Comparison/Small Linear/plot_vols_and_perf_mpc.py | 1 | 4026 | import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
import linear_env_small as linear_env
from numpy.matlib import repmat
state_dim = 5   # state vector size: tanks 1-4 plus the storm tank (see plot titles)
Hs = 1800       # number of simulation time steps

# Gaussian inflow/disturbance profiles for the two demand channels.
A1 = 0.0020
mu1 = 250
sigma1 = 70
A2 = 0.0048
mu2 = 250
sigma2 = 70
dt = 1
x = np.arange(Hs)
d = np.zeros((2,Hs))
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))

# Reference volumes and per-state weights used by the performance measure.
vref = np.zeros((state_dim,))
delta1 = 2
delta2 = 20
delta3 = 2
delta4 = 6
deltaT = 10
delta = np.asarray([delta1,delta2, delta3, delta4, deltaT])

# MPC results produced elsewhere: tank volumes and control actions.
# Assumes rows are states/actions and columns time steps -- TODO confirm.
data = np.genfromtxt('volumes_small_mpc.csv', delimiter=',')
data_u = np.genfromtxt('actions_mpc_small.csv',delimiter=',')
print(np.shape(data_u))
def perfm(v):
    """Weighted performance measure: 2-norm of the elementwise product
    delta**2 * v."""
    weighted = np.power(delta, 2) * v
    return np.linalg.norm(weighted, ord=2)
env = linear_env.env(1,d,vref)

# Pad the results out to the full horizon and accumulate the weighted cost
# over all time steps.
data = np.append(data, np.zeros((5,600)),axis=1)
total_cost = 0
for i in range(Hs):
    total_cost += perfm(data[:,i])
print(total_cost)

data_u = np.transpose(np.append(data_u, np.zeros((4,600)),axis=1))
#data_u = np.transpose(data_u)

# Normalise each tank volume by its maximum capacity (rows become time steps).
resv_norm = np.transpose(np.divide(data,np.transpose(repmat(env.vmax,Hs,1))))
#resv_norm = np.transpose(data)

# Plot styling parameters.
ticksize = 16
font_labels = 16
font_legends = 22
width = 3

# 2x2 figure: tank volumes on the top row, control actions on the bottom row.
f, axarr = plt.subplots(nrows=2,ncols=2,figsize=(14,5),sharex=True)
lines = axarr[0,0].plot(x,resv_norm[:,:3],linewidth=width)
axarr[0,0].tick_params(labelsize=ticksize)
axarr[0,0].legend(lines , list(map(lambda x: "v"+str(x+1),range(3))),prop={'size':font_legends})
axarr[0,0].set_title("Volumes - Tanks 1 to 3",fontsize=font_labels)
#axarr[0,0].set_xlabel("Time(s)",fontsize=font_labels)
axarr[0,0].set_ylabel("Volume(%vmax)",fontsize=font_labels)
lines = axarr[0,1].plot(x,resv_norm[:,3:5],linewidth=width)
axarr[0,1].tick_params(labelsize=ticksize)
axarr[0,1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=5 else "vT",range(3,5))),prop={'size':font_legends})
axarr[0,1].set_title("Volumes - Tank 4 and Storm Tank",fontsize=font_labels)
#axarr[0,1].set_xlabel("Time(s)",fontsize=font_labels)
#axarr[0,1].set_ylabel("Volume(%vmax)",fontsize=font_labels)
lines = axarr[1,0].plot(x,data_u[:,:2],linewidth=width)
axarr[1,0].tick_params(labelsize=ticksize)
axarr[1,0].legend(lines , list(map(lambda x: "u"+str(x+1) if x+1!=5 else "vT",range(2))),prop={'size':font_legends})
axarr[1,0].set_title("Control actions - Valves 1 and 2",fontsize=font_labels)
axarr[1,0].set_xlabel("Time(s)",fontsize=font_labels)
axarr[1,0].set_ylabel(r"Flow ($m^3/s$)",fontsize=font_labels)
axarr[1,0].ticklabel_format(style='sci', axis='y',scilimits=(0,0))
lines = axarr[1,1].plot(x,data_u[:,2:],linewidth=width)
axarr[1,1].tick_params(labelsize=ticksize)
axarr[1,1].legend(lines , list(map(lambda x: "u"+str(x+1) if x+1!=5 else "vT",range(2,4))),prop={'size':font_legends})
axarr[1,1].set_title("Control actions - Valve 3 and 4",fontsize=font_labels)
axarr[1,1].set_xlabel("Time(s)",fontsize=font_labels)
#axarr[1,1].set_ylabel(r"Flow ($m^3/s$)",fontsize=font_labels)
plt.tight_layout()
plt.show()

# Separate, larger view of tank 2 only.
f = plt.figure(figsize=(12,6))
plt.plot(x,resv_norm[:,1],linewidth=width,label="v2")
plt.xlabel("Time(s)",fontsize=font_labels)
plt.ylabel("Volume(%vmax)",fontsize=font_labels)
plt.tick_params(labelsize=ticksize)
plt.legend(prop={'size':font_labels})
plt.show()

# Dead code kept for reference: earlier 1x2 layout of the volume plots.
'''
f, axarr = plt.subplots(nrows=1,ncols=2,figsize=(14,5))
lines = axarr[0].plot(x,resv_norm[:,:3],linewidth=3)
axarr[0].tick_params(labelsize=ticksize)
axarr[0].legend(lines , list(map(lambda x: "v"+str(x+1),range(3))),prop={'size':22})
axarr[0].set_title("Volumes - Tanks 1 to 3",fontsize= 22)
axarr[0].set_xlabel("Time(s)",fontsize=22)
axarr[0].set_ylabel("Volume(%vmax)",fontsize=22)
lines = axarr[1].plot(x,resv_norm[:,3:5],linewidth=3)
axarr[1].tick_params(labelsize=ticksize)
axarr[1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=5 else "vT",range(3,5))),prop={'size':22})
axarr[1].set_title("Volumes - Tank 4 and Storm Tank",fontsize=22)
axarr[1].set_xlabel("Time(s)",fontsize=22)
axarr[1].set_ylabel("Volume(%vmax)",fontsize=22)
plt.tight_layout()
plt.show()
''' | mit |
indranilsinharoy/PyZDDE | Examples/Scripts/couplingEfficiencySingleModeFibers.py | 2 | 4996 | #-------------------------------------------------------------------------------
# Name: couplingEfficiencySingleModeFibers.py
# Purpose: Demonstrate the following function related to POP in Zemax:
# zGetPOP(), zSetPOPSettings(), zModifyPOPSettings()
# Calculates the fiber coupling efficiency between a Gaussian beam
# in free space that is focused into a fiber.
#
# NOTE: Please note that this code uses matplotlib plotting library from
# http://matplotlib.org/ for 2D-plotting
#
# Copyright: (c) 2012 - 2014
# Licence: MIT License
#-------------------------------------------------------------------------------
from __future__ import print_function, division
import pyzdde.zdde as pyz
import matplotlib.pyplot as plt
import math
import os
# Open a DDE link to the running Zemax server and resolve file locations.
ln = pyz.createLink()
curDir = os.path.dirname(os.path.realpath(__file__))
samplesDir = ln.zGetPath()[1]
lens = ['Simple Lens.zmx', 'Fiber Coupling.zmx']
popfile = os.path.join(samplesDir, 'Physical Optics', lens[0])
cfgFile = os.path.join(curDir, "coupEffSgleModePOPEx.CFG")

# load pop file into Zemax server
ln.zLoadFile(popfile)

# source gaussian beam (these parameters can be deduced using the paraxial
# gaussian beam calculator under the Analyze menu in Zemax):
# beam type = Gaussian Size + Angle;
# size-x/y (beam waist) = 2 mm;
# angle x/y in degrees (divergence) = 0.00911890
# Tot power = 1
# fiber coupling integral parameters:
# beam type = Gaussian Size + Angle;
# size-x/y = 0.008 mm;
# angle x/y in degrees (divergence) = 2.290622
# display parameters
# sampling grid 256 by 256; x/y-width = 40 by 40;
# Each tuple pairs parameter numbers (1-4) with their values.
srcParam = ((1, 2, 3, 4), (2, 2, 0.00911890, 0.00911890))
fibParam = ((1, 2, 3, 4), (0.008, 0.008, 2.290622, 2.290622))

# Setup POP analysis (data=0 selects irradiance)
ln.zSetPOPSettings(data=0, settingsFile=cfgFile, startSurf=1, endSurf=1,
                   field=1, wave=1, beamType=2, paramN=srcParam, tPow=1,
                   sampx=4, sampy=4, widex=40, widey=40, fibComp=1, fibType=2,
                   fparamN=fibParam)

# compute and get POP data (irradiance) at the source surface
popInfo_src_irr, data_src_irr = ln.zGetPOP(settingsFile=cfgFile, displayData=True)

# modify the POP settings to display the irradiance plot at the focused point
# (end_surf = 4)
errStat = ln.zModifyPOPSettings(cfgFile, endSurf=4)
print('Modify Settings: errStat =', errStat)

# get data at fiber
popInfo_dst_irr, data_dst_irr = ln.zGetPOP(settingsFile=cfgFile, displayData=True)

# modify the POP settings to get Phase data (data=1) at the source surface.
# Note that when changing the data type, we need to pass all settings again.
ln.zSetPOPSettings(data=1, settingsFile=cfgFile, startSurf=1, endSurf=1,
                   field=1, wave=1, beamType=2, paramN=srcParam, tPow=1,
                   sampx=4, sampy=4, widex=40, widey=40, fibComp=1, fibType=2,
                   fparamN=fibParam)

# compute and get the POP phase data
popInfo_src_phase, data_src_phase = ln.zGetPOP(settingsFile=cfgFile, displayData=True)

# again, modify the POP settings to display the phase plot at the focused point
# (end_surf = 4)
errStat = ln.zModifyPOPSettings(cfgFile, endSurf=4)
print('Modify Settings: errStat =', errStat)

# get phase data at fiber
popInfo_dst_phase, data_dst_phase = ln.zGetPOP(settingsFile=cfgFile, displayData=True)

# close the DDE link
ln.close()

# print useful information
print("\nPop information (irradiance) at the source surface: ")
print(popInfo_src_irr)
print("\nPop information (irradiance) at the fiber surface: ")
print(popInfo_dst_irr)
# Field 4 of the POP info tuple holds the fiber coupling efficiency.
print("\nCoupling efficiency: ", popInfo_dst_irr[4])

# plot the beam at the source and at the fiber (2x2 grid:
# irradiance on the top row, phase on the bottom row)
fig = plt.figure(facecolor='w')

# irradiance data
ax = fig.add_subplot(2,2,1)
ax.set_title('Irradiance at source')
ext = [-popInfo_src_irr.widthX/2, popInfo_src_irr.widthX/2,
       -popInfo_src_irr.widthY/2, popInfo_src_irr.widthY/2]
ax.imshow(data_src_irr, extent=ext, origin='lower')
ax.set_xlabel('x (mm)'); ax.set_ylabel('y (mm)')
ax = fig.add_subplot(2,2,2)
ax.set_title('Irradiance at fiber')
ext = [-popInfo_dst_irr.widthX/2, popInfo_dst_irr.widthX/2,
       -popInfo_dst_irr.widthY/2, popInfo_dst_irr.widthY/2]
ax.imshow(data_dst_irr, extent=ext, origin='lower')
ax.set_xlabel('x (mm)'); ax.set_ylabel('y (mm)')

# phase data (colour scale clamped to [-pi, pi])
ax = fig.add_subplot(2,2,3)
ax.set_title('Phase at source')
ext = [-popInfo_src_phase.widthX/2, popInfo_src_phase.widthX/2,
       -popInfo_src_phase.widthY/2, popInfo_src_phase.widthY/2]
ax.imshow(data_src_phase, extent=ext, origin='lower',
          vmin=-math.pi, vmax=math.pi)
ax.set_xlabel('x (mm)'); ax.set_ylabel('y (mm)')
ax = fig.add_subplot(2,2,4)
ax.set_title('Phase at fiber')
ext = [-popInfo_dst_phase.widthX/2, popInfo_dst_phase.widthX/2,
       -popInfo_dst_phase.widthY/2, popInfo_dst_phase.widthY/2]
ax.imshow(data_dst_phase, extent=ext, origin='lower',
          vmin=-math.pi, vmax=math.pi)
ax.set_xlabel('x (mm)'); ax.set_ylabel('y (mm)')
fig.tight_layout()
plt.show() | mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/fill_between_demo.py | 6 | 2268 | #!/usr/bin/env python
import matplotlib.mlab as mlab
from matplotlib.pyplot import figure, show
import numpy as np
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)

# Three stacked axes sharing x: fill between each pair of curves.
fig = figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312, sharex=ax1)
ax3 = fig.add_subplot(313, sharex=ax1)

ax1.fill_between(x, 0, y1)
ax1.set_ylabel('between y1 and 0')

ax2.fill_between(x, y1, 1)
ax2.set_ylabel('between y1 and 1')

ax3.fill_between(x, y1, y2)
ax3.set_ylabel('between y1 and y2')
ax3.set_xlabel('x')

# now fill between y1 and y2 where a logical condition is met.  Note
# this is different than calling
#   fill_between(x[where], y1[where], y2[where])
# because of edge effects over multiple contiguous regions.
fig = figure()
ax = fig.add_subplot(211)
ax.plot(x, y1, x, y2, color='black')
ax.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
ax.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
ax.set_title('fill between where')

# Test support for masked arrays.
y2 = np.ma.masked_greater(y2, 1.0)
ax1 = fig.add_subplot(212, sharex=ax)
ax1.plot(x, y1, x, y2, color='black')
ax1.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
ax1.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
ax1.set_title('Now regions with y2>1 are masked')

# This example illustrates a problem; because of the data
# gridding, there are undesired unfilled triangles at the crossover
# points.  A brute-force solution would be to interpolate all
# arrays to a very fine grid before plotting.

# show how to use transforms to create axes spans where a certain condition is satisfied
fig = figure()
ax = fig.add_subplot(111)
y = np.sin(4*np.pi*x)
ax.plot(x, y, color='black')

# use the data coordinates for the x-axis and the axes coordinates for the y-axis
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.axhline(theta, color='green', lw=2, alpha=0.5)
ax.axhline(-theta, color='red', lw=2, alpha=0.5)
ax.fill_between(x, 0, 1, where=y>theta, facecolor='green', alpha=0.5, transform=trans)
ax.fill_between(x, 0, 1, where=y<-theta, facecolor='red', alpha=0.5, transform=trans)
show()
| mit |
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_epochs_to_data_frame.py | 1 | 8900 | """
.. _tut_io_export_pandas:
=================================
Export epochs to Pandas DataFrame
=================================
In this example the pandas exporter will be used to produce a DataFrame
object. After exploring some basic features a split-apply-combine
work flow will be conducted to examine the latencies of the response
maxima across epochs and conditions.
Note. Equivalent methods are available for raw and evoked data objects.
Short Pandas Primer
-------------------
Pandas Data Frames
~~~~~~~~~~~~~~~~~~
A data frame can be thought of as a combination of matrix, list and dict:
It knows about linear algebra and element-wise operations but is size mutable
and allows for labeled access to its data. In addition, the pandas data frame
class provides many useful methods for restructuring, reshaping and visualizing
data. As most methods return data frame instances, operations can be chained
with ease; this allows to write efficient one-liners. Technically a DataFrame
can be seen as a high-level container for numpy arrays and hence switching
back and forth between numpy arrays and DataFrames is very easy.
Taken together, these features qualify data frames for inter operation with
databases and for interactive data exploration / analysis.
Additionally, pandas interfaces with the R statistical computing language that
covers a huge amount of statistical functionality.
Export Options
~~~~~~~~~~~~~~
The pandas exporter comes with a few options worth being commented.
Pandas DataFrame objects use a so called hierarchical index. This can be
thought of as an array of unique tuples, in our case, representing the higher
dimensional MEG data in a 2D data table. The column names are the channel names
from the epoch object. The channels can be accessed like entries of a
dictionary:
df['MEG 2333']
Epochs and time slices can be accessed with the .ix method:
epochs_df.ix[(1, 2), 'MEG 2333']
However, it is also possible to include this index as regular categorial data
columns which yields a long table format typically used for repeated measure
designs. To take control of this feature, on export, you can specify which
of the three dimensions 'condition', 'epoch' and 'time' is passed to the Pandas
index using the index parameter. Note that this decision is revertible any
time, as demonstrated below.
Similarly, for convenience, it is possible to scale the times, e.g. from
seconds to milliseconds.
Some Instance Methods
~~~~~~~~~~~~~~~~~~~~~
Most numpy methods and many ufuncs can be found as instance methods, e.g.
mean, median, var, std, mul, max, argmax etc.
Below an incomplete listing of additional useful data frame instance methods:
apply : apply function to data.
Any kind of custom function can be applied to the data. In combination with
lambda this can be very useful.
describe : quickly generate summary stats
Very useful for exploring data.
groupby : generate subgroups and initialize a 'split-apply-combine' operation.
Creates a group object. Subsequently, methods like apply, agg, or transform
can be used to manipulate the underlying data separately but
simultaneously. Finally, reset_index can be used to combine the results
back into a data frame.
plot : wrapper around plt.plot
However it comes with some special options. For examples see below.
shape : shape attribute
gets the dimensions of the data frame.
values :
return underlying numpy array.
to_records :
export data as numpy record array.
to_dict :
export data as dict of arrays.
Reference
~~~~~~~~~
More information and additional introductory materials can be found at the
pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
"""
# Author: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference('average', projection=True)  # set EEG average reference

# For simplicity we will only consider the first 10 epochs
events = mne.read_events(event_fname)[:10]

# Add a bad channel
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=False, exclude='bads')

tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(grad=4000e-13, eog=150e-6)

event_id = dict(auditory_l=1, auditory_r=2, visual_l=3, visual_r=4)

epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=baseline, preload=True, reject=reject)

###############################################################################
# Export DataFrame

# The following parameters will scale the channels and times plotting
# friendly. The info columns 'epoch' and 'time' will be used as hierarchical
# index whereas the condition is treated as categorial data. Note that
# this is optional. By passing None you could also print out all nesting
# factors in a long table style commonly used for analyzing repeated measure
# designs.

index, scaling_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)

df = epochs.to_data_frame(picks=None, scalings=scalings,
                          scaling_time=scaling_time, index=index)

# Create MEG channel selector and drop EOG channel.
meg_chs = [c for c in df.columns if 'MEG' in c]

df.pop('EOG 061')  # this works just like with a list.

###############################################################################
# Explore Pandas MultiIndex

print(df.index.names, df.index.levels)

# Inspecting the index object unveils that 'epoch', 'time' are used
# for subsetting data. We can take advantage of that by using the
# .ix attribute, where in this case the first position indexes the MultiIndex
# and the second the columns, that is, channels.
# NOTE(review): DataFrame.ix was deprecated and later removed from pandas;
# .loc / .iloc are the modern replacements.

# Plot some channels across the first three epochs
xticks, sel = np.arange(3, 600, 120), meg_chs[:15]
df.ix[:3, sel].plot(xticks=xticks)
mne.viz.tight_layout()

# slice the time starting at t0 in epoch 2 and ending 500ms after
# the base line in epoch 3. Note that the second part of the tuple
# represents time in milliseconds from stimulus onset.
df.ix[(1, 0):(3, 500), sel].plot(xticks=xticks)
mne.viz.tight_layout()

# Note: For convenience the index was converted from floating point values
# to integer values. To restore the original values you can e.g. say
# df['times'] = np.tile(epoch.times, len(epochs_times)

# We now reset the index of the DataFrame to expose some Pandas
# pivoting functionality. To simplify the groupby operation we
# we drop the indices to treat epoch and time as categroial factors.
df = df.reset_index()

# The ensuing DataFrame then is split into subsets reflecting a crossing
# between condition and trial number. The idea is that we can broadcast
# operations into each cell simultaneously.
factors = ['condition', 'epoch']
sel = factors + ['MEG 1332', 'MEG 1342']
grouped = df[sel].groupby(factors)

# To make the plot labels more readable let's edit the values of 'condition'.
df.condition = df.condition.apply(lambda name: name + ' ')

# Now we compare the mean of two channels response across conditions.
grouped.mean().plot(kind='bar', stacked=True, title='Mean MEG Response',
                    color=['steelblue', 'orange'])
mne.viz.tight_layout()

# We can even accomplish more complicated tasks in a few lines calling
# apply method and passing a function. Assume we wanted to know the time
# slice of the maximum response for each condition.
max_latency = grouped[sel[2]].apply(lambda x: df.time[x.argmax()])

print(max_latency)

# NOTE(review): the 'condition' relabelling below duplicates the one above,
# so each condition name ends up with two trailing spaces.
df.condition = df.condition.apply(lambda name: name + ' ')
plt.figure()
max_latency.plot(kind='barh', title='Latency of Maximum Response',
                 color=['steelblue'])
mne.viz.tight_layout()

# Finally, we will again remove the index to create a proper data table that
# can be used with statistical packages like statsmodels or R.
final_df = max_latency.reset_index()
# NOTE(review): rename() returns a new DataFrame and the result is discarded
# here, so final_df keeps the default column name 0.
final_df.rename(columns={0: sel[2]})  # as the index is oblivious of names.

# The index is now written into regular columns so it can be used as factor.
print(final_df)

plt.show()

# To save as csv file, uncomment the next line.
# final_df.to_csv('my_epochs.csv')

# Note. Data Frames can be easily concatenated, e.g., across subjects.
# E.g. say:
#
# import pandas as pd
# group = pd.concat([df_1, df_2])
# group['subject'] = np.r_[np.ones(len(df_1)), np.ones(len(df_2)) + 1]
| bsd-3-clause |
jesserobertson/pynoddy | pynoddy/experiment/TopologyAnalysis.py | 1 | 84153 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 12:14:08 2015
@author: Sam Thiele
"""
import os
import numpy as np
import scipy as sp
import math
from pynoddy.experiment.MonteCarlo import MonteCarlo
from pynoddy.output import NoddyTopology
from pynoddy.output import NoddyOutput
from pynoddy.history import NoddyHistory
'''
Performs a topological uncertainty analysis on a noddy model.
'''
class ModelRealisation:
    '''
    Class containing information regarding an individual model realisation.
    This essentially just bundles a history class and NoddyTopology class together (plus a bit
    of extra information like basename etc)
    '''

    def __init__(self, history_file, **kwds):
        '''
        Loads a model realisation from a Noddy history file.

        **Arguments**:
         - *history_file* = path to the .his file this realisation was generated from.

        **Optional Keywords**:
         - *verbose* = True if this function should write debug information. Default is False.
        '''
        # get keywords
        vb = kwds.get("verbose", False)

        self.history_path = history_file
        # NB: use splitext rather than split('.') so paths whose *directories*
        # contain '.' characters are not truncated at the wrong place.
        self.basename = os.path.splitext(history_file)[0]  # remove file extension

        if not os.path.exists(self.history_path):
            print("Error: please specify a valid noddy history file (*.his)")
            return

        # load history file
        self.history = NoddyHistory(history_file, verbose=vb)

        # load topology network
        self.topology = NoddyTopology(self.basename)  # overall topology

        # add sub-topology networks
        # self.litho_topology = self.topology.collapse_topology() #lithological topology
        # self.struct_topology = self.topology.collapse_stratigraphy() #structural topology

    def define_parameter_space(self, parameters):
        '''
        Sets the parameters used to locate this model in parameter space.

        **Arguments**:
         - *parameters* = A list of tuples containing event number and variable names (strings).
                          These need to match noddy parameters Eg [ (2,'dip'),(2,'slip'),(3,'x') ].
        '''
        self.headings = []
        self.params = []  # array containing values
        for v in parameters:
            if len(v) != 2:
                print("Warning: %s does not match the tuple format (eventID,parameter name)." % v)
                continue  # skip malformed tuples instead of crashing below
            self.headings.append("%d_%s" % (v[0], v[1]))  # heading format is eventID_name: eg. 2_dip
            self.params.append(float(self.history.get_event_param(v[0], v[1])))

    def get_parameters(self):
        '''
        Gets the location of this model in parameter space

        **Returns**:
         - a tuple containing a list of parameter names and a list of parameter values
        '''
        return [self.headings, self.params]

    def get_geology(self):
        '''
        Returns a NoddyOut object containing the voxel volume representing the geology
        of this model. Note that these can be large objects, so try not loading too
        many at once...

        **Returns**
         - a NoddyOutput object containing this geological model.
        '''
        return NoddyOutput(self.basename)

    @staticmethod
    def get_parameter_space(models, parameters):
        '''
        Produces a data matrix describing the location of the provided models in the specified
        parameter space.

        **Arguments**:
         - *models* = a list of models to include in the parameter space
         - *parameters* = A list of tuples containing the parameters which make-up the desired
                          parameter space. Each parameter is defined by a tuple containing an
                          event number and parameter name, eg. (2, dip) represents the dip of
                          the second noddy event.
        '''
        # retrieve data: one (name, [values...]) tuple per model
        data = []
        for m in models:
            m.define_parameter_space(parameters)
            data.append((m.basename, m.get_parameters()[1]))  # tuple containing (name, [data,..])

        # define data panda
        # NOTE(review): DataFrame.from_items is deprecated/removed in modern pandas;
        # replace with DataFrame.from_dict(..., orient='index') when upgrading.
        import pandas
        data_matrix = pandas.DataFrame.from_items(data, orient='index', columns=models[0].headings)

        return data_matrix

    @staticmethod
    def loadModels(path, **kwds):
        '''
        Loads all noddy models realisations and returns them as an array of ModelRealisation objects

        **Arguments**:
         - *path* = The root directory that models should be loaded from. All models with the same base_name
                    as this class will be loaded (including subdirectoriess)

        **Optional Keywords**:
         - *verbose* = True if this function should write debug information to the print buffer. Default is True.

        **Returns**:
         - a list of ModelRealisation objects
        '''
        vb = kwds.get('verbose', True)

        if vb:
            print("Loading models in %s" % path)

        # array of topology objects
        realisations = []
        for root, dirnames, filenames in os.walk(path):  # walk the directory
            for f in filenames:
                if ('.his' in f):  # find all history files
                    p = os.path.join(root, f)
                    if vb:
                        print('Loading %s' % p)

                    # load model
                    realisations.append(ModelRealisation(p, verbose=vb))

        return realisations
class TopologyAnalysis:
def __init__(self, path, params=None, n=None, **kwds):
    '''
    Performs a topological uncertainty analysis. If a directory is given, all the history files within
    the directory are loaded and the analyses performed on them. If a history file is given, n perturbations
    are performed on it using the params file.

    **Arguments**:
     - *path* = The directory or history file to perform this analysis on.

    **Optional Arguments**:
     - *params* = The params file to use for MonteCarlo perturbation (if a history file is provided)
     - *n* = The number of model perturbations to generate (if a history file is provided)

    **Optional Keywords**:
     - *output* = The path to populate with models. Default is the base path (+ history name).
     - *verbose* = True if this experiment should write to the print buffer. Default is True
     - *threads* = The number of threads this experiment should utilise. The default is 4.
     - *force* = True if all noddy models should be recalculated. Default is False.
    '''
    # init variables
    self.base_history_path = None
    self.base_path = path  # if a history file has been given, this will be changed
    vb = kwds.get("verbose", True)
    n_threads = kwds.get("threads", 4)
    force = kwds.get("force", False)

    if not params is None:
        self.params_file = params

    # a history file has been given, generate model stuff
    # NOTE(review): "'.' in path" is a crude file-vs-directory test; a
    # directory whose name contains '.' will be misclassified as a file.
    if '.' in path:
        if not '.his' in path:  # foobar
            print "Error: please provide a valid history file (*.his)"
            return
        if params is None or n is None:  # need this info
            print "Error: please provide valid arguments [params,n]"

        self.base_history_path = path
        self.num_trials = n

        # calculate output path
        # NOTE(review): dict.has_key is Python 2 only; use "'output' in kwds" when porting.
        if kwds.has_key('output'):
            self.base_path = kwds['output']
        else:
            self.base_path = path.split('.')[0]  # trim file extension

        # ensure output path exists
        if not os.path.exists(self.base_path):
            os.makedirs(self.base_path)

        # generate & store initial topology
        self.base_model = ModelRealisation(path)  # load provided his file

        # do monte carlo simulations (n perturbed copies of the base history)
        if (n > 0):
            MC = MonteCarlo(path, params)
            MC.generate_model_instances(self.base_path, n, sim_type='TOPOLOGY', verbose=vb, threads=n_threads, write_changes=None)
    else:
        # a directory was given: ensure that the models it contains have been run
        MonteCarlo.generate_models_from_existing_histories(self.base_path, sim_type='TOPOLOGY', force_recalculate=force, verbose=vb, threads=n_threads)

    # load models from base directory
    self.models = ModelRealisation.loadModels(self.base_path, verbose=vb)

    ###########################################
    # GENERATE TOPOLOGY LISTS
    ###########################################
    self._generate_lists()

    ############################################
    # FIND UNIQUE TOPOLOGIES
    ############################################
    self._find_unique_topologies()

    ############################################
    # GENERATE SUPER TOPOLOGY
    ############################################
    self._generate_super_topology()

    #############################################
    # Calculate & store intitial topologies
    #############################################
    # If we started from a base history file, register its topology (and the
    # lithological/structural collapses) among the unique topology classes so
    # the initial model can be located in later analyses.
    if hasattr(self, "base_model"):
        self.initial_topology = self.base_model.topology
        self.initial_topo_id = self.initial_topology.find_first_match(self.unique_topologies)
        self.models.append(self.base_model)

        # base topology was never observed among the perturbations: append it
        # as a new unique class (with frequency 1)
        if self.initial_topo_id == -1:
            self.unique_topologies.append(self.initial_topology)
            self.unique_frequency.append(1)
            self.initial_topo_id = len(self.unique_topologies) - 1
            self.topo_type_ids.append(len(self.models) - 1)  # append type
            print "Warning: all topologies generated are different to the initial topology"

        self.initial_litho_topology = self.initial_topology.collapse_topology()
        self.initial_litho_id = self.initial_litho_topology.find_first_match(self.unique_litho_topologies)
        if self.initial_litho_id == -1:  # highly unlikely, but possible
            self.unique_litho_topologies.append(self.initial_litho_topology)
            self.unique_litho_frequency.append(1)
            self.initial_litho_id = len(self.unique_litho_topologies) - 1
            self.litho_type_ids.append(len(self.models) - 1)
            print "Warning: all litho topologies generated are different to the initial topology!"  # we probably want to know this

        self.initial_struct_topology = self.initial_topology.collapse_stratigraphy()
        self.initial_struct_id = self.initial_struct_topology.find_first_match(self.unique_struct_topologies)
        if self.initial_struct_id == -1:  # even more highly unlikely (but still possible...()
            self.unique_struct_topologies.append(self.initial_struct_topology)
            self.unique_struct_frequency.append(1)
            self.initial_struct_id = len(self.unique_struct_topologies) - 1
            self.struct_type_ids.append(len(self.models) - 1)
            print "Warning: all struct topologies generated are different to the initial topology!!!"  # we probably want to know this

        # record the base model's class memberships alongside the other models
        self.unique_ids.append(self.initial_topo_id)
        self.unique_struct_ids.append(self.initial_struct_id)
        self.unique_litho_ids.append(self.initial_litho_id)
def _find_unique_topologies(self):
    # Identifies the unique topology classes among all realisations, for the
    # full, lithological and structural networks. Populates (in-place, via the
    # out-parameters of NoddyTopology.calculate_unique_topologies):
    #  - self.unique_*_topologies = one representative network per class
    #  - self.unique_*_ids        = per-model index into the unique lists
    #  - self.unique_*_frequency  = observation count per class
    #  - self.accumulate_*_topologies = cumulative unique-count curve
    #  - self.*_type_ids          = index of the first ('type') model of each class
    self.accumulate_topologies = []
    self.accumulate_litho_topologies = []
    self.accumulate_struct_topologies = []

    self.unique_ids = []
    self.unique_litho_ids = []
    self.unique_struct_ids = []

    self.unique_frequency = []
    self.unique_litho_frequency = []
    self.unique_struct_frequency = []

    self.unique_topologies = NoddyTopology.calculate_unique_topologies(self.all_topologies,
                                                                       output=self.accumulate_topologies,
                                                                       ids = self.unique_ids,
                                                                       frequency=self.unique_frequency)

    self.unique_litho_topologies=NoddyTopology.calculate_unique_topologies(self.all_litho_topologies,
                                                                           output=self.accumulate_litho_topologies,
                                                                           ids = self.unique_litho_ids,
                                                                           frequency=self.unique_litho_frequency)

    self.unique_struct_topologies=NoddyTopology.calculate_unique_topologies(self.all_struct_topologies,
                                                                            output=self.accumulate_struct_topologies,
                                                                            ids=self.unique_struct_ids,
                                                                            frequency=self.unique_struct_frequency)

    # sort topology lists by observation frequency (most frequent first);
    # this also remaps the unique_*_ids lists to match the new ordering
    self._sort_topologies_by_frequency()

    # RETRIEVE ID's OF UNIQUE MODELS ('Type' models): for each unique class i,
    # list.index(i) finds the first model belonging to that class
    self.topo_type_ids = [ self.unique_ids.index(i) for i in range(len(self.unique_topologies)) ]
    self.struct_type_ids = [ self.unique_struct_ids.index(i) for i in range(len(self.unique_struct_topologies)) ]
    self.litho_type_ids = [ self.unique_litho_ids.index(i) for i in range(len(self.unique_litho_topologies)) ]
def _generate_super_topology(self):
    '''
    Builds the "super" networks: the union of every adjacency relationship
    observed across all model realisations, for each topology flavour
    (full, lithological and structural).
    '''
    combine = NoddyTopology.combine_topologies
    self.super_topology = combine(self.all_topologies)
    self.super_litho_topology = combine(self.all_litho_topologies)
    self.super_struct_topology = combine(self.all_struct_topologies)
def _generate_lists(self):
#declare lists
self.all_topologies = [] #full topology network
self.all_litho_topologies=[] #only contains lithological adjacency
self.all_struct_topologies=[] #only contains 'structural volume' adjacency
#generate lists
for m in self.models:
self.all_topologies.append(m.topology)
self.all_litho_topologies.append(m.topology.collapse_topology())
self.all_struct_topologies.append(m.topology.collapse_stratigraphy())
def _sort_topologies_by_frequency(self):
'''
Sorts self.unique_litho_topologies and self.unique_struct_topologies by
observation frequency. Note that information on the order topologies were
initially encountered will be lost (though I can't think what this info
might be used for...)
'''
from itertools import izip
#generate tempory id's so we can retain id mapping
t_id = range(len(self.unique_topologies))
#zip lists & sort by frequency in reverse order (highest to lowst)
s = sorted(izip(self.unique_topologies, t_id, self.unique_frequency),reverse=True,key=lambda x:x[2])
#unzip lists
self.unique_topologies, t_id, self.unique_frequency = [[x[i] for x in s] for i in range(3)]
#remap id's using t_id
for i in range( len(self.unique_ids) ):
for n in range( len(t_id) ):
if self.unique_ids[i] == t_id[n]: #find matching id
self.unique_ids[i] = n #remap id
break
#repeat for litho lists
t_id = range(len(self.unique_litho_topologies))
s = sorted(izip(self.unique_litho_topologies, t_id, self.unique_litho_frequency),reverse=True,key=lambda x:x[2])
self.unique_litho_topologies, t_id, self.unique_litho_frequency = [[x[i] for x in s] for i in range(3)]
for i in range( len(self.unique_litho_ids) ):
for n in range( len(t_id) ):
if self.unique_litho_ids[i] == t_id[n]: #find matching id
self.unique_litho_ids[i] = n #remap id
break
#repeat for structural lists
#zip lists & sort by frequency in reverse order (highest to lowst)
t_id = range(len(self.unique_struct_topologies))
s = sorted(izip(self.unique_struct_topologies, t_id, self.unique_struct_frequency),reverse=True,key=lambda x:x[2])
self.unique_struct_topologies, t_id, self.unique_struct_frequency = [[x[i] for x in s] for i in range(3)]
for i in range( len(self.unique_struct_ids) ):
for n in range( len(t_id) ):
if self.unique_struct_ids[i] == t_id[n]:
self.unique_struct_ids[i] = n
break
#and, like magic, it's done!
def remove_unlikely_models(self, threshold=95):
    '''
    Removes unlikely (infrequent) model realisations. Note that this can be slow!

    **Arguments**:
     - *threshold* = the percentage of all models to retain. The chance of observing an observed
                     model in a single random sample is equal to (100-threshold)/100. If threshold
                     is left as 95%, the bottom 5% of the model frequency distribution is removed.
    '''
    # relies on self.unique_topologies/self.unique_frequency being sorted most
    # frequent first, so the least likely class is always at index -1
    p = (100-threshold) / 100.0 #0.05 if threshold is 95%, 0.1 for 90% etc.
    n = p * len(self.models)  # (fractional) number of models to remove

    i=0
    while i < n:
        #get frequency of least likely model
        freq = self.unique_frequency[-1]
        if (i+freq) <= n: #we can completely remove this topology without exceding n
            #loop through uids
            # NOTE(review): `p` is reused here as a loop index, shadowing the
            # probability computed above.
            p=0
            while p < len(self.unique_ids):
                if self.unique_ids[p] == len(self.unique_topologies) - 1:
                    del self.unique_ids[p] #delete reference in id list
                    del self.models[p] #delete model
                else:
                    p+=1

            del self.topo_type_ids[-1]
            del self.unique_topologies[-1] #remove last item from unique topo list
            del self.unique_frequency[-1]

            i+=freq #tally how many models we've removed
        else: #we can only remove some of the models
            # NOTE(review): n may be fractional here, leaving a non-integer frequency.
            self.unique_frequency[-1] -= n - i
            p=0
            while p < len(self.unique_ids):
                # NOTE(review): this chained comparison evaluates as
                # (True == id) and (id == last), so the branch only ever fires
                # when the last unique index equals 1 — almost certainly a
                # leftover debugging edit; the intended condition is probably
                # just `self.unique_ids[p] == len(self.unique_topologies) - 1`.
                # Also note this deletes ALL matching models (except the type
                # model), not just the n - i models implied by the frequency
                # decrement above — confirm intended behaviour before fixing.
                if True == self.unique_ids[p] == len(self.unique_topologies) - 1 and p != self.topo_type_ids[-1]:
                    del self.unique_ids[p]
                    del self.models[p]
                else:
                    p+=1
            break #all finished

    #recalculate topology lists, unique topologies & supertopologies
    self._generate_lists()
    self._find_unique_topologies()
    self._generate_super_topology()
def get_type_model( self, typeID, topology_type = '' ):
'''
Retrieves the type model for a given unique topology id (from one of the self.unique_topology arrays).
The type model is the first instance of this topology type that was encountered.
**Arguments**
- *typeID* = the ID of the defining unique topology (from one of the self.unique_topology lists)
- *topology_type* = The type of topology you are interested in. This should be either '' (full topology), 'litho'
or 'struct'
**Returns**
- a TopologyAnalysis.ModelRealisation object from which geology, history or topology info can be retrieved.
'''
t_list= []
if topology_type == '': #default is all
t_list = self.topo_type_ids
elif 'litho' in topology_type:
t_list = self.litho_type_ids
elif 'struct' in topology_type:
t_list = self.struct_type_ids
else:
print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
try:
return self.models[ t_list[typeID] ]
except IndexError:
print ("Error - type model index is out of range. Please ensure that topology_type is correct" +
"and the topology you are looking for actually exists.")
return None
def get_parameter_space(self,params=None,recalculate=False):
'''
Returns a scipy.pandas dataframe containing the location of models in parameter space.
Two columns, t_litho and t_struct are appended to this dataframe, and contain the id's
of their equivalents in the unique_litho_topologies and unique_struct_topologies
respectively.
**Optional Arguments**:
- *params* = Either a path to a .csv file containing information on the parameters that define
this model space or a list containing tuples (eventID,parameter_name) defining the
axes of the model space. If left as None the params file used to generate model
variations is used (at self.params_file). If this does not exist/has not been defined
an error is thrown.
- *recalculate* = If True, the function is forced to recalculate the model space. Default is False,
hence this will return the last calculated model space.
**Returns**:
- a scipy.pandas data matrix containing model locations in parameter space, and their membership
of the various classes of topology that have been identified
'''
#see if param space has already been calculated
if (not recalculate) and hasattr(self,"parameter_space"):
return self.parameter_space
if params == None: #try and retrieve params
if hasattr(self,"params_file"):
params = self.params_file
else:
print "Error: parameter information is not available. Please provide a params argument"
return
#if params is a csv file
if ".csv" in params:
f = open(params,'r')
#read lines
lines = open(params).readlines()
#get header
header = lines[0].rstrip().split(',')
params = []
for line in lines[1:]:
#split into columns
l = line.rstrip().split(',')
#load events & parameters
e=None
p=None
if l[0] == '': break # end of entries
for ele in header:
if ele == 'event': #get event id
e = int(l[header.index(ele)])
continue
if ele == 'parameter': #get parameter
p = l[header.index(ele)]
continue
if not e is None and not p is None: #found both
params.append((e,p)) #store
break #done
f.close()
#retrieve data from models
data_matrix = ModelRealisation.get_parameter_space(self.models,params)
#append topology id's collumn
data_matrix["u_topo"] = self.unique_ids
data_matrix["u_litho"] = self.unique_litho_ids
data_matrix["u_struct"] = self.unique_struct_ids
#store for future use
self.parameter_space = data_matrix
return data_matrix
def get_average_node_count(self,topology_type=''):
'''
Calculates the average number of nodes in all of the model realisations that are part of this
experiment.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either '' (full topology), 'litho'
or 'struct'
**Returns**
- The average number of nodes
'''
t_list= []
if topology_type == '':
t_list = self.all_topologies
elif 'litho' in topology_type:
t_list = self.all_litho_topologies
elif 'struct' in topology_type:
t_list = self.all_struct_topologies
else:
print "Error: Invalid topology_type. This should be '' (full topologies), 'litho' or 'struct'"
avg = 0.0
for t in t_list:
avg += t.graph.number_of_nodes() / float(len(self.all_litho_topologies))
return avg
def get_average_edge_count(self,topology_type=''):
'''
Calculates the average number of nodes in all of the model realisations that are part of this
experiment.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either '' (full topology), 'litho'
or 'struct'
**Returns**
- The average number of nodes
'''
t_list= []
if topology_type == '': #default is all
t_list = self.all_topologies
elif 'litho' in topology_type:
t_list = self.all_litho_topologies
elif 'struct' in topology_type:
t_list = self.all_struct_topologies
else:
print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
avg = 0.0
for t in t_list:
avg += t.graph.number_of_edges() / float(len(t_list))
return avg
def get_variability(self,topology_type=''):
'''
Returns the 'variability' of model topology. This is equal to the total number of observed
adjacency relationships (network edges) divided by the average number of adjacency
relationships (edges) in each model realisation minus one. This value will be equal to 0 if
all the topologies are identical, and increase as more different topological varieties come into
existance. The maximum possible 'variability', when every edge in every topology realisation is
different, is equal to the sum of the number of edges in all the networks divided by the average
number of edges.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either 'litho'
or 'struct'
'''
try:
if topology_type == '':
return -1 + (self.super_topology.number_of_edges() / self.get_average_edge_count(''))
elif 'litho' in topology_type:
return -1 + (self.super_litho_topology.number_of_edges() / self.get_average_edge_count('litho'))
elif 'struct' in topology_type:
return -1 + (self.super_struct_topology.number_of_edges() / self.get_average_edge_count('struct'))
else:
print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
except ZeroDivisionError: #average edge count = 0
print "Warning: empty or disconnected graphs. Average edge count = 0"
return 0
def get_difference_matrix(self, topology_type='struct'):
    '''
    Calculates a difference matrix in which each matrix element Exy contains 1 over the jaccard
    coefficient of topologies x and y (minus one, so identical topologies give 0).

    **Arguments**
     - *topology_type* = The type of topology you are interested in. This should be either 'litho'
                  or 'struct'

    **Returns**
     - A difference matrix (also cached on self as *_difference_matrix)
    '''
    t_list= []
    if topology_type == '':
        if hasattr(self,'difference_matrix'): #already been calculated
            return self.difference_matrix
        t_list = self.unique_topologies
    elif 'litho' in topology_type:
        if hasattr(self,'litho_difference_matrix'): #already been calculated
            return self.litho_difference_matrix
        t_list = self.unique_litho_topologies
    elif 'struct' in topology_type:
        if hasattr(self,'struct_difference_matrix'): #already been calculated
            return self.struct_difference_matrix
        t_list = self.unique_struct_topologies
    else:
        print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"

    if len(t_list) <= 1: #need more than one model to build a dm...
        print "Error: cannot build a distance matrix containing only one model..."
        return None

    difference_matrix=np.zeros( (len(t_list),len(t_list)))

    # fill the upper triangle and mirror it (the matrix is symmetric)
    for i in range (0,len(t_list)):
        for j in range (0,len(t_list)):
            if i==j: #minor speed optimisation
                difference_matrix[i][j] = 0.0
            elif i < j:
                # NOTE(review): if two topologies share no edges at all,
                # jaccard_coefficient may return 0 and the division below
                # would raise ZeroDivisionError — confirm upstream guarantees.
                jq = t_list[i].jaccard_coefficient(t_list[j])
                if jq == 1:
                    print "Warning: difference matrix contains identical models."
                #nb: similarity = 1 if networks are identical and approaches zero as they become different
                difference_matrix[i][j] = -1 + 1.0 / jq #calculate difference
                difference_matrix[j][i] = difference_matrix[i][j] #matrix is symmetric

    #store result in the matching cache attribute
    if topology_type == '':
        self.difference_matrix = difference_matrix
    elif 'litho' in topology_type:
        self.litho_difference_matrix = difference_matrix
    elif 'struct' in topology_type:
        self.struct_difference_matrix = difference_matrix

    return difference_matrix #return the difference matrix
def plot_dendrogram(self, topology_type='struct', path="", dpi=300):
    '''
    Plots a dendrogram (hierarchical UPGMA classification) of the unique
    topologies, built from their pairwise difference matrix.

    **Arguments**
     - *topology_type* = The type of topology you are interested in. This should be either 'litho'
                  or 'struct'
     - *path* = A path to save the image to. If left as "" the image is drawn to the screen.
     - *dpi* = The resolution of the saved figure
    '''
    #get difference matrix (NB. squareform converts it to a condensed matrix for scipy)
    import scipy.spatial.distance as dist
    import scipy.cluster.hierarchy as clust

    dm = self.get_difference_matrix(topology_type)
    if dm is None:
        print "Error: could not build dendrogram for %s topologies" % topology_type
        return
    m_dif = dist.squareform( dm,force='tovector' )

    # NOTE(review): an invalid topology_type falls through this chain leaving
    # `title`, `n` and `freq` undefined (NameError below) — no else branch.
    if topology_type == '':
        title = 'Hierarchical Classification of Overall Topology'
        n = len(self.unique_topologies)
        freq = self.unique_frequency
    elif 'litho' in topology_type:
        title = 'Hierarchical Classification of Lithological Topology'
        n = len(self.unique_litho_topologies)
        freq = self.unique_litho_frequency
    elif 'struct' in topology_type:
        title = 'Hierarchical Classification of Structural Topology'
        n = len(self.unique_struct_topologies)
        freq = self.unique_struct_frequency

    #generate dendrogram using UPGMA (average linkage)
    Z = clust.average(m_dif)

    #generate plot
    import matplotlib.pyplot as plt
    f, ax = plt.subplots()

    #calculate leaf colours (using frequency)
    import matplotlib.cm as cm
    if n < 1000:
        clust.dendrogram(Z,ax=ax)

        #set colours: darker grey = lower unique-topology id (leaf labels are ids)
        #max_f = max(freq) #for normalization
        for lbl in ax.get_xmajorticklabels():
            c = cm.gray( int(lbl.get_text()) / (1.25*float(n ))) #node label = unique topology id
            lbl.set_color(c)
    else: #truncate dendrogram when there are too many leaves to draw
        clust.dendrogram(Z,ax=ax,p=15,truncate_mode='level',show_leaf_counts=True)

    #rotate labels
    for l in ax.xaxis.get_ticklabels():
        l.set_rotation(90)

    #size plot
    f.set_figwidth( min(0.2 * n,100)) #max size is 100 inches
    f.set_figheight(8)
    f.suptitle("")
    ax.set_title(title)

    if path == "":
        f.show()
    else:
        f.savefig(path,dpi=dpi)
def boxplot(self, topology_type='struct', params=None, path="", **kwds):
    '''
    Generates a series of boxplot tiles showing the range of variables that has produced
    different topology types, plus a bar chart of topology observation frequencies.

    **Optional Arguments**:
     - *topology_type* = The type of topology you are interested in. This should be either 'litho'
                  or 'struct'
     - *params* = a list of parameters. A boxplot will be generated for each parameter
                  in this list. The default is all the parameters in the params_file
                  argument. If this is not defined (ie. this class has not purturbed
                  the history files) then an error is thrown.

                  Params can be passed either as a path to a .csv file containing information on the parameters that define
                  this model space or a list containing tuples (eventID,parameter_name) defining the
                  axes of the model space. If left as None the params file used to generate model
                  variations is used (at self.params_file). If this does not exist/has not been defined
                  an error is thrown.
     - *path* = a file path to write the image to. If left as '', the image is displayed on the screen.
     - *dpi* =  The resolution of the saved figure

    **Optional Kewords**:
     - *height* = the height of each diagram in this figure (in inches). Default is 3 inches.
     - *dpi* = the resolution of the figure (in dpi). Default is 300.
    '''
    #get args
    f_height = float(kwds.get('height',3.3)) #each plot is 3 inches high
    dpi = kwds.get('dpi',300)

    if params==None:
        if hasattr(self,"params_file"):
            params=self.params_file
        else:
            print "Error - please specify a parameter list (or file) to plot."

    #get group factor & frequency for the requested topology family
    initial_id=-1
    if topology_type == '':
        group = 'u_topo'
        title = 'Overall Topologies'
        freq = self.unique_frequency
        if hasattr(self,"initial_topo_id"):
            initial_id = self.initial_topo_id
        ids = self.unique_ids
    elif "litho" in topology_type:
        title = 'Lithological Topologies'
        group = 'u_litho'
        freq = self.unique_litho_frequency
        if hasattr(self,"initial_litho_id"):
            initial_id = self.initial_litho_id
        ids = self.unique_litho_ids
    elif "struct" in topology_type:
        title = 'Structural Topologies'
        group = 'u_struct'
        freq = self.unique_struct_frequency
        if hasattr(self,"initial_struct_id"):
            initial_id = self.initial_struct_id
        ids = self.unique_struct_ids
    else:
        print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
        return

    #get data
    data = self.get_parameter_space(params)

    #create figure
    import matplotlib.pyplot as plt
    plt.ioff()

    #calculate dims
    n = len(self.models[0].headings) #number of graphs
    rows = n + 1 #1 row per boxplot + extra row for frequency graph
    f_height = f_height * rows
    f_width = min( len(freq) * 0.2+1 , 100) #0.1 inch per item + 1 inch extra space

    #generate axes
    f,ax = plt.subplots(rows, sharex='col')
    #ax = ax.ravel()[0:n] #convert to 1d array

    #draw boxplots (one per parameter, grouped by unique topology id)
    data.boxplot(ax = ax[0:n], column=self.models[0].headings,by=group)

    #draw bar graph of topology frequencies on the last axis
    l=np.arange(len(freq))[:] + 0.5
    #colours
    cols=['b'] * len(freq)
    if initial_id != -1 and initial_id < len(freq):
        cols[initial_id] = 'r' #make the initial one red
    # NOTE(review): bar(left=...) was removed in matplotlib 2.x+ (now the first
    # positional argument `x`) — confirm the matplotlib version before upgrading.
    rects = ax[-1].bar(left=l,width=1, height=freq, color=cols)

    #labels: annotate each bar with its (integer) frequency
    for r in rects:
        height = r.get_height()
        ax[-1].text(r.get_x(), height, '%d'%int(height),
                ha='left', va='bottom')

    #set automatic limits
    for a in ax:
        a.set_ylim()
        a.set_aspect('auto') #'equal'
        a.set_xlabel("")
        a.set_ylabel( a.get_title() )
        a.set_title("")
        # NOTE(review): `l` here shadows the bar-position array defined above.
        for l in a.xaxis.get_ticklabels():
            l.set_rotation(90)

    #ax[-1].set_ylim(max(freq) * 1.02)
    ax[-1].set_xlabel("Topology")
    ax[-1].set_ylabel("Frequency")

    #tweak spacing
    #f.subplots_adjust(hspace=0.6,wspace=0.5)
    ax[0].set_title(title)
    f.set_figwidth(f_width)
    f.set_figheight(f_height)
    f.suptitle("")

    #return/save figure
    if path=='':
        f.show()
    else:
        f.savefig(path,dpi=dpi)
    #return f
def histogram(self, params=None, path="", **kwds):
    '''
    Plots a histogram matrix showing the distribution of parameters in model space.

    **Optional Arguments**:
     - *params* = a list of parameters. A histogram will be generated for each parameter
                  in this list. The default is all the parameters in the params_file
                  argument. If this is not defined (ie. this class has not purturbed
                  the history files) then an error is thrown.

                  Params can be passed either as a path to a .csv file containing information on the parameters that define
                  this model space or a list containing tuples (eventID,parameter_name) defining the
                  axes of the model space. If left as None the params file used to generate model
                  variations is used (at self.params_file). If this does not exist/has not been defined
                  an error is thrown.
     - *path* = a file path to write the image to. If left as '', the image is displayed on the screen.

    **Optional Keywords**:
     - *width* = The width of each histogram in inches. Default is 3.
     - *height* = The height of each histogram in inches. Default is 2.5.
     - *dpi* =  The resolution of the saved figure. Default is 300
     - *cols* = The number of columns to fit in a figure. Default is 3.
    '''
    width = kwds.get('width',3.)
    height = kwds.get('height',2.5)
    dpi = kwds.get('dpi',300)
    cols = kwds.get('cols',3)

    if params==None:
        if hasattr(self,"params_file"):
            params=self.params_file
        else:
            print "Error - please specify a parameter list (or file) to plot."

    #get data; drop the topology-membership columns so only parameters are plotted
    data = self.get_parameter_space(params)
    m_space = data.drop(['u_topo','u_litho','u_struct'],1)

    #make histogram
    import matplotlib.pyplot as plt
    import math

    #calculate dims
    n = len(self.models[0].headings) #number of graphs
    rows = int(math.ceil( n / float(cols))) #calculate rows needed given the number of columns

    #generate axes
    f,ax = plt.subplots(rows,cols)

    #retrieve list of needed axes (drop any unused grid cells)
    ax = ax.ravel()[0:n]

    #draw histogram
    m_space.hist(ax=ax)

    for a in ax:
        a.set_aspect('auto') #'equal'
        for l in a.xaxis.get_ticklabels():
            l.set_rotation(90)

    #tweak spacing
    f.subplots_adjust(hspace=0.6,wspace=0.5)
    f.suptitle("")

    #set size
    f.set_size_inches( cols * width, rows * height )

    if path=='':
        f.show()
    else:
        f.savefig(path,dpi=dpi)
def plot_cumulative_topologies(self, topology_type='', path="", dpi=300):
    '''
    Plots the specified cumulative topology count (number of unique topologies
    observed as a function of the number of trials).

    **Optional Arguments**:
     - *topology_type* = The type of topology you are interested in. This should be either '' (all topologies),
                  'litho' or 'struct'.
     - *path* = a file path to write the image to. If left as '', the image is displayed on the screen.
     - *dpi* =  The resolution of the saved figure
    '''
    # resolve the cumulative-count series and the matching plot title
    if topology_type == '':
        counts = self.accumulate_topologies
        title = "Cumulative Observed Topologies"
    elif 'litho' in topology_type:
        counts = self.accumulate_litho_topologies
        title = "Cumulative Observed Lithological Topologies"
    elif 'struct' in topology_type:
        counts = self.accumulate_struct_topologies
        title = "Cumulative Observed Structural Topologies"
    else:
        print("Error: Invalid topology_type. This should be '' (full topologies), 'litho' or 'struct'")
        return

    import matplotlib.pyplot as plt
    figure, axis = plt.subplots()

    # plot graph
    axis.plot(counts)
    axis.set_title(title)
    axis.set_xlabel('Trial Number')
    axis.set_ylabel('Unique Topology Count')

    if path == "":
        figure.show()
    else:
        figure.savefig(path, dpi=dpi)
def plot_parallel_coordinates(self, topology_id, topology_type='struct', params=None, **kwds):
    '''
    Plots the specified topology/topologies on a parallell coordinates
    diagram to give an indication of their location in parameter space.

    **Arguments**:
     - *topology_id*: A list of topology id's to plot. The id's correspond
       to the location of the topologies in the unique_topologies lists.
     - *topology_type* = The type of topology you are interested in. This should be either 'litho'
                  or 'struct'
     - *params* = a list of parameters. A boxplot will be generated for each parameter
                  in this list. The default is all the parameters in the params_file
                  argument. If this is not defined (ie. this class has not purturbed
                  the history files) then an error is thrown.

                  Params can be passed either as a path to a .csv file containing information on the parameters that define
                  this model space or a list containing tuples (eventID,parameter_name) defining the
                  axes of the model space. If left as None the params file used to generate model
                  variations is used (at self.params_file). If this does not exist/has not been defined
                  an error is thrown.

    **Optional Keywords**:
     - *path* = a file path to write the image to. If left as '', the image is displayed on the screen.
     - *dpi* =  The resolution of the saved figure
     - *width* = the width of the figure (in inches). Default is 10 inches.
     - *height* = the height of this figure (in inches). Default is 5 inches.
    '''
    # NOTE(review): pandas.tools.plotting was moved to pandas.plotting in
    # pandas 0.20 and removed later — update this import when upgrading pandas.
    from pandas.tools.plotting import parallel_coordinates

    data = self.get_parameter_space(params)

    path = kwds.get('path','')
    dpi = kwds.get('dpi',300)
    width = kwds.get('width',10)
    height = kwds.get('height',5)

    #get collumn holding the unique-topology membership ids
    if topology_type == '':
        col='u_topo'
        title = 'Overall Topology'
    elif "litho" in topology_type:
        col='u_litho'
        title = 'Lithological Topology'
    elif "struct" in topology_type:
        title = 'Structural Topology'
        col='u_struct'
    else:
        print "Error: Invalid topology_type. This should be '' (full topologies), 'litho' or 'struct'"
        return

    #normalise data (mean-centred, scaled by range) so all axes are comparable
    norm = (data - data.mean()) / (data.max() - data.min())
    # NOTE(review): DataFrame.drop returns a *new* frame and this result is
    # discarded, so the id columns are NOT actually removed here — likely a bug
    # (missing assignment or inplace=True).
    norm.drop(['u_topo','u_struct','u_litho'],1) #remove int columns

    #subset: keep only the rows belonging to the requested topology ids
    sub = norm[data[col].isin(topology_id)]
    # NOTE(review): assigning into a slice may trigger pandas'
    # SettingWithCopyWarning; confirm this still writes as intended.
    sub[col] = data[col] #re add necessary columns

    #plot
    import matplotlib.pyplot as plt
    f,ax = plt.subplots()

    parallel_coordinates(sub, col,ax=ax)

    f.set_figwidth(width)
    f.set_figheight(height)
    ax.set_title(title)

    #return/save figure
    if path=='':
        f.show()
    else:
        f.savefig(path,dpi=dpi)
def plot_frequency_distribution(self,topology_type='struct',**kwds):
    '''
    Plots a (by default cumulative) frequency distribution of observed topologies.

    **Arguments**
     - *topology_type* = The type of topology you are interested in. This should be either '' (full topology),
       'litho' or 'struct'

    **Optional Keywords**
     - *logx* - plot x axis on a log scale. Default is False.
     - *logy* - plot y axis on a log scale. Default is False.
     - *cumulative* - plots cumulative frequency distribution. Default is True.
     - *path* - a path to save the image to
     - *dpi* - the resolution of the resulting figure. Default is 300.
     - *width* - the width of the resulting figure (inches). Default is 5.
     - *height* - The height of the resulting figure (inches). Default is 5.
    '''
    #get kwds
    logx = kwds.get('logx',False)
    logy = kwds.get('logy',False)
    cumulative = kwds.get('cumulative',True)
    dpi = kwds.get('dpi',300)
    width = kwds.get('width',5)
    height = kwds.get('height',5)
    #get the observation-frequency list and title for the requested topology type
    if topology_type == '':
        freq=self.unique_frequency
        title='Observed Topology Frequency Distribution'
    elif 'struct' in topology_type:
        freq=self.unique_struct_frequency
        title='Structural Topology Frequency Distribution'
    elif 'litho' in topology_type:
        freq=self.unique_litho_frequency
        title='Lithological Topology Frequency Distribution'
    else:
        print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
        return
    #bin the unique topologies by how often they were observed
    if not cumulative:
        obs = [0] * max(freq)
        for i in range(max(freq)):
            total=0
            for f in freq:
                if f == i+1:
                    total+=f
            # NOTE(review): this sums the frequencies (total += f), not the
            # count of topologies seen exactly i+1 times as the original
            # comment claimed -- confirm which is intended.
            obs[i] = total
    if cumulative:
        obs = [0] * max(freq)
        for i in range(max(freq)):
            total = 0
            for f in freq:
                if f <= i+1:
                    total+=f
            # cumulative variant: sums frequencies of topologies seen <= i+1 times
            obs[i] = total
        title = 'Cumulative %s' % title
    #expand last block (purely for aesthetic reasons)
    for i in range(10):
        obs.append(obs[-1])
    #normalise by the number of models so the y axis becomes a proportion
    obs = [ x / float(len(self.models)) for x in obs ]
    #build plot
    import matplotlib as matplotlib
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FuncFormatter
    #convert y axis tick labels to percentages
    def to_percent(y,position):
        # Ignore the passed in position. This has the effect of scaling the default
        # tick locations.
        s = str(100 * y)
        # The percent symbol needs escaping in latex
        if matplotlib.rcParams['text.usetex'] == True:
            return s + r'$\%$'
        else:
            return s + '%'
    #plot distribution as a bar chart (np assumed imported at module level)
    f,ax = plt.subplots()
    l=np.arange(len(obs))
    ax.bar(l,obs,1,linewidth=0)
    #set x range
    ax.set_xlim(0,l[-1] + 1)
    #label y axis with %
    formatter = FuncFormatter(to_percent)
    ax.yaxis.set_major_formatter(formatter)
    ax.set_xlabel('Model Frequency')
    ax.set_ylabel('Percent of Observed Topologies')
    ax.set_title(title)
    f.set_figwidth(width)
    f.set_figheight(height)
    if logx:
        ax.set_xscale('log')
    if logy:
        ax.set_yscale('log')
    #save if a path was supplied, otherwise display interactively
    if kwds.has_key('path'):
        f.savefig( kwds.get('path'), dpi=dpi)
    else:
        f.show()
def maximum_separation_plot(self,topology_type='strut',params=None,**kwds):
'''
Plots the topologies such that there is maximum separation between clusters of the
same type of topology. This method attempts to best represent n-dimensional clustering
in 2D, and is usefull for models were there are too many parameters to build a scatter
matrix.
'''
print "Not implemented yet. Sorry"
def plot_scatter_matrix(self,param_pairs=None,topology_type='struct',params=None, **kwds):
'''
Plots a matrix of scatter plots showing the distribution of the specified topologies in
model space.
**Arguments**:
- *param_pairs*: A list of parameter pairs (tuples) to display. If left as none then
all parameters are drawn (though if there are greater than 5 parameters an
error is thrown.)
- *topology_type* = The type of topology you are interested in. This should be either 'litho'
or 'struct'
- *params* = a list of parameters defining parameter space. The default is all the parameters in the params_file
argument. If this is not defined (ie. this class has not purturbed
the history files) then an error is thrown.
Params can be passed either as a path to a .csv file containing information on the parameters that define
this model space or a list containing tuples (eventID,parameter_name) defining the
axes of the model space. If left as None the params file used to generate model
variations is used (at self.params_file). If this does not exist/has not been defined
an error is thrown.
**Optional Keywords**:
- *path* = a file path to write the image to. If left as '', the image is displayed on the screen.
- *dpi* = The resolution of the saved figure
- *width* = the width of each scatter plot (in inches). Default is 3 inches.
- *height* = the height of each scatter plot (in inches). Default is 3 inches.
- *alpha* = the alpha value to use for each dot (between 0 and 1). Default is 0.8.
'''
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
cols=kwds.get('cols',3)
path = kwds.get('path','')
dpi = kwds.get('dpi',300)
width = kwds.get('width',10)
height = kwds.get('height',5)
alpha = kwds.get('alpha',0.8)
#get data
param_space = self.get_parameter_space(params)
#get relevent topology column
initial_id = -1
if topology_type == '':
title = "Overall Topology Matrix"
col='u_topo'
if hasattr(self,"initial_topo_id"):
initial_id = self.initial_topo_id
param_space = param_space.drop(['u_litho','u_struct'],1) #drop unwanted columns
elif "litho" in topology_type:
title = "Lithological Topology Matrix"
col='u_litho' #we're interested in litho
if hasattr(self,"initial_litho_id"):
initial_id = self.initial_litho_id
param_space = param_space.drop(['u_topo','u_struct'],1) #drop unwanted columns
elif "struct" in topology_type:
title = "Structural Topology Matrix"
col='u_struct' #we're interested in struct
if hasattr(self,"initial_struct_id"):
initial_id = self.initial_struct_id
param_space = param_space.drop(['u_topo','u_litho'],1) #drop unwanted columns
else:
print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
return
if param_pairs != None:
#make data frames containing relevant columns
param_pairs.append(col)
data = param_space[param_pairs]
else:
if len(param_space.columns) <= 5:
data = param_space #work on entire dataset
else:
print "You be crazy - %d panels is to many for a scatter matrix." % math.factorial(len(param_space.columns)-1)
return
#group
grp = data.groupby(col)
#calculate grid dimensions
headings = list(data.columns)
headings.remove(col)
n=math.factorial((len(headings)-1))
rows = int(math.ceil( n / float(cols) ))
#get initial model params
if initial_id != -1: #initial model is known
i_events = self.base_model.history.events
#make plots
f,ax = plt.subplots(rows,cols)
ax=ax.ravel() #convert to 1d array
i = 0 #Index of plot we're working on
for x in headings:
for y in headings:
if x < y:
#plot groups
scale=255 / len(grp)
for n, g in grp:
#nb. also try 'cm.Set1' or cm.Paired colour maps
g.plot(kind='scatter',x=x,y=y,color=cm.Set1(n*scale,alpha=alpha),s=4,ax=ax[i])
#plot initial model
if (initial_id != -1): #initial model is known
c = cm.Set1(scale * initial_id) #colour
event1=int(x.split('_')[0])
event2=int(y.split('_')[0])
param1=x.split('_')[1]
param2=y.split('_')[1]
ax[i].plot(i_events[event1].properties[param1],i_events[event2].properties[param2],marker='o',mec=c,mew=2,fillstyle='none')
#axis stuf
#ax[i].set_aspect('auto') #'equal'
for l in ax[i].xaxis.get_ticklabels():
l.set_rotation(90)
#next graph
i+=1
#tweak figure spacing
f.subplots_adjust(hspace=0.4,wspace=0.4)
f.suptitle(title)
f.set_figwidth(width * cols)
f.set_figheight(height * rows)
#return/save figure
if path=='':
f.show()
else:
f.savefig(path,dpi=dpi)
def get_n_most_frequent_models( self, n = 8, topology_type = ''):
'''
Retrieves a list of the n most frequent (and hence most likely) models.
**Arguments**:
- *n* = the number of models to get
- *topology_type* = the type of topology used to identify unique models ('','struct' or 'litho')
**Returns**:
- a tuple containing: 1) a list of models
2) a list of model ID's
3) a list of topology UIDs
'''
#get data
if topology_type == '':
topo=self.unique_topologies
t_id=self.topo_type_ids
elif 'struct' in topology_type:
topo=self.unique_struct_topologies
t_id=self.struct_type_ids
elif 'litho' in topology_type:
topo=self.unique_litho_topologies
t_id=self.litho_type_ids
else:
print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
return
#make sure we're not asking for too many models
if n > len(topo):
n = len(topo)
#unique topologies are already sorted, so we just want to get the first n models
uids = range(n)
ids = [ t_id[i] for i in uids ]
models = [ self.models[i] for i in ids ]
return (models,ids,uids)
def get_n_from_clusters(self, n = 8, topology_type = '' ):
    '''
    Returns n ModelRealisation objects that are selected such that they
    are at the centroid of n groups chosen from a UPGMA dendrogram of model space.
    This means that this selection of models should express the variability within
    model space as best as possible.

    Note that while these models express the variability, the models themselves are
    usually very improbable (ie. they were not observed many times). Hence they should
    be viewed as 'end-member topologies', not as representative of the likely topologies.

    **Arguments**:
     - *topology_type* = the type of topology used to identify unique models ('','struct' or 'litho')
     - *n* = the number of unique models to choose (clusters to cut from the dendrogram)

    **Returns**:
     - a tuple (models,uids,ids), ie: a tuple containing:
       references to a list of ModelRealisation objects,
       a list of the unique id's for each of the model realisation objects, and
       a list containing the (global) id's of these objects (in the self.models list)
    '''
    import scipy.cluster.hierarchy as clust
    import scipy.spatial.distance as dist
    #get the unique topology list / id list for the requested type
    if topology_type == '':
        topo=self.unique_topologies
        t_id=self.topo_type_ids
    elif 'struct' in topology_type:
        topo=self.unique_struct_topologies
        t_id=self.struct_type_ids
    elif 'litho' in topology_type:
        topo=self.unique_litho_topologies
        t_id=self.litho_type_ids
    else:
        print "Error: Invalid topology_type. This should be '' (full topology), 'litho' or 'struct'"
        return
    #compute UPGMA tree from the pairwise difference matrix
    dm = self.get_difference_matrix(topology_type)
    if dm is None: #empty distance matrix...
        print "Warning: only one model of this type has been found."
        return ([self.models[0]],[0],[0]) #models are all identical, return first one (it's as good as any)
    m_dif = dist.squareform( dm,force='tovector' ) #condensed distance vector
    Z = clust.average(m_dif) #UPGMA (average) linkage
    #extract n groups from tree
    cluster_ids = clust.fcluster(Z,n,criterion='maxclust') #extract n clusters
    #calculate most central model to each cluster
    #centroids=[]
    centroid_ids = []
    for i in range(1,max(cluster_ids)+1): #max(cluster_ids) usually, but not necessarilly, returns n
        #extract the i'th cluster
        c = [topo[t] for t in range( len(topo) ) if cluster_ids[t] == i] #topologies
        m_ids = [t for t in range( len(topo) ) if cluster_ids[t] == i] #model id's (corresponding to topology)
        if len(c) > 0: #if this cluster exists
            #do pairwise comparisons and accumulate summed Jaccard coefficients.
            # NOTE(review): this local `dist` list shadows the scipy.spatial.distance
            # alias imported above (harmless here since squareform is not called
            # again), and the inner `for i` below shadows the outer cluster index
            # (also harmless because the outer for re-binds i each iteration) --
            # fragile but behaviour-preserving.
            dist=[0] * len(c)
            for i in range(len(c)):
                for j in range(len(c)):
                    if i > j: #we only need to compute a half distance matrix
                        jc = c[i].jaccard_coefficient(c[j])
                        dist[i] += jc
                        dist[j] += jc
                    else:
                        break #go to next loop
            #centroids.append( clust[ dist.index(min(dist)) ] ) #get model most similar to all others in the cluster
            # NOTE(review): min() selects the model with the *smallest* summed
            # Jaccard coefficient; if larger coefficients mean "more similar",
            # this picks the least-similar member despite the comment above --
            # confirm intent.
            centroid_ids.append( m_ids[ dist.index(min(dist)) ])
    #retrieve models
    return ([self.models[ t_id[i] ] for i in centroid_ids], centroid_ids, [t_id[i] for i in centroid_ids ])
def plot_n_models( self, n=8,topology_type = '', criterion='probability', **kwds ):
'''
Produces a grid of renders n unique topologies observed in this experiment.
**Arguments**:
- *topology_type* = the type of topology used to identify unique models ('','struct' or 'litho')
- *n* = the number of unique models to plot.
- *criterion* = the criterion used to select the models. This should either be 'probability' or
'clustering'. If 'probability' is selected, get_n_most_frequent_models() is used to retrieve
models. If 'clustering' is selected, get_n_from_clusters() is used. Please see the definitions
of these methods for specific details.
**Optional Keywords**:
- *path* = the path to the resulting image as. Default is '' (no image saved)
- *dpi* = the resoltuion of the resulting image
- *width* = the width of each tile in the grid. Default is 2 inches.
- *cols* = the number of tiles to fit accross the image. Default is 4.
- *uid* = label the tiles with topology id rather than model id. Default is False.
- *direction* = 'x', 'y', 'z' : coordinate direction of section plot (default: 'y')
- *position* = int or 'center' : cell position of section as integer value
or identifier (default: 'center')
- *ax* = matplotlib.axis : append plot to axis (default: create new plot)
- *figsize* = (x,y) : matplotlib figsize
- *colorbar* = bool : plot colorbar (default: True)
- *colorbar_orientation* = 'horizontal' or 'vertical' : orientation of colorbar
(default: 'vertical')
- *title* = string : plot title
- *cmap* = matplotlib.cmap : colormap (default: YlOrRd)
- *ve* = float : vertical exaggeration
- *layer_labels* = list of strings: labels for each unit in plot
- *layers_from* = noddy history file : get labels automatically from history file
- *data* = np.array : data to plot, if different to block data itself
- *litho_filter* = a list of lithologies to draw. All others will be ignored.
'''
#get kwds
width = kwds.get("width",3)
height = kwds.get("height",2)
cols = kwds.get("cols",4)
import matplotlib.pyplot as plt
#get models
if 'prob' in criterion:
models, uids, ids = self.get_n_most_frequent_models(n,topology_type)
if topology_type == '':
title='%d Most Probable Model Topologies' % len(models)
elif 'struct' in topology_type:
title='%d Most Probable Structural Topologies' % len(models)
elif 'litho' in topology_type:
title='%d Most Probable Lithological Topologies' % len(models)
else:
return
elif 'clust' in criterion:
models, uids, ids = self.get_n_from_clusters(n,topology_type)
if topology_type == '':
title='%d Most Representative Model Topologies' % len(models)
elif 'struct' in topology_type:
title='%d Most Representative Structural Topologies' % len(models)
elif 'litho' in topology_type:
title='%d Most Representative Lithological Topologies' % len(models)
else:
return
else:
print "Error: Invalid criterion argument. Please pass either 'probability' (or 'prob') or 'clustering' (or 'clust')."
return
#plot grid
n = len(models) #number of models to plot
#check for stupididty
if n > 200:
print "Error: too many topologies of specified type '%s' to draw a grid. Please use a smaller n." % topology_type
rows = int(math.ceil(n / float(cols)))
#make plots
f,ax = plt.subplots(rows,cols)
ax=ax.ravel() #convert to 1d array
for i in range(n):
models[i].get_geology().plot_section(ax=ax[i],**kwds)
#set axis stuff
ax[i].get_xaxis().set_visible(False)
ax[i].get_yaxis().set_visible(False)
ax[i].set_title( 'Model %d' % ids[i])
if (kwds.has_key('uid')):
if kwds['uid']:
ax[i].set_title( 'Topology %d' % uids[i] )
for i in range(n,len(ax)): #hide all other axes
ax[i].set_visible(False)
#set fig size
f.set_figwidth(width * cols)
f.set_figheight(height * cols)
if (kwds.has_key('path')):
f.savefig( kwds['path'], dpi=kwds.get('dpi',300) )
else:
f.show()
def render_unique_models( self, directory, topology_type='struct', **kwds ):
'''
Saves images of sections through the type models of each unique
topology.
**Arguments**:
- *directory* = the directory to save the images to
- *topology_type* = the type of topology used to identify unique models
**Optional Keywords**:
- *max_t* = the maximum number of topologies to draw. If the number of topologies excedes
this number then all later topologies (the less likely ones) are ignored.
- *direction* = 'x', 'y', 'z' : coordinate direction of section plot (default: 'y')
- *position* = int or 'center' : cell position of section as integer value
or identifier (default: 'center')
- *figsize* = (x,y) : matplotlib figsize
- *colorbar* = bool : plot colorbar (default: True)
- *colorbar_orientation* = 'horizontal' or 'vertical' : orientation of colorbar
(default: 'vertical')
- *title* = string : plot title
- *cmap* = matplotlib.cmap : colormap (default: YlOrRd)
- *ve* = float : vertical exaggeration
- *layer_labels* = list of strings: labels for each unit in plot
- *layers_from* = noddy history file : get labels automatically from history file
- *data* = np.array : data to plot, if different to block data itself
- *litho_filter* = a list of lithologies to draw. All others will be ignored.
'''
#get collumn
if topology_type == '':
n = len(self.unique_topologies)
elif "litho" in topology_type:
n = len(self.unique_litho_topologies)
elif "struct" in topology_type:
n = len(self.unique_struct_topologies)
else:
print "Error: Invalid topology_type. This should be '' (full topologies), 'litho' or 'struct'"
return
if not os.path.exists(directory):
os.makedirs(directory)
if n > kwds.get('max_t',n):
n = kwds.get('max_t',n)
for i in range(n):
m = self.get_type_model(i,topology_type)
name='unique_%s_%d.png' % (topology_type,i)
path = os.path.join(directory,name)
if not m is None:
m.get_geology().plot_section(savefig=True,fig_filename=path,**kwds)
def plot_super_network(self,**kwds):
    '''
    Renders the topology supernetwork as a three-axis hive plot
    (stratigraphic age, structural age, surface area).

    **Optional Keywords**
     - *path* = the path to save this figure
     - *dpi* = the resolution of the figure
     - *bg* = the background color. Default is black.
    '''
    st = self.super_topology
    #axis 0 holds the nodes (positioned by lithology); axes 1 and 2 hold the
    #edges, treated as pseudo-nodes positioned by age and surface area
    axes = [
        [(node, int(d['lithology'])) for node, d in st.nodes(data=True)],
        [(u, v, d['age']) for u, v, d in st.edges(data=True)],
        [(u, v, d['area']) for u, v, d in st.edges(data=True)],
    ]
    #map each (stripped) node id to its position along its axis
    node_positions = [{}, {}, {}]
    for a in range(3):
        for item in axes[a]:
            key = item[:-1]
            if len(key) == 1:
                key = item[0] #unwrap 1-tuples back to a plain value
            node_positions[a][key] = item[-1] #last element is the position
    #strip the position attribute off the axis entries
    axes[0] = [node for node, d in axes[0]]
    axes[1] = [(u, v) for u, v, d in axes[1]]
    axes[2] = [(u, v) for u, v, d in axes[2]]
    #build the hive-plot edge lists, bucketed by edge type
    edges = {}
    edge_vals = {}
    for u, v, d in st.edges(data=True):
        etype = d['edgeType']
        if etype not in edges:
            edges[etype] = []
            edge_vals[etype] = {'cm' : 'alpha', 'color' : d['colour']}
        #one inter-group edge plus the three links tying the edge pseudo-nodes together
        for e in ((u, v), (u, (u, v)), (v, (u, v)), ((u, v), (u, v))):
            edges[etype].append(e)
            edge_vals[etype][e] = d['weight']
    #make plot
    from pynoddy.experiment.util.hive_plot import HivePlot
    h = HivePlot(axes,edges,node_positions=node_positions, node_size=0.1,
                 edge_colormap=edge_vals,lbl_axes=['Stratigraphic Age',
                                                   'Structural Age',
                                                   'Surface Area'],
                 axis_cols=['white','white','white'])
    h.draw(**kwds)
def analyse(self, output_directory, **kwds):
    '''
    Runs the stock-standard analysis of the generated model suite: writes the
    summary() text to a file and, optionally, creates figures, dumps the
    parameter space as csv and pickles this class for later use.

    **Arguments**:
     - *output_directory* = the directory to save results to.

    **Optional Keywords**:
     - *figs* - True if figures should be created (slow). Default is True.
     - *data* - True if data should be saved (as csv). Default is True.
     - *pickle* - True if the analysis results should be pickled for later use. Default is True.
    '''
    #get kwds (renamed locals so the pickle flag cannot shadow the module alias)
    do_figs = kwds.get('figs', True)
    do_data = kwds.get('data', True)
    do_pickle = kwds.get('pickle', True)
    #make sure the output directory exists
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    #write the text summary
    with open(os.path.join(output_directory, "output.txt"), 'w') as f:
        if hasattr(self, 'base_model'): #write name
            f.write("Results for %s\n" % self.base_model.basename)
        f.write(self.summary())
    #do figures
    if do_figs:
        self.do_figures(output_directory)
    #save parameter space
    if do_data:
        self.get_parameter_space().to_csv(os.path.join(output_directory, 'parameter_space.csv'))
    #pickle this class for later
    if do_pickle:
        import cPickle as pickle
        pickle.dump(self, open(os.path.join(output_directory, "analysis.pkl"), "wb"))
def summary(self):
    '''Builds and returns a human-readable text summary of the experiment results.'''
    parts = [
        "%d different topologies found (from %d trials)\n" % (len(self.unique_topologies), len(self.models)),
        "%d unique lithological topologies found\n" % len(self.unique_litho_topologies),
        "%d unique structural topologies found\n" % len(self.unique_struct_topologies),
        "model variability (overall) = %.3f\n" % self.get_variability(''),
        "model variability (lithological) = %.3f\n" % self.get_variability('litho'),
        "model variability (structural) = %.3f\n" % self.get_variability('struct'),
    ]
    #average network sizes for each topology flavour
    for label, t in [("topologies", ''), ("lithological topologies", 'litho'), ("structural topologies", 'struct')]:
        parts.append("Model realisations had %s of (on average):\n" % label)
        parts.append("\t%.3f nodes\n" % self.get_average_node_count(t))
        parts.append("\t%.3f edges\n" % self.get_average_edge_count(t))
    #super-network sizes
    parts.append("Overall super network had %d edges\n" % self.super_topology.number_of_edges())
    parts.append("Litho super network had %d edge\n" % self.super_litho_topology.number_of_edges())
    parts.append("Struct super network had %d edges\n" % self.super_struct_topology.number_of_edges())
    return "".join(parts)
def do_figures(self, directory):
    '''
    Writes the standard suite of summary figures for this experiment to the
    specified directory.

    **Arguments**:
     - *directory* = the directory to write the figure files to (sub-directories
       for the unique-model renders are created as needed).
    '''
    #parameter histogram
    self.histogram(path=os.path.join(directory,"model_space_frequencies.png"))
    #plot super-network adjacency matrices
    from pynoddy.output import NoddyTopology
    NoddyTopology.draw_graph_matrix(self.super_topology,path=os.path.join(directory,"adjacency_full_super.png"))
    NoddyTopology.draw_graph_matrix(self.super_struct_topology,path=os.path.join(directory,"adjacency_struct_super.png"))
    NoddyTopology.draw_graph_matrix(self.super_litho_topology,path=os.path.join(directory,"adjacency_litho_super.png"))
    #cumulative topologies
    self.plot_cumulative_topologies('',path=os.path.join(directory,"cumulative_observed.png"))
    self.plot_cumulative_topologies("litho",path=os.path.join(directory,"litho_cumulative_observed.png"))
    self.plot_cumulative_topologies("struct",path=os.path.join(directory,"struct_cumulative_observed.png"))
    #cumulative frequency distributions
    self.plot_frequency_distribution('',path=os.path.join(directory,"cumulative_frequency.png"))
    self.plot_frequency_distribution('struct',path=os.path.join(directory,"struct_cumulative_frequency.png"))
    self.plot_frequency_distribution('litho',path=os.path.join(directory,"litho_cumulative_frequency.png"))
    #boxplots (skipped when there are too many unique topologies to render sanely)
    if len(self.unique_topologies) < 1000:
        self.boxplot('',path=os.path.join(directory,"full_topology_ranges.png"),width=min(0.1*len(self.all_litho_topologies),100))
    if len(self.unique_litho_topologies) < 1000:
        self.boxplot("litho",path=os.path.join(directory,"litho_topology_ranges.png"),width=min(0.1*len(self.all_litho_topologies),100))
    if len(self.unique_struct_topologies) < 1000:
        self.boxplot("struct",path=os.path.join(directory,"struct_topology_ranges.png"))
    #dendrogram
    self.plot_dendrogram('',path=os.path.join(directory,"topology_dend.png"))
    self.plot_dendrogram('litho',path=os.path.join(directory,"litho_topology_dend.png"))
    self.plot_dendrogram('struct',path=os.path.join(directory,"struct_topology_dend.png"))
    #try scatter plots. These will fail for models with large numbers of variables
    if len(self.models[0].headings) < 5:
        self.plot_scatter_matrix(topology_type='',path=os.path.join(directory,'topo_matrix.png'))
        self.plot_scatter_matrix(topology_type='litho',path=os.path.join(directory,'litho_matrix.png'))
        self.plot_scatter_matrix(topology_type='struct',path=os.path.join(directory,'struct_matrix.png'))
    #save render of base model
    if hasattr(self,'base_model'):
        self.base_model.get_geology().plot_section(direction='x',savefig=True,fig_filename=os.path.join(directory,'base_model_yz.png'))
        self.base_model.get_geology().plot_section(direction='y',savefig=True,fig_filename=os.path.join(directory,'base_model_xz.png'))
        self.base_model.get_geology().plot_section(direction='z',savefig=True,fig_filename=os.path.join(directory,'base_model_xy.png'))
    #save render of 8 most frequent topologies, ie. 'most probable models'
    self.plot_n_models(8,'',criterion='prob',path=os.path.join(directory,'probable_topologies.png'))
    self.plot_n_models(8,'struct',criterion='prob',path=os.path.join(directory,'probable_struct_topologies.png'))
    self.plot_n_models(8,'litho',criterion='prob',path=os.path.join(directory,'probable_litho_topologies.png'))
    #save render of 'representative' topologies. ie. represent 'spread of possibility'
    self.plot_n_models(8,'',criterion='clust',path=os.path.join(directory,'model_cluster_centroids.png'))
    self.plot_n_models(8,'struct',criterion='clust',path=os.path.join(directory,'struct_cluster_centroids.png'))
    self.plot_n_models(8,'litho',criterion='clust',path=os.path.join(directory,'litho_cluster_centroids.png'))
    #save renders of (first 50) unique models
    self.render_unique_models(os.path.join(directory,"unique/all/x"),'', max_t=50, direction='x')
    self.render_unique_models(os.path.join(directory,"unique/struct/x"),'struct', max_t=50, direction='x')
    # bugfix: 'litho' was previously passed as a third argument to os.path.join
    # instead of as the topology_type argument to render_unique_models
    self.render_unique_models(os.path.join(directory,"unique/litho/x"),'litho', max_t=50, direction='x')
    self.render_unique_models(os.path.join(directory,"unique/all/y"),'', max_t=50, direction='y')
    self.render_unique_models(os.path.join(directory,"unique/struct/y"),'struct', max_t=50, direction='y')
    self.render_unique_models(os.path.join(directory,"unique/litho/y"),'litho', max_t=50, direction='y')
    self.render_unique_models(os.path.join(directory,"unique/all/z"),'', max_t=50, direction='z')
    self.render_unique_models(os.path.join(directory,"unique/struct/z"),'struct', max_t=50, direction='z')
    self.render_unique_models(os.path.join(directory,"unique/litho/z"),'litho', max_t=50, direction='z')
def is_strata_continuous(self,litho):
'''
Calculates the number of models in which all sections of a particular lithology are
directly connected.
**Arguments**:
- *litho* = the lithology id of interest
**Returns**
-The number of models in which the specified lithology is continuous.
'''
##Not implemented yet. This function should count the number of topologies in which
#all nodes of the given lithology are connected (not disjoint).
print "Not implemented yet. Sorry"
def is_strata_touching(self, litho1, litho2):
'''
Calculates the number of models in which these two strata come into contact.
**Arguments**:
- *litho1* = the lithology id of the first lithology
- *litho2* = the lithology id of the second lithology
**Returns**
- The number of models in which the two specified strata come into contact.
'''
##Not implemented yet. This function should count the number of topologies in which
#any nodes of litho1 are touching nodes of litho2
print "Not implemented yet. Sorry"
@staticmethod
def load_saved_analysis( path ):
'''
Loads a pickled (.pkl) analysis class
**Arguments**:
*path* = the path of the saved analysis
**Returns**:
- the loaded TopologyAnalysis class. Note that paths to noddy realisations will be broken
if this file has been moved/noddy models have been deleted. The general network analysis
functions should work however.
'''
import pickle
return pickle.load( open(path,'r') )
if __name__ == '__main__': #some debug stuff
    # Ad-hoc debugging entry point: builds a TopologyAnalysis for a local
    # fold/fault model and draws its super-network hive plot.
    # NOTE(review): the hard-coded Windows paths below are machine-specific
    # and must be updated to run anywhere else.
    import sys
    sys.path.append(r"C:\Users\Sam\OneDrive\Documents\Masters\pynoddy")
    os.chdir(r"C:\Users\Sam\Documents\Temporary Model Files")
    #his="fold_fault.his"#"fold_fault.his"
    #his="GBasin123.his"
    his = "fold/fold_fault/fold_fault.his"
    #params="fold_fault_dswa.csv" #"fold_fault_dswa.csv" #"fold_unconf_dewa.csv"
    #params="GBasin123.csv"
    params = "fold/fold_fault/fold_fault_dswa.csv"
    # n=0 presumably means "do not generate new perturbations, just analyse
    # existing output" -- TODO confirm against TopologyAnalysis.__init__
    a = TopologyAnalysis(his,params=params,output='fold/fold_fault/fold_fault_dswa',n=0,verbose=False,threads=8)
    a.plot_super_network()
    #print results
    #print a.summary()
    #a.analyse('output')
    #save plots
    #a.boxplot("litho",params=params,path="litho_topology_ranges.png",width=min(0.1*len(a.all_litho_topologies),100))
    #a.boxplot("struct",params=params,path="struct_topology_ranges.png")
    #a.histogram(params=params,path="model_space_frequencies.png")
    #a.plot_cumulative_topologies("litho",path="litho_cumulative_observed.png")
    #a.plot_cumulative_topologies("struct",path="struct_cumulative_observed.png")
    #a.plot_dendrogram('litho',path="litho_topology_dend.png")
    #a.plot_dendrogram('struct',path="struct_topology_dend.png")
    #a.plot_scatter_matrix(param_pairs=['6_Dip','7_Dip','8_Dip'], topology_type='struct',path='struct_matrix1.png')
    #a.plot_scatter_matrix(param_pairs=['6_Dip Direction','7_Dip Direction','8_Dip Direction'], topology_type='struct',path='struct_matrix2.png')
| gpl-2.0 |
gsprint23/sensor_data_preprocessing | src/main.py | 1 | 10003 | '''
Copyright (C) 2015 Gina L. Sprint
Email: Gina Sprint <gsprint@eecs.wsu.edu>
This file is part of sensor_data_preprocessing.
sensor_data_preprocessing is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
sensor_data_preprocessing is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with sensor_data_preprocessing. If not, see <http://www.gnu.org/licenses/>.
OrientationFiltering --- main.py
Created on Apr 2, 2015
This code is specific to processing Shimmer3 sensor data for our ambulatory circuit
wearable sensor study. Functions of interest that could be used for other research
domains include:
1. apply_filter()
2. orient_shank()
2. orient_COM()
Dependencies:
pandas
@author: Gina Sprint and Vladimir Borisov
'''
import os
import pandas as pd
import utils
from src.utils import closest_timestamp
def orient_filter_shank(path, sensor_loc):
    '''
    Orient and filter a shank sensor's raw data.

    Reads the raw Shimmer3 CSV for one shank sensor, obtains user-defined
    horizontal/vertical calibration sections, computes the oriented signals,
    low-pass filters them, plots the result and writes the oriented + filtered
    data to the session's "Filtered_Ankle_Corrected" folder.

    Keyword arguments:
    path -- directory containing the raw <sensor_loc>.csv file
    sensor_loc -- the sensor location label used as the file name prefix
                  (e.g. shank sensor IDs -- confirm naming against callers)
    '''
    # build all of the input/output file paths for this sensor
    fname = os.path.join(path, sensor_loc + ".csv")
    session_path = os.path.split(path)[0]
    filtered_path = os.path.join(session_path, "Filtered_Ankle_Corrected")
    parameter_path = os.path.join(filtered_path, "Orientation_Parameters")
    section_plot_fname = os.path.join(parameter_path, sensor_loc + "_orientation.png")
    notes_fname = os.path.join(parameter_path, sensor_loc + "_notes.txt")
    horiz_df_fname = os.path.join(parameter_path, sensor_loc + "_config_orient_horizontal.csv")
    vert_df_fname = os.path.join(parameter_path, sensor_loc + "_config_orient_vertical.csv")
    oriented_filtered_df_fname = os.path.join(filtered_path, sensor_loc + "_oriented_filtered.csv")
    # row 0 is device name, row 1 is signal name, row 2 is Raw or Cal, row 3 is units
    df = pd.read_csv(fname, skiprows=[0, 2, 3], header=0, index_col=0)
    # for debugging to specify files instead of create from user
    #horiz_df = pd.read_csv(horiz_df_fname, skiprows=[0, 2, 3], header=0, index_col=0)
    #vert_df = pd.read_csv(vert_df_fname, skiprows=[0, 2, 3], header=0, index_col=0)
    # interactively mark the horizontal/vertical configuration sections
    horiz_df, vert_df = utils.get_user_defined_sections(fname, notes_fname, section_plot_fname, \
        horiz_df_fname, vert_df_fname, df, sensor_loc)
    # orient the raw signals, then low-pass filter the oriented copy
    oriented_df = utils.orient_shank(horiz_df, vert_df, df, sensor_loc)
    oriented_filtered_df = utils.apply_filter(oriented_df.copy(), sensor_loc)
    utils.plot_oriented_filtered_data(df, oriented_df, oriented_filtered_df, sensor_loc)
    utils.write_data(fname, oriented_filtered_df_fname, oriented_filtered_df)
def orient_filter_COM(path):
    '''
    Orient and filter the center-of-mass (HIP) sensor file.

    Reads <path>/HIP.csv, orients and filters the signals, plots the result,
    and writes the processed data into the session's
    "Filtered_Ankle_Corrected" directory.

    Keyword arguments:
    path -- directory holding the timestamp-aligned raw csv files
    '''
    session_path = os.path.split(path)[0]
    filtered_path = os.path.join(session_path, "Filtered_Ankle_Corrected")
    raw_fname = os.path.join(path, "HIP.csv")
    out_fname = os.path.join(filtered_path, "HIP_oriented_filtered.csv")
    # header rows in the Shimmer export: 0 device name, 1 signal name,
    # 2 Raw or Cal, 3 units
    df = pd.read_csv(raw_fname, skiprows=[0, 2, 3], header=0, index_col=0)
    oriented_df = utils.orient_COM(df)
    oriented_filtered_df = utils.apply_filter(oriented_df.copy(), "HIP")
    utils.plot_oriented_filtered_data(df, oriented_df, oriented_filtered_df,
                                      "HIP")
    utils.write_data(raw_fname, out_fname, oriented_filtered_df)
def orient_filter_assistive_device(path):
    '''
    Orient and filter the assistive device sensor.

    Tries WALKER.csv first and falls back to CANE.csv (participants used one
    device or the other); returns early if neither file exists.

    Keyword arguments:
    path -- directory holding the timestamp-aligned raw csv files
    '''
    walker_or_cane = "WALKER"
    fname = os.path.join(path, "WALKER.csv")
    # this file exists because mounting was not always consistent
    # so each individual session has its own axes alignment file
    axes_fname = os.path.join(path, "DEV_axes.txt")
    session_path = os.path.split(path)[0]
    filtered_path = os.path.join(session_path, "Filtered_Ankle_Corrected")
    oriented_filtered_df_fname = os.path.join(filtered_path, "DEV_oriented_filtered.csv")
    # row 0 is device name, row 1 is signal name, row 2 is Raw or Cal, row 3 is units
    try: # try opening walker
        df = pd.read_csv(fname, skiprows=[0, 2, 3], header=0, index_col=0)
    except IOError: # try opening cane
        fname = os.path.join(path, "CANE.csv")
        try:
            df = pd.read_csv(fname, skiprows=[0, 2, 3], header=0, index_col=0)
            walker_or_cane = "CANE"
        except IOError:
            # neither device file exists; nothing to process for this session
            print "Walker or cane file DNE for this participant"
            return
    axes_df = pd.read_csv(axes_fname, header=0, index_col=0)
    # debug output of the per-session axis alignment table
    print "axes_df"
    print axes_df
    oriented_df = utils.orient_assistive_device(df, axes_df)
    oriented_filtered_df = utils.apply_filter(oriented_df.copy(), walker_or_cane)
    utils.plot_oriented_filtered_data(df, oriented_df, oriented_filtered_df, walker_or_cane)
    utils.write_data(fname, oriented_filtered_df_fname, oriented_filtered_df)
def chop_dev_data_after_others(path, sensor_loc):
    '''
    This is a hacky solution to chop the assistive device data after
    the other sensor files (HIP, LA, RA) have been chopped. Simply open
    the COM file and grab the first and the last timestamps for each trial
    Then chop DEV to the closest timestamps to the COM timestamps
    Chop the data files to trim them down and specify start timestamp for COM.

    Keyword arguments:
    path -- directory holding the timestamp-aligned raw csv files
    sensor_loc -- sensor label to chop (e.g. "DEV")
    '''
    session_path = os.path.split(path)[0]
    filtered_path = os.path.join(session_path, "Filtered_Ankle_Corrected")
    trials_path = os.path.join(session_path, "Trials")
    meta_files = os.listdir(trials_path)
    hip_T1_fname = hip_T2_fname = None
    prefix = prefix2 = None
    for fil in meta_files:
        if "META" in fil:
            # NOTE(review): fil[:-9] presumably strips a "_XXX.csv"-style
            # suffix to recover the trial prefix -- confirm against the
            # actual META file naming convention
            if prefix is None:
                prefix = fil[:-9]
            else:
                prefix2 = fil[:-9]
            if "HIP" in fil:
                # NOTE(review): fil[4] looks like the trial number character
                # in the file name; odd numbers -> trial 1, even -> trial 2
                if fil[4] == "1" or fil[4] == "3":
                    hip_T1_fname = fil
                elif fil[4] == "2" or fil[4] == "4":
                    hip_T2_fname = fil
    assert(hip_T1_fname is not None)
    assert(hip_T2_fname is not None)
    hip_T1_chopped_fname = os.path.join(trials_path, hip_T1_fname)
    hip_T2_chopped_fname = os.path.join(trials_path, hip_T2_fname)
    # cheating! just grab previously chopped dfs and start/end times for each file
    hip_T1_df = pd.read_csv(hip_T1_chopped_fname, skiprows=[0, 2, 3], header=0, index_col=0)
    hip_T2_df = pd.read_csv(hip_T2_chopped_fname, skiprows=[0, 2, 3], header=0, index_col=0)
    # the cheat instead of calling get_user_defined_trial_times()
    trial_times = [hip_T1_df.index[0], hip_T1_df.index[-1],
                   hip_T2_df.index[0], hip_T2_df.index[-1]]
    loc_fname = os.path.join(filtered_path, sensor_loc + "_oriented_filtered.csv")
    # not all participants use an assistive device
    if os.path.isfile(loc_fname):
        chopped_df_fname = os.path.join(trials_path, prefix + "_" + sensor_loc + ".csv")
        chopped_df_fname2 = os.path.join(trials_path, prefix2 + "_" + sensor_loc + ".csv")
        utils.chop_dependent_data(loc_fname, chopped_df_fname, chopped_df_fname2, trial_times)
def chop_data(path, dependent_sensor_locs):
    '''
    Chop the data files to trim them down and specify start timestamp for COM.

    The COM (HIP) file is chopped interactively first; the resulting trial
    times are then reused to chop every dependent sensor file.

    Keyword arguments:
    path -- directory holding the timestamp-aligned raw csv files
    dependent_sensor_locs -- sensor labels chopped using the COM trial times
    '''
    session_path = os.path.split(path)[0]
    filtered_path = os.path.join(session_path, "Filtered_Ankle_Corrected")
    trials_path = os.path.join(session_path, "Trials")
    # recover the two trial prefixes from the META file names:
    # first META file seen -> first prefix, last of the rest -> second prefix
    first_prefix = second_prefix = None
    for entry in os.listdir(trials_path):
        if "META" not in entry:
            continue
        if first_prefix is None:
            first_prefix = entry[:-9]
        else:
            second_prefix = entry[:-9]
    com_fname = os.path.join(filtered_path, "HIP_oriented_filtered.csv")
    chopped_plot_fname = os.path.join(filtered_path, "HIP_chopped.png")
    notes_fname = os.path.join(filtered_path, "chopping_notes.txt")
    chopped_df_fname = os.path.join(trials_path, first_prefix + "_HIP.csv")
    chopped_df_fname2 = os.path.join(trials_path, second_prefix + "_HIP.csv")
    trial_times = utils.get_user_defined_trial_times(
        com_fname, notes_fname, chopped_plot_fname,
        chopped_df_fname, chopped_df_fname2)
    for sensor_loc in dependent_sensor_locs:
        loc_fname = os.path.join(filtered_path,
                                 sensor_loc + "_oriented_filtered.csv")
        # not all participants use an assistive device
        if os.path.isfile(loc_fname):
            chopped_df_fname = os.path.join(
                trials_path, first_prefix + "_" + sensor_loc + ".csv")
            chopped_df_fname2 = os.path.join(
                trials_path, second_prefix + "_" + sensor_loc + ".csv")
            utils.chop_dependent_data(loc_fname, chopped_df_fname,
                                      chopped_df_fname2, trial_times)
if __name__ == '__main__':
    # filename munging...
    # Hard-coded session path for the participant currently being processed;
    # the commented calls below are toggled on/off per processing stage.
    home_dir = os.path.expanduser("~")
    path = os.path.join(home_dir, r"Google Drive\StLukes Research\Data\Participant_Data")
    path = os.path.join(path, r"016\016_S2_7-15-14\Timestamp_Aligned")
    # HIP, RA, LA, or DEV #os.path.basename(fname)[:-4]
    #orient_filter_shank(path, "LA")
    #orient_filter_shank(path, "RA")
    #orient_filter_COM(path)
    orient_filter_assistive_device(path)
    chop_dev_data_after_others(path, "DEV")
    #chop_data(path, ["LA", "RA"])#, "DEV"])
| gpl-3.0 |
dennisobrien/bokeh | sphinx/source/docs/user_guide/examples/extensions_example_latex.py | 6 | 2676 | """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
import numpy as np
from bokeh.models import Label
from bokeh.plotting import figure, show
JS_CODE = """
import {Label, LabelView} from "models/annotations/label"
export class LatexLabelView extends LabelView
render: () ->
#--- Start of copied section from ``Label.render`` implementation
# Here because AngleSpec does units tranform and label doesn't support specs
switch @model.angle_units
when "rad" then angle = -1 * @model.angle
when "deg" then angle = -1 * @model.angle * Math.PI/180.0
panel = @model.panel ? @plot_view.frame
xscale = @plot_view.frame.xscales[@model.x_range_name]
yscale = @plot_view.frame.yscales[@model.y_range_name]
sx = if @model.x_units == "data" then xscale.compute(@model.x) else panel.xview.compute(@model.x)
sy = if @model.y_units == "data" then yscale.compute(@model.y) else panel.yview.compute(@model.y)
sx += @model.x_offset
sy -= @model.y_offset
#--- End of copied section from ``Label.render`` implementation
# Must render as superpositioned div (not on canvas) so that KaTex
# css can properly style the text
@_css_text(@plot_view.canvas_view.ctx, "", sx, sy, angle)
# ``katex`` is loaded into the global window at runtime
# katex.renderToString returns a html ``span`` element
katex.render(@model.text, @el, {displayMode: true})
export class LatexLabel extends Label
type: 'LatexLabel'
default_view: LatexLabelView
"""
class LatexLabel(Label):
    """A subclass of the Bokeh built-in `Label` that supports rendering
    LaTex using the KaTex typesetting library.

    Only the render method of LabelView is overloaded to perform the
    text -> latex (via katex) conversion. Note: ``render_mode="canvas``
    isn't supported and certain DOM manipulation happens in the Label
    superclass implementation that requires explicitly setting
    `render_mode='css'`).
    """
    # External KaTeX assets loaded into the page so the compiled view can
    # call the global ``katex`` object.
    __javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js"]
    __css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
    # CoffeeScript model/view implementation defined above.
    __implementation__ = JS_CODE
# Demo: plot a cosine curve and annotate it with a KaTeX-rendered formula.
x = np.arange(0.0, 1.0 + 0.01, 0.01)
y = np.cos(2*2*np.pi*x) + 2

p = figure(title="LaTex Demonstration", plot_width=500, plot_height=500)
p.line(x, y)

# Note: must set ``render_mode="css"``
# Use a raw string for the LaTeX source: the original non-raw literal relied
# on ``\s``, ``\i`` and ``\p`` being *invalid* escape sequences that pass
# through unchanged (a DeprecationWarning in modern Python) and needed an
# escaped ``\\frac``. The raw string below is byte-identical but unambiguous.
latex = LatexLabel(text=r"f = \sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}!",
                   x=35, y=445, x_units='screen', y_units='screen',
                   render_mode='css', text_font_size='16pt',
                   background_fill_color='#ffffff')

p.add_layout(latex)
show(p)
| bsd-3-clause |
geodynamics/burnman | examples/example_gibbs_modifiers.py | 2 | 7755 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_gibbs_modifiers
----------------
This example script demonstrates the modifications to
the gibbs free energy (and derivatives) that can be applied
as masks over the results from the equations of state.
These modifications currently take the forms:
- Landau corrections (implementations of Putnis (1992)
  and Holland and Powell (2011))
- Bragg-Williams corrections
  (implementation of Holland and Powell (1996))
- Linear (a simple delta_E + delta_V*P - delta_S*T)
- Magnetic (Chin, Hertzman and Sundman (1987))
*Uses:*
* :doc:`mineral_database`
*Demonstrates:*
* creating a mineral with excess contributions
* calculating thermodynamic properties
"""
from __future__ import absolute_import
# Here we import standard python modules that are required for
# usage of BurnMan. In particular, numpy is used for handling
# numerical arrays and mathematical operations on them, and
# matplotlib is used for generating plots of results of calculations
import numpy as np
import matplotlib.pyplot as plt
import burnman_path # adds the local burnman directory to the path
# Here we import the relevant modules from BurnMan. The burnman
# module imports several of the most important functionalities of
# the library, including the ability to make composites, and compute
# thermoelastic properties of them. The minerals module includes
# the mineral physical parameters for the predefined minerals in
# BurnMan
import burnman
from burnman import minerals
assert burnman_path # silence pyflakes warning
if __name__ == "__main__":
    # Here we show the interesting features of Landau transitions
    # First, lets look at the P wave velocity in stishovite as it transforms
    # to the CaCl2 structure at high pressure
    stv = minerals.SLB_2011.stishovite()

    T = 1500.  # temperature in K
    pressures = np.linspace(60.e9, 80.e9, 101)  # pressures in Pa
    v_ps = np.empty_like(pressures)
    for i, P in enumerate(pressures):
        stv.set_state(P, T)
        v_ps[i] = stv.v_p

    plt.plot(pressures / 1.e9, v_ps / 1.e3, label='stishovite')
    plt.xlabel('P (GPa)')
    plt.ylabel('V_p (km/s)')
    plt.legend(loc="lower right")
    plt.show()

    # Landau transitions also cause spikes in heat capacity
    # Here we show an example of troilite, as implemented by
    # Evans et al. (2010) and incorporated into the dataset
    # of Holland and Powell (2011)

    # Here we show you how to create a mineral with a
    # Landau transition.
    # A special feature of burnman is that you can have
    # more than one Landau (or any other type of)
    # contribution.
    # Here's a copy of lot (low-temperature troilite) from
    # Holland and Powell (2011), with the Landau transition
    # of tro also included.
    from burnman.processchemistry import dictionarize_formula, formula_mass
    class lot (burnman.Mineral):
        """Low-temperature troilite (FeS) with TWO stacked Landau
        contributions: the native lot transition at Tc = 420 K plus the
        tro transition at Tc = 598 K (see Holland and Powell, 2011)."""

        def __init__(self):
            formula = 'Fe1.0S1.0'
            formula = dictionarize_formula(formula)
            # endmember thermodynamic parameters (hp_tmt equation of state)
            self.params = {
                'name': 'lot',
                'formula': formula,
                'equation_of_state': 'hp_tmt',
                'H_0': -102160.0,
                'S_0': 60.0,
                'V_0': 1.818e-05,
                'Cp': [50.2, 0.011052, -940000.0, 0.0],
                'a_0': 4.93e-05,
                'K_0': 65800000000.0,
                'Kprime_0': 4.17,
                'Kdprime_0': -6.3e-11,
                'n': sum(formula.values()),
                'molar_mass': formula_mass(formula)}
            # two Landau (Holland-Powell flavour) excess contributions,
            # applied on top of the equation of state
            self.property_modifiers = [
                ['landau_hp', {'P_0': 100000.0,
                               'T_0': 298.15,
                               'Tc_0': 420.0,
                               'S_D': 10.0,
                               'V_D': 0.0}],
                ['landau_hp', {'P_0': 100000.0,
                               'T_0': 298.15,
                               'Tc_0': 598.0,
                               'S_D': 12.0,
                               'V_D': 4.1e-7}]]
            burnman.Mineral.__init__(self)
troilite = lot()
lot = minerals.HP_2011_ds62.lot()
tro = minerals.HP_2011_ds62.tro()
P = 1.e5
temperatures = np.linspace(300., 1300., 101)
C_ps_troilite = np.empty_like(temperatures)
C_ps_lot = np.empty_like(temperatures)
C_ps_tro = np.empty_like(temperatures)
for i, T in enumerate(temperatures):
troilite.set_state(P, T)
C_ps_troilite[i] = troilite.C_p
lot.set_state(P, T)
C_ps_lot[i] = lot.C_p
tro.set_state(P, T)
C_ps_tro[i] = tro.C_p
plt.plot(temperatures, C_ps_lot, 'r--', label='low temperature (HP2011)')
plt.plot(temperatures, C_ps_tro, 'g--', label='high temperature (HP2011)')
plt.plot(temperatures, C_ps_troilite, 'b-', label='troilite')
plt.xlabel('T (K)')
plt.ylabel('C_p (J/K/mol)')
plt.legend(loc="lower right")
plt.show()
    # Spinel is a mineral with a Bragg-Williams type model
    # (order-disorder correction of Holland and Powell, 1996); plot its
    # heat capacity at ambient pressure.
    sp = minerals.HP_2011_ds62.sp()

    P = 1.e5  # ambient pressure in Pa
    temperatures = np.linspace(300., 1300., 101)
    C_ps = np.empty_like(temperatures)
    for i, T in enumerate(temperatures):
        sp.set_state(P, T)
        C_ps[i] = sp.C_p

    # print sp._property_modifiers
    plt.plot(temperatures, C_ps, label='spinel')
    plt.xlabel('T (K)')
    plt.ylabel('C_p (J/K/mol)')
    plt.legend(loc="lower right")
    plt.show()

    # Wuestite has a Landau-type transition at low temperature,
    # but we could also choose to simplify things by just having an excess entropy
    # to estimate the thermal properties at high temperature
    # Here we ignore the 0 Pa, 0 K gibbs and volume contributions, as the endmember
    # properties would need refitting too...
    class wuestite (burnman.Mineral):
        """Wuestite (FeO) with a simple 'linear' excess-entropy modifier
        (delta_S = 12 J/K/mol) approximating its low-temperature
        transition at high temperature."""

        def __init__(self):
            formula = 'FeO'
            formula = dictionarize_formula(formula)
            # endmember thermodynamic parameters (slb3 equation of state)
            self.params = {
                'name': 'Wuestite',
                'formula': formula,
                'equation_of_state': 'slb3',
                'F_0': -242000.0,
                'V_0': 1.226e-05,
                'K_0': 1.79e+11,
                'Kprime_0': 4.9,
                'Debye_0': 454.0,
                'grueneisen_0': 1.53,
                'q_0': 1.7,
                'G_0': 59000000000.0,
                'Gprime_0': 1.4,
                'eta_s_0': -0.1,
                'n': sum(formula.values()),
                'molar_mass': formula_mass(formula)}
            # linear excess contribution: G_excess = dE + dV*P - dS*T
            self.property_modifiers = [
                ['linear', {'delta_E': 0., 'delta_S': 12., 'delta_V': 0.}]]
            # parameter uncertainties carried alongside the fit values
            self.uncertainties = {
                'err_F_0': 1000.0,
                'err_V_0': 0.0,
                'err_K_0': 1000000000.0,
                'err_K_prime_0': 0.2,
                'err_Debye_0': 21.0,
                'err_grueneisen_0': 0.13,
                'err_q_0': 1.0,
                'err_G_0': 1000000000.0,
                'err_Gprime_0': 0.1,
                'err_eta_s_0': 1.0}
            burnman.Mineral.__init__(self)
    # Compare the entropy of the linear-modifier wuestite against the
    # Holland and Powell (2011) ferropericlase endmember.
    wus = wuestite()
    wus_HP = burnman.minerals.HP_2011_ds62.fper()

    P = 1.e5  # ambient pressure in Pa
    temperatures = np.linspace(300., 1300., 101)
    Ss = np.empty_like(temperatures)
    Ss_HP = np.empty_like(temperatures)
    for i, T in enumerate(temperatures):
        wus.set_state(P, T)
        Ss[i] = wus.S
        wus_HP.set_state(P, T)
        Ss_HP[i] = wus_HP.S

    plt.plot(temperatures, Ss, label='linear')
    plt.plot(temperatures, Ss_HP, label='HP_2011_ds62')
    plt.xlabel('T (K)')
    plt.ylabel('S (J/K/mol)')
    plt.legend(loc="lower right")
    plt.show()
| gpl-2.0 |
poryfly/scikit-learn | sklearn/tests/test_cross_validation.py | 8 | 42537 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: dummy hyper-parameter; score() is 1 / (1 + |a|)
        # allow_nd: whether fit/predict accept >2d inputs (flattened)
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            # fit_params passed through cross_val_score must be sliced
            # consistently with X
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level ``y``, not the ``Y``
            # parameter -- presumably intentional for these tests, but verify
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            # NOTE(review): compares against the module-level ``P_sparse``
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # Predict the first feature; no actual learning involved.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Deterministic score depending only on the hyper-parameter a.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below: a trivial 10x2 design matrix (dense
# and sparse), sparse weight/parameter matrices for fit_params tests, and a
# 2-samples-per-class integer target vector.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert that a (train, test) index pair forms a valid split.

    Python sets are used so that assertion failures print informative
    messages. When n_samples is given, the two sides must additionally
    cover every index in range(n_samples).
    """
    train_set = set(train)
    test_set = set(test)
    # no sample may appear on both sides of the split
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # together, the two sides must cover every sample index
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Assert that cv yields valid splits whose test folds cover the data.

    Checks the number of iterations against expected_n_iter (defaulting to
    len(cv)), validates each split, and -- when n_samples is given -- checks
    that every sample index shows up in at least one test fold.
    """
    if expected_n_iter is None:
        expected_n_iter = len(cv)
    else:
        assert_equal(len(cv), expected_n_iter)

    seen_test_indices = set()
    n_iterations = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test_indices.update(test)

    # the iterator must yield exactly the advertised number of splits
    assert_equal(n_iterations, expected_n_iter)
    if n_samples is not None:
        assert_equal(seen_test_indices, set(range(n_samples)))
def test_kfold_valueerrors():
    # KFold / StratifiedKFold must reject invalid n / n_folds combinations.
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)

    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 2]

    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)

    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))

    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    assert_raises(ValueError, cval.StratifiedKFold, y, 0)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1)

    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)

    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    # Every sample index must land in exactly one test fold.
    # Check all indices are returned in the test folds
    kf = cval.KFold(300, 3)
    check_cv_coverage(kf, expected_n_iter=3, n_samples=300)

    # Check all indices are returned in the test folds even when equal-sized
    # folds are not possible
    kf = cval.KFold(17, 3)
    check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
    # Manually check that KFold preserves the data ordering on toy datasets
    splits = iter(cval.KFold(4, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1])
    assert_array_equal(train, [2, 3])

    train, test = next(splits)
    assert_array_equal(test, [2, 3])
    assert_array_equal(train, [0, 1])

    # with 5 samples and 2 folds the first fold gets the extra sample
    splits = iter(cval.KFold(5, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 2])
    assert_array_equal(train, [3, 4])

    train, test = next(splits)
    assert_array_equal(test, [3, 4])
    assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])

    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])

    # unbalanced classes: first fold takes the surplus samples
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])

    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    # Check that stratified kfold preserves label ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    # three classes with 10% / 89% / 1% prevalence
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            # class proportions must hold (to 2 decimals) on both sides
            assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
                                2)
            assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
                                2)
            assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
                                2)
            assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    # Fold sizes returned by KFold may differ by at most one sample and
    # must sum to the total number of samples.
    for n_samples in range(11, 17):
        kf = cval.KFold(n_samples, 5)
        sizes = [len(test) for _, test in kf]
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that KFold returns folds with balanced sizes (only when
    # stratification is possible)
    # Repeat with shuffling turned off and on
    labels = [0] * 3 + [1] * 14

    for shuffle in [False, True]:
        for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
                    for i in range(11, 17)]:
            sizes = []
            for _, test in skf:
                sizes.append(len(test))

            # fold sizes differ by at most one and cover all samples
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)

    all_folds = None
    for train, test in kf:
        # NOTE(review): these comparisons look fragile -- sorted_array has
        # 100 elements while ind[train] has 200 (shape-mismatch comparison),
        # and the 101/201 starts look like off-by-ones for the contiguous
        # fold ranges [100, 200) and [200, 300). Confirm intent.
        sorted_array = np.arange(100)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(101, 200)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(201, 300)
        assert_true(np.any(sorted_array != ind[train]))
        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))

    all_folds.sort()
    # concatenated (sorted) test folds must reproduce all 300 indices
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    # Check that shuffling is happening when requested, and for proper
    # sample coverage
    labels = [0] * 20 + [1] * 20
    kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
    kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
    # different seeds must produce different test folds
    for (_, test0), (_, test1) in zip(kf0, kf1):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than than the non
    # shuffling variant (around 0.86).
    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)

    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    # unshuffled score should fall in (0.85, 0.88)
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_shuffle_split():
    # A float fraction, a python int, a numpy int and every six integer
    # type must all produce identical splits for the same random_state.
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
            assert_array_equal(t1[0], t2[0])
            assert_array_equal(t2[0], t3[0])
            assert_array_equal(t3[0], t4[0])
            assert_array_equal(t1[1], t2[1])
            assert_array_equal(t2[1], t3[1])
            assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    # StratifiedShuffleSplit must reject configurations that cannot be
    # stratified.
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)

    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)

    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)

    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    # Each split must contain every class on both sides, keep class
    # proportions, and keep train and test disjoint.
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # (closes over n_splits, which is set in the loop below before
        # this helper is called)
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)

        # count how often each index lands in train / test over all draws
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)

        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)

        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)

        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples

        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by Kfold.
    folds = -1 * np.ones(10)
    kf_train = []
    kf_test = []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        # record which fold each test index belongs to
        folds[test_ind] = i
    ps_train = []
    ps_test = []
    ps = cval.PredefinedSplit(folds)
    for train_ind, test_ind in ps:
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
    """LeaveOneLabelOut/LeavePLabelOut must snapshot labels at construction."""
    # Check that LeaveOneLabelOut and LeavePLabelOut work normally if
    # the labels variable is changed before calling __iter__
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    # mutate the array after construction; iteration must be unaffected
    labels_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    """Smoke-test cross_val_score on dense, sparse, list and 3d inputs."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))

        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))

        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))

        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))

    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())

    # an unknown scoring string must raise
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")

    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)

    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    """cross_val_score must pass pandas/pandas-like inputs through intact."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # pandas not installed: only the mock types are exercised
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """cross_val_score must score identically for index and boolean-mask CV.

    Bug fix: the original appended the *index* arrays ``(train, test)`` to
    ``cv_masks``, so the boolean masks built in the loop were dead code and
    the test compared index-based CV against itself.  The masks are now
    actually used.  ``np.bool`` (deprecated alias) is replaced by ``bool``.
    """
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    # the generator is exhausted: rebuild it for a second pass
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))  # use the masks, not indices
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """A precomputed linear kernel must score the same as kernel='linear'."""
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)

    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)

    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params of every kind (arrays, sparse, objects) must reach fit()."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))

    # sentinel non-array values to track through the fit_params plumbing
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()

    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)

    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom score function must be invoked exactly once per fold."""
    clf = MockClassifier()
    _score_func_args = []

    def score_func(y_test, y_predict):
        # record each invocation so the fold count can be checked afterwards
        _score_func_args.append((y_test, y_predict))
        return 1.0

    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # default 3-fold CV: the scorer ran three times
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """Objects without a ``fit`` method must be rejected with TypeError."""
    class NotAnEstimator:
        pass

    assert_raises(TypeError, cval.cross_val_score, NotAnEstimator(), X)
def test_train_test_split_errors():
    """Invalid argument combinations for train_test_split must raise."""
    # no arrays at all
    assert_raises(ValueError, cval.train_test_split)
    # fractions out of range or summing past 1.0
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    # non-numeric size
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # integer sizes exceeding the number of samples
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    # unknown keyword argument
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # arrays of mismatching lengths
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """train_test_split: sizes, X/y correspondence, lists, nd-arrays, stratify."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)

    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)

    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())

    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))

    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    # default test_size=0.25 on 10 samples -> 7 train / 3 test
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))

    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    """train_test_split must preserve (mock) dataframe input types.

    NOTE(review): the name lacks the ``test_`` prefix, so test collectors
    never run this function -- confirm whether it was meant to be a test
    and rename accordingly (left as-is here to keep the interface stable).
    """
    # check cross_val_score doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
    """train_test_split keeps MockDataFrame type unless allow_lists=False.

    NOTE(review): missing ``test_`` prefix -- never collected by the test
    runner; confirm intent (see train_test_split_pandas above).
    """
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    # with allow_lists=False the frames are converted to plain arrays
    X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
    assert_true(isinstance(X_train_arr, np.ndarray))
    assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
    """Default, accuracy and weighted-F1 scoring agree on iris + linear SVC."""
    iris = load_iris()
    clf = SVC(kernel='linear')

    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)

    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)

    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Default, r2, (negated) MSE and explained-variance scoring on Ridge."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()

    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # Mean squared error; this is a loss function, so "scores" are negative
    mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                      scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)

    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: real labels score high, shuffled labels don't."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)

    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)

    # a single all-ones label group must not change the result
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)

    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)

    # test with custom scoring object
    def custom_score(y_true, y_pred):
        # (correct - incorrect) / n : ranges in [-1, 1]
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])

    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)

    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """Every CV generator must yield integer index arrays, not boolean masks.

    Bug fix: the second assertion checked ``train`` twice; it now checks
    ``test`` as clearly intended.
    """
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # dtype kind 'b' would indicate a boolean mask
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            # fancy-indexing with the yielded arrays must work
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """Default-constructed CV generators yield integer indices, not masks.

    Bug fix: the second assertion checked ``train`` twice; it now checks
    ``test`` as clearly intended.
    """
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # dtype kind 'b' would indicate a boolean mask
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            # fancy-indexing with the yielded arrays must work
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    """Invalid size combinations for ShuffleSplit must raise ValueError."""
    # fractional test_size outside (0, 1)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
    # train + test fractions exceeding 1.0
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
                  train_size=0.95)
    # integer sizes reaching or exceeding n
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
    # non-numeric size and all-None sizes
    assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
                  train_size=None)
def test_shufflesplit_reproducible():
    """A seeded ShuffleSplit must yield the same sequence on re-iteration."""
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test when the random_state is given
    splitter = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in splitter]
    second_pass = [train for train, _ in splitter]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice a precomputed kernel consistently with X."""
    clf = SVC()
    clfp = SVC(kernel="precomputed")

    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)

    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]

    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    # kernel restricted to training rows AND columns
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))

    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    # test-vs-train block of the kernel
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    """cross_val_score must accept X with NaNs when the pipeline imputes."""
    features = np.arange(200, dtype=np.float64).reshape(10, -1)
    features[2, :] = np.nan
    targets = np.repeat([0, 1], features.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(pipeline, features, targets, cv=5)
def test_train_test_split_allow_nans():
    """train_test_split must not choke on NaN values in X."""
    features = np.arange(200, dtype=np.float64).reshape(10, -1)
    features[2, :] = np.nan
    targets = np.repeat([0, 1], features.shape[0] / 2)
    cval.train_test_split(features, targets, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    """permutation_test_score must accept X with NaNs when imputed."""
    features = np.arange(200, dtype=np.float64).reshape(10, -1)
    features[2, :] = np.nan
    targets = np.repeat([0, 1], features.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(pipeline, features, targets, cv=5)
def test_check_cv_return_types():
    """check_cv picks KFold or StratifiedKFold based on y and classifier."""
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))

    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    X = np.ones((5, 2))
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    # stratification is undefined for multilabel targets -> plain KFold
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))

    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    # likewise for multioutput targets
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    """cross_val_score works with multilabel y and averaged precision scorers.

    NOTE: the fractional literals (``1 / 2`` etc.) assume true division
    (Python 3 or ``from __future__ import division`` at module level).
    """
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict matches a manual fold loop and handles various CVs."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))

    est = Ridge()

    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])

    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)

    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))

    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))

    # sparsify X by zeroing everything below the median, then convert
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))

    # unsupervised estimators (no y) are supported too
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))

    def bad_cv():
        # folds whose test sets do not form a partition of the samples
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])

    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """Smoke-test cross_val_predict on dense, sparse, list and 3d inputs."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))

    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))

    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))

    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))

    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())

    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """cross_val_predict must pass pandas/pandas-like inputs through intact."""
    # check cross_val_predict doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # pandas not installed: only the mock types are exercised
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """A sparse matrix passed through fit_params must be accepted."""
    iris = load_iris()
    data, target = iris.data, iris.target
    estimator = MockClassifier()
    params = {'sparse_sample_weight': coo_matrix(np.eye(data.shape[0]))}
    scores = cval.cross_val_score(estimator, data, target, fit_params=params)
    assert_array_equal(scores, np.ones(3))
def test_check_is_partition():
    """_check_is_partition is true only for a permutation of range(n)."""
    indices = np.arange(100)
    assert_true(cval._check_is_partition(indices, 100))
    # a missing element breaks the partition
    assert_false(cval._check_is_partition(np.delete(indices, 23), 100))
    # a duplicated element breaks it as well
    indices[0] = 23
    assert_false(cval._check_is_partition(indices, 100))
def test_cross_val_predict_sparse_prediction():
    """Sparse and dense inputs must yield identical cross_val_predict output."""
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    # densify the sparse predictions before the element-wise comparison
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/tseries/tests/test_converter.py | 13 | 5611 | from datetime import datetime, time, timedelta, date
import sys
import os
import nose
import numpy as np
from numpy.testing import assert_almost_equal as np_assert_almost_equal
from pandas import Timestamp, Period
from pandas.compat import u
import pandas.util.testing as tm
from pandas.tseries.offsets import Second, Milli, Micro
# This module only exists in pandas builds that ship the matplotlib
# converters; skip the whole test file otherwise.
try:
    import pandas.tseries.converter as converter
except ImportError:
    raise nose.SkipTest("no pandas.tseries.converter, skipping")
def test_timtetonum_accepts_unicode():
    """time2num must treat byte-strings and unicode strings identically."""
    # NOTE(review): "timtetonum" looks like a typo for "timetonum"; renaming
    # would change the collected test id, so the name is left unchanged.
    assert(converter.time2num("00:01") == converter.time2num(u("00:01")))
class TestDateTimeConverter(tm.TestCase):
    """Unit tests for the matplotlib datetime converter shipped with pandas."""

    def setUp(self):
        self.dtc = converter.DatetimeConverter()
        self.tc = converter.TimeFormatter(None)

    def test_convert_accepts_unicode(self):
        r1 = self.dtc.convert("12:22", None, None)
        r2 = self.dtc.convert(u("12:22"), None, None)
        assert(r1 == r2), "DatetimeConverter.convert should accept unicode"

    def test_conversion(self):
        # every accepted input form must map to the same matplotlib ordinal
        rs = self.dtc.convert(['2012-1-1'], None, None)[0]
        xp = datetime(2012, 1, 1).toordinal()
        self.assertEqual(rs, xp)

        rs = self.dtc.convert('2012-1-1', None, None)
        self.assertEqual(rs, xp)

        rs = self.dtc.convert(date(2012, 1, 1), None, None)
        self.assertEqual(rs, xp)

        rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
        self.assertEqual(rs, xp)

        rs = self.dtc.convert('2012-1-1', None, None)
        self.assertEqual(rs, xp)

        rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
        self.assertEqual(rs, xp)

        # also testing datetime64 dtype (GH8614)
        rs = self.dtc.convert(np.datetime64('2012-01-01'), None, None)
        self.assertEqual(rs, xp)

        rs = self.dtc.convert(np.datetime64('2012-01-01 00:00:00+00:00'), None, None)
        self.assertEqual(rs, xp)

        rs = self.dtc.convert(np.array([np.datetime64('2012-01-01 00:00:00+00:00'),
                                        np.datetime64('2012-01-02 00:00:00+00:00')]), None, None)
        self.assertEqual(rs[0], xp)

    def test_conversion_float(self):
        decimals = 9

        rs = self.dtc.convert(Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
        xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
        np_assert_almost_equal(rs, xp, decimals)

        # the same instant expressed in another timezone converts identically
        rs = self.dtc.convert(Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
        np_assert_almost_equal(rs, xp, decimals)

        rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
        np_assert_almost_equal(rs, xp, decimals)

    def test_time_formatter(self):
        # smoke test: formatting a time value must not raise
        self.tc(90000)

    def test_dateindex_conversion(self):
        decimals = 9

        for freq in ('B', 'L', 'S'):
            dateindex = tm.makeDateIndex(k = 10, freq = freq)
            rs = self.dtc.convert(dateindex, None, None)
            xp = converter.dates.date2num(dateindex._mpl_repr())
            np_assert_almost_equal(rs, xp, decimals)

    def test_resolution(self):
        def _assert_less(ts1, ts2):
            val1 = self.dtc.convert(ts1, None, None)
            val2 = self.dtc.convert(ts2, None, None)
            if not val1 < val2:
                raise AssertionError('{0} is not less than {1}.'.format(val1, val2))

        # Matplotlib's time representation using floats cannot distinguish intervals smaller
        # than ~10 microsecond in the common range of years.
        ts = Timestamp('2012-1-1')
        _assert_less(ts, ts + Second())
        _assert_less(ts, ts + Milli())
        _assert_less(ts, ts + Micro(50))
class TestPeriodConverter(tm.TestCase):
    """Unit tests for the matplotlib Period converter shipped with pandas."""

    def setUp(self):
        self.pc = converter.PeriodConverter()

        class Axis(object):
            pass

        # PeriodConverter reads the target frequency from ``axis.freq``
        self.axis = Axis()
        self.axis.freq = 'D'

    def test_convert_accepts_unicode(self):
        r1 = self.pc.convert("2012-1-1", None, self.axis)
        r2 = self.pc.convert(u("2012-1-1"), None, self.axis)
        self.assert_equal(r1, r2, "PeriodConverter.convert should accept unicode")

    def test_conversion(self):
        # every accepted input form must map to the same Period ordinal
        rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
        xp = Period('2012-1-1').ordinal
        self.assertEqual(rs, xp)

        rs = self.pc.convert('2012-1-1', None, self.axis)
        self.assertEqual(rs, xp)

        rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
        self.assertEqual(rs, xp)

        rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
        self.assertEqual(rs, xp)

        rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
        self.assertEqual(rs, xp)

        rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
        self.assertEqual(rs, xp)

        # FIXME
        # rs = self.pc.convert(np.datetime64('2012-01-01'), None, self.axis)
        # self.assertEqual(rs, xp)
        #
        # rs = self.pc.convert(np.datetime64('2012-01-01 00:00:00+00:00'), None, self.axis)
        # self.assertEqual(rs, xp)
        #
        # rs = self.pc.convert(np.array([np.datetime64('2012-01-01 00:00:00+00:00'),
        #                                np.datetime64('2012-01-02 00:00:00+00:00')]), None, self.axis)
        # self.assertEqual(rs[0], xp)

    def test_integer_passthrough(self):
        # GH9012: plain integers must pass through unchanged
        rs = self.pc.convert([0, 1], None, self.axis)
        xp = [0, 1]
        self.assertEqual(rs, xp)
if __name__ == '__main__':
    import nose
    # -vvs: verbose, -x: stop at first failure, drop into pdb on error/failure
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-2.0 |
totalgood/pug-data | pug/data/gmm.py | 1 | 4320 | """Generate synthetic samples from a mixture of gaussians or Gaussian Mixture Model (GMM)
Most of this code is derived from Nehalem Labs examples:
http://www.nehalemlabs.net/prototype/blog/2014/04/03/quick-introduction-to-gaussian-mixture-models-with-python/
References:
https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm
https://en.wikipedia.org/wiki/Mixture_model#Gaussian_mixture_model
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_stats
from mpl_toolkits.mplot3d import Axes3D
from sklearn import mixture
def normal_pdf(*args, mean=None, cov=None, **kwargs):
    """Normal probability density function.

    Thin wrapper around ``scipy.stats.multivariate_normal(...).pdf`` that
    also accepts the aliases ``mu`` (for ``mean``) and ``sigma`` (for
    ``cov``) as keyword arguments.

    Bug fixes vs. the original: ``stats.mutivariate_normal`` (typo) and the
    undefined name ``mu`` made every call raise; the second ``return``
    (``mlab.bivariate_normal``) was unreachable; ``len()`` failed on the
    0-d default mean.

    Parameters
    ----------
    *args : at most one array-like
        Points at which to evaluate the density.  For a D-dimensional
        distribution, 2-D input is one point per row; a (D, N) array of
        column-wise points is transposed automatically.
    mean : scalar or array-like, optional
        Distribution mean (default 0 in every dimension).  Alias: ``mu``.
    cov : scalar or array-like, optional
        Variance or covariance matrix (default 1).  Alias: ``sigma``.

    >>> normal_pdf([-1, 0, 1], mu=0, sigma=1)
    array([0.24197072, 0.39894228, 0.24197072])
    """
    # Local import: the module-level ``multivariate_stats`` import is broken.
    from scipy import stats

    # explicit mean/cov win over the mu/sigma aliases
    if cov is None:
        cov = kwargs.pop('sigma', None)
    cov = np.asarray(1.0 if cov is None else cov, dtype=float)
    if mean is None:
        mean = kwargs.pop('mu', None)
    mean = np.atleast_1d(np.asarray(0.0 if mean is None else mean,
                                    dtype=float))

    # broadcast a scalar/short mean across the dimensionality of ``cov``
    if cov.ndim == 2 and len(mean) != cov.shape[0]:
        mean = np.resize(mean, cov.shape[0])

    points = np.asarray(args[0] if args else 0.0, dtype=float)
    # accept (D, N) column-wise points by transposing to (N, D)
    if (points.ndim == 2 and points.shape[1] != len(mean)
            and points.shape[0] == len(mean)):
        points = points.T
    return stats.multivariate_normal(mean=mean, cov=cov).pdf(points)
def gaussian_mix(N=2, ratio=.7):
    """Evaluate a two-component Gaussian mixture.

    NOTE(review): ``gaussian`` is not defined anywhere in this module, so
    calling this raises ``NameError`` -- presumably ``normal_pdf`` was
    meant; confirm against the upstream example code.  Callers below also
    invoke ``gaussian_mix(x, y)`` with two coordinates, which does not
    match this ``(N, ratio)`` signature.
    """
    p1 = gaussian()
    p2 = gaussian(sigma=(1.6, .8), mu=(.8, 0.5))
    # NOTE(review): weights ``ratio`` and ``(30. - ratio) / ratio`` do not
    # sum to 1; a normalized mixture would presumably use ``(1. - ratio)``.
    return ratio * p1 + (30. - ratio) * p2 / ratio
    # return gaussian() + gaussian()
def plot_gaussians(X=None, Y=None, Z=None):
    """Render a 3-D surface of Z over the (X, Y) meshgrid to '3dgauss.png'.

    NOTE(review): the ``None`` defaults are passed straight to
    ``plot_surface`` and will fail; callers must supply meshgrid arrays
    (the ``__main__`` demo below calls this with no arguments -- verify).
    """
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap('coolwarm'),
                           linewidth=0, antialiased=True)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.savefig('3dgauss.png')
    # clear the figure so later plots start from a clean state
    plt.clf()
def sample_prgs(N=10000, s=10, prgs=[np.random.normal] * 2, show=True, save='prg_samples.png', verbosity=1):
    '''Metropolis Hastings
    Inputs:
      N (int): Number of random samples to generate
      s (int): thinning interval -- every s-th state is kept
      prgs (list of func): list of Pseudo Random Number Generators
        default: np.random.normal

    NOTE(review): the mutable default for ``prgs`` is shared across calls;
    ``xrange`` is Python 2 only while ``normal_pdf`` above uses Python 3
    syntax, and ``gaussian`` is undefined -- this module cannot run as-is
    on either Python version; confirm the intended helpers.
    '''
    r = np.zeros(2)
    # NOTE(review): ``gaussian`` is undefined here (NameError at runtime).
    p = gaussian()
    if verbosity > 0:
        print("True mean: {}".format(p))
    samples = []
    for i in xrange(N):
        # propose a new state by perturbing the current one with the PRGs
        rn = r + np.array([prg(size=1) for prg in prgs]).T[0]
        # get a single probability from the mixed distribution
        pn = gaussian_mix(rn[0], rn[1])
        if pn >= p:
            # uphill moves are always accepted
            p = pn
            r = rn
        else:
            # downhill moves are accepted with probability pn / p
            u = np.random.rand()
            if u < pn / p:
                p = pn
                r = rn
        if i % s == 0:
            # thinning: keep every s-th state to reduce autocorrelation
            samples.append(r)
    samples = np.array(samples)
    if show or save:
        plt.scatter(samples[:, 0], samples[:, 1], alpha=0.5, s=1)
        '''Plot target'''
        dx = 0.01
        x = np.arange(np.min(samples), np.max(samples), dx)
        y = np.arange(np.min(samples), np.max(samples), dx)
        X, Y = np.meshgrid(x, y)
        Z = gaussian_mix(X, Y)
        CS = plt.contour(X, Y, Z, 10, alpha=0.5)
        plt.clabel(CS, inline=1, fontsize=10)
        if save:
            plt.savefig(save)
    return samples
def fit_gmm(samples, show=True, save='class_predictions.png', verbosity=1):
    """Fit a 2-component full-covariance GMM and plot class assignments.

    NOTE(review): ``print gmm.means_`` is a Python 2 print statement and
    ``sklearn.mixture.GMM`` is the pre-0.18 sklearn API -- confirm the
    targeted Python/sklearn versions before modernizing.
    """
    gmm = mixture.GMM(n_components=2, covariance_type='full')
    gmm.fit(samples)
    if verbosity > 0:
        print gmm.means_
    colors = list('rgbyomck')
    if show:
        # color each sample by its predicted mixture component
        c = [colors[i % len(colors)] for i in gmm.predict(samples)]
        ax = plt.gca()
        ax.scatter(samples[:, 0], samples[:, 1], c=c, alpha=0.8)
    if save:
        plt.savefig(save)
if __name__ == '__main__':
    # demo pipeline: plot the target density, draw MH samples, fit a GMM
    # NOTE(review): plot_gaussians() is called without the meshgrid
    # arguments it requires -- see the note on its definition above.
    plot_gaussians()
    samples = sample_prgs()
    fit_gmm(samples)
| mit |
awni/tensorflow | tensorflow/examples/skflow/iris_val_based_early_stopping.py | 2 | 2221 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib import skflow
# Load the iris data set and carve out train / test / validation splits
# (20% held out for test, then 20% of the remainder for validation).
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                    iris.target,
                                                    test_size=0.2,
                                                    random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                  test_size=0.2, random_state=42)
# Monitor that stops training once the validation score has not improved
# for 200 rounds.
val_monitor = skflow.monitors.ValidationMonitor(X_val, y_val,
                                                early_stopping_rounds=200,
                                                n_classes=3)

# classifier with early stopping on training data
classifier1 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                             n_classes=3, steps=2000)
classifier1.fit(X_train, y_train)
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))

# classifier with early stopping on validation data
classifier2 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                             n_classes=3, steps=2000)
classifier2.fit(X_train, y_train, val_monitor)
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))

# in many applications, the score is improved by using early stopping on val data
print(score2 > score1)
| apache-2.0 |
ShujiaHuang/AsmVar | src/AsmvarVarScore/modul/VariantRecalibrator.py | 2 | 3592 | """
===============================================
===============================================
Author: Shujia Huang
Date : 2014-05-23 11:21:53
"""
import sys
import matplotlib.pyplot as plt
# My own class
import VariantDataManager as vdm
import VariantRecalibratorEngine as vre
import VariantRecalibratorArgumentCollection as VRAC
class VariantRecalibrator:
    def __init__ (self):
        """Wire together the argument collection, data manager and engine."""
        self.VRAC = VRAC.VariantRecalibratorArgumentCollection()
        self.dataManager = vdm.VariantDataManager()
        self.engine = vre.VariantRecalibratorEngine(self.VRAC)
        # LOD cutoff below which variants feed the negative (bad) model;
        # computed later in OnTraversalDone
        self.badLodCutoff = None
        # cumulative LOD distribution within the training set
        self.LodCumInTrain = []
def OnTraversalDone(self, data):
self.dataManager.SetData(data)
self.dataManager.NormalizeData()
# Generate the positive model using the training data and evaluate each variant
positiveTrainingData = self.dataManager.GetTrainingData()
print >> sys.stderr, '[INFO] Training the goodModel ...'
goodModel = self.engine.GenerateModel(positiveTrainingData, self.VRAC.MAX_GAUSSIANS)
print >> sys.stderr, '[INFO] The converged information of goodModel is:' , goodModel.converged_
print >> sys.stderr, '[INFO] The means of gaussion of goodModel is:\n' , goodModel.means_
print >> sys.stderr, '[INFO] The covariance of gaussion of goodModel is:\n', goodModel.covars_, '\n'
self.engine.EvaluateData(self.dataManager.data, goodModel, False)
self.badLodCutoff, self.LodCumInTrain = self.dataManager.CalculateWorstLodCutoff()
# Generate the negative model using the worst performing data and evaluate each variant contrastively
print >> sys.stderr, '[INFO] Training the badModel ...'
negativeTrainingData = self.dataManager.SelectWorstVariants(self.badLodCutoff)
badModel = self.engine.GenerateModel(negativeTrainingData, min(self.VRAC.MAX_GAUSSIANS_FOR_NEGATIVE_MODEL, self.VRAC.MAX_GAUSSIANS))
print >> sys.stderr, '\n[INFO] The converged information of badModel is:' , badModel.converged_
print >> sys.stderr, '[INFO] The means of gaussion of badModel is:\n' , badModel.means_
print >> sys.stderr, '[INFO] The covariance of gaussion of badModel is:\n', badModel.covars_, '\n'
self.engine.EvaluateData(self.dataManager.data, badModel, True)
if (not goodModel.converged_) or (not badModel.converged_): raise ValueError ('[ERROR] NaN LOD value assigned. Clustering with these variants and these annotations is unsafe. Please consider raising the number of variants used to train the negative model or lowering the maximum number of Gaussians allowed for use in the model.')
# Find the VQSLOD cutoff values which correspond to the various tranches of calls requested by the user
self.engine.CalculateWorstPerformingAnnotation(self.dataManager.data, goodModel, badModel)
def VisualizationLodVStrainingSet(self, figName):
fig = plt.figure()
plt.title('LOD VS Positive training set', fontsize = 14)
plt.plot(self.LodCumInTrain[:,0], self.LodCumInTrain[:,1], 'r-')
#plt.scatter(self.LodCumInTrain[:,0], self.LodCumInTrain[:,1], c='r', marker='.', linewidth = 0, alpha = 0.5)
plt.plot([self.badLodCutoff, self.badLodCutoff], [0,1], 'g--')
plt.ylim(0, 1.0)
plt.xlabel('Variant score threshold for the bad model', fontsize = 16)
plt.ylabel('Rate of Positive->Negative', fontsize = 16)
fig.savefig(figName + '.png')
fig.savefig(figName + '.pdf')
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.