repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
boada/wmh | mkPlayers.py | 1 | 3484 | import pandas as pd
from glob import glob
from string import capwords
import numpy as np
def levenshtein(source, target):
    """Compute the Levenshtein (edit) distance between two sequences.

    Uses a two-row dynamic-programming scheme, vectorized with numpy.

    source, target: sequences, typically strings
    returns: edit distance (int-like)
    """
    # Keep the longer sequence in ``source`` so the row buffers are sized
    # by the shorter one.
    if len(source) < len(target):
        source, target = target, source
    if len(target) == 0:
        return len(source)
    # tuple() forces numpy to treat the strings as character sequences
    # ('c', 'a', 't', 's') instead of scalar values.
    src = np.array(tuple(source))
    tgt = np.array(tuple(target))
    # previous_row[k] holds the distance between the processed prefix of
    # src and the first k characters of tgt.
    prev_row = np.arange(tgt.size + 1)
    for ch in src:
        # Start from the insertion cost (target grows longer than source).
        curr_row = prev_row + 1
        # Fold in substitution/match: aligned items cost 1 if different,
        # 0 if equal.
        curr_row[1:] = np.minimum(curr_row[1:],
                                  prev_row[:-1] + (tgt != ch))
        # Fold in deletion (target grows shorter than source).
        curr_row[1:] = np.minimum(curr_row[1:],
                                  curr_row[:-1] + 1)
        prev_row = curr_row
    return prev_row[-1]
files = glob('wtc_data/wtc*_results.json')

# Read every per-event result file into its own DataFrame.
results = [pd.read_json(f) for f in files]

# make sure first and last names are capitalized. capwords also removes
# leading and trailing white space.
for r in results:
    for i, p in r.player.iteritems():
        r.loc[i, 'player'] = capwords(p)

df = pd.concat(results, ignore_index=True)

# now get a sorted list of unique players
players = np.sort(df.player.unique())

# some of the player names will be very close. AKA misspellings. Compare each
# name against the next few names in the sorted list and record pairs whose
# levenshtein distance is small.
name_fix = {}
for i, p in enumerate(players):
    for j in range(4):
        try:
            dist = levenshtein(players[i], players[i + j + 1])
        except IndexError:
            # we ran off the end of the list; every larger j would too
            break
        if dist <= 2:
            print(players[i], players[i + j + 1])
            name_fix[players[i]] = players[i + j + 1]

# this should be visually inspected to make sure it looks good before replacing
# names in the data files. I manually removed 1 item and tweaked a second to
# make sure things worked out.
# After the visual inspect you can run the following code to update all of the
# data frames.
# manual edits:
# del name_fix['David Kane']
# del name_fix['Maurus Markwalder']
# name_fix['Maurus Markwalder:'] = 'Maurus Markwalder'
if False:  # flip to True only after visually inspecting name_fix
    for r in results:
        for n in name_fix:
            # Apply every detected rename. (The original body hard-coded a
            # single name and ignored the loop variable n -- a bug.)
            r.loc[r['player'] == n, 'player'] = name_fix[n]
# After this point. You can update the number of changes in the levenshtein
# threshold to 3 and rerun things. This gives a few more changes, most
# of which aren't real changes. I did update a few at this point 'Matt' ->
# 'Matthew' for example. Again, this has to be done interactive, to avoid names
# that really shouldn't be changed.
# Here are the changes:
# name_fix['Jay Mcleod'] = 'Jason Mcleod'
# name_fix['Freek Punt'] = 'Frederik Punt'
# name_fix['Nikos Kerazoglou'] = 'Nikolaos Kerazoglou'
# name_fix['Michal Konieczny'] = 'Michal Nakonieczny' -- this is incorrect
# name_fix['Matt Goligher'] = 'Matthew Goligher'
| mit |
theakholic/ThinkStats2 | code/survival.py | 65 | 17881 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import pandas
import nsfg
import thinkstats2
import thinkplot
"""
Outcome codes from http://www.icpsr.umich.edu/nsfg6/Controller?
displayPage=labelDetails&fileCode=PREG§ion=&subSec=8016&srtLabel=611932
1 LIVE BIRTH 9148
2 INDUCED ABORTION 1862
3 STILLBIRTH 120
4 MISCARRIAGE 1921
5 ECTOPIC PREGNANCY 190
6 CURRENT PREGNANCY 352
"""
FORMATS = ['pdf', 'eps', 'png']
class SurvivalFunction(object):
    """Represents a survival function, S(t) = 1 - CDF(t)."""
    def __init__(self, cdf, label=''):
        # cdf: thinkstats2.Cdf of lifetimes; label defaults to the Cdf's label
        self.cdf = cdf
        self.label = label or cdf.label
    @property
    def ts(self):
        # sequence of times (the Cdf's values)
        return self.cdf.xs
    @property
    def ss(self):
        # survival probabilities, S(t) = 1 - CDF(t)
        return 1 - self.cdf.ps
    def __getitem__(self, t):
        return self.Prob(t)
    def Prob(self, t):
        """Returns S(t), the probability that corresponds to value t.
        t: time
        returns: float probability
        """
        return 1 - self.cdf.Prob(t)
    def Probs(self, xs):
        """Gets probabilities for a sequence of values."""
        return [self.Prob(x) for x in xs]
    def Mean(self):
        """Mean survival time."""
        return self.cdf.Mean()
    def Items(self):
        """Sorted list of (t, s) pairs."""
        return zip(self.ts, self.ss)
    def Render(self):
        """Generates a sequence of points suitable for plotting.
        returns: tuple of (sorted times, survival function)
        """
        return self.ts, self.ss
    def MakeHazard(self, label=''):
        """Computes the hazard function.
        returns: HazardFunction that maps times to hazard rates
        """
        ss = self.ss
        lams = {}
        # hazard at t is the fraction of survivors at t that do not
        # survive to the next observed time
        for i, t in enumerate(self.ts[:-1]):
            hazard = (ss[i] - ss[i+1]) / ss[i]
            lams[t] = hazard
        return HazardFunction(lams, label=label)
    def MakePmf(self, filler=None):
        """Makes a PMF of lifetimes.
        filler: value to replace missing values
        returns: Pmf
        """
        pmf = thinkstats2.Pmf()
        for val, prob in self.cdf.Items():
            pmf.Set(val, prob)
        # if the Cdf does not reach 1, assign the remaining mass to filler
        cutoff = self.cdf.ps[-1]
        if filler is not None:
            pmf[filler] = 1-cutoff
        return pmf
    def RemainingLifetime(self, filler=None, func=thinkstats2.Pmf.Mean):
        """Computes remaining lifetime as a function of age.
        filler: value to replace missing values (passed to MakePmf)
        func: function from conditional Pmf to expected lifetime
        returns: Series that maps from age to remaining lifetime
        """
        pmf = self.MakePmf(filler=filler)
        d = {}
        for t in sorted(pmf.Values())[:-1]:
            # condition on surviving past t by removing the mass at t
            pmf[t] = 0
            pmf.Normalize()
            d[t] = func(pmf) - t
            #print(t, d[t])
        return pandas.Series(d)
class HazardFunction(object):
    """Represents a hazard function, lambda(t)."""
    def __init__(self, d, label=''):
        """Initialize the hazard function.
        d: dictionary (or anything that can initialize a series)
        label: string
        """
        self.series = pandas.Series(d)
        self.label = label
    def __getitem__(self, t):
        # hazard rate at time t
        return self.series[t]
    def Render(self):
        """Generates a sequence of points suitable for plotting.
        returns: tuple of (sorted times, hazard function)
        """
        return self.series.index, self.series.values
    def MakeSurvival(self, label=''):
        """Makes the survival function.
        returns: SurvivalFunction
        """
        ts = self.series.index
        # S(t) is the cumulative product of the per-step survival
        # probabilities (1 - lambda)
        ss = (1 - self.series).cumprod()
        cdf = thinkstats2.Cdf(ts, 1-ss)
        sf = SurvivalFunction(cdf, label=label)
        return sf
    def Extend(self, other):
        """Extends this hazard function by copying the tail from another.
        other: HazardFunction
        """
        # append only the part of `other` that lies past our last time
        last = self.series.index[-1]
        more = other.series[other.series.index > last]
        self.series = pandas.concat([self.series, more])
def ConditionalSurvival(pmf, t0):
    """Computes conditional survival function.
    Probability that duration exceeds t0+t, given that
    duration >= t0.
    pmf: Pmf of durations
    t0: minimum time
    returns: SurvivalFunction of the conditional distribution
    """
    cond = thinkstats2.Pmf()
    for t, p in pmf.Items():
        if t >= t0:
            # shift the time origin to t0
            cond.Set(t-t0, p)
    return SurvivalFunction(thinkstats2.Cdf(cond))
def PlotConditionalSurvival(durations):
    """Plots conditional survival curves for a range of t0.
    durations: list of durations
    """
    pmf = thinkstats2.Pmf(durations)
    # condition on several starting times
    times = [8, 16, 24, 32]
    thinkplot.PrePlot(len(times))
    for t0 in times:
        sf = ConditionalSurvival(pmf, t0)
        label = 't0=%d' % t0
        thinkplot.Plot(sf, label=label)
    thinkplot.Show()
def PlotSurvival(complete):
    """Plots survival and hazard curves in a two-row figure.
    complete: list of complete lifetimes
    """
    thinkplot.PrePlot(3, rows=2)
    cdf = thinkstats2.Cdf(complete, label='cdf')
    sf = SurvivalFunction(cdf, label='survival')
    # spot-check CDF and survival values at t=13
    print(cdf[13])
    print(sf[13])
    thinkplot.Plot(sf)
    thinkplot.Cdf(cdf, alpha=0.2)
    thinkplot.Config()
    # second subplot: the hazard function
    thinkplot.SubPlot(2)
    hf = sf.MakeHazard(label='hazard')
    print(hf[39])
    thinkplot.Plot(hf)
    thinkplot.Config(ylim=[0, 0.75])
def PlotHazard(complete, ongoing):
    """Plots the hazard function and survival function.
    complete: list of complete lifetimes
    ongoing: list of ongoing lifetimes
    """
    # plot S(t) based on only complete pregnancies
    cdf = thinkstats2.Cdf(complete)
    sf = SurvivalFunction(cdf)
    thinkplot.Plot(sf, label='old S(t)', alpha=0.1)
    thinkplot.PrePlot(2)
    # plot the hazard function estimated from both groups
    hf = EstimateHazardFunction(complete, ongoing)
    thinkplot.Plot(hf, label='lams(t)', alpha=0.5)
    # plot the survival function implied by the hazard function
    sf = hf.MakeSurvival()
    thinkplot.Plot(sf, label='S(t)')
    thinkplot.Show(xlabel='t (weeks)')
def EstimateHazardFunction(complete, ongoing, label='', shift=1e-7):
    """Estimates the hazard function by Kaplan-Meier.
    http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
    complete: list of complete lifetimes
    ongoing: list of ongoing lifetimes
    label: string
    shift: presumed additional survival of ongoing
    returns: HazardFunction
    """
    # pmf and sf of complete lifetimes
    n = len(complete)
    hist_complete = thinkstats2.Hist(complete)
    sf_complete = SurvivalFunction(thinkstats2.Cdf(complete))
    # sf for ongoing lifetimes
    # The shift is a regrettable hack needed to deal with simultaneity.
    # If a case is complete at some t and another case is ongoing
    # at t, we presume that the ongoing case exceeds t+shift.
    m = len(ongoing)
    cdf = thinkstats2.Cdf(ongoing).Shift(shift)
    sf_ongoing = SurvivalFunction(cdf)
    lams = {}
    for t, ended in sorted(hist_complete.Items()):
        # at_risk counts cases that ended at t plus cases (complete or
        # ongoing) still surviving at t
        at_risk = ended + n * sf_complete[t] + m * sf_ongoing[t]
        lams[t] = ended / at_risk
        #print(t, ended, n * sf_complete[t], m * sf_ongoing[t], at_risk)
    return HazardFunction(lams, label=label)
def CleanData(resp):
    """Cleans a respondent DataFrame in place.

    Replaces coded missing marriage dates with NaN and adds derived
    columns: agemarry (age at first marriage, years), age (age at
    interview, years) and decade (decade of birth, 0 = 1900s).

    resp: DataFrame of respondents
    """
    missing_codes = [9997, 9998, 9999]
    resp.cmmarrhx.replace(missing_codes, np.nan, inplace=True)
    # century-month fields count months; divide by 12 to get years
    resp['agemarry'] = (resp.cmmarrhx - resp.cmbirth) / 12.0
    resp['age'] = (resp.cmintvw - resp.cmbirth) / 12.0
    # century-month 0 corresponds to December 1899
    origin = pandas.to_datetime('1899-12-15')
    birth_dates = [origin + pandas.DateOffset(months=cm)
                   for cm in resp.cmbirth]
    resp['decade'] = (pandas.DatetimeIndex(birth_dates).year - 1900) // 10
def AddLabelsByDecade(groups, **options):
    """Draws fake points in order to add labels to the legend.
    groups: GroupBy object keyed by decade index
    """
    thinkplot.PrePlot(len(groups))
    for name, _ in groups:
        # name is the decade index; e.g. 5 renders as "50s"
        label = '%d0s' % name
        # plot a single dummy point just to register the legend entry
        thinkplot.Plot([15], [1], label=label, **options)
def EstimateSurvivalByDecade(groups, **options):
    """Groups respondents by decade and plots survival curves.
    groups: GroupBy object
    options: passed through to thinkplot.Plot
    """
    thinkplot.PrePlot(len(groups))
    for _, group in groups:
        # one survival curve per decade cohort
        _, sf = EstimateSurvival(group)
        thinkplot.Plot(sf, **options)
def PlotPredictionsByDecade(groups, **options):
    """Groups respondents by decade and plots predicted survival curves.
    groups: GroupBy object
    options: passed through to thinkplot.Plot
    """
    hfs = []
    for _, group in groups:
        hf, sf = EstimateSurvival(group)
        hfs.append(hf)
    thinkplot.PrePlot(len(hfs))
    for i, hf in enumerate(hfs):
        if i > 0:
            # fill in the unobserved tail of each cohort's hazard function
            # with the tail of the preceding cohort in the list
            hf.Extend(hfs[i-1])
        sf = hf.MakeSurvival()
        thinkplot.Plot(sf, **options)
def ResampleSurvival(resp, iters=101):
    """Resamples respondents and estimates the survival function.
    Plots the point estimate plus a 90% confidence band from weighted
    resampling, and saves the figure.
    resp: DataFrame of respondents
    iters: number of resamples
    """
    _, sf = EstimateSurvival(resp)
    thinkplot.Plot(sf)
    # evaluate each resampled curve on a common monthly grid of ages
    low, high = resp.agemarry.min(), resp.agemarry.max()
    ts = np.arange(low, high, 1/12.0)
    ss_seq = []
    for _ in range(iters):
        sample = thinkstats2.ResampleRowsWeighted(resp)
        _, sf = EstimateSurvival(sample)
        ss_seq.append(sf.Probs(ts))
    # 5th and 95th percentiles bound the 90% CI
    low, high = thinkstats2.PercentileRows(ss_seq, [5, 95])
    thinkplot.FillBetween(ts, low, high, color='gray', label='90% CI')
    thinkplot.Save(root='survival3',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   xlim=[12, 46],
                   ylim=[0, 1],
                   formats=FORMATS)
def EstimateSurvival(resp):
    """Estimates the survival curve for age at first marriage.
    resp: DataFrame of respondents
    returns: pair of HazardFunction, SurvivalFunction
    """
    # age at marriage for ever-married respondents; current age for others
    complete = resp[resp.evrmarry == 1].agemarry
    ongoing = resp[resp.evrmarry == 0].age
    hf = EstimateHazardFunction(complete, ongoing)
    sf = hf.MakeSurvival()
    return hf, sf
def PlotMarriageData(resp):
    """Plots hazard and survival functions for age at first marriage.
    resp: DataFrame of respondents
    returns: SurvivalFunction
    """
    hf, sf = EstimateSurvival(resp)
    # hazard on top, survival on the bottom
    thinkplot.PrePlot(rows=2)
    thinkplot.Plot(hf)
    thinkplot.Config(legend=False)
    thinkplot.SubPlot(2)
    thinkplot.Plot(sf)
    thinkplot.Save(root='survival2',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   ylim=[0, 1],
                   legend=False,
                   formats=FORMATS)
    return sf
def PlotPregnancyData(preg):
    """Plots survival and hazard curves based on pregnancy lengths.
    preg: DataFrame of pregnancies
    returns: SurvivalFunction
    """
    # outcomes 1, 3 and 4 are live birth, stillbirth and miscarriage
    # (see the outcome-code table at the top of this file)
    complete = preg.query('outcome in [1, 3, 4]').prglngth
    print('Number of complete pregnancies', len(complete))
    # outcome 6 is a current (ongoing) pregnancy
    ongoing = preg[preg.outcome == 6].prglngth
    print('Number of ongoing pregnancies', len(ongoing))
    PlotSurvival(complete)
    thinkplot.Save(root='survival1',
                   xlabel='t (weeks)',
                   formats=FORMATS)
    hf = EstimateHazardFunction(complete, ongoing)
    sf = hf.MakeSurvival()
    return sf
def PlotRemainingLifetime(sf1, sf2):
    """Plots remaining lifetimes for pregnancy and age at first marriage.
    sf1: SurvivalFunction for pregnancy length
    sf2: SurvivalFunction for age at first marriage
    """
    thinkplot.PrePlot(cols=2)
    rem_life1 = sf1.RemainingLifetime()
    thinkplot.Plot(rem_life1)
    thinkplot.Config(title='pregnancy length',
                     xlabel='weeks',
                     ylabel='mean remaining weeks')
    thinkplot.SubPlot(2)
    # use the median here; it stays finite despite the inf filler used
    # for respondents who never marry
    func = lambda pmf: pmf.Percentile(50)
    rem_life2 = sf2.RemainingLifetime(filler=np.inf, func=func)
    thinkplot.Plot(rem_life2)
    thinkplot.Config(title='age at first marriage',
                     ylim=[0, 15],
                     xlim=[11, 31],
                     xlabel='age (years)',
                     ylabel='median remaining years')
    thinkplot.Save(root='survival6',
                   formats=FORMATS)
def ReadFemResp(dct_file='2002FemResp.dct',
                dat_file='2002FemResp.dat.gz',
                **options):
    """Reads the NSFG respondent data.
    dct_file: string file name of the Stata dictionary
    dat_file: string file name of the (gzipped) fixed-width data file
    options: passed through to ReadFixedWidth (e.g. usecols)
    returns: DataFrame, already passed through CleanData
    """
    dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
    df = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
    CleanData(df)
    return df
def ReadFemResp2002():
    """Reads respondent data from NSFG Cycle 6.

    returns: DataFrame
    """
    usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'finalwgt']
    # ReadFemResp already runs CleanData on the frame it returns, so the
    # second CleanData call that used to follow here was redundant
    # (CleanData is idempotent) and has been removed.
    return ReadFemResp(usecols=usecols)
def ReadFemResp2010():
    """Reads respondent data from NSFG Cycle 7.

    returns: DataFrame
    """
    usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'wgtq1q16']
    resp = ReadFemResp('2006_2010_FemRespSetup.dct',
                       '2006_2010_FemResp.dat.gz',
                       usecols=usecols)
    # standardize the sampling-weight column name used by the other cycles
    resp['finalwgt'] = resp.wgtq1q16
    # ReadFemResp already ran CleanData; the second call that used to
    # follow was redundant (CleanData is idempotent) and has been removed.
    return resp
def ReadFemResp2013():
    """Reads respondent data from NSFG Cycle 8.

    returns: DataFrame
    """
    usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'wgt2011_2013']
    resp = ReadFemResp('2011_2013_FemRespSetup.dct',
                       '2011_2013_FemRespData.dat.gz',
                       usecols=usecols)
    # standardize the sampling-weight column name used by the other cycles
    resp['finalwgt'] = resp.wgt2011_2013
    # ReadFemResp already ran CleanData; the second call that used to
    # follow was redundant (CleanData is idempotent) and has been removed.
    return resp
def ReadFemResp1995():
    """Reads respondent data from NSFG Cycle 5.
    returns: DataFrame
    """
    dat_file = '1995FemRespData.dat.gz'
    # raw 1995 variable names and their fixed-width column positions
    names = ['a_doi', 'timesmar', 'mardat01', 'bdaycenm', 'post_wt']
    colspecs = [(12359, 12363),
                (3538, 3540),
                (11758, 11762),
                (13, 16),
                (12349, 12359)]
    df = pandas.read_fwf(dat_file,
                         compression='gzip',
                         colspecs=colspecs,
                         names=names)
    # map the 1995 variable names onto the names CleanData expects
    df['cmmarrhx'] = df.mardat01
    df['cmbirth'] = df.bdaycenm
    df['cmintvw'] = df.a_doi
    df['finalwgt'] = df.post_wt
    # 98/99 are missing-data codes for number of times married
    df.timesmar.replace([98, 99], np.nan, inplace=True)
    df['evrmarry'] = (df.timesmar > 0).astype(int)
    CleanData(df)
    return df
def ReadFemResp1982():
    """Reads respondent data from the 1982 NSFG.
    returns: DataFrame
    """
    dat_file = '1982NSFGData.dat.gz'
    names = ['cmmarrhx', 'MARNO', 'cmintvw', 'cmbirth', 'finalwgt']
    #actual = ['MARIMO', 'MARNO', 'TL', 'TL', 'W5']
    colspecs = [(1028, 1031),
                (1258, 1259),
                (841, 844),
                (12, 15),
                (976, 982)]
    df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
    # 98/99 are missing-data codes for number of marriages
    df.MARNO.replace([98, 99], np.nan, inplace=True)
    df['evrmarry'] = (df.MARNO > 0).astype(int)
    CleanData(df)
    # NOTE(review): only the first 7969 rows are kept; presumably the rest
    # are invalid or out of scope -- confirm against the codebook.
    return df[:7969]
def ReadFemResp1988():
    """Reads respondent data from the 1988 NSFG (Cycle 4).
    returns: DataFrame
    """
    dat_file = '1988FemRespData.dat.gz'
    # Only F_13 is read; judging by the evrmarry computation below it is
    # presumably a times-married count -- confirm against the codebook.
    # The other variables are kept here, commented out, for reference.
    names = ['F_13'] #['CMOIMO', 'F_13', 'F19M1MO', 'A_3']
    # colspecs = [(799, 803)],
    colspecs = [(20, 22)]#,
    #            (1538, 1542),
    #            (26, 30),
    #            (2568, 2574)]
    df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
    # df['cmmarrhx'] = df.F19M1MO
    # df['cmbirth'] = df.A_3
    # df['cmintvw'] = df.CMOIMO
    # df['finalwgt'] = df.W5
    # 98/99 are missing-data codes
    df.F_13.replace([98, 99], np.nan, inplace=True)
    df['evrmarry'] = (df.F_13 > 0).astype(int)
    # CleanData(df)
    return df
def PlotResampledByDecade(resps, iters=11, predict_flag=False, omit=None):
    """Plots survival curves for resampled data.
    resps: list of DataFrames
    iters: number of resamples to plot
    predict_flag: whether to also plot predictions
    omit: collection of decade labels to leave out, or None
    """
    for i in range(iters):
        # resample each cycle separately (weights differ), then pool
        samples = [thinkstats2.ResampleRowsWeighted(resp)
                   for resp in resps]
        sample = pandas.concat(samples, ignore_index=True)
        groups = sample.groupby('decade')
        if omit:
            groups = [(name, group) for name, group in groups
                      if name not in omit]
        # TODO: refactor this to collect resampled estimates and
        # plot shaded areas
        if i == 0:
            # register legend labels only once
            AddLabelsByDecade(groups, alpha=0.7)
        if predict_flag:
            PlotPredictionsByDecade(groups, alpha=0.1)
            EstimateSurvivalByDecade(groups, alpha=0.1)
        else:
            EstimateSurvivalByDecade(groups, alpha=0.2)
def main():
    """Generates all the survival-analysis figures for this chapter."""
    thinkstats2.RandomSeed(17)
    # pregnancy-length survival curves
    preg = nsfg.ReadFemPreg()
    sf1 = PlotPregnancyData(preg)
    # make the plots based on Cycle 6
    resp6 = ReadFemResp2002()
    sf2 = PlotMarriageData(resp6)
    ResampleSurvival(resp6)
    PlotRemainingLifetime(sf1, sf2)
    # read Cycles 5 and 7
    resp5 = ReadFemResp1995()
    resp7 = ReadFemResp2010()
    # plot resampled survival functions by decade
    resps = [resp5, resp6, resp7]
    PlotResampledByDecade(resps)
    thinkplot.Save(root='survival4',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   xlim=[13, 45],
                   ylim=[0, 1],
                   formats=FORMATS)
    # plot resampled survival functions by decade, with predictions;
    # decade 5 (the 1950s cohort) is omitted
    PlotResampledByDecade(resps, predict_flag=True, omit=[5])
    thinkplot.Save(root='survival5',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   xlim=[13, 45],
                   ylim=[0, 1],
                   formats=FORMATS)
| gpl-3.0 |
genialis/resolwe-bio | resolwe_bio/tools/plotcoverage_html.py | 1 | 4102 | #!/usr/bin/env python3
"""Plot amplicon coverage as HTML file with Bokeh."""
import argparse
import os
import pandas as pd
from bokeh import layouts
from bokeh.embed import components
from bokeh.models import (
BoxZoomTool,
HoverTool,
PanTool,
Range1d,
RedoTool,
ResetTool,
SaveTool,
UndoTool,
WheelZoomTool,
)
from bokeh.plotting import ColumnDataSource, figure, output_file
from jinja2 import Environment, FileSystemLoader
COLOR_CYCLE = [
"#586e75",
"#b58900",
"#268bd2",
"#cb4b16",
"#859900",
"#d33682",
"#2aa198",
"#dc322f",
"#073642",
"#6c71c4",
]
def parse_arguments():
    """Parse command line arguments.

    Returns the parsed argparse.Namespace with infile, template and
    outfile attributes. All three options are required.
    """
    parser = argparse.ArgumentParser(
        description="Plot amplicon coverage as HTML file with Bokeh."
    )
    parser.add_argument("-i", "--infile", help="Input filename", required=True)
    # Fixed copy-paste help text: this option is the Jinja2 template, not
    # the input file.
    parser.add_argument("-t", "--template", help="Template filename", required=True)
    parser.add_argument("-o", "--outfile", help="Output filename", required=True)
    return parser.parse_args()
def main():
    """Invoke when run directly as a program.

    Reads a whitespace-separated coverage table (amplicon, meancov, gene),
    renders a log-scale Bokeh scatter plot of per-amplicon coverage with
    mean-coverage reference lines, and writes the plot into an HTML page
    (plus a sibling plot.js) via a Jinja2 template.
    """
    args = parse_arguments()
    df = pd.read_csv(args.infile, sep=r"\s+", names=["amplicon", "meancov", "gene"])
    # shift zero values by 0.1 so they are representable on the log axis
    df["offsetcov"] = df["meancov"] + 0.1
    df = df.dropna().reset_index(drop=True)
    # Make a hover (show amplicon name on mouse-over)
    hover = HoverTool(tooltips=[("Amplicon", "@names"), ("Y value", "$y")])
    tools = [
        PanTool(),
        BoxZoomTool(),
        WheelZoomTool(),
        RedoTool(),
        UndoTool(),
        ResetTool(),
        hover,
        SaveTool(),
    ]
    # Produce plot
    output_file(args.outfile)
    fig = figure(tools=tools, width=1200, height=600, y_axis_type="log")
    # Fill plot with one point for each amplicon:
    xvals, yvals, labels, colors = [], [], [], []
    for i, (name, group) in enumerate(df.groupby("gene", sort=False)):
        xvals.extend(list(group.index))
        yvals.extend(list(group.offsetcov))
        labels.extend(list(group.amplicon))
        # Elements in the same group should have the same color. Cycle between colors in COLOR_CYCLE:
        colors.extend([COLOR_CYCLE[i % len(COLOR_CYCLE)]] * len(list(group.index)))
    data = ColumnDataSource(data=dict(x=xvals, y=yvals, names=labels, colors=colors))
    fig.circle(x="x", y="y", color="colors", size=10, source=data)
    # Make span lines on 0.05, 0.1, 0.2, 1 and 5 multiples of mean amplicon coverage:
    mean_coverage = df.offsetcov.mean()
    span_lines = [
        (5.0, "Blue"),
        (1.0, "Green"),
        (0.2, "Red"),
        (0.1, "Purple"),
        (0.05, "Magenta"),
    ]
    # NOTE(review): min()/max() raise ValueError if the input table is
    # empty after dropna -- confirm inputs are guaranteed non-empty.
    xmin, xmax = min(xvals) - 1, max(xvals) + 1
    for ratio, color in span_lines:
        fig.line(
            [xmin, xmax],
            [mean_coverage * ratio] * 2,
            line_color=color,
            line_dash="dashed",
            legend="{:.0f} % of mean coverage".format(ratio * 100),
        )
    # Customize plot:
    ymax = 2.0 * max(df.offsetcov.max() + 1000, mean_coverage * 5)
    ymin = 0.2
    fig.y_range = Range1d(ymin, ymax)
    fig.x_range = Range1d(xmin, xmax)
    fig.xaxis.major_tick_line_color = None  # Turn off x-axis major ticks
    fig.xaxis.minor_tick_line_color = None  # Turn off x-axis minor ticks
    fig.xaxis.major_label_text_font_size = "0pt"  # Hack to remove tick labels
    fig.xaxis.axis_label = "Amplicon"
    fig.yaxis.axis_label = "Log10 (Amplicon coverage)"
    fig.legend.location = "bottom_right"
    script, div = components(layouts.row(fig))
    # Drop the first two and the last line of the embedded script --
    # NOTE(review): this assumes Bokeh wraps the JS in <script> tags;
    # re-verify on Bokeh upgrades.
    with open(os.path.join(os.path.dirname(args.outfile), "plot.js"), "wt") as jfile:
        jfile.write("\n".join(script.split("\n")[2:-1]))
    with open(args.outfile, "wt") as ofile:
        env = Environment(loader=FileSystemLoader(os.path.dirname(args.template)))
        page_template = env.get_template(os.path.basename(args.template))
        html_text = page_template.render({"bokeh_div": div})
        ofile.write(html_text)
if __name__ == "__main__":
main()
| apache-2.0 |
wlamond/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenge
is to position the labels minimizing overlap. For this we use an
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T

# Download the historical quotes for every symbol (requires network access).
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
          for symbol in symbols]

# Use names that do not shadow the `open` builtin, and the builtin `float`
# dtype (`np.float` is a deprecated alias removed in NumPy 1.24).
opening_prices = np.array([q.open for q in quotes]).astype(float)
closing_prices = np.array([q.close for q in quotes]).astype(float)

# The daily variations of the quotes are what carry most information
variation = closing_prices - opening_prices
###############################################################################
# Learn a graphical structure from the correlations
# NOTE(review): GraphLassoCV was renamed GraphicalLassoCV in scikit-learn
# 0.20; newer versions need the new name.
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation on the estimated covariance
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
    print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
    n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations: normalize the precision
# matrix so its diagonal becomes 1
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
# keep only the strong upper-triangular links as graph edges
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
# NOTE(review): plt.cm.spectral was removed in matplotlib 2.2; newer
# versions need plt.cm.nipy_spectral (here and in the labels below).
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
            cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
#     linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
            for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels; the heuristic
# nudges each label away from its nearest neighbor along each axis.
for index, (name, label, (x, y)) in enumerate(
        zip(names, labels, embedding.T)):
    dx = x - embedding[0]
    dx[index] = 1
    dy = y - embedding[1]
    dy[index] = 1
    this_dx = dx[np.argmin(np.abs(dy))]
    this_dy = dy[np.argmin(np.abs(dx))]
    if this_dx > 0:
        horizontalalignment = 'left'
        x = x + .002
    else:
        horizontalalignment = 'right'
        x = x - .002
    if this_dy > 0:
        verticalalignment = 'bottom'
        y = y + .002
    else:
        verticalalignment = 'top'
        y = y - .002
    plt.text(x, y, name, size=10,
             horizontalalignment=horizontalalignment,
             verticalalignment=verticalalignment,
             bbox=dict(facecolor='w',
                       edgecolor=plt.cm.spectral(label / float(n_labels)),
                       alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
         embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
         embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
wagnerpeer/gitexplorer | gitexplorer/visualizations/punchcard.py | 1 | 3117 | '''
Created on 28.08.2017
@author: Peer
'''
from collections import defaultdict
import datetime
from itertools import chain
import matplotlib.pyplot as plt
from gitexplorer.basics import GitExplorerBase
def draw_punchcard(infos,
                   xaxis_range=24,
                   yaxis_range=7,
                   xaxis_ticks=range(24),
                   yaxis_ticks=['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                                'Friday', 'Saturday', 'Sunday'],
                   xaxis_label='Hour',
                   yaxis_label='Day'):
    """Draw a punchcard plot of `infos` onto the current matplotlib figure.

    Parameters
    ----------
    infos : mapping
        Maps (y_index, x_index) tuples (e.g. (weekday, hour)) to a count;
        circle radii are proportional to the count.
    xaxis_range, yaxis_range : int, optional
        Number of cells along each axis.
    xaxis_ticks, yaxis_ticks : sequence, optional
        Tick labels for the two axes.
    xaxis_label, yaxis_label : str, optional
        Axis captions.
    """
    # build the array which contains the values
    data = [[0.0] * xaxis_range for _ in range(yaxis_range)]
    for (y_key, x_key), value in infos.items():
        data[y_key][x_key] = value

    max_value = float(max(chain.from_iterable(data)))
    # BUG FIX: an empty (or all-zero) `infos` previously caused a
    # ZeroDivisionError in the radius normalisation below.
    if max_value == 0.0:
        max_value = 1.0

    # Draw the punchcard (create one circle per element)
    # Ugly normalisation allows to obtain perfect circles instead of ovals....
    for x in range(xaxis_range):
        for y in range(yaxis_range):
            circle = plt.Circle((x, y),
                                data[y][x] / 2 / max_value)
            plt.gca().add_artist(circle)

    plt.xlim(0, xaxis_range)
    plt.ylim(0, yaxis_range)
    plt.xticks(range(xaxis_range), xaxis_ticks)
    plt.yticks(range(yaxis_range), yaxis_ticks)
    plt.xlabel(xaxis_label)
    plt.ylabel(yaxis_label)
    # Punchcards read top-to-bottom (Monday first).
    plt.gca().invert_yaxis()

    # make sure the axes are equal, and resize the canvas to fit the plot
    plt.axis('scaled')
    margin = 0.7
    # BUG FIX: the viewport was hard-coded to the default 24x7 grid
    # (constants 23 and 6); derive it from the requested axis ranges so
    # non-default grids render correctly.
    plt.axis([-margin, (xaxis_range - 1) + margin,
              (yaxis_range - 1) + margin, -margin])
    scale = 0.5
    plt.gcf().set_size_inches(xaxis_range * scale, yaxis_range * scale, forward=True)
    plt.tight_layout()
def collect_data(commits):
    """Count commits per (weekday, hour) bucket.

    Parameters
    ----------
    commits : iterable of mapping
        Each item must have a ``'date'`` entry holding a datetime.

    Returns
    -------
    collections.defaultdict
        Maps ``(weekday, hour)`` to the number of commits in that slot,
        where weekday is 0 (Monday) through 6 (Sunday).
    """
    counts = defaultdict(int)
    for entry in commits:
        timestamp = entry['date']
        bucket = (timestamp.isoweekday() - 1, timestamp.hour)
        counts[bucket] += 1
    return counts
def find_commits(reference_day=None,
                 days_before_reference=30,
                 number_of_commits=None):
    '''Load commits from database meeting certain conditions.

    Parameters
    ----------
    reference_day: datetime.datetime, optional
        End of the time window. Defaults to "now" at call time.
    days_before_reference: int (>=0), optional
        Limit commits to number of days before reference_day
    number_of_commits: int (>=0), optional
        Limit the number of commits. If given it takes precedence before days_before_today.

    Returns
    -------
    Documents meeting criteria defined through parameters
    '''
    # BUG FIX: the old default `reference_day=datetime.datetime.today()`
    # was evaluated once at import time, so a long-running process kept
    # querying against a stale date. Resolve "today" at call time instead.
    if reference_day is None:
        reference_day = datetime.datetime.today()

    criteria = {}
    if(number_of_commits is None):
        datetime_limit = reference_day - datetime.timedelta(days=days_before_reference)
        criteria = {'date': {'$lte': reference_day, '$gte': datetime_limit}}

    gitexplorer_database = GitExplorerBase.get_gitexplorer_database()
    cursor = gitexplorer_database['commit_collection'].find(criteria)
    if(number_of_commits is not None):
        cursor = cursor.limit(number_of_commits)

    return cursor
if(__name__ == '__main__'):
    # Entry point: punchcard of commit activity over the last 90 days.
    infos = collect_data(find_commits(days_before_reference=90,
                                      number_of_commits=None))
    draw_punchcard(infos)
    plt.show()
| mit |
TomAugspurger/pandas | pandas/tests/tools/test_to_numeric.py | 1 | 18993 | import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, to_numeric
import pandas._testing as tm
@pytest.fixture(params=[None, "ignore", "raise", "coerce"])
def errors(request):
    # Every accepted value of to_numeric's `errors` argument
    # (None means "use the default").
    return request.param


@pytest.fixture(params=[True, False])
def signed(request):
    # Whether the test value should be negated.
    return request.param


@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"])
def transform(request):
    # Feed the value either unchanged or as its string representation.
    return request.param


@pytest.fixture(params=[47393996303418497800, 100000000000000000000])
def large_val(request):
    # Integers too large for int64/uint64.
    return request.param


@pytest.fixture(params=[True, False])
def multiple_elts(request):
    # Whether to append extra (unparseable) elements to the test array.
    return request.param


@pytest.fixture(
    params=[
        (lambda x: Index(x, name="idx"), tm.assert_index_equal),
        (lambda x: Series(x, name="ser"), tm.assert_series_equal),
        (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal),
    ]
)
def transform_assert_equal(request):
    # Pairs each container constructor with its matching equality assertion.
    return request.param
@pytest.mark.parametrize(
    "input_kwargs,result_kwargs",
    [
        (dict(), dict(dtype=np.int64)),
        (dict(errors="coerce", downcast="integer"), dict(dtype=np.int8)),
    ],
)
def test_empty(input_kwargs, result_kwargs):
    # see gh-16302
    ser = Series([], dtype=object)

    result = to_numeric(ser, **input_kwargs)
    expected = Series([], **result_kwargs)

    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("last_val", ["7", 7])
def test_series(last_val):
    # Mixed strings/ints in a Series convert to a float Series.
    ser = Series(["1", "-3.14", last_val])
    result = to_numeric(ser)

    expected = Series([1, -3.14, 7])
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "data",
    [
        [1, 3, 4, 5],
        [1.0, 3.0, 4.0, 5.0],
        # Bool is regarded as numeric.
        [True, False, True, True],
    ],
)
def test_series_numeric(data):
    # Already-numeric Series pass through unchanged (index and name kept).
    ser = Series(data, index=list("ABCD"), name="EFG")

    result = to_numeric(ser)
    tm.assert_series_equal(result, ser)


@pytest.mark.parametrize(
    "data,msg",
    [
        ([1, -3.14, "apple"], 'Unable to parse string "apple" at position 2'),
        (
            ["orange", 1, -3.14, "apple"],
            'Unable to parse string "orange" at position 0',
        ),
    ],
)
def test_error(data, msg):
    # errors="raise" reports the first unparseable element and its position.
    ser = Series(data)

    with pytest.raises(ValueError, match=msg):
        to_numeric(ser, errors="raise")


@pytest.mark.parametrize(
    "errors,exp_data", [("ignore", [1, -3.14, "apple"]), ("coerce", [1, -3.14, np.nan])]
)
def test_ignore_error(errors, exp_data):
    # "ignore" keeps bad values as-is; "coerce" turns them into NaN.
    ser = Series([1, -3.14, "apple"])
    result = to_numeric(ser, errors=errors)

    expected = Series(exp_data)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "errors,exp",
    [
        ("raise", 'Unable to parse string "apple" at position 2'),
        ("ignore", [True, False, "apple"]),
        # Coerces to float.
        ("coerce", [1.0, 0.0, np.nan]),
    ],
)
def test_bool_handling(errors, exp):
    # A string `exp` encodes an expected error message; otherwise it is
    # the expected output data.
    ser = Series([True, False, "apple"])

    if isinstance(exp, str):
        with pytest.raises(ValueError, match=exp):
            to_numeric(ser, errors=errors)
    else:
        result = to_numeric(ser, errors=errors)
        expected = Series(exp)

        tm.assert_series_equal(result, expected)
def test_list():
    # A plain Python list of numeric strings parses to a float ndarray.
    data = ["1", "-3.14", "7"]
    converted = to_numeric(data)
    tm.assert_numpy_array_equal(converted, np.array([1, -3.14, 7]))
@pytest.mark.parametrize(
    "data,arr_kwargs",
    [
        ([1, 3, 4, 5], dict(dtype=np.int64)),
        ([1.0, 3.0, 4.0, 5.0], dict()),
        # Boolean is regarded as numeric.
        ([True, False, True, True], dict()),
    ],
)
def test_list_numeric(data, arr_kwargs):
    # Numeric Python lists convert straight to the matching ndarray.
    result = to_numeric(data)
    expected = np.array(data, **arr_kwargs)
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize("kwargs", [dict(dtype="O"), dict()])
def test_numeric(kwargs):
    # Numeric data survives conversion even when stored as object dtype.
    data = [1, -3.14, 7]

    ser = Series(data, **kwargs)
    result = to_numeric(ser)

    expected = Series(data)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "columns",
    [
        # One column.
        "a",
        # Multiple columns.
        ["a", "b"],
    ],
)
def test_numeric_df_columns(columns):
    # see gh-14827
    df = DataFrame(
        dict(
            a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"],
            b=[1.0, 2.0, 3.0, 4.0],
        )
    )

    expected = DataFrame(dict(a=[1.2, 3.14, np.inf, 0.1], b=[1.0, 2.0, 3.0, 4.0]))

    # Convert column-wise via .apply, as users commonly do.
    df_copy = df.copy()
    df_copy[columns] = df_copy[columns].apply(to_numeric)

    tm.assert_frame_equal(df_copy, expected)


@pytest.mark.parametrize(
    "data,exp_data",
    [
        (
            [[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1],
            [[3.14, 1.0], 1.6, 0.1],
        ),
        ([np.array([decimal.Decimal(3.14), 1.0]), 0.1], [[3.14, 1.0], 0.1]),
    ],
)
def test_numeric_embedded_arr_likes(data, exp_data):
    # Test to_numeric with embedded lists and arrays
    df = DataFrame(dict(a=data))
    df["a"] = df["a"].apply(to_numeric)

    expected = DataFrame(dict(a=exp_data))
    tm.assert_frame_equal(df, expected)
def test_all_nan():
    # Strings that cannot be parsed all coerce to NaN.
    data = Series(["a", "b", "c"])
    coerced = to_numeric(data, errors="coerce")
    tm.assert_series_equal(coerced, Series([np.nan] * 3))
def test_type_check(errors):
    # see gh-11776
    df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
    kwargs = dict(errors=errors) if errors is not None else dict()
    # DataFrames are rejected regardless of the `errors` setting.
    error_ctx = pytest.raises(TypeError, match="1-d array")

    with error_ctx:
        to_numeric(df, **kwargs)
@pytest.mark.parametrize("val", [1, 1.1, 20001])
def test_scalar(val, signed, transform):
val = -val if signed else val
assert to_numeric(transform(val)) == float(val)
def test_really_large_scalar(large_val, signed, transform, errors):
# see gh-24910
kwargs = dict(errors=errors) if errors is not None else dict()
val = -large_val if signed else large_val
val = transform(val)
val_is_string = isinstance(val, str)
if val_is_string and errors in (None, "raise"):
msg = "Integer out of range. at position 0"
with pytest.raises(ValueError, match=msg):
to_numeric(val, **kwargs)
else:
expected = float(val) if (errors == "coerce" and val_is_string) else val
tm.assert_almost_equal(to_numeric(val, **kwargs), expected)
def test_really_large_in_arr(large_val, signed, transform, multiple_elts, errors):
# see gh-24910
kwargs = dict(errors=errors) if errors is not None else dict()
val = -large_val if signed else large_val
val = transform(val)
extra_elt = "string"
arr = [val] + multiple_elts * [extra_elt]
val_is_string = isinstance(val, str)
coercing = errors == "coerce"
if errors in (None, "raise") and (val_is_string or multiple_elts):
if val_is_string:
msg = "Integer out of range. at position 0"
else:
msg = 'Unable to parse string "string" at position 1'
with pytest.raises(ValueError, match=msg):
to_numeric(arr, **kwargs)
else:
result = to_numeric(arr, **kwargs)
exp_val = float(val) if (coercing and val_is_string) else val
expected = [exp_val]
if multiple_elts:
if coercing:
expected.append(np.nan)
exp_dtype = float
else:
expected.append(extra_elt)
exp_dtype = object
else:
exp_dtype = float if isinstance(exp_val, (int, float)) else object
tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
def test_really_large_in_arr_consistent(large_val, signed, multiple_elts, errors):
# see gh-24910
#
# Even if we discover that we have to hold float, does not mean
# we should be lenient on subsequent elements that fail to be integer.
kwargs = dict(errors=errors) if errors is not None else dict()
arr = [str(-large_val if signed else large_val)]
if multiple_elts:
arr.insert(0, large_val)
if errors in (None, "raise"):
index = int(multiple_elts)
msg = f"Integer out of range. at position {index}"
with pytest.raises(ValueError, match=msg):
to_numeric(arr, **kwargs)
else:
result = to_numeric(arr, **kwargs)
if errors == "coerce":
expected = [float(i) for i in arr]
exp_dtype = float
else:
expected = arr
exp_dtype = object
tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
@pytest.mark.parametrize(
"errors,checker",
[
("raise", 'Unable to parse string "fail" at position 0'),
("ignore", lambda x: x == "fail"),
("coerce", lambda x: np.isnan(x)),
],
)
def test_scalar_fail(errors, checker):
scalar = "fail"
if isinstance(checker, str):
with pytest.raises(ValueError, match=checker):
to_numeric(scalar, errors=errors)
else:
assert checker(to_numeric(scalar, errors=errors))
@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, np.nan, 3, np.nan]])
def test_numeric_dtypes(data, transform_assert_equal):
transform, assert_equal = transform_assert_equal
data = transform(data)
result = to_numeric(data)
assert_equal(result, data)
@pytest.mark.parametrize(
"data,exp",
[
(["1", "2", "3"], np.array([1, 2, 3], dtype="int64")),
(["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4])),
],
)
def test_str(data, exp, transform_assert_equal):
transform, assert_equal = transform_assert_equal
result = to_numeric(transform(data))
expected = transform(exp)
assert_equal(result, expected)
def test_datetime_like(tz_naive_fixture, transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture)
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_timedelta(transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.timedelta_range("1 days", periods=3, freq="D")
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_period(transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.period_range("2011-01", periods=3, freq="M", name="")
inp = transform(idx)
if isinstance(inp, Index):
result = to_numeric(inp)
expected = transform(idx.asi8)
assert_equal(result, expected)
else:
# TODO: PeriodDtype, so support it in to_numeric.
pytest.skip("Missing PeriodDtype support in to_numeric")
@pytest.mark.parametrize(
"errors,expected",
[
("raise", "Invalid object type at position 0"),
("ignore", Series([[10.0, 2], 1.0, "apple"])),
("coerce", Series([np.nan, 1.0, np.nan])),
],
)
def test_non_hashable(errors, expected):
# see gh-13324
ser = Series([[10.0, 2], 1.0, "apple"])
if isinstance(expected, str):
with pytest.raises(TypeError, match=expected):
to_numeric(ser, errors=errors)
else:
result = to_numeric(ser, errors=errors)
tm.assert_series_equal(result, expected)
def test_downcast_invalid_cast():
    # see gh-13352
    data = ["1", 2, 3]
    invalid_downcast = "unsigned-integer"
    msg = "invalid downcasting method provided"

    with pytest.raises(ValueError, match=msg):
        to_numeric(data, downcast=invalid_downcast)


def test_errors_invalid_value():
    # see gh-26466
    data = ["1", 2, 3]
    invalid_error_value = "invalid"
    msg = "invalid error value specified"

    with pytest.raises(ValueError, match=msg):
        to_numeric(data, errors=invalid_error_value)


@pytest.mark.parametrize(
    "data",
    [
        ["1", 2, 3],
        [1, 2, 3],
        np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),
    ],
)
@pytest.mark.parametrize(
    "kwargs,exp_dtype",
    [
        # Basic function tests.
        (dict(), np.int64),
        (dict(downcast=None), np.int64),
        # Support below np.float32 is rare and far between.
        (dict(downcast="float"), np.dtype(np.float32).char),
        # Basic dtype support.
        (dict(downcast="unsigned"), np.dtype(np.typecodes["UnsignedInteger"][0])),
    ],
)
def test_downcast_basic(data, kwargs, exp_dtype):
    # see gh-13352
    result = to_numeric(data, **kwargs)
    expected = np.array([1, 2, 3], dtype=exp_dtype)
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize(
    "data",
    [
        ["1", 2, 3],
        [1, 2, 3],
        np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),
    ],
)
def test_signed_downcast(data, signed_downcast):
    # see gh-13352
    # "integer" and "signed" are synonyms: both pick the smallest signed dtype.
    smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
    expected = np.array([1, 2, 3], dtype=smallest_int_dtype)

    res = to_numeric(data, downcast=signed_downcast)
    tm.assert_numpy_array_equal(res, expected)


def test_ignore_downcast_invalid_data():
    # If we can't successfully cast the given
    # data to a numeric dtype, do not bother
    # with the downcast parameter.
    data = ["foo", 2, 3]
    expected = np.array(data, dtype=object)

    res = to_numeric(data, errors="ignore", downcast="unsigned")
    tm.assert_numpy_array_equal(res, expected)


def test_ignore_downcast_neg_to_unsigned():
    # Cannot cast to an unsigned integer
    # because we have a negative number.
    data = ["-1", 2, 3]
    expected = np.array([-1, 2, 3], dtype=np.int64)

    res = to_numeric(data, downcast="unsigned")
    tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize(
"data,expected",
[
(["1.1", 2, 3], np.array([1.1, 2, 3], dtype=np.float64)),
(
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
np.array(
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00], dtype=np.float64
),
),
],
)
def test_ignore_downcast_cannot_convert_float(data, expected, downcast):
# Cannot cast to an integer (signed or unsigned)
# because we have a float number.
res = to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize(
"downcast,expected_dtype",
[("integer", np.int16), ("signed", np.int16), ("unsigned", np.uint16)],
)
def test_downcast_not8bit(downcast, expected_dtype):
# the smallest integer dtype need not be np.(u)int8
data = ["256", 257, 258]
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize(
"dtype,downcast,min_max",
[
("int8", "integer", [iinfo(np.int8).min, iinfo(np.int8).max]),
("int16", "integer", [iinfo(np.int16).min, iinfo(np.int16).max]),
("int32", "integer", [iinfo(np.int32).min, iinfo(np.int32).max]),
("int64", "integer", [iinfo(np.int64).min, iinfo(np.int64).max]),
("uint8", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max]),
("uint16", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max]),
("uint32", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max]),
("uint64", "unsigned", [iinfo(np.uint64).min, iinfo(np.uint64).max]),
("int16", "integer", [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
("int32", "integer", [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
("int64", "integer", [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
("int16", "integer", [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
("int32", "integer", [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
("int64", "integer", [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
("uint16", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
("uint32", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
("uint64", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),
],
)
def test_downcast_limits(dtype, downcast, min_max):
# see gh-14404: test the limits of each downcast.
series = to_numeric(Series(min_max), downcast=downcast)
assert series.dtype == dtype
@pytest.mark.parametrize(
"ser,expected",
[
(
pd.Series([0, 9223372036854775808]),
pd.Series([0, 9223372036854775808], dtype=np.uint64),
)
],
)
def test_downcast_uint64(ser, expected):
# see gh-14422:
# BUG: to_numeric doesn't work uint64 numbers
result = pd.to_numeric(ser, downcast="unsigned")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "data,exp_data",
    [
        (
            [200, 300, "", "NaN", 30000000000000000000],
            [200, 300, np.nan, np.nan, 30000000000000000000],
        ),
        (
            ["12345678901234567890", "1234567890", "ITEM"],
            [12345678901234567890, 1234567890, np.nan],
        ),
    ],
)
def test_coerce_uint64_conflict(data, exp_data):
    # see gh-17007 and gh-17125
    #
    # Still returns float despite the uint64-nan conflict,
    # which would normally force the casting to object.
    result = to_numeric(Series(data), errors="coerce")
    expected = Series(exp_data, dtype=float)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "errors,exp",
    [
        ("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])),
        ("raise", "Unable to parse string"),
    ],
)
def test_non_coerce_uint64_conflict(errors, exp):
    # see gh-17007 and gh-17125
    #
    # For completeness.
    ser = Series(["12345678901234567890", "1234567890", "ITEM"])

    if isinstance(exp, str):
        with pytest.raises(ValueError, match=exp):
            to_numeric(ser, errors=errors)
    else:
        result = to_numeric(ser, errors=errors)
        tm.assert_series_equal(result, ser)


@pytest.mark.parametrize("dc1", ["integer", "float", "unsigned"])
@pytest.mark.parametrize("dc2", ["integer", "float", "unsigned"])
def test_downcast_empty(dc1, dc2):
    # GH32493
    # Empty input downcasts to the same (empty) result whatever the method.
    tm.assert_numpy_array_equal(
        pd.to_numeric([], downcast=dc1),
        pd.to_numeric([], downcast=dc2),
        check_dtype=False,
    )
def test_failure_to_convert_uint64_string_to_NaN():
    # GH 32394: a non-numeric string coerces to NaN even in a uint64 context.
    scalar_result = to_numeric("uint64", errors="coerce")
    assert np.isnan(scalar_result)

    expected = Series([32, 64, np.nan])
    coerced = to_numeric(Series(["32", "64", "uint64"]), errors="coerce")
    tm.assert_series_equal(coerced, expected)
| bsd-3-clause |
DLTK/DLTK | examples/applications/IXI_HH_DCGAN/train.py | 1 | 8472 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import os
import pandas as pd
import tensorflow as tf
import numpy as np
from dltk.networks.gan.dcgan import dcgan_discriminator_3d, dcgan_generator_3d
from dltk.io.abstract_reader import Reader
from reader import read_fn
BATCH_SIZE = 8
MAX_STEPS = 35000
SAVE_SUMMARY_STEPS = 100
def train(args):
    """Train a 3D DCGAN on IXI HH volumes with an LSGAN objective.

    Builds the generator/discriminator graph, least-squares GAN losses and
    two Adam optimizers, then alternates updates with a simple balancing
    schedule until MAX_STEPS or a keyboard interrupt.

    Args:
        args (argparse.Namespace): parsed command line flags; uses
            `data_csv` (csv of file references) and `model_path`
            (checkpoint/summary directory).
    """
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names.
    # BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and
    # removed in 1.0; `.values` is the long-supported equivalent.
    all_filenames = pd.read_csv(
        args.data_csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).values
    train_filenames = all_filenames

    # Set up a data reader to handle the file i/o.
    reader_params = {'n_examples': 10,
                     'example_size': [4, 224, 224],
                     'extract_examples': True}
    reader_example_shapes = {'labels': [4, 64, 64, 1],
                             'features': {'noise': [1, 1, 1, 100]}}
    reader = Reader(read_fn, {'features': {'noise': tf.float32},
                              'labels': tf.float32})

    # Get input functions and queue initialisation hooks for data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        params=reader_params)

    # See TFGAN's `train.py` for a description of the generator and
    # discriminator API.
    def generator_fn(generator_inputs):
        """Generator function to build fake data samples. It creates a network
        given input features (e.g. from a dltk.io.abstract_reader). Further,
        custom Tensorboard summary ops can be added. For additional
        information, please refer to https://www.tensorflow.org/versions/master/api_docs/python/tf/contrib/gan/estimator/GANEstimator.

        Args:
            generator_inputs (dict): input features with a `noise` tensor
                to generate samples from.

        Returns:
            tf.Tensor: Generated data samples
        """
        gen = dcgan_generator_3d(
            inputs=generator_inputs['noise'],
            mode=tf.estimator.ModeKeys.TRAIN)
        gen = gen['gen']
        # tanh squashes generated intensities into [-1, 1].
        gen = tf.nn.tanh(gen)
        return gen

    def discriminator_fn(data, conditioning):
        """Discriminator function to discriminate real and fake data. It creates
        a network given input features (e.g. from a dltk.io.abstract_reader).
        Further, custom Tensorboard summary ops can be added. For additional
        information, please refer to https://www.tensorflow.org/versions/master/api_docs/python/tf/contrib/gan/estimator/GANEstimator.

        Args:
            data (tf.Tensor): batch of real or generated samples.
            conditioning: unused conditioning input, kept to match the
                TFGAN discriminator signature.

        Returns:
            tf.Tensor: discriminator logits
        """
        tf.summary.image('data', data[:, 0])

        disc = dcgan_discriminator_3d(
            inputs=data,
            mode=tf.estimator.ModeKeys.TRAIN)
        return disc['logits']

    # get input tensors from queue
    features, labels = train_input_fn()

    # build generator
    with tf.variable_scope('generator'):
        gen = generator_fn(features)

    # build discriminator on fake data
    with tf.variable_scope('discriminator'):
        disc_fake = discriminator_fn(gen, None)

    # build discriminator on real data, reusing the previously created variables
    with tf.variable_scope('discriminator', reuse=True):
        disc_real = discriminator_fn(labels, None)

    # building an LSGAN loss for the real examples
    d_loss_real = tf.losses.mean_squared_error(
        disc_real, tf.ones_like(disc_real))

    # calculating a pseudo accuracy for the discriminator detecting a real
    # sample and logging that
    d_pred_real = tf.cast(tf.greater(disc_real, 0.5), tf.float32)
    _, d_acc_real = tf.metrics.accuracy(tf.ones_like(disc_real), d_pred_real)
    tf.summary.scalar('disc/real_acc', d_acc_real)

    # building an LSGAN loss for the fake examples
    d_loss_fake = tf.losses.mean_squared_error(
        disc_fake, tf.zeros_like(disc_fake))

    # calculating a pseudo accuracy for the discriminator detecting a fake
    # sample and logging that
    d_pred_fake = tf.cast(tf.greater(disc_fake, 0.5), tf.float32)
    _, d_acc_fake = tf.metrics.accuracy(tf.zeros_like(disc_fake), d_pred_fake)
    tf.summary.scalar('disc/fake_acc', d_acc_fake)

    # building an LSGAN loss for the generator
    g_loss = tf.losses.mean_squared_error(
        disc_fake, tf.ones_like(disc_fake))
    tf.summary.scalar('loss/gen', g_loss)

    # combining the discriminator losses
    d_loss = d_loss_fake + d_loss_real
    tf.summary.scalar('loss/disc', d_loss)

    # getting the list of discriminator variables
    d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                               'discriminator')

    # building the discriminator optimizer
    d_opt = tf.train.AdamOptimizer(
        0.001, 0.5, epsilon=1e-5).minimize(d_loss, var_list=d_vars)

    # getting the list of generator variables
    g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                               'generator')

    # building the generator optimizer
    g_opt = tf.train.AdamOptimizer(
        0.001, 0.5, epsilon=1e-5).minimize(g_loss, var_list=g_vars)

    # getting a variable to hold the global step
    global_step = tf.train.get_or_create_global_step()

    # build op to increment the global step - important for TensorBoard logging
    inc_step = global_step.assign_add(1)

    # build the training session.
    # NOTE: we are not using a tf.estimator here, because they prevent some
    # flexibility in the training procedure
    s = tf.train.MonitoredTrainingSession(checkpoint_dir=args.model_path,
                                          save_summaries_steps=100,
                                          save_summaries_secs=None,
                                          hooks=[train_qinit_hook])

    # build dummy logging string
    log = 'Step {} with Loss D: {}, Loss G: {}, Acc Real: {} Acc Fake: {}'

    # start training
    print('Starting training...')
    loss_d = 0
    loss_g = 0
    try:
        for step in range(MAX_STEPS):
            # if discriminator is too good, only train generator
            if not loss_g > 3 * loss_d:
                s.run(d_opt)
            # if generator is too good, only train discriminator
            if not loss_d > 3 * loss_g:
                s.run(g_opt)

            # increment global step for logging hooks
            s.run(inc_step)

            # get statistics for training scheduling
            loss_d, loss_g, acc_d, acc_g = s.run(
                [d_loss, g_loss, d_acc_real, d_acc_fake])

            # print stats for information
            if step % SAVE_SUMMARY_STEPS == 0:
                print(log.format(step, loss_d, loss_g, acc_d, acc_g))
    except KeyboardInterrupt:
        pass
    print('Stopping now.')
if __name__ == '__main__':
    # Set up argument parser
    parser = argparse.ArgumentParser(description='Example: IXI HH LSGAN training script')
    # NOTE(review): --run_validation is parsed but never read by train().
    parser.add_argument('--run_validation', default=True)
    parser.add_argument('--restart', default=False, action='store_true')
    parser.add_argument('--verbose', default=False, action='store_true')
    parser.add_argument('--cuda_devices', '-c', default='0')

    parser.add_argument('--model_path', '-p', default='/tmp/IXI_dcgan/')
    parser.add_argument('--data_csv', default='../../../data/IXI_HH/demographic_HH.csv')

    args = parser.parse_args()

    # Set verbosity
    if args.verbose:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
        tf.logging.set_verbosity(tf.logging.INFO)
    else:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        tf.logging.set_verbosity(tf.logging.ERROR)

    # GPU allocation options
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices

    # Handle restarting and resuming training.
    # NOTE(review): shelling out to `rm -rf`/`mkdir -p` on a user-supplied
    # path is risky; shutil.rmtree/os.makedirs would be safer.
    if args.restart:
        print('Restarting training from scratch.')
        os.system('rm -rf {}'.format(args.model_path))

    if not os.path.isdir(args.model_path):
        os.system('mkdir -p {}'.format(args.model_path))
    else:
        print('Resuming training on model_path {}'.format(args.model_path))

    # Call training
    train(args)
| apache-2.0 |
NunoEdgarGub1/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
# Mapping from CLI name to an unfitted scikit-learn estimator instance;
# selected on the command line via -e/--estimators.
ESTIMATORS = {
    "dummy": DummyClassifier(),
    "random_forest": RandomForestClassifier(n_estimators=100,
                                            max_features="sqrt",
                                            min_samples_split=10),
    "extra_trees": ExtraTreesClassifier(n_estimators=100,
                                        max_features="sqrt",
                                        min_samples_split=10),
    "logistic_regression": LogisticRegression(),
    "naive_bayes": MultinomialNB(),
    "adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/decomposition/tests/test_fastica.py | 70 | 7808 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
    """Centers and norms x **in place**.

    Parameters
    -----------
    x: ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis: int, optional
        Axis along which the mean and variance are calculated.
    """
    # rollaxis returns a view, so modifying it mutates `x` itself.
    view = np.rollaxis(x, axis)
    mean = view.mean(axis=0)
    view -= mean
    std = view.std(axis=0)
    view /= std
def test_gs():
    # Test gram schmidt orthonormalization
    # generate a random orthogonal matrix
    rng = np.random.RandomState(0)
    W, _, _ = np.linalg.svd(rng.randn(10, 10))
    w = rng.randn(10)
    _gs_decorrelation(w, W, 10)
    # Decorrelating against all 10 rows of an orthogonal basis must leave
    # (numerically) nothing of w.
    assert_less((w ** 2).sum(), 1.e-10)
    w = rng.randn(10)
    u = _gs_decorrelation(w, W, 5)
    tmp = np.dot(u, W.T)
    # Only the first 5 components were removed, so only those projections
    # must vanish.
    assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(2, 1000)

    center_and_norm(m)

    # function as fun arg
    def g_test(x):
        return x ** 3, (3 * x ** 2).mean(axis=-1)

    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    # Exercise every (algorithm, nonlinearity, whitening) combination.
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
            # `fun` must be a known string or a callable with the right
            # signature; a plain ufunc is rejected.
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # ICA recovers sources up to sign; align signs before comparing.
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)

    # Test FastICA class
    _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
    ica = FastICA(fun=nl, algorithm=algo, random_state=0)
    sources = ica.fit_transform(m.T)
    assert_equal(ica.components_.shape, (2, 2))
    assert_equal(sources.shape, (1000, 2))
    # Function interface and estimator interface must agree.
    assert_array_almost_equal(sources_fun, sources)
    assert_array_almost_equal(sources, ica.transform(m.T))

    assert_equal(ica.mixing_.shape, (2, 2))

    # Invalid `fun` values raise the appropriate error type.
    for fn in [np.tanh, "exp(-.5(x^2))"]:
        ica = FastICA(fun=fn, algorithm=algo, random_state=0)
        assert_raises(ValueError, ica.fit, m.T)

    assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
    # Regression test for issue #697: fitting with whiten=False should
    # warn (n_components is ignored) but still produce a mixing matrix.
    data = [[0, 1], [1, 0]]
    estimator = FastICA(n_components=1, whiten=False, random_state=0)
    assert_warns(UserWarning, estimator.fit, data)
    assert_true(hasattr(estimator, 'mixing_'))
def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    # Here 2 sources are mixed into 6 observed channels, so unmixing also
    # performs a dimensionality reduction (n_components=2).
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)
    center_and_norm(m)
    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T
    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # ICA recovers sources only up to sign; align before comparing.
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))
    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
    # fit_transform must agree with a separate fit() followed by transform(),
    # both with and without whitening / component reduction.
    rng = np.random.RandomState(0)
    data = rng.random_sample((100, 10))
    for whiten, n_components in [[True, 5], [False, None]]:
        if n_components is None:
            expected_components = data.shape[1]
        else:
            expected_components = n_components
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        combined = ica.fit_transform(data)
        assert_equal(ica.components_.shape, (expected_components, 10))
        assert_equal(combined.shape, (100, expected_components))
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        ica.fit(data)
        assert_equal(ica.components_.shape, (expected_components, 10))
        separate = ica.transform(data)
        assert_array_almost_equal(combined, separate)
def test_inverse_transform():
    # Test FastICA.inverse_transform
    n_features = 10
    n_samples = 100
    n1, n2 = 5, 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    # Expected mixing_ shapes keyed by (whiten, n_components).
    # NOTE(review): with whiten=False the requested n_components appears to
    # be ignored, hence (False, n1) also maps to n2 columns -- confirm
    # against FastICA's documented behaviour.
    expected = {(True, n1): (n_features, n1),
                (True, n2): (n_features, n2),
                (False, n1): (n_features, n2),
                (False, n2): (n_features, n2)}
    for whiten in [True, False]:
        for n_components in [n1, n2]:
            n_components_ = (n_components if n_components is not None else
                             X.shape[1])
            ica = FastICA(n_components=n_components, random_state=rng,
                          whiten=whiten)
            with warnings.catch_warnings(record=True):
                # catch "n_components ignored" warning
                Xt = ica.fit_transform(X)
            expected_shape = expected[(whiten, n_components_)]
            assert_equal(ica.mixing_.shape, expected_shape)
            X2 = ica.inverse_transform(Xt)
            assert_equal(X.shape, X2.shape)
            # reversibility test in non-reduction case
            if n_components == X.shape[1]:
                assert_array_almost_equal(X, X2)
| mit |
rbberger/lammps | examples/SPIN/test_problems/validation_damped_exchange/plot_precession.py | 9 | 1111 | #!/usr/bin/env python3
import numpy as np, pylab, tkinter
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from decimal import *
import sys, string, os
# Command-line handling: expect exactly two result files to compare.
argv = sys.argv
if len(argv) != 3:
    print("Syntax: ./plot_precession.py res_lammps.dat res_llg.dat")
    sys.exit()

lammps_file = argv[1]
llg_file = argv[2]

# The two formats carry the same quantities but in different columns.
t_lmp, Sx_lmp, Sy_lmp, Sz_lmp, e_lmp = np.loadtxt(
    lammps_file, skiprows=0, usecols=(1, 2, 3, 4, 7), unpack=True)
t_llg, Sx_llg, Sy_llg, Sz_llg, e_llg = np.loadtxt(
    llg_file, skiprows=0, usecols=(0, 1, 2, 3, 4), unpack=True)

# One panel per spin component plus the energy, LAMMPS vs. reference LLG.
panels = ((1, 'Sx', Sx_lmp, Sx_llg),
          (2, 'Sy', Sy_lmp, Sy_llg),
          (3, 'Sz', Sz_lmp, Sz_llg),
          (4, 'E (eV)', e_lmp, e_llg))
plt.figure()
for pos, ylab, y_lmp, y_llg in panels:
    plt.subplot(4, 1, pos)
    plt.ylabel(ylab)
    plt.plot(t_lmp, y_lmp, 'b-', label='LAMMPS')
    plt.plot(t_llg, y_llg, 'r--', label='LLG')
plt.xlabel('time (in ps)')
plt.legend()
plt.show()
| gpl-2.0 |
cbecker/LightGBM | python-package/lightgbm/sklearn.py | 1 | 32365 | # coding: utf-8
# pylint: disable = invalid-name, W0105, C0111, C0301
"""Scikit-Learn Wrapper interface for LightGBM."""
from __future__ import absolute_import
import numpy as np
from .basic import Dataset, LightGBMError
from .compat import (SKLEARN_INSTALLED, LGBMClassifierBase, LGBMDeprecated,
LGBMLabelEncoder, LGBMModelBase, LGBMRegressorBase, argc_,
range_)
from .engine import train
def _objective_function_wrapper(func):
    """Decorate an objective function
    Note: for multi-class task, the y_pred is group by class_id first, then group by row_id
        if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
        and you should group grad and hess in this way as well
    Parameters
    ----------
    func: callable
        Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``:
            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class] (for multi-class)
                The predicted values
            group: array_like
                group/query data, used for ranking task
    Returns
    -------
    new_func: callable
        The new objective function as expected by ``lightgbm.engine.train``.
        The signature is ``new_func(preds, dataset)``:
            preds: array_like, shape [n_samples] or shape[n_samples * n_class]
                The predicted values
            dataset: ``dataset``
                The training set from which the labels will be extracted using
                ``dataset.get_label()``
    """
    def inner(preds, dataset):
        """internal function"""
        labels = dataset.get_label()
        argc = argc_(func)
        if argc == 2:
            grad, hess = func(labels, preds)
        elif argc == 3:
            grad, hess = func(labels, preds, dataset.get_group())
        else:
            raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
        """weighted for objective"""
        weight = dataset.get_weight()
        if weight is not None:
            if len(weight) == len(grad):
                # Single class: one weight per grad/hess entry.
                grad = np.multiply(grad, weight)
                hess = np.multiply(hess, weight)
            else:
                num_data = len(weight)
                num_class = len(grad) // num_data
                if num_class * num_data != len(grad):
                    raise ValueError("Length of grad and hess should equal to num_class * num_data")
                # Multi-class: grad/hess are grouped by class first
                # (idx = class * num_data + row), so the per-row weights
                # repeat once per class.  Vectorized with np.tile instead
                # of the former O(num_class * num_data) Python double loop,
                # consistent with the single-class branch above.
                tiled_weight = np.tile(weight, num_class)
                grad = np.multiply(grad, tiled_weight)
                hess = np.multiply(hess, tiled_weight)
        return grad, hess
    return inner
def _eval_function_wrapper(func):
    """Decorate an eval function
    Note: for multi-class task, the y_pred is group by class_id first, then group by row_id
        if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
    Parameters
    ----------
    func: callable
        Expects a callable with one of the signatures
        ``func(y_true, y_pred)``,
        ``func(y_true, y_pred, weight)``
        or ``func(y_true, y_pred, weight, group)``
        returning (eval_name->str, eval_result->float, is_bigger_better->Bool):
            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class] (for multi-class)
                The predicted values
            weight: array_like of shape [n_samples]
                The weight of samples
            group: array_like
                group/query data, used for ranking task
    Returns
    -------
    new_func: callable
        The new eval function as expected by ``lightgbm.engine.train``.
        The signature is ``new_func(preds, dataset)``:
            preds: array_like, shape [n_samples] or shape[n_samples * n_class]
                The predicted values
            dataset: ``dataset``
                The training set from which the labels will be extracted using
                ``dataset.get_label()``
    """
    def inner(preds, dataset):
        """Dispatch on the user function's arity and forward the matching
        pieces of metadata from the dataset."""
        labels = dataset.get_label()
        argc = argc_(func)
        if argc == 2:
            return func(labels, preds)
        if argc == 3:
            return func(labels, preds, dataset.get_weight())
        if argc == 4:
            return func(labels, preds, dataset.get_weight(), dataset.get_group())
        raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
    return inner
class LGBMModel(LGBMModelBase):
    """Base scikit-learn API wrapper around the LightGBM Booster.

    Concrete estimators (LGBMRegressor, LGBMClassifier, LGBMRanker) derive
    from this class; it stores hyper-parameters, builds the training/validation
    Datasets and delegates boosting to ``lightgbm.engine.train``.
    """
    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective="regression",
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0, scale_pos_weight=1,
                 is_unbalance=False, seed=0, nthread=-1, silent=True,
                 sigmoid=1.0, huber_delta=1.0, gaussian_eta=1.0, fair_c=1.0,
                 max_position=20, label_gain=None,
                 drop_rate=0.1, skip_drop=0.5, max_drop=50,
                 uniform_drop=False, xgboost_dart_mode=False):
        """
        Implementation of the Scikit-Learn API for LightGBM.
        Parameters
        ----------
        boosting_type : string
            gbdt, traditional Gradient Boosting Decision Tree
            dart, Dropouts meet Multiple Additive Regression Trees
        num_leaves : int
            Maximum tree leaves for base learners.
        max_depth : int
            Maximum tree depth for base learners, -1 means no limit.
        learning_rate : float
            Boosting learning rate
        n_estimators : int
            Number of boosted trees to fit.
        max_bin : int
            Number of bucketed bin for feature values
        subsample_for_bin : int
            Number of samples for constructing bins.
        objective : string or callable
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            default: binary for LGBMClassifier, lambdarank for LGBMRanker
        min_split_gain : float
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : int
            Minimum sum of instance weight(hessian) needed in a child(leaf)
        min_child_samples : int
            Minimum number of data need in a child(leaf)
        subsample : float
            Subsample ratio of the training instance.
        subsample_freq : int
            frequency of subsample, <=0 means no enable
        colsample_bytree : float
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float
            L1 regularization term on weights
        reg_lambda : float
            L2 regularization term on weights
        scale_pos_weight : float
            Balancing of positive and negative weights.
        is_unbalance : bool
            Is unbalance for binary classification
        seed : int
            Random number seed.
        nthread : int
            Number of parallel threads
        silent : boolean
            Whether to print messages while running boosting.
        sigmoid : float
            Only used in binary classification and lambdarank. Parameter for sigmoid function.
        huber_delta : float
            Only used in regression. Parameter for Huber loss function.
        gaussian_eta : float
            Only used in regression. Parameter for L1 and Huber loss function.
            It is used to control the width of Gaussian function to approximate hessian.
        fair_c : float
            Only used in regression. Parameter for Fair loss function.
        max_position : int
            Only used in lambdarank, will optimize NDCG at this position.
        label_gain : list of float
            Only used in lambdarank, relevant gain for labels.
            For example, the gain of label 2 is 3 if using default label gains.
            None (default) means use default value of CLI version: {0,1,3,7,15,31,63,...}.
        drop_rate : float
            Only used when boosting_type='dart'. Probability to select dropping trees.
        skip_drop : float
            Only used when boosting_type='dart'. Probability to skip dropping trees.
        max_drop : int
            Only used when boosting_type='dart'. Max number of dropped trees in one iteration.
        uniform_drop : bool
            Only used when boosting_type='dart'. If true, drop trees uniformly, else drop according to weights.
        xgboost_dart_mode : bool
            Only used when boosting_type='dart'. Whether use xgboost dart mode.
        Note
        ----
        A custom objective function can be provided for the ``objective``
        parameter. In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``
        or ``objective(y_true, y_pred, group) -> grad, hess``:
            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class]
                The predicted values
            group: array_like
                group/query data, used for ranking task
            grad: array_like of shape [n_samples] or shape[n_samples * n_class]
                The value of the gradient for each sample point.
            hess: array_like of shape [n_samples] or shape[n_samples * n_class]
                The value of the second derivative for each sample point
        for multi-class task, the y_pred is group by class_id first, then group by row_id
            if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
            and you should group grad and hess in this way as well
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('Scikit-learn is required for this module')
        # Store every constructor argument verbatim so that sklearn's
        # get_params()/set_params() cloning machinery works as expected.
        self.boosting_type = boosting_type
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.max_bin = max_bin
        self.subsample_for_bin = subsample_for_bin
        self.objective = objective
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.scale_pos_weight = scale_pos_weight
        self.is_unbalance = is_unbalance
        self.seed = seed
        self.nthread = nthread
        self.silent = silent
        self.sigmoid = sigmoid
        self.huber_delta = huber_delta
        self.gaussian_eta = gaussian_eta
        self.fair_c = fair_c
        self.max_position = max_position
        self.label_gain = label_gain
        self.drop_rate = drop_rate
        self.skip_drop = skip_drop
        self.max_drop = max_drop
        self.uniform_drop = uniform_drop
        self.xgboost_dart_mode = xgboost_dart_mode
        self._Booster = None
        self.evals_result = None
        self.best_iteration = -1
        # A callable objective is wrapped into the fobj form expected by
        # lightgbm.engine.train; built-in objectives pass through as strings.
        if callable(self.objective):
            self.fobj = _objective_function_wrapper(self.objective)
        else:
            self.fobj = None
    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None,
            eval_metric=None,
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None):
        """
        Fit the gradient boosting model
        Parameters
        ----------
        X : array_like
            Feature matrix
        y : array_like
            Labels
        sample_weight : array_like
            weight of training data
        init_score : array_like
            init score of training data
        group : array_like
            group data of training data
        eval_set : list, optional
            A list of (X, y) tuple pairs to use as a validation set for early-stopping
        eval_sample_weight : List of array
            weight of eval data
        eval_init_score : List of array
            init score of eval data
        eval_group : List of array
            group data of eval data
        eval_metric : str, list of str, callable, optional
            If a str, should be a built-in evaluation metric to use.
            If callable, a custom evaluation metric, see note for more details.
        early_stopping_rounds : int
        verbose : bool
            If `verbose` and an evaluation set is used, writes the evaluation
        feature_name : list of str, or 'auto'
            Feature names
            If 'auto' and data is pandas DataFrame, use data columns name
        categorical_feature : list of str or int, or 'auto'
            Categorical features,
            type int represents index,
            type str represents feature names (need to specify feature_name as well)
            If 'auto' and data is pandas DataFrame, use pandas categorical columns
        callbacks : list of callback functions
            List of callback functions that are applied at each iteration.
            See Callbacks in Python-API.md for more information.
        Returns
        -------
        self : object
            The fitted estimator.
        Note
        ----
        Custom eval function expects a callable with following functions:
            ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``.
            return (eval_name, eval_result, is_bigger_better)
            or list of (eval_name, eval_result, is_bigger_better)
            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class] (for multi-class)
                The predicted values
            weight: array_like of shape [n_samples]
                The weight of samples
            group: array_like
                group/query data, used for ranking task
            eval_name: str
                name of evaluation
            eval_result: float
                eval result
            is_bigger_better: bool
                is eval result bigger better, e.g. AUC is bigger_better.
        for multi-class task, the y_pred is group by class_id first, then group by row_id
            if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
        """
        evals_result = {}
        params = self.get_params()
        # Translate sklearn-style attributes into LightGBM training params.
        params['verbose'] = -1 if self.silent else 1
        if hasattr(self, 'n_classes_') and self.n_classes_ > 2:
            params['num_class'] = self.n_classes_
        if hasattr(self, 'eval_at'):
            params['ndcg_eval_at'] = self.eval_at
        if self.fobj:
            params['objective'] = 'None'  # objective = nullptr for unknown objective
        if 'label_gain' in params and params['label_gain'] is None:
            del params['label_gain']  # use default of cli version
        if callable(eval_metric):
            feval = _eval_function_wrapper(eval_metric)
        else:
            feval = None
            params['metric'] = eval_metric
        def _construct_dataset(X, y, sample_weight, init_score, group, params):
            # Build a LightGBM Dataset carrying label, weight and group info.
            ret = Dataset(X, label=y, max_bin=self.max_bin, weight=sample_weight, group=group, params=params)
            ret.set_init_score(init_score)
            return ret
        train_set = _construct_dataset(X, y, sample_weight, init_score, group, params)
        valid_sets = []
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                """reduce cost for prediction training data"""
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    def get_meta_data(collection, i):
                        # Fetch the i-th metadata item from a list or dict
                        # (missing entries resolve to None).
                        if collection is None:
                            return None
                        elif isinstance(collection, list):
                            return collection[i] if len(collection) > i else None
                        elif isinstance(collection, dict):
                            return collection.get(i, None)
                        else:
                            raise TypeError('eval_sample_weight, eval_init_score, and eval_group should be dict or list')
                    valid_weight = get_meta_data(eval_sample_weight, i)
                    valid_init_score = get_meta_data(eval_init_score, i)
                    valid_group = get_meta_data(eval_group, i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1], valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)
        self._Booster = train(params, train_set,
                              self.n_estimators, valid_sets=valid_sets,
                              early_stopping_rounds=early_stopping_rounds,
                              evals_result=evals_result, fobj=self.fobj, feval=feval,
                              verbose_eval=verbose, feature_name=feature_name,
                              categorical_feature=categorical_feature,
                              callbacks=callbacks)
        if evals_result:
            self.evals_result = evals_result
        if early_stopping_rounds is not None:
            self.best_iteration = self._Booster.best_iteration
        return self
    def predict(self, X, raw_score=False, num_iteration=0):
        """
        Return the predicted value for each sample.
        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.
        raw_score : bool
            Whether to return raw (margin) scores instead of transformed ones.
        num_iteration : int
            Limit number of iterations in the prediction; defaults to 0 (use all trees).
        Returns
        -------
        predicted_result : array_like, shape=[n_samples] or [n_samples, n_classes]
        """
        return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)
    def apply(self, X, num_iteration=0):
        """
        Return the predicted leaf every tree for each sample.
        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.
        num_iteration : int
            Limit number of iterations in the prediction; defaults to 0 (use all trees).
        Returns
        -------
        X_leaves : array_like, shape=[n_samples, n_trees]
        """
        return self.booster_.predict(X, pred_leaf=True, num_iteration=num_iteration)
    @property
    def booster_(self):
        """Get the underlying lightgbm Booster of this model."""
        if self._Booster is None:
            raise LightGBMError('No booster found. Need to call fit beforehand.')
        return self._Booster
    @property
    def evals_result_(self):
        """Get the evaluation results."""
        if self.evals_result is None:
            raise LightGBMError('No results found. Need to call fit with eval set beforehand.')
        return self.evals_result
    @property
    def feature_importances_(self):
        """Get normalized feature importances (sums to 1)."""
        importace_array = self.booster_.feature_importance().astype(np.float32)
        return importace_array / importace_array.sum()
    @LGBMDeprecated('Use attribute booster_ instead.')
    def booster(self):
        # Deprecated accessor kept for backward compatibility.
        return self.booster_
    @LGBMDeprecated('Use attribute feature_importances_ instead.')
    def feature_importance(self):
        # Deprecated accessor kept for backward compatibility.
        return self.feature_importances_
class LGBMRegressor(LGBMModel, LGBMRegressorBase):
    """LightGBM regressor following the scikit-learn estimator API."""
    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective="regression",
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0,
                 seed=0, nthread=-1, silent=True,
                 huber_delta=1.0, gaussian_eta=1.0, fair_c=1.0,
                 drop_rate=0.1, skip_drop=0.5, max_drop=50,
                 uniform_drop=False, xgboost_dart_mode=False):
        # Narrows the LGBMModel argument list to regression-relevant options;
        # everything is forwarded to the base class unchanged (see
        # LGBMModel.__init__ for parameter descriptions).
        super(LGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,
                                            max_depth=max_depth, learning_rate=learning_rate,
                                            n_estimators=n_estimators, max_bin=max_bin,
                                            subsample_for_bin=subsample_for_bin, objective=objective,
                                            min_split_gain=min_split_gain, min_child_weight=min_child_weight,
                                            min_child_samples=min_child_samples, subsample=subsample,
                                            subsample_freq=subsample_freq, colsample_bytree=colsample_bytree,
                                            reg_alpha=reg_alpha, reg_lambda=reg_lambda,
                                            seed=seed, nthread=nthread, silent=silent,
                                            huber_delta=huber_delta, gaussian_eta=gaussian_eta, fair_c=fair_c,
                                            drop_rate=drop_rate, skip_drop=skip_drop, max_drop=max_drop,
                                            uniform_drop=uniform_drop, xgboost_dart_mode=xgboost_dart_mode)
    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None,
            eval_metric="l2",
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto', callbacks=None):
        """Fit the regressor.

        See LGBMModel.fit for parameter meanings; eval_metric defaults to
        'l2' for regression.  Returns self.
        """
        super(LGBMRegressor, self).fit(X, y, sample_weight=sample_weight,
                                       init_score=init_score, eval_set=eval_set,
                                       eval_sample_weight=eval_sample_weight,
                                       eval_init_score=eval_init_score,
                                       eval_metric=eval_metric,
                                       early_stopping_rounds=early_stopping_rounds,
                                       verbose=verbose, feature_name=feature_name,
                                       categorical_feature=categorical_feature,
                                       callbacks=callbacks)
        return self
class LGBMClassifier(LGBMModel, LGBMClassifierBase):
    """LightGBM classifier following the scikit-learn estimator API.

    Labels are encoded with a LabelEncoder at fit time; binary problems use
    the 'binary' objective, and fit automatically switches to 'multiclass'
    when more than two classes are present.
    """
    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective="binary",
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0, scale_pos_weight=1,
                 is_unbalance=False, seed=0, nthread=-1,
                 silent=True, sigmoid=1.0,
                 drop_rate=0.1, skip_drop=0.5, max_drop=50,
                 uniform_drop=False, xgboost_dart_mode=False):
        # classes/n_classes are populated by fit(); the *_-suffixed
        # properties below guard access before fitting.
        self.classes, self.n_classes = None, None
        super(LGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,
                                             max_depth=max_depth, learning_rate=learning_rate,
                                             n_estimators=n_estimators, max_bin=max_bin,
                                             subsample_for_bin=subsample_for_bin, objective=objective,
                                             min_split_gain=min_split_gain, min_child_weight=min_child_weight,
                                             min_child_samples=min_child_samples, subsample=subsample,
                                             subsample_freq=subsample_freq, colsample_bytree=colsample_bytree,
                                             reg_alpha=reg_alpha, reg_lambda=reg_lambda,
                                             scale_pos_weight=scale_pos_weight, is_unbalance=is_unbalance,
                                             seed=seed, nthread=nthread, silent=silent, sigmoid=sigmoid,
                                             drop_rate=drop_rate, skip_drop=skip_drop, max_drop=max_drop,
                                             uniform_drop=uniform_drop, xgboost_dart_mode=xgboost_dart_mode)
    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None,
            eval_metric="binary_logloss",
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None):
        """Fit the classifier.

        See LGBMModel.fit for parameter meanings.  Labels are encoded to
        integer class indices before training; eval set labels get the same
        encoding.  Returns self.
        """
        self._le = LGBMLabelEncoder().fit(y)
        y = self._le.transform(y)
        self.classes = self._le.classes_
        self.n_classes = len(self.classes_)
        if self.n_classes > 2:
            # Switch to using a multiclass objective in the underlying LGBM instance
            self.objective = "multiclass"
            if eval_metric == "binary_logloss":
                eval_metric = "multi_logloss"
        if eval_set is not None:
            # Eval labels must go through the same encoder as training labels.
            eval_set = [(x[0], self._le.transform(x[1])) for x in eval_set]
        super(LGBMClassifier, self).fit(X, y, sample_weight=sample_weight,
                                        init_score=init_score, eval_set=eval_set,
                                        eval_sample_weight=eval_sample_weight,
                                        eval_init_score=eval_init_score,
                                        eval_metric=eval_metric,
                                        early_stopping_rounds=early_stopping_rounds,
                                        verbose=verbose, feature_name=feature_name,
                                        categorical_feature=categorical_feature,
                                        callbacks=callbacks)
        return self
    def predict(self, X, raw_score=False, num_iteration=0):
        """Return the predicted class label (in original label space) for
        each sample, taking the argmax over predict_proba."""
        class_probs = self.predict_proba(X, raw_score, num_iteration)
        class_index = np.argmax(class_probs, axis=1)
        return self._le.inverse_transform(class_index)
    def predict_proba(self, X, raw_score=False, num_iteration=0):
        """
        Return the predicted probability for each class for each sample.
        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.
        raw_score : bool
            Whether to return raw (margin) scores instead of probabilities.
        num_iteration : int
            Limit number of iterations in the prediction; defaults to 0 (use all trees).
        Returns
        -------
        predicted_probability : array_like, shape=[n_samples, n_classes]
        """
        class_probs = self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)
        if self.n_classes > 2:
            return class_probs
        else:
            # Binary case: booster returns P(class 1); expand to two columns.
            return np.vstack((1. - class_probs, class_probs)).transpose()
    @property
    def classes_(self):
        """Get class label array."""
        if self.classes is None:
            raise LightGBMError('No classes found. Need to call fit beforehand.')
        return self.classes
    @property
    def n_classes_(self):
        """Get number of classes"""
        if self.n_classes is None:
            raise LightGBMError('No classes found. Need to call fit beforehand.')
        return self.n_classes
class LGBMRanker(LGBMModel):
    """LightGBM ranker (lambdarank objective) following the scikit-learn
    estimator API.  Requires query group information at fit time."""
    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective="lambdarank",
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0, scale_pos_weight=1,
                 is_unbalance=False, seed=0, nthread=-1, silent=True,
                 sigmoid=1.0, max_position=20, label_gain=None,
                 drop_rate=0.1, skip_drop=0.5, max_drop=50,
                 uniform_drop=False, xgboost_dart_mode=False):
        # All options are forwarded unchanged; see LGBMModel.__init__ for
        # parameter descriptions.
        super(LGBMRanker, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,
                                         max_depth=max_depth, learning_rate=learning_rate,
                                         n_estimators=n_estimators, max_bin=max_bin,
                                         subsample_for_bin=subsample_for_bin, objective=objective,
                                         min_split_gain=min_split_gain, min_child_weight=min_child_weight,
                                         min_child_samples=min_child_samples, subsample=subsample,
                                         subsample_freq=subsample_freq, colsample_bytree=colsample_bytree,
                                         reg_alpha=reg_alpha, reg_lambda=reg_lambda,
                                         scale_pos_weight=scale_pos_weight, is_unbalance=is_unbalance,
                                         seed=seed, nthread=nthread, silent=silent,
                                         sigmoid=sigmoid, max_position=max_position, label_gain=label_gain,
                                         drop_rate=drop_rate, skip_drop=skip_drop, max_drop=max_drop,
                                         uniform_drop=uniform_drop, xgboost_dart_mode=xgboost_dart_mode)
    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None,
            eval_metric='ndcg', eval_at=1,
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None):
        """
        Most arguments like common methods except following:
        eval_at : list of int
            The evaluation positions of NDCG
        """
        """check group data"""
        if group is None:
            raise ValueError("Should set group for ranking task")
        if eval_set is not None:
            # Every validation set also needs its own group information.
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should equal to eval_set")
            elif (isinstance(eval_group, dict) and any(i not in eval_group or eval_group[i] is None for i in range_(len(eval_group)))) \
                    or (isinstance(eval_group, list) and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval dataset for ranking task; if you use dict, the index should start from 0")
        if eval_at is not None:
            # Stored so LGBMModel.fit can translate it to ndcg_eval_at.
            self.eval_at = eval_at
        super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
                                    init_score=init_score, group=group,
                                    eval_set=eval_set, eval_sample_weight=eval_sample_weight,
                                    eval_init_score=eval_init_score, eval_group=eval_group,
                                    eval_metric=eval_metric,
                                    early_stopping_rounds=early_stopping_rounds,
                                    verbose=verbose, feature_name=feature_name,
                                    categorical_feature=categorical_feature,
                                    callbacks=callbacks)
        return self
| mit |
MikkelsCykel/LightweightCrypto | Project2/Code/S113408-DPA.py | 1 | 3720 |
# coding: utf-8
# Project Constants and imports:
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import math
mpl.rc("figure", facecolor="white")  # white figure background for all plots
# Input files: plaintext inputs (one byte per trace) and the power-trace matrix.
input_data_path = 'inputs8.txt'
input_t_path = 'T8.txt'
# Axes: 55 time samples per trace, 600 recorded traces.
nr_observations = np.arange(0, 55, 1)
nr_samples = np.arange(0, 600, 1)
# Auxiliary Functions:
# (plain defs instead of lambda assignments, per PEP 8 E731)
def hamming_weight(x):
    """Number of set bits in integer x (its Hamming weight)."""
    return bin(x).count('1')

def xor(x, y):
    """Byte-wise XOR: x ^ y reduced modulo 256."""
    return (x ^ y) % 256

def average(x):
    """Arithmetic mean of a sequence (true division)."""
    return sum(x) / len(x)
# Load data
T = np.loadtxt(input_t_path, delimiter=',')  # power traces (rows) x time samples (cols)
Inputs = np.loadtxt(input_data_path, delimiter=',', dtype=(int))  # one plaintext byte per trace
# AES S-box lookup table (256 entries), used by the Hamming-weight power model.
sbox =[0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]
# Illustrate T data:
# Plot every recorded power trace over the 55 time samples.
for i in xrange(0, 600):  # was xrange(0, 599), which silently skipped the last trace
    plt.plot(nr_observations, T[i])
plt.xlim([0, 54])
plt.xlabel('Samples')
plt.ylabel('Power Trace')
plt.show()
# Predict Hamming Weight:
def get_hamming_weight_table(inputs):
    """Build the predicted-leakage matrix for a CPA attack.

    For every plaintext byte in `inputs` (rows) and every key guess
    k in 1..256 (columns), predict the leakage as the Hamming weight
    of sbox[input XOR k].
    """
    predictions = [
        [hamming_weight(sbox[xor(plain_byte, guess)]) for guess in xrange(1, 257)]
        for plain_byte in inputs
    ]
    return np.array(predictions)
# Predicted Hamming-weight table: rows = plaintexts, columns = 256 key guesses.
HW = get_hamming_weight_table(Inputs)
# Illustrate a Hamming Weight Instance:
plt.plot(nr_samples,HW[:,0])  # leakage prediction series for the first key guess
plt.xlim([0, 600])
plt.xlabel('N')
plt.ylabel('Hamming Weight')
plt.show()
# Correlate Hamming Weights (HW) and Power Consumptions (T):
def correlation(x, y):
    """Pearson correlation coefficient of the paired samples x and y."""
    mean_x = average(x)
    mean_y = average(y)
    cross_sum = 0
    spread_x = 0
    spread_y = 0
    for idx in xrange(len(x)):
        dev_x = x[idx] - mean_x
        dev_y = y[idx] - mean_y
        cross_sum += dev_x * dev_y
        spread_x += dev_x * dev_x
        spread_y += dev_y * dev_y
    return cross_sum / math.sqrt(spread_x * spread_y)
# Correlation matrix: Corr[k][t] = corr(predicted HW for key guess k, trace sample t).
Corr = []
for o in xrange(0, 256):
    Corr.append([correlation(HW[:,o],T[:,t]) for t in xrange(0,55)])
Corr = np.array(Corr)
# Plot Correlations:
for i in xrange(0,256):
    plt.plot(nr_observations,Corr[i])
plt.xlim([0, 54])
plt.xlabel('Samples')
plt.ylabel('Correlation')
plt.show()
# Filter, plot, and print best key kandidate:
# a key guess whose correlation exceeds 0.2 at any sample point is reported
for i in xrange(0,256):
    if (len([q for q in Corr[i] if q > 0.2]) > 0):
        print 'The best matched key is: ', i+1 # +1 since keyspace 1 <= k >= 256 and the array is 0-indexed
        plt.plot(nr_observations,Corr[i])
        plt.xlim([0, 54])
        plt.xlabel('Samples')
        plt.ylabel('Correlation')
plt.show() | gpl-2.0 |
mjsauvinen/P4UL | pyNetCDF/reynoldsStressNetCdf.py | 1 | 6222 | #!/usr/bin/env python3
import sys
import numpy as np
import argparse
import matplotlib.pyplot as plt
from analysisTools import sensibleIds, groundOffset, quadrantAnalysis
from netcdfTools import read3dDataFromNetCDF, netcdfOutputDataset, \
createNetcdfVariable, netcdfWriteAndClose
from utilities import filesFromList, inputIfNone
from txtTools import openIOFile
'''
Description: Reynolds stress calculator.
In case of PALM-generated results (featuring staggered grid), the velocity data must first be
interpolated onto cell-centers (i.e. scalar grid) with groupVectorDataNetCdf.py script.
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
sepStr = ' # = # = # = # = # = # = # = # = '
parser = argparse.ArgumentParser()
parser.add_argument("strKey", type=str,nargs='?', default=None,\
  help="Search string for collecting input NETCDF files.")
parser.add_argument("-v", "--varnames", type=str, nargs=2, default=['u','w'],\
  help="Name of the variables in NETCDF file. Default=[u, w]")
parser.add_argument("-i1", "--ijk1",type=int, nargs=3,\
  help="Starting indices (ix, iy, iz) of the considered data. Required.")
parser.add_argument("-i2", "--ijk2",type=int, nargs=3,\
  help="Final indices (ix, iy, iz) of the considered data. Required.")
parser.add_argument("-vs", "--vstar",type=float, nargs=2, default=[1.,1.],\
  help="Characteristic value v* (vs) used in (v+ =(v-v0)/v*). Default=[1,1].")
parser.add_argument("-v0", "--vref",type=float, nargs=2, default=[0.,0.],\
  help="Reference value v0 (vref) used in (v+ =(v-v0)/v*). Default=[0,0].")
parser.add_argument("-xs", "--xscale",type=float, default=1.,\
  help="Coordinate scaling value (xs) used in (x+ =x/xs). Default=1.")
parser.add_argument("-of", "--outputToFile", type=str, default=None, \
  help="Name of the file to output analysis results. Default=None")
parser.add_argument("-p", "--printOn", action="store_true", default=False,\
  help="Print the numpy array data.")
args = parser.parse_args()
#==========================================================#
# Rename ... parsed arguments to short local names used below
strKey = args.strKey
varnames = args.varnames
v0 = np.array( args.vref ) # Convert to numpy array
vs = np.array( args.vstar )
xs = args.xscale
ijk1 = args.ijk1
ijk2 = args.ijk2
printOn = args.printOn
#==========================================================#
'''
Establish two boolean variables which indicate whether the created variable is an
independent or dependent variable in function createNetcdfVariable().
'''
parameter = True; variable = False

strKey = inputIfNone( strKey , " Enter search string: " )
fileNos, fileList = filesFromList( strKey+"*")

# Process every NETCDF file matching the search string independently.
for fn in fileNos:
  # - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # First fluctuation component
  cl = 1
  ncDict = read3dDataFromNetCDF( fileList[fn] , varnames[0], cl )
  v1 = ncDict['v']   # 'v' is a generic name for a variable in ncDict
  # Second fluctuation component
  ncDict = read3dDataFromNetCDF( fileList[fn] , varnames[1], cl )
  v2 = ncDict['v']
  # Dims
  nt, nz, ny, nx = np.shape( v1 )
  # - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # Spatial coords and time
  x = ncDict['x']; y = ncDict['y']; z = ncDict['z']
  time = ncDict['time']
  ncDict = None  # drop the reference so the (large) dict can be garbage-collected
  # Plot coord. information. This aids the user in the beginning.
  infoStr = '''
 Coord. range:
 min(x)={0} ... max(x)={1}, nx = {2}
 min(y)={3} ... max(y)={4}, ny = {5}
 min(z)={6} ... max(z)={7}, nz = {8}
'''.format(\
    np.min(x), np.max(x), len(x),\
    np.min(y), np.max(y), len(y),\
    np.min(z), np.max(z), len(z) )
  #print(infoStr)
  # - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # Non-dimensionalize the time series
  v1 -= v0[0]; v2 -= v0[1]
  v1 /= vs[0]; v2 /= vs[1]
  # - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # Extract the fluctuations
  v1m = np.mean(v1, axis=(0))
  v2m = np.mean(v2, axis=(0))
  # Extract fluctuating part and normalize by variance
  # Reuse the v1 and v2 variables to store values
  v1 -= v1m; v2 -= v2m
  # - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # Now check whether the given indices make sense
  ijk1 = sensibleIds( np.array( ijk1 ), x, y, z )
  ijk2 = sensibleIds( np.array( ijk2 ), x, y, z )
  print(' Check (1): i, j, k = {}'.format(ijk1))
  print(' Check (2): i, j, k = {}'.format(ijk2))
  # extents and index ranges of the analysis box
  nvz = (ijk2[2]-ijk1[2])+1; idz = range(ijk1[2],ijk2[2]+1)
  nvy = (ijk2[1]-ijk1[1])+1; idy = range(ijk1[1],ijk2[1]+1)
  nvx = (ijk2[0]-ijk1[0])+1; idx = range(ijk1[0],ijk2[0]+1)
  Cv = np.zeros( ( nt, nvz, nvy, nvx ) )  # instantaneous covariance v1'*v2'
  d = np.zeros( ( nvz, nvy, nvx ) )       # distance from the box corner point
  zd = np.zeros( ( nvz ) )                # height above the first vertical level
  # - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # Compute covariance
  for i in range(nvx):
    for j in range(nvy):
      for k in range(nvz):
        if( np.abs(v1[1,idz[k],idy[j],idx[i]])>1e-7 and np.abs(v2[1,idz[k],idy[j],idx[i]])>1e-7 ):
          Cv[:,k,j,i] = v1[ :,idz[k], idy[j], idx[i] ] * v2[ :,idz[k], idy[j], idx[i] ]
        else:
          Cv[:,k,j,i] = np.nan  # near-zero signal: mark point as invalid for nanmean below
        d[k,j,i] = np.sqrt( (z[idz[k]]-z[0])**2 + (y[idy[j]]-y[0])**2 + (x[idx[i]]-x[0])**2 )
        zd[k] = np.abs(z[idz[k]]-z[0])
  # - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # Reynolds stress
  Rs = np.nanmean( Cv, axis=(0) )          # time average
  Rs_havg = np.nanmean( Rs , axis=(1,2) )  # average over x and y
  hStr = " Reynolds averaged {}'{}' between ijk {} and {} ".format(varnames[0],varnames[1], ijk1,ijk2)
  fileout = '{}{}_UEX'.format(varnames[0],varnames[1]) + fileList[fn].split('/')[-1]
  fileout = fileout.strip('.nc') + '.dat'
  np.savetxt(fileout, np.c_[ (1./xs)*d.ravel(), Rs.ravel() ], fmt='%3.6e', header=hStr)
  # - - - - - <RS> - - - - - #
  hStr = " Horizontally and Reynolds avg {}'{}' between z=[{},{}]".format(varnames[0],varnames[1], zd[0],zd[-1])
  fileout = 'DA_{}{}_'.format(varnames[0],varnames[1]) + fileList[fn].split('/')[-1]
  fileout = fileout.strip('.nc') + '.dat'
  np.savetxt(fileout, np.c_[ (1./xs)*zd.ravel(), Rs_havg.ravel() ], fmt='%3.6e', header=hStr)
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/io/parser/na_values.py | 6 | 10526 | # -*- coding: utf-8 -*-
"""
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
import numpy as np
from numpy import nan
import pandas.io.parsers as parsers
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
class NAvaluesTests(object):
    """Shared NA-value parsing tests.

    Mixin: the concrete parser test class supplies self.read_csv and
    self.read_table.
    """

    def test_string_nas(self):
        # empty fields in otherwise-string columns parse as NaN
        data = """A,B,C
a,b,c
d,,f
,g,h
"""
        result = self.read_csv(StringIO(data))
        expected = DataFrame([['a', 'b', 'c'],
                              ['d', np.nan, 'f'],
                              [np.nan, 'g', 'h']],
                             columns=['A', 'B', 'C'])
        tm.assert_frame_equal(result, expected)

    def test_detect_string_na(self):
        # default sentinels ('NA', 'NaN', 'nan') are detected in string columns
        data = """A,B
foo,bar
NA,baz
NaN,nan
"""
        expected = np.array([['foo', 'bar'], [nan, 'baz'], [nan, nan]],
                            dtype=np.object_)
        df = self.read_csv(StringIO(data))
        tm.assert_numpy_array_equal(df.values, expected)

    def test_non_string_na_values(self):
        # see gh-3611: with an odd float format, we can't match
        # the string '999.0' exactly but still need float matching
        nice = """A,B
-999,1.2
2,-999
3,4.5
"""
        ugly = """A,B
-999,1.200
2,-999.000
3,4.500
"""
        # every mix of str/int/float spellings of -999 must behave the same
        na_values_param = [['-999.0', '-999'],
                           [-999, -999.0],
                           [-999.0, -999],
                           ['-999.0'], ['-999'],
                           [-999.0], [-999]]
        expected = DataFrame([[np.nan, 1.2], [2.0, np.nan],
                              [3.0, 4.5]], columns=['A', 'B'])
        for data in (nice, ugly):
            for na_values in na_values_param:
                out = self.read_csv(StringIO(data), na_values=na_values)
                tm.assert_frame_equal(out, expected)

    def test_default_na_values(self):
        # the parser's built-in NA sentinel set must match this list exactly
        _NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
                          '#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
                          'nan', '-NaN', '-nan', '#N/A N/A', ''])
        assert _NA_VALUES == parsers._NA_VALUES
        nv = len(_NA_VALUES)

        def f(i, v):
            # build an nv-column CSV row with sentinel v in column i
            if i == 0:
                buf = ''
            elif i > 0:
                buf = ''.join([','] * i)
            buf = "{0}{1}".format(buf, v)
            if i < nv - 1:
                buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
            return buf

        data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
        expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
        df = self.read_csv(data, header=None)
        tm.assert_frame_equal(df, expected)

    def test_custom_na_values(self):
        # user-supplied na_values extend (not replace) the default set
        data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
        expected = np.array([[1., nan, 3],
                             [nan, 5, nan],
                             [7, 8, nan]])
        df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
        tm.assert_numpy_array_equal(df.values, expected)

        df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
                              skiprows=[1])
        tm.assert_numpy_array_equal(df2.values, expected)

        # a scalar na_values behaves like a one-element list
        df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
                              skiprows=[1])
        tm.assert_numpy_array_equal(df3.values, expected)

    def test_bool_na_values(self):
        # an NA in a boolean column upcasts that column to object dtype
        data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
        result = self.read_csv(StringIO(data))
        expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
                              'B': np.array([False, True, nan], dtype=object),
                              'C': [True, False, True]})
        tm.assert_frame_equal(result, expected)

    def test_na_value_dict(self):
        # per-column na_values given as a {column: values} dict
        data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
        df = self.read_csv(StringIO(data),
                           na_values={'A': ['foo'], 'B': ['bar']})
        expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
                              'B': [np.nan, 'foo', np.nan, 'foo'],
                              'C': [np.nan, 'foo', np.nan, 'foo']})
        tm.assert_frame_equal(df, expected)

        # empty dict: defaults still apply; all index_col variants must agree
        data = """\
a,b,c,d
0,NA,1,5
"""
        xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
        xp.index.name = 'a'
        df = self.read_csv(StringIO(data), na_values={}, index_col=0)
        tm.assert_frame_equal(df, xp)

        xp = DataFrame({'b': [np.nan], 'd': [5]},
                       MultiIndex.from_tuples([(0, 1)]))
        xp.index.names = ['a', 'c']
        df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
        tm.assert_frame_equal(df, xp)

        xp = DataFrame({'b': [np.nan], 'd': [5]},
                       MultiIndex.from_tuples([(0, 1)]))
        xp.index.names = ['a', 'c']
        df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
        tm.assert_frame_equal(df, xp)

    def test_na_values_keep_default(self):
        # keep_default_na toggles whether the built-in sentinels still apply
        data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
        df = self.read_csv(StringIO(data))
        xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', np.nan, 'five',
                                  np.nan, 'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)

        df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
                           keep_default_na=False)
        xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', 'nan', 'five',
                                  '', 'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)

        df = self.read_csv(
            StringIO(data), na_values=['a'], keep_default_na=False)
        xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', 'nan', 'five', '',
                                  'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)

        df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
        xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', np.nan, 'five',
                                  np.nan, 'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)

        # see gh-4318: passing na_values=None and
        # keep_default_na=False yields 'None' as a na_value
        data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
        df = self.read_csv(
            StringIO(data), keep_default_na=False)
        xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['None', 'two', 'None', 'nan', 'five', '',
                                  'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)

    def test_na_values_na_filter_override(self):
        # na_filter=False disables all NA conversion, even explicit na_values
        data = """\
A,B
1,A
nan,B
3,C
"""
        expected = DataFrame([[1, 'A'], [np.nan, np.nan], [3, 'C']],
                             columns=['A', 'B'])
        out = self.read_csv(StringIO(data), na_values=['B'], na_filter=True)
        tm.assert_frame_equal(out, expected)

        expected = DataFrame([['1', 'A'], ['nan', 'B'], ['3', 'C']],
                             columns=['A', 'B'])
        out = self.read_csv(StringIO(data), na_values=['B'], na_filter=False)
        tm.assert_frame_equal(out, expected)

    def test_na_trailing_columns(self):
        # rows shorter than the header leave the trailing columns all-NaN
        data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
        result = self.read_csv(StringIO(data))
        assert result['Date'][1] == '2012-05-12'
        assert result['UnitPrice'].isnull().all()

    def test_na_values_scalar(self):
        # see gh-12224
        names = ['a', 'b']
        data = '1,2\n2,1'

        expected = DataFrame([[np.nan, 2.0], [2.0, np.nan]],
                             columns=names)
        out = self.read_csv(StringIO(data), names=names, na_values=1)
        tm.assert_frame_equal(out, expected)

        expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]],
                             columns=names)
        out = self.read_csv(StringIO(data), names=names,
                            na_values={'a': 2, 'b': 1})
        tm.assert_frame_equal(out, expected)

    def test_na_values_dict_aliasing(self):
        # the caller's na_values dict must not be mutated by the parser
        na_values = {'a': 2, 'b': 1}
        na_values_copy = na_values.copy()
        names = ['a', 'b']
        data = '1,2\n2,1'
        expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
        out = self.read_csv(StringIO(data), names=names, na_values=na_values)
        tm.assert_frame_equal(out, expected)
        tm.assert_dict_equal(na_values, na_values_copy)

    def test_na_values_dict_col_index(self):
        # see gh-14203
        data = 'a\nfoo\n1'
        na_values = {0: 'foo'}
        out = self.read_csv(StringIO(data), na_values=na_values)
        expected = DataFrame({'a': [np.nan, 1]})
        tm.assert_frame_equal(out, expected)

    def test_na_values_uint64(self):
        # see gh-14983
        na_values = [2**63]
        data = str(2**63) + '\n' + str(2**63 + 1)
        expected = DataFrame([str(2**63), str(2**63 + 1)])
        out = self.read_csv(StringIO(data), header=None, na_values=na_values)
        tm.assert_frame_equal(out, expected)

        data = str(2**63) + ',1' + '\n,2'
        expected = DataFrame([[str(2**63), 1], ['', 2]])
        out = self.read_csv(StringIO(data), header=None)
        tm.assert_frame_equal(out, expected)

    def test_empty_na_values_no_default_with_index(self):
        # see gh-15835
        data = "a,1\nb,2"
        expected = DataFrame({'1': [2]}, index=Index(["b"], name="a"))
        out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0)
        tm.assert_frame_equal(out, expected)
| mit |
droundy/deft | papers/thesis-scheirer/final/smooth_test_V5_plots.py | 1 | 9603 | from __future__ import division
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
import numpy as np
import matplotlib
import pylab as plt
import os
import sys
import RG
import SW
import time
#temp = plt.linspace(0.6,1.28,20)
#temp = np.concatenate((plt.linspace(1.20,1.21,6),plt.linspace(1.21+0.01/6,1.22,15),plt.linspace(1.22+0.01/15,1.23,6)),axis=0)
##numdata2=250 #Number of numdensity data points
##max_fillingfraction_handled = 0.15
##ffs = plt.linspace(0.0001,max_fillingfraction_handled,numdata2)
##temp = []
##
##for ff in ffs:
## temp.append(1.215 - ((0.5)/(0.15)**3)*abs(ff-0.15)**3)
##
# Coexistence (cotangent-construction) data for successive RG iteration depths.
data1 = np.loadtxt('RG_final_data/09_250Ts/cotangent_data/RG_cotangent_data1.out')
data2 = np.loadtxt('RG_final_data/09_250Ts/cotangent_data/RG_cotangent_data2.out')
data3 = np.loadtxt('RG_final_data/09_250Ts/cotangent_data/RG_cotangent_data3.out')
##data4 = np.loadtxt('RG_final_data/08_27Ts/cotangent_data/RG_cotangent_data4.out')
data5 = np.loadtxt('RG_final_data/09_250Ts/cotangent_data/RG_cotangent_data5.out')
##data6 = np.loadtxt('RG_final_data/06_20Ts_fixed/cotangent_data/RG_cotangent_data6.out')
##data7 = np.loadtxt('RG_final_data/06_20Ts_fixed/cotangent_data/RG_cotangent_data7.out')
##data8 = np.loadtxt('RG_final_data/06_20Ts_fixed/cotangent_data/RG_cotangent_data8.out')
datasnaft = np.loadtxt('snaft2.out')  # reference (SnAFT) coexistence curve
numdata=1000
max_fillingfraction_handled = 0.55
x = plt.linspace(1e-20,max_fillingfraction_handled/(4*np.pi/3),numdata) # x-axis grid space (used for when plotting number density on x-axis)
xmin=(3/(4.0*np.pi))*(1e-20)*(1/(SW.R)**3)
xmax=(3/(4.0*np.pi))*(0.2)*(1/(SW.R)**3)
xff = plt.linspace(xmin,xmax,numdata)
# Each data file has columns [T, n_liquid_left, n_vapor_right].
nLs = [datasnaft[i][1] for i in range(0,len(datasnaft))] # list of the left number density solutions obtained from fsolve
nRs = [datasnaft[i][2] for i in range(0,len(datasnaft))] # list of the right number density solutions obtained from fsolve
Tlists = [datasnaft[i][0] for i in range(0,len(datasnaft))] # list of the corresponding temperatures for which the above number densitites were found
ffLs = [i*((4*np.pi*(SW.R)**3)/3) for i in nLs] # converts left number density to filling fraction
ffRs = [i*((4*np.pi*(SW.R)**3)/3) for i in nRs] # converts right number density to filling fraction
nL1 = [data1[i][1] for i in range(0,len(data1))] # list of the left number density solutions obtained from fsolve
nR1 = [data1[i][2] for i in range(0,len(data1))] # list of the right number density solutions obtained from fsolve
Tlist1 = [data1[i][0] for i in range(0,len(data1))] # list of the corresponding temperatures for which the above number densitites were found
ffL1 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL1] # converts left number density to filling fraction
ffR1 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR1] # converts right number density to filling fraction
nL2 = [data2[i][1] for i in range(0,len(data2))] # list of the left number density solutions obtained from fsolve
nR2 = [data2[i][2] for i in range(0,len(data2))] # list of the right number density solutions obtained from fsolve
Tlist2 = [data2[i][0] for i in range(0,len(data2))] # list of the corresponding temperatures for which the above number densitites were found
ffL2 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL2] # converts left number density to filling fraction
ffR2 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR2] # converts right number density to filling fraction
nL3 = [data3[i][1] for i in range(0,len(data3))] # list of the left number density solutions obtained from fsolve
nR3 = [data3[i][2] for i in range(0,len(data3))] # list of the right number density solutions obtained from fsolve
Tlist3 = [data3[i][0] for i in range(0,len(data3))] # list of the corresponding temperatures for which the above number densitites were found
ffL3 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL3] # converts left number density to filling fraction
ffR3 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR3] # converts right number density to filling fraction
##nL4 = [data4[i][1] for i in range(0,len(data4))] # list of the left number density solutions obtained from fsolve
##nR4 = [data4[i][2] for i in range(0,len(data4))] # list of the right number density solutions obtained from fsolve
##Tlist4 = [data4[i][0] for i in range(0,len(data4))] # list of the corresponding temperatures for which the above number densitites were found
##
##ffL4 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL4] # converts left number density to filling fraction
##ffR4 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR4] # converts right number density to filling fraction
##
##
# Same extraction for iteration 5 (the only higher iteration that is plotted).
nL5 = [data5[i][1] for i in range(0,len(data5))] # list of the left number density solutions obtained from fsolve
nR5 = [data5[i][2] for i in range(0,len(data5))] # list of the right number density solutions obtained from fsolve
Tlist5 = [data5[i][0] for i in range(0,len(data5))] # list of the corresponding temperatures for which the above number densitites were found
ffL5 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL5] # converts left number density to filling fraction
ffR5 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR5] # converts right number density to filling fraction
##nL6 = [data6[i][1] for i in range(0,len(data6))] # list of the left number density solutions obtained from fsolve
##nR6 = [data6[i][2] for i in range(0,len(data6))] # list of the right number density solutions obtained from fsolve
##Tlist6 = [data6[i][0] for i in range(0,len(data6))] # list of the corresponding temperatures for which the above number densitites were found
##
##ffL6 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL6] # converts left number density to filling fraction
##ffR6 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR6] # converts right number density to filling fraction
##
##
##nL7 = [data7[i][1] for i in range(0,len(data7))] # list of the left number density solutions obtained from fsolve
##nR7 = [data7[i][2] for i in range(0,len(data7))] # list of the right number density solutions obtained from fsolve
##Tlist7 = [data7[i][0] for i in range(0,len(data7))] # list of the corresponding temperatures for which the above number densitites were found
##
##ffL7 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL7] # converts left number density to filling fraction
##ffR7 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR7] # converts right number density to filling fraction
##
##
##
##nL8 = [data8[i][1] for i in range(0,len(data8))] # list of the left number density solutions obtained from fsolve
##nR8 = [data8[i][2] for i in range(0,len(data8))] # list of the right number density solutions obtained from fsolve
##Tlist8 = [data8[i][0] for i in range(0,len(data8))] # list of the corresponding temperatures for which the above number densitites were found
##
##ffL8 = [i*((4*np.pi*(SW.R)**3)/3) for i in nL8] # converts left number density to filling fraction
##ffR8 = [i*((4*np.pi*(SW.R)**3)/3) for i in nR8] # converts right number density to filling fraction
##
white_MD = np.loadtxt('white_MD.dat') # MD
forte_data = np.loadtxt('forte_data.dat') # not converged
forte_data_conv = np.loadtxt('forte_data_conv.dat') # Converged
# Linear pixel -> (rho*, T) map; presumably digitized from the White 2000 figure -- TODO confirm
mrhoWhite = (0.7-0.0)/(345-73)
brhoWhite = 0-mrhoWhite*73
mTWhite = (1.2-0.8)/(76-400)
bTWhite = 1.2-mTWhite*76
rhostar_MD = white_MD[:,0]*mrhoWhite + brhoWhite
T_MD = white_MD[:,1]*mTWhite + bTWhite
eta_MD = rhostar_MD*np.pi/6  # packing fraction from reduced density
# Linear pixel -> (rho*, T) map for the Forte 2011 data sets
mrho = (0.8-0.0)/(504-94)
brho = 0-mrho*94
mT = (0.65-1.85)/(345-52)
bT = 0.65-mT*345
rhostar_forte = forte_data[:,0]*mrho + brho
T_forte = forte_data[:,1]*mT + bT
eta_forte = rhostar_forte*np.pi/6
rhostar_conv = forte_data_conv[:,0]*mrho + brho
T_forte_conv = forte_data_conv[:,1]*mT + bT
eta_conv = rhostar_conv*np.pi/6
def liq_vap_Tvsff():
    """Plot the liquid-vapor coexistence curve, temperature vs filling
    fraction, comparing this work (SnAFT and RG iteration 5) with the
    Forte 2011 curves; commented lines are alternative data series."""
    plt.figure()
    #plt.plot(ffL,Tlist,color='#f36118',linewidth=2)
    #plt.plot(ffR,Tlist,color='c',linewidth=2)
    #plt.plot(eta_MD,T_MD,'yo',label='White 2000')
    plt.plot(eta_forte,T_forte,'b--',label='Forte 2011, non-converged RGT')
    plt.plot(eta_conv,T_forte_conv,'r--',label='Forte 2011, converged RGT')
    plt.plot(ffLs,Tlists,'c',linewidth=2)
    plt.plot(ffRs,Tlists,'c',linewidth=2)
    ## plt.plot(ffL1,Tlist1,'ro',linewidth=2)
    ## plt.plot(ffR1,Tlist1,'ro',linewidth=2)
    ##
    ## plt.plot(ffL2,Tlist2,'go',linewidth=2)
    ## plt.plot(ffR2,Tlist2,'go',linewidth=2)
    ##
    ## plt.plot(ffL3,Tlist3,'bo',linewidth=2)
    ## plt.plot(ffR3,Tlist3,'bo',linewidth=2)
    ##
    ## plt.plot(ffL4,Tlist4,'ko',linewidth=2)
    ## plt.plot(ffR4,Tlist4,'ko',linewidth=2)
    ##
    plt.plot(ffL5,Tlist5,'yo',linewidth=2)
    plt.plot(ffR5,Tlist5,'yo',linewidth=2)
    ## plt.plot(ffL6,Tlist6,color='c',linewidth=2)
    ## plt.plot(ffR6,Tlist6,color='c',linewidth=2)
    ##
    ## plt.plot(ffL7,Tlist7,color='orange',linewidth=2)
    ## plt.plot(ffR7,Tlist7,color='orange',linewidth=2)
    ##
    ## plt.plot(ffL8,Tlist8,'co',linewidth=2)
    ## plt.plot(ffR8,Tlist8,'co',linewidth=2)
    plt.xlabel(r'$filling fraction$')
    plt.ylabel(r'$temperature$')
    #plt.xlim(-.05,max(ffR1)+.05)
    #plt.xlim([0,0.40])
    #plt.ylim([0.8,1.39])
    plt.title('liquid-vapor coexistence '+r'$\lambda_{SW}=1.5$')
    plt.show()
#plt.savefig('meeting/13may2016/Tvsff_RG_white2.png'%temp)
liq_vap_Tvsff()
| gpl-2.0 |
dsavoiu/kafe2 | examples/011_multifit/03_multifit2.py | 1 | 4626 | """Perform a simultaneous fit to two frequency distributions
(= histograms) with common parameters with kafe2.MultiFit()
This example illustrates another common use-case for multifits,
where the same signal is measured under varying conditions,
e.g. in different detector regions with different resolutions
and background levels.
Consider the distribution of a signal on top of a flat background.
Additional smearing is added to the "true" data values. A second,
similar set of data at the same position and with the same width
is generated, albeit with a differing number of signal events,
smaller signal fraction and less resolution smearing.
A simultaneous fit using the kafe2 MultiFit feature is then performed
to extract the position and raw width common to the two data sets.
*Note*: in this simple case of two independent frequency distributions
the results for the common parameters could also be determined by
combination of the results from two individual fits to each of the
histograms.
"""
from kafe2 import Fit, Plot, HistContainer, MultiFit
import numpy as np
import matplotlib.pyplot as plt
# function fo generate the signal-plus-background distributions
def generate_data(N, min, max, pos, width, s):
    """generate a random dataset of exactly N events:
    Gaussian signal at position pos with width width and signal fraction s
    on top of a flat background between min and max

    Note: the parameters `min`/`max` shadow the builtins; the names are
    kept for interface stability with existing callers.
    """
    # signal sample
    n_signal = int(s * N)
    data_s = np.random.normal(loc=pos, scale=width, size=n_signal)
    # background sample: draw the remainder explicitly so the total is
    # exactly N (int(s*N) + int((1-s)*N) can undershoot N by one event)
    data_b = np.random.uniform(low=min, high=max, size=N - n_signal)
    return np.concatenate((data_s, data_b))
# the fit functions, one for each version of the distribution with
# different resolution and signal fraction
#
def SplusBmodel1(x, mu=5., width=0.3, res1=0.3, sf1=0.5):
    """Signal-plus-background density for the first data set.

    Gaussian at mu whose natural width and resolution res1 add in
    quadrature, weighted by signal fraction sf1, plus a flat background
    on [min, max] (module-level globals).
    """
    flat_part = 1. / (max - min)
    smeared_var = width * width + res1 * res1
    gauss_part = np.exp(-0.5 * (x - mu) ** 2 / smeared_var) / np.sqrt(2.0 * np.pi * smeared_var)
    return sf1 * gauss_part + (1 - sf1) * flat_part
def SplusBmodel2(x, mu=5., width=0.3, res2=0.3, sf2=0.5):
    """Signal-plus-background density for the second data set.

    Gaussian at mu whose natural width and resolution res2 add in
    quadrature, weighted by signal fraction sf2, plus a flat background
    on [min, max] (module-level globals).
    """
    flat_part = 1. / (max - min)
    smeared_var = width * width + res2 * res2
    gauss_part = np.exp(-0.5 * (x - mu) ** 2 / smeared_var) / np.sqrt(2.0 * np.pi * smeared_var)
    return sf2 * gauss_part + (1 - sf2) * flat_part
# --- generate data sets, set up and perform fit
min = 0.   # NOTE(review): min/max shadow the builtins; the SplusBmodel* functions read these globals
max = 10.
pos = 6.66
width = 0.33
# -- generate a first data set
s1 = 0.8
N1 = 200
r1 = 2 * width # smearing twice as large as natural width
SplusB_raw1 = generate_data(N1, min, max, pos, width, s1)
# apply resolution smearing to data set SplusB_data
SplusB_data1 = SplusB_raw1 + np.random.normal(loc=0., scale=r1, size=len(SplusB_raw1))
# -- generate a second data set at the same position and width,
# but with smaller signal fraction, better resolution and more events
s2 = 0.25
N2 = 500
r2 = width / 3.
SplusB_raw2 = generate_data(N2, min, max, pos, width, s2)
SplusB_data2 = SplusB_raw2 + np.random.normal(loc=0., scale=r2, size=len(SplusB_raw2))
# -- Create histogram containers from the two datasets
SplusB_histogram1 = HistContainer(n_bins=30, bin_range=(min, max), fill_data=SplusB_data1)
SplusB_histogram2 = HistContainer(n_bins=50, bin_range=(min, max), fill_data=SplusB_data2)
# -- create Fit objects by specifying their density functions with corresponding parameters
hist_fit1 = Fit(data=SplusB_histogram1, model_function=SplusBmodel1)
hist_fit2 = Fit(data=SplusB_histogram2, model_function=SplusBmodel2)
# to make the fit unambiguous,
# external knowledge on the resolutions must be applied
hist_fit1.add_parameter_constraint(name='res1', value=r1, uncertainty=r1 / 4.)
hist_fit2.add_parameter_constraint(name='res2', value=r2, uncertainty=r2 / 2.)
# -- test: perform individual fits
print('\n*==* Result of fit to first histogram')
hist_fit1.do_fit()
hist_fit1.report()
print('\n*==* Result of fit to second histogram')
hist_fit2.do_fit()
hist_fit2.report()
# combine the two fits to a MultiFit; parameters mu and width are shared by name
multi_fit = MultiFit(fit_list=[hist_fit1, hist_fit2])
multi_fit.do_fit() # do the fit
print('\n*==* Result of multi-fit to both histograms')
multi_fit.report() # Optional: print a report to the terminal
# Optional: create output graphics
multi_plot = Plot(multi_fit, separate_figures=True)
multi_plot.plot(asymmetric_parameter_errors=True)
plt.show()
| gpl-3.0 |
annahs/atmos_research | WHI_long_term_size_distrs_fresh_emissions-includes_sampled_vol-sep_by_precip.py | 1 | 24872 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
from datetime import timedelta
import calendar
import math
import copy
timezone = timedelta(hours = 0) #using zero here b/c most files were written with old PST code, have a correction further down for those (2009 early 2012) run with newer UTC code
AD_corr = True  # presumably "aerodynamic diameter correction" flag -- TODO confirm
#1. #alter the dates to set limits on data analysis range
start_analysis_at = datetime.strptime('20120101','%Y%m%d')
end_analysis_at = datetime.strptime('20120531','%Y%m%d')
########data dirs
directory_list = [
    #'D:/2009/WHI_ECSP2/Binary/',
    #'D:/2010/WHI_ECSP2/Binary/',
    'D:/2012/WHI_UBCSP2/Binary/',
]
#tracking odd neg intervals (buffering issue?)
argh = 0
ok = 0
err_count = 0
non_err_count = 0
##############initialize binning variables
bins = []
start_size = 70 #VED in nm
end_size = 220 #VED in nm
interval_length = 5 #in nm
#need durations to calc sampled volume later for concs
sampling_duration_cluster_1_no_precip = 0
sampling_duration_cluster_2_no_precip = 0
sampling_duration_cluster_3_no_precip = 0
sampling_duration_cluster_4_no_precip = 0
sampling_duration_cluster_5_no_precip = 0
sampling_duration_cluster_6_no_precip = 0
sampling_duration_GBPS_no_precip = 0
sampling_duration_fresh_no_precip = 0
sampling_duration_cluster_1_precip = 0
sampling_duration_cluster_2_precip = 0
sampling_duration_cluster_3_precip = 0
sampling_duration_cluster_4_precip = 0
sampling_duration_cluster_5_precip = 0
sampling_duration_cluster_6_precip = 0
sampling_duration_GBPS_precip = 0
sampling_duration_fresh_precip = 0
sampling_duration_allFT = 0
#create list of size bins
while start_size < end_size:
    bins.append(start_size)
    start_size += interval_length
#create dictionary with size bins as keys
# each value is a two-element accumulator [0,0]
binned_data = {}
for bin in bins:
    binned_data[bin] = [0,0]
###create a binning dictionary for each air mass category
rBC_FT_data_cluster_1_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_2_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_3_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_4_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_5_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_6_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_GBPS_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_fresh_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_1_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_2_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_3_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_4_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_5_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_6_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_GBPS_precip = copy.deepcopy(binned_data)
rBC_FT_data_fresh_precip = copy.deepcopy(binned_data)
rBC_FT_data_all = copy.deepcopy(binned_data)
######get spike times (these are sorted by datetime)
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/')
file = open('WHI_rBC_record_2009to2013-spike_times.rbcpckl', 'r')
spike_times_full = pickle.load(file)
file.close()
# keep only spikes inside the configured analysis window
spike_times = []
for spike in spike_times_full:
    if spike.year >= start_analysis_at.year:
        if spike <= end_analysis_at:
            spike_times.append(spike)
##########open cluslist and read into a python list
cluslist = []
#CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod'
#CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod-precip_added-sig_precip_any_time'
CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod-precip_added-sig_precip_72hrs_pre_arrival'
# whitespace-delimited columns; col 0 = cluster number, cols 2-5 = yy mm dd hh, col 8 = precip flag
with open(CLUSLIST_file,'r') as f:
    for line in f:
        newline = line.split()
        cluster_no = int(newline[0])
        traj_time = datetime(int(newline[2])+2000,int(newline[3]),int(newline[4]),int(newline[5])) + timezone
        significant_rainfall = newline[8]
        if traj_time.year >= start_analysis_at.year:
            cluslist.append([traj_time,cluster_no,significant_rainfall])
# sort cluslist by row_datetime in place
cluslist.sort(key=lambda clus_info: clus_info[0])
######this helper method allows conversion of BC mass from a value arrived at via an old calibration to a value arrived at via a new calibration
#quad eqytn = ax2 + bx + c
def PeakHtFromMass(BC_mass, var_C, var_b, var_a):
    """Invert the quadratic calibration curve ``mass = a*x**2 + b*x + C``.

    Given a BC mass obtained via a calibration with coefficients
    (var_a, var_b, var_C), return the incandescence peak height ``x``
    that would have produced it.

    Returns np.nan when the quadratic has no real root; when two real
    roots exist, returns the one below the detector saturation limit
    (4000). If both roots are >= 4000 the function falls through and
    (as in the original) implicitly returns None.
    """
    a = var_a
    b = var_b
    c = var_C - BC_mass
    # Discriminant of the quadratic a*x**2 + b*x + c = 0.
    d = b**2 - 4*a*c
    if d < 0:
        # This equation has no real solution.
        return np.nan
    elif d == 0:
        # One (repeated) real solution: -b / (2a).
        return -b/(2*a)
    else:
        # Two real solutions; reuse the discriminant instead of
        # recomputing b**2 - 4ac inside each sqrt.
        sqrt_d = math.sqrt(d)
        x1 = (-b + sqrt_d)/(2*a)
        x2 = (-b - sqrt_d)/(2*a)
        # Prefer the root below the saturation threshold.
        if x1 < 4000:
            return x1
        if x2 < 4000:
            return x2
#get BC data
for directory in directory_list:
os.chdir(directory)
print directory
for item in os.listdir('.'):
if os.path.isdir(item) == True and item.startswith('20'):
folder_date = datetime.strptime(item, '%Y%m%d')
if folder_date >= start_analysis_at and folder_date <= end_analysis_at:
if folder_date.year == 2009:
old_C = 0
old_b = 0.012
old_a = 0
new_C = 0.01244
new_b = 0.0172
if folder_date.year == 2010:
old_C = 0.156
old_b = 0.00606
old_a = 6.3931e-7
new_C = -0.32619
new_b = 0.01081
if folder_date.year == 2012:
old_C = 0.20699
old_b = 0.00246
old_a = -1.09254e-7
new_C = 0.24826
new_b = 0.003043
os.chdir(item)
for file in os.listdir('.'):
if file.endswith('.ptxt'):
print file
f = open(file,'r')
f.readline()
for line in f:
newline = line.split('\t')
start_time = float(newline[0])
end_time = float(newline[1])
incand_flag = float(newline[2])
incand_sat_flag = int(newline[3])
BC_mass = float(newline[4])
BC_mass_old = float(newline[4])
if AD_corr == True:
if folder_date.year == 2009:
pk_ht = BC_mass/old_b
else:
pk_ht = PeakHtFromMass(BC_mass, old_C, old_b, old_a)
BC_mass = new_b*pk_ht + new_C
try:
BC_VED = (((BC_mass/(10**15*1.8))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
except:
#print BC_mass, BC_mass_old, datetime.utcfromtimestamp(end_time), err_count
err_count+=1
continue
non_err_count +=1
#this is to account for me running the first few 2012 days and all of 2009 with the new UTC code (the rest are old PST code)
if datetime.strptime('20120401', '%Y%m%d') <= datetime.utcfromtimestamp(start_time) <= datetime.strptime('20120410', '%Y%m%d'):
timezone = timedelta(hours = -8)
if datetime.utcfromtimestamp(start_time) <= datetime.strptime('20091231', '%Y%m%d'):
timezone = timedelta(hours = -8)
start_time_obj = datetime.utcfromtimestamp(start_time)+timezone
end_time_obj = datetime.utcfromtimestamp(end_time)+timezone
#ignore annoying neg intervals
if end_time_obj < start_time_obj:
argh += 1
continue
else:
ok +=1
#pop off any cluslist times that are in the past
cluslist_current_datetime = cluslist[0][0] #in PST
while end_time_obj > (cluslist_current_datetime + timedelta(hours=1)):
cluslist.pop(0)
if len(cluslist):
cluslist_current_datetime = cluslist[0][0]
else:
break
#get cluster no
cluslist_current_cluster_no = cluslist[0][1]
sig_rain_str = cluslist[0][2]
if sig_rain_str == 'True':
sig_rain = True
if sig_rain_str == 'False':
sig_rain = False
#use spike times to get fresh emissions data
spike_half_interval = 2
if len(spike_times):
spike_start = spike_times[0]-timedelta(minutes=spike_half_interval)
spike_end = spike_times[0]+timedelta(minutes=spike_half_interval)
while end_time_obj >= spike_end:
print 'pop spike time', end_time_obj, spike_times[0]
spike_times.pop(0)
if len(spike_times):
spike_start = spike_times[0]-timedelta(minutes=spike_half_interval)
spike_end = spike_times[0]+timedelta(minutes=spike_half_interval)
if len(spike_times) == 0:
print 'no more spike times'
break
if (start_time_obj < spike_start or start_time_obj < spike_end) and (end_time_obj > spike_start or end_time_obj > spike_end):
for key in rBC_FT_data_fresh_precip:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
if sig_rain == True:
rBC_FT_data_fresh_precip[key][0] = rBC_FT_data_fresh_precip[key][0] + BC_mass
rBC_FT_data_fresh_precip[key][1] = rBC_FT_data_fresh_precip[key][1] + 1
sampling_duration_fresh_precip = sampling_duration_fresh_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_fresh_no_precip[key][0] = rBC_FT_data_fresh_no_precip[key][0] + BC_mass
rBC_FT_data_fresh_no_precip[key][1] = rBC_FT_data_fresh_no_precip[key][1] + 1
sampling_duration_fresh_no_precip = sampling_duration_fresh_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
#add data to list in cluster dictionaries (1 list per cluster time early night/late night)
if ((cluslist_current_datetime-timedelta(hours=3)) <= end_time_obj <= (cluslist_current_datetime+timedelta(hours=3))):
if cluslist_current_cluster_no == 7:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_GBPS_precip[key][0] = rBC_FT_data_cluster_GBPS_precip[key][0] + BC_mass
rBC_FT_data_cluster_GBPS_precip[key][1] = rBC_FT_data_cluster_GBPS_precip[key][1] + 1
sampling_duration_GBPS_precip = sampling_duration_GBPS_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_GBPS_no_precip[key][0] = rBC_FT_data_cluster_GBPS_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_GBPS_no_precip[key][1] = rBC_FT_data_cluster_GBPS_no_precip[key][1] + 1
sampling_duration_GBPS_no_precip = sampling_duration_GBPS_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 1:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_1_precip[key][0] = rBC_FT_data_cluster_1_precip[key][0] + BC_mass
rBC_FT_data_cluster_1_precip[key][1] = rBC_FT_data_cluster_1_precip[key][1] + 1
sampling_duration_cluster_1_precip = sampling_duration_cluster_1_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_1_no_precip[key][0] = rBC_FT_data_cluster_1_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_1_no_precip[key][1] = rBC_FT_data_cluster_1_no_precip[key][1] + 1
sampling_duration_cluster_1_no_precip = sampling_duration_cluster_1_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 2:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_2_precip[key][0] = rBC_FT_data_cluster_2_precip[key][0] + BC_mass
rBC_FT_data_cluster_2_precip[key][1] = rBC_FT_data_cluster_2_precip[key][1] + 1
sampling_duration_cluster_2_precip = sampling_duration_cluster_2_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_2_no_precip[key][0] = rBC_FT_data_cluster_2_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_2_no_precip[key][1] = rBC_FT_data_cluster_2_no_precip[key][1] + 1
sampling_duration_cluster_2_no_precip = sampling_duration_cluster_2_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 3:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_3_precip[key][0] = rBC_FT_data_cluster_3_precip[key][0] + BC_mass
rBC_FT_data_cluster_3_precip[key][1] = rBC_FT_data_cluster_3_precip[key][1] + 1
sampling_duration_cluster_3_precip = sampling_duration_cluster_3_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_3_no_precip[key][0] = rBC_FT_data_cluster_3_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_3_no_precip[key][1] = rBC_FT_data_cluster_3_no_precip[key][1] + 1
sampling_duration_cluster_3_no_precip = sampling_duration_cluster_3_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 4:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_4_precip[key][0] = rBC_FT_data_cluster_4_precip[key][0] + BC_mass
rBC_FT_data_cluster_4_precip[key][1] = rBC_FT_data_cluster_4_precip[key][1] + 1
sampling_duration_cluster_4_precip = sampling_duration_cluster_4_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_4_no_precip[key][0] = rBC_FT_data_cluster_4_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_4_no_precip[key][1] = rBC_FT_data_cluster_4_no_precip[key][1] + 1
sampling_duration_cluster_4_no_precip = sampling_duration_cluster_4_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 5:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_5_precip[key][0] = rBC_FT_data_cluster_5_precip[key][0] + BC_mass
rBC_FT_data_cluster_5_precip[key][1] = rBC_FT_data_cluster_5_precip[key][1] + 1
sampling_duration_cluster_5_precip = sampling_duration_cluster_5_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_5_no_precip[key][0] = rBC_FT_data_cluster_5_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_5_no_precip[key][1] = rBC_FT_data_cluster_5_no_precip[key][1] + 1
sampling_duration_cluster_5_no_precip = sampling_duration_cluster_5_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 6:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_6_precip[key][0] = rBC_FT_data_cluster_6_precip[key][0] + BC_mass
rBC_FT_data_cluster_6_precip[key][1] = rBC_FT_data_cluster_6_precip[key][1] + 1
sampling_duration_cluster_6_precip = sampling_duration_cluster_6_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_6_no_precip[key][0] = rBC_FT_data_cluster_6_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_6_no_precip[key][1] = rBC_FT_data_cluster_6_no_precip[key][1] + 1
sampling_duration_cluster_6_no_precip = sampling_duration_cluster_6_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
f.close()
os.chdir(directory)
print 'neg times', argh, ok, argh*100./(argh+ok)
print err_count, non_err_count, err_count*100./(err_count+non_err_count)
average_flow = 120
total_sampled_volume_1_precip = sampling_duration_cluster_1_precip*average_flow/60
total_sampled_volume_2_precip = sampling_duration_cluster_2_precip*average_flow/60
total_sampled_volume_3_precip = sampling_duration_cluster_3_precip *average_flow/60
total_sampled_volume_4_precip = sampling_duration_cluster_4_precip*average_flow/60
total_sampled_volume_5_precip = sampling_duration_cluster_5_precip *average_flow/60
total_sampled_volume_6_precip = sampling_duration_cluster_6_precip*average_flow/60
total_sampled_volume_GBPS_precip = sampling_duration_GBPS_precip*average_flow/60
total_sampled_volume_fresh_precip = sampling_duration_fresh_precip*average_flow/60
total_sampled_volume_1_no_precip = sampling_duration_cluster_1_no_precip*average_flow/60
total_sampled_volume_2_no_precip = sampling_duration_cluster_2_no_precip*average_flow/60
total_sampled_volume_3_no_precip = sampling_duration_cluster_3_no_precip *average_flow/60
total_sampled_volume_4_no_precip = sampling_duration_cluster_4_no_precip*average_flow/60
total_sampled_volume_5_no_precip = sampling_duration_cluster_5_no_precip *average_flow/60
total_sampled_volume_6_no_precip = sampling_duration_cluster_6_no_precip*average_flow/60
total_sampled_volume_GBPS_no_precip = sampling_duration_GBPS_no_precip*average_flow/60
total_sampled_volume_fresh_no_precip = sampling_duration_fresh_no_precip*average_flow/60
total_sampled_volume_allFT = sampling_duration_allFT*average_flow/60
#v=create lists
rBC_FT_data_cluster_1_l_precip = []
rBC_FT_data_cluster_2_l_precip = []
rBC_FT_data_cluster_3_l_precip = []
rBC_FT_data_cluster_4_l_precip = []
rBC_FT_data_cluster_5_l_precip = []
rBC_FT_data_cluster_6_l_precip = []
rBC_FT_data_cluster_GBPS_l_precip = []
rBC_FT_data_fresh_l_precip = []
rBC_FT_data_cluster_1_l_no_precip = []
rBC_FT_data_cluster_2_l_no_precip = []
rBC_FT_data_cluster_3_l_no_precip = []
rBC_FT_data_cluster_4_l_no_precip = []
rBC_FT_data_cluster_5_l_no_precip = []
rBC_FT_data_cluster_6_l_no_precip = []
rBC_FT_data_cluster_GBPS_l_no_precip = []
rBC_FT_data_fresh_l_no_precip = []
rBC_FT_data_all_l = []
#put lists etc in array
binned_data_lists = [
[rBC_FT_data_cluster_1_precip ,rBC_FT_data_cluster_1_l_precip , total_sampled_volume_1_precip,'c1_precip'],
[rBC_FT_data_cluster_2_precip ,rBC_FT_data_cluster_2_l_precip , total_sampled_volume_2_precip,'c2_precip'],
[rBC_FT_data_cluster_3_precip ,rBC_FT_data_cluster_3_l_precip , total_sampled_volume_3_precip,'c3_precip'],
[rBC_FT_data_cluster_4_precip ,rBC_FT_data_cluster_4_l_precip , total_sampled_volume_4_precip,'c4_precip'],
[rBC_FT_data_cluster_5_precip ,rBC_FT_data_cluster_5_l_precip , total_sampled_volume_5_precip,'c5_precip'],
[rBC_FT_data_cluster_6_precip ,rBC_FT_data_cluster_6_l_precip , total_sampled_volume_6_precip,'c6_precip'],
[rBC_FT_data_cluster_GBPS_precip ,rBC_FT_data_cluster_GBPS_l_precip , total_sampled_volume_GBPS_precip,'GBPS_precip'],
[rBC_FT_data_fresh_precip ,rBC_FT_data_fresh_l_precip , total_sampled_volume_fresh_precip,'fresh_precip'],
[rBC_FT_data_cluster_1_no_precip ,rBC_FT_data_cluster_1_l_no_precip , total_sampled_volume_1_no_precip,'c1_no_precip'],
[rBC_FT_data_cluster_2_no_precip ,rBC_FT_data_cluster_2_l_no_precip , total_sampled_volume_2_no_precip,'c2_no_precip'],
[rBC_FT_data_cluster_3_no_precip ,rBC_FT_data_cluster_3_l_no_precip , total_sampled_volume_3_no_precip,'c3_no_precip'],
[rBC_FT_data_cluster_4_no_precip ,rBC_FT_data_cluster_4_l_no_precip , total_sampled_volume_4_no_precip,'c4_no_precip'],
[rBC_FT_data_cluster_5_no_precip ,rBC_FT_data_cluster_5_l_no_precip , total_sampled_volume_5_no_precip,'c5_no_precip'],
[rBC_FT_data_cluster_6_no_precip ,rBC_FT_data_cluster_6_l_no_precip , total_sampled_volume_6_no_precip,'c6_no_precip'],
[rBC_FT_data_cluster_GBPS_no_precip ,rBC_FT_data_cluster_GBPS_l_no_precip , total_sampled_volume_GBPS_no_precip,'GBPS_no_precip'],
[rBC_FT_data_fresh_no_precip ,rBC_FT_data_fresh_l_no_precip , total_sampled_volume_fresh_no_precip,'fresh_no_precip'],
[rBC_FT_data_all ,rBC_FT_data_all_l , total_sampled_volume_allFT,'all_FT'],
]
#fiddle with data (sort, normalize, etc)
for line in binned_data_lists:
dict = line[0]
list = line[1]
sampled_vol = line[2]
for bin, value in dict.iteritems():
bin_mass = value[0]
bin_numb = value[1]
try:
bin_mass_conc = bin_mass/sampled_vol #gives mass per cc
bin_numb_conc = bin_numb/sampled_vol #gives number per cc
temp = [bin,bin_mass_conc,bin_numb_conc]
except:
temp = [bin,np.nan,np.nan]
list.append(temp)
list.sort()
for row in list: #normalize
row.append(row[1]) #these 2 lines append teh raw mass and number concs
row.append(row[2])
row[1] = row[1]/(math.log(row[0]+interval_length)-math.log(row[0])) #d/dlog(VED)
row[2] = row[2]/(math.log(row[0]+interval_length)-math.log(row[0])) #d/dlog(VED)
row[0] = row[0]+interval_length/2 #correction for our binning code recording bin starts as keys instead of midpoints
#write final list of interval data to file and pickle
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/size_distrs/')
for list in binned_data_lists:
file = open('AD_corr - size distr - FT - ' + list[3] + '.txt', 'w')
file.write('size_bin_midpoint(VEDnm)' + '\t'+ 'dM/dlog(VED)_(ng/cm3)' + '\t'+ 'd#/dlog(VED)_(#/cm3)' + '\t' + 'dM(VED)_(ng/cm3)' + '\t'+ 'd#(VED)_(#/cm3)' + '\n')
file.write('total sampled volume:' + str(list[2]) + 'cc' + '\n')
for row in list[1]:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
file = open('AD_corr - size distr - FT - ' + list[3] + '.sdbinpickl', 'w')
pickle.dump(list[1], file)
file.close()
| mit |
schreiberx/sweet | benchmarks_sphere/paper_jrn_nla_rexi_linear/sph_rexi_linear_paper_gaussian_ts_comparison_earth_scale_cheyenne_performance/postprocessing_output_eta_err_vs_simtime.py | 1 | 3193 | #! /usr/bin/env python3
import sys
import matplotlib.pyplot as plt
import re
from matplotlib.lines import Line2D
#
# First, use
# ./postprocessing.py > postprocessing_output.txt
# to generate the .txt file
#
fig, ax = plt.subplots(figsize=(10,7))
ax.set_xscale("log", nonposx='clip')
ax.set_yscale("log", nonposy='clip')
mode = 'simtime'
#mode = 'dt'
with open('postprocessing_output_eta.txt') as f:
lines = f.readlines()
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
markers = []
for m in Line2D.markers:
try:
if len(m) == 1 and m != ' ' and m != '':
markers.append(m)
except TypeError:
pass
linestyles = ['-', '--', ':', '-.']
if len(sys.argv) > 1:
output_filename = sys.argv[1]
else:
output_filename = "./postprocessing_output_eta_err_vs_"+mode+".pdf"
if len(sys.argv) > 2:
plot_set = sys.argv[2:]
else:
plot_set = []
def plot(x, y, marker, linestyle, label):
    """Add one labelled error-vs-time series to the global axes ``ax``.

    Empty series are skipped. When a whitelist of run names was given on
    the command line (module-level ``plot_set``), only series whose label
    is in that set are drawn.
    """
    # Echo which run is being plotted.
    print(label)
    if len(x) == 0:
        return
    if len(plot_set) != 0:
        # BUG fix: filter on this series' own label, not the module-level
        # prev_name. Identical at current call sites (callers pass
        # prev_name as label), but the old form silently depended on the
        # global being in sync with the argument.
        if label not in plot_set:
            return
    ax.plot(x, y, marker=marker, linestyle=linestyle, label=label)
prev_name = ''
values_err = []
values_time = []
c = 2
for l in lines:
if l[-1] == '\n':
l = l[0:-1]
d = l.split("\t")
if d[0] == 'Running tests for new group:':
plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
for i, txt in enumerate(values_time):
ax.annotate("%.1f" % txt, (values_time[i]*1.03, values_err[i]*1.03))
prev_name = d[0]
values_err = []
values_time = []
c = c+1
continue
if len(d) != 5:
continue
if d[0] == 'SIMNAME':
continue
prev_name = d[0]
prev_name = prev_name.replace('script_ln2_b100_g9.81_h10000_f7.2921e-05_p0_a6371220_u0.0_rob1_fsph0_tsm_', '')
prev_name = prev_name.replace('_M0128_MPI_space01_time128', '')
prev_name = prev_name.replace('_M0128_MPI_space01_time001', '')
prev_name = prev_name.replace('_prcircle_nrm0_hlf0_pre1_ext00', '')
prev_name = prev_name.replace('_tso2_tsob2_REXICI', '')
prev_name = prev_name.replace('_C0040', '')
prev_name = prev_name.replace('_C0080', '')
prev_name = prev_name.replace('_C0160', '')
prev_name = prev_name.replace('_C0320', '')
prev_name = prev_name.replace('_C0640', '')
prev_name = prev_name.replace('_C1280', '')
prev_name = prev_name.replace('_C2560', '')
prev_name = prev_name.replace('_mr10.0_mi30.0', '')
prev_name = prev_name.replace('_n0064_sx50.0_sy50.0', '')
prev_name = prev_name.replace('_n0064', '')
prev_name = prev_name.replace('_sx50.0_sy50.0', '')
prev_name = re.sub(r"_mu.*", "", prev_name)
prev_name = re.sub(r"0000", "", prev_name)
values_err.append(float(d[1]))
if mode == 'simtime':
#
# SIMTIME
#
values_time.append(float(d[4]))
plt.xlabel("simulation time")
elif mode == 'dt':
#
# DT
#
m = re.search('_C([0-9]*)', d[0])
dt = float(m.group(1))
values_time.append(dt)
plt.xlabel("Timestep size")
plt.ylabel("Error")
plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
plt.legend()
plt.savefig(output_filename)
#plt.show()
| mit |
sdpython/cvxpy | examples/expr_trees/1D_convolution.py | 12 | 1453 | #!/usr/bin/env python
from cvxpy import *
import numpy as np
import random
from math import pi, sqrt, exp
def gauss(n=11, sigma=1):
    """Return a discrete Gaussian kernel of n samples with std dev sigma."""
    half = int(n / 2)
    # Normalisation constant of the Gaussian density, hoisted out of the loop.
    norm = 1 / (sigma * sqrt(2 * pi))
    values = []
    for x in range(-half, half + 1):
        values.append(norm * exp(-float(x) ** 2 / (2 * sigma ** 2)))
    return values
np.random.seed(5)
random.seed(5)
DENSITY = 0.008
n = 1000
x = Variable(n)
# Create sparse signal.
signal = np.zeros(n)
nnz = 0
for i in range(n):
if random.random() < DENSITY:
signal[i] = random.uniform(0, 100)
nnz += 1
# Gaussian kernel.
m = 1001
kernel = gauss(m, m/10)
# Noisy signal.
std = 1
noise = np.random.normal(scale=std, size=n+m-1)
noisy_signal = conv(kernel, signal) #+ noise
gamma = Parameter(sign="positive")
fit = norm(conv(kernel, x) - noisy_signal, 2)
regularization = norm(x, 1)
constraints = [x >= 0]
gamma.value = 0.06
prob = Problem(Minimize(fit), constraints)
solver_options = {"NORMALIZE": True, "MAX_ITERS": 2500,
"EPS":1e-3}
result = prob.solve(solver=SCS,
verbose=True,
NORMALIZE=True,
MAX_ITERS=2500)
# Get problem matrix.
data, dims = prob.get_problem_data(solver=SCS)
# Plot result and fit.
import matplotlib.pyplot as plt
plt.plot(range(n), signal, label="true signal")
plt.plot(range(n), np.asarray(noisy_signal.value[:n, 0]), label="noisy convolution")
plt.plot(range(n), np.asarray(x.value[:,0]), label="recovered signal")
plt.legend(loc='upper right')
plt.show()
| gpl-3.0 |
MaxHalford/StSICMR-Inference | utests.py | 1 | 1108 | # Verifying the installation
# Smoke tests: verify the third-party dependencies are importable.
try:
    import matplotlib
except ImportError:
    print('matplotlib is not installed')
try:
    import pandas
except ImportError:
    print('pandas is not installed')
try:
    import numpy
except ImportError:
    print('numpy is not installed')
# Verifying the model
try:
    from lib import model
    m = model.StSICMR(3, [0, 10, 20], [10, 1, 10], [1, 1, 1])
except Exception:
    # except Exception (not a bare except) so KeyboardInterrupt and
    # SystemExit still abort the test run instead of being swallowed.
    print('The model module is not working.')
# Verifying the plotting
try:
    from lib import plotting
    # show=False keeps the smoke test headless (no window pops up).
    plotting.plotModel(m, times=[0.1, 0.2, 0.3], logScale=True, show=False)
except Exception:
    print('The plotting module is not working.')
# Verifying the genetic algorithm
try:
    from lib.inference import genalg
    pop = genalg.Population(model.StSICMR, [0.1, 0.2, 0.3], [1, 2, 1],
                            switches=1,
                            sizeChange=False,
                            repetitions=1,
                            method='least_squares')
    # One short enhancement pass is enough to prove the module runs.
    pop.enhance(10, repeat=False)
except Exception:
    print('The genetic algorithm module is not working.')
print ('All the tests were successful!') | mit |
ttouchstone/deap | examples/coev/coop_gen.py | 12 | 4886 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example contains the generalizing test from *Potter, M. and De Jong,
K., 2001, Cooperative Coevolution: An Architecture for Evolving Co-adapted
Subcomponents.* section 4.2.2. Varying the *NUM_SPECIES* in :math:`[1, \ldots,
4]` will produce the results for one to four species respectively.
"""
import random
try:
import matplotlib.pyplot as plt
except ImportError:
plt = False
import numpy
from deap import algorithms
from deap import tools
import coop_base
IND_SIZE = coop_base.IND_SIZE
SPECIES_SIZE = coop_base.SPECIES_SIZE
NUM_SPECIES = 4
TARGET_SIZE = 30
noise = "*##*###*###*****##*##****#*##*###*#****##******##*#**#*#**######"
schematas = ("1##1###1###11111##1##1111#1##1###1#1111##111111##1#11#1#11######",
"1##1###1###11111##1##1000#0##0###0#0000##000000##0#00#0#00######",
"0##0###0###00000##0##0000#0##0###0#0000##001111##1#11#1#11######")
toolbox = coop_base.toolbox
if plt:
# This will allow to plot the match strength of every target schemata
toolbox.register("evaluate_nonoise", coop_base.matchSetStrengthNoNoise)
def main(extended=True, verbose=True):
    """Co-evolve NUM_SPECIES species against the noisy target strings.

    Cooperative co-evolution: each individual is evaluated together with
    the current best representative of every other species against the
    shared target set.

    :param extended: if True, print the noise-free representatives at the
        end and, when matplotlib is available, plot the match strength of
        the representatives on the three target schematas.
    :param verbose: if True, stream the per-species statistics to stdout.
    """
    target_set = []

    # Fitness statistics compiled for each species at every generation.
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = "gen", "species", "evals", "std", "min", "avg", "max"

    # ngen counts species-generations: g is advanced once per species
    # inside the loop below, not once per full round.
    ngen = 150
    g = 0

    # Build the target set: an equal share of targets for each schemata.
    for i in range(len(schematas)):
        size = int(TARGET_SIZE/len(schematas))
        target_set.extend(toolbox.target_set(schematas[i], size))

    species = [toolbox.species() for _ in range(NUM_SPECIES)]

    # Init with random a representative for each species
    representatives = [random.choice(s) for s in species]

    if plt and extended:
        # We must save the match strength to plot them
        t1, t2, t3 = list(), list(), list()

    while g < ngen:
        # Initialize a container for the next generation representatives
        next_repr = [None] * len(species)
        for i, s in enumerate(species):
            # Vary the species individuals
            s = algorithms.varAnd(s, toolbox, 0.6, 1.0)

            # Get the representatives excluding the current species
            r = representatives[:i] + representatives[i+1:]
            for ind in s:
                # Fitness of the collaboration: this individual plus the
                # other species' representatives, matched on target_set.
                ind.fitness.values = toolbox.evaluate([ind] + r, target_set)

            record = stats.compile(s)
            logbook.record(gen=g, species=i, evals=len(s), **record)

            if verbose:
                print(logbook.stream)

            # Select the individuals
            species[i] = toolbox.select(s, len(s))  # Tournament selection
            next_repr[i] = toolbox.get_best(s)[0]  # Best selection

            g += 1

        if plt and extended:
            # Compute the match strength without noise for the
            # representatives on the three schematas
            t1.append(toolbox.evaluate_nonoise(representatives,
                toolbox.target_set(schematas[0], 1), noise)[0])
            t2.append(toolbox.evaluate_nonoise(representatives,
                toolbox.target_set(schematas[1], 1), noise)[0])
            t3.append(toolbox.evaluate_nonoise(representatives,
                toolbox.target_set(schematas[2], 1), noise)[0])

        # The new representatives only take effect for the next round.
        representatives = next_repr

    if extended:
        for r in representatives:
            # print individuals without noise
            print("".join(str(x) for x, y in zip(r, noise) if y == "*"))

    if plt and extended:
        # Do the final plotting
        plt.plot(t1, '-', color="k", label="Target 1")
        plt.plot(t2, '--', color="k", label="Target 2")
        plt.plot(t3, ':', color="k", label="Target 3")
        plt.legend(loc="lower right")
        plt.axis([0, ngen, 0, max(max(t1), max(t2), max(t3)) + 1])
        plt.xlabel("Generations")
        plt.ylabel("Number of matched bits")
        plt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
jgillis/casadi | experimental/joel/shallow/shallow.py | 1 | 5492 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
from numpy import *
from matplotlib.pylab import plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import time
import copy
# Physical parameters
g = 9.81 # gravity
poolwidth = 0.2
drag0 = 2.0 # => u(0)
depth0 = 0.01 # => u(1)
sprad = 0.03
spheight = 0.01
endtime = 1.0
# Discretization
numboxes = 20
num_eulersteps = 100
num_measurements = 100
# Plotting
plot_progress = False
numboxes_per_plot = 1
# Discretization
ntimesteps = num_eulersteps*num_measurements
dt = endtime/ntimesteps
dx = poolwidth/numboxes
dy = poolwidth/numboxes
x = linspace(0,poolwidth,numboxes)
y = linspace(0,poolwidth,numboxes)
[X,Y] = meshgrid(x,y)
# Initial conditions
# ---------------------------------------------------------------------------
# Shallow-water parameter estimation via single shooting (CasADi, Python 2).
# NOTE(review): this chunk relies on names defined earlier in the file
# (numboxes, dt, dx, dy, g, sprad, spheight, X, Y, num_eulersteps,
# num_measurements, numboxes_per_plot, drag0, depth0, plot_progress, plt, cm).
# ---------------------------------------------------------------------------
# Initial state: staggered-grid velocities u, v and water height h, with a
# cosine-shaped "splash" of radius sprad/3 centered near (0.04, 0.04).
u0 = zeros((numboxes+1, numboxes))
v0 = zeros((numboxes  , numboxes+1))
h0 = zeros((numboxes  , numboxes))
spdist = sqrt( (X-0.04)**2 + (Y-0.04)**2)
I = spdist<sprad/3.0
h0[I] = spheight * cos(3.0*pi*spdist[I]/(2.0*sprad))

# Free parameters to be estimated: bottom drag coefficient b and depth H.
drag = ssym("b")
depth = ssym("H")
p = vertcat([drag,depth])

# The state at a measurement
uk = ssym("uk",numboxes+1, numboxes)
vk = ssym("vk",numboxes , numboxes+1)
hk = ssym("hk",numboxes , numboxes)

# Mayer term: sum-of-squares mismatch between simulated and measured height.
hmeas = ssym("h_meas",numboxes , numboxes)
m = SXFunction([hk,hmeas],[sumAll((hk-hmeas)**2)])
m.init()

# Take one explicit-Euler step of the shallow-water equations.
u = SXMatrix(uk)
v = SXMatrix(vk)
h = SXMatrix(hk)
u[1:-1,:] = u[1:-1,:] + dt*(-g/dx * (h[1:,:]-h[:-1,:]) - u[1:-1,:]*drag)
v[:,1:-1] = v[:,1:-1] + dt*(-g/dy * (h[:,1:]-h[:,:-1]) - v[:,1:-1]*drag)
h[:,:] = h[:,:] + (-depth*dt)*(1.0/dx*(u[1:,:]-u[:-1,:]) + 1.0/dy * (v[:,1:]-v[:,:-1]))

# Create an integrator function for a single Euler step
f_step = SXFunction([p,uk,vk,hk],[u,v,h])
f_step.init()

# Expand to SX
#f_step = SXFunction(f_step)
#f_step.init()

# Integrate over one measurement interval (num_eulersteps Euler steps).
uk = msym("uk",numboxes+1, numboxes)
vk = msym("vk",numboxes , numboxes+1)
hk = msym("hk",numboxes , numboxes)
p = msym("p",2)
u,v,h = uk, vk, hk
for j in range(num_eulersteps):
    [u,v,h] = f_step.call([p,u,v,h])

# Create an integrator function mapping (p, state) -> state after one interval
f = MXFunction([p,uk,vk,hk],[u,v,h])
f.init()
print "generated discrete dynamics"

# Allocate memory for the simulated state (numeric copies of the initial state)
u = copy.deepcopy(u0)
v = copy.deepcopy(v0)
h = copy.deepcopy(h0)

# Prepare plotting
if plot_progress:
    fig = plt.figure(1)
    ax = fig.add_subplot(111, projection='3d')
    #plt.clf()
    #plt.grid(True)
    plt.ion()
    plt.hold(False)
    plt.draw()
    plt.show()

# Measurement
h_meas = []

# Simulate once with the "true" parameters to generate "measurements"
for k in range(num_measurements):
    # Visualize the pool
    if plot_progress:
        #plt.ioff()
        #print h[::numboxes_per_plot]
        ax.cla()
        surf = ax.plot_surface(X[::numboxes_per_plot],Y[::numboxes_per_plot],h[::numboxes_per_plot], rstride=1, cstride=1, cmap=cm.jet,
                               linewidth=0, antialiased=False, vmin = -spheight/10, vmax = spheight/10)
        #plt.contour(X[::numboxes_per_plot],Y[::numboxes_per_plot],h[::numboxes_per_plot])
        #ax.axis([0,poolwidth,0,poolwidth])
        ax.set_zlim(-spheight, spheight)
        plt.draw()
        plt.show()
        #time.sleep(0.02)
    # Integrate one interval with the reference parameter values
    f.setInput([drag0,depth0],0)
    f.setInput(u,1)
    f.setInput(v,2)
    f.setInput(h,3)
    f.evaluate()
    u = f.output(0).toArray()
    v = f.output(1).toArray()
    h = f.output(2).toArray()
    # Save a copy of h as this interval's measurement
    h_meas.append(copy.deepcopy(h))
print "measurements generated"

# Parameters in the single shooting problem
x = msym("x",2)
# NOTE(review): p was built as vertcat([drag, depth]) and the simulation above
# passes [drag0, depth0]; here x[0] is named 'depth' and x[1] 'drag' -- confirm
# the intended parameter ordering (the names are not used below, but the
# mismatch is confusing).
depth = x[0]
drag = x[1]

# Objective function
obj = 0

# Create expressions for objective functions and constraints by chaining the
# interval integrator over all measurement intervals (single shooting).
u = MX(u0)
v = MX(v0)
h = MX(h0)
for k in range(num_measurements):
    # Evaluate dynamics
    [u,v,h] = f.call([x,u,v,h])
    # Evaluate objective function
    [obj_k] = m.call([h,h_meas[k]])
    # add to objective
    obj += obj_k

# Formulate the single shooting NLP
ffcn = MXFunction([x],[obj])
#ffcn.setOption("verbose",True)
ffcn.init()
# NOTE(review): 'hfcb' is never used afterwards -- looks like a typo
# (presumably 'hfcn' for the objective Jacobian); confirm.
hfcb = ffcn.jacobian()
#ffcn = SXFunction(ffcn)
#t1 = time.time()
#ffcn.evaluate()
#t2 = time.time()
#print "t = %g" % (t2-t1)
#raise Exception('ss')

# Solve with IPOPT
nlp_solver = IpoptSolver(ffcn)

# Set options
#nlp_solver.setOption("generate_hessian",True)
#nlp_solver.setOption("verbose",True)
nlp_solver.setOption("max_iter",10)
#nlp_solver.setOption("mu_init",1e-10)
nlp_solver.setOption("derivative_test","first-order")

# Initialize NLP solver
nlp_solver.init()

# Set initial condition and bounds (parameters constrained non-negative)
nlp_solver.setInput([drag0,depth0],"x0")
#nlp_solver.setInput([2.0,0.01],"x0")
#nlp_solver.setInput([0.5,0.01],"x0")
nlp_solver.setInput([0,0],"lbx")

# Solve single shooting problem
nlp_solver.evaluate()
| lgpl-3.0 |
tawsifkhan/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent sematic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#         Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause

from __future__ import print_function

from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics

from sklearn.cluster import KMeans, MiniBatchKMeans

import logging
from optparse import OptionParser
import sys
from time import time

import numpy as np


# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
              dest="n_components", type="int",
              help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
              action="store_false", dest="use_idf", default=True,
              help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
              action="store_true", default=False,
              help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
              help="Maximum number of features (dimensions)"
                   " to extract from text.")
op.add_option("--verbose",
              action="store_true", dest="verbose", default=False,
              help="Print progress reports inside k-means algorithm.")

print(__doc__)
op.print_help()

(opts, args) = op.parse_args()
if len(args) > 0:
    # NOTE(review): OptionParser.error() already exits (with status 2);
    # the sys.exit(1) below is defensive and normally unreachable.
    op.error("this script takes no arguments.")
    sys.exit(1)


###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print("Loading 20 newsgroups dataset for categories:")
print(categories)

dataset = fetch_20newsgroups(subset='all', categories=categories,
                             shuffle=True, random_state=42)

print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()

# Ground-truth labels, used only to evaluate the clustering quality below.
labels = dataset.target
true_k = np.unique(labels).shape[0]

print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        hasher = HashingVectorizer(n_features=opts.n_features,
                                   stop_words='english', non_negative=True,
                                   norm=None, binary=False)
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        vectorizer = HashingVectorizer(n_features=opts.n_features,
                                       stop_words='english',
                                       non_negative=False, norm='l2',
                                       binary=False)
else:
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
                                 min_df=2, stop_words='english',
                                 use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)

print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()

if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    normalizer = Normalizer(copy=False)
    lsa = make_pipeline(svd, normalizer)

    X = lsa.fit_transform(X)

    print("done in %fs" % (time() - t0))

    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))

    print()


###############################################################################
# Do the actual clustering

if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)

print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()

# Evaluate against the ground-truth newsgroup labels.
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
      % metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, km.labels_, sample_size=1000))

print()

# With hashing there is no vocabulary, so per-cluster terms cannot be shown.
if not opts.use_hashing:
    print("Top terms per cluster:")

    if opts.n_components:
        # Map cluster centers back to the original term space before ranking.
        original_space_centroids = svd.inverse_transform(km.cluster_centers_)
        order_centroids = original_space_centroids.argsort()[:, ::-1]
    else:
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]

    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| bsd-3-clause |
plowman/python-mcparseface | models/syntaxnet/tensorflow/tensorflow/examples/skflow/iris_val_based_early_stopping.py | 3 | 2275 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib import learn

# Load iris and carve out train / test (80/20), then train / validation
# (again 80/20 of the training part), with fixed seeds for reproducibility.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                    iris.target,
                                                    test_size=0.2,
                                                    random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                  test_size=0.2, random_state=42)

# Monitor that stops training when the validation score has not improved
# for 200 rounds.
val_monitor = learn.monitors.ValidationMonitor(X_val, y_val,
                                               early_stopping_rounds=200,
                                               n_classes=3)

# classifier with early stopping on training data
classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                            n_classes=3, steps=2000)
classifier1.fit(X_train, y_train, logdir='/tmp/iris_model/')
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))

# classifier with early stopping on validation data
classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                            n_classes=3, steps=2000)
classifier2.fit(X_train, y_train, val_monitor, logdir='/tmp/iris_model_val/')
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))

# in many applications, the score is improved by using early stopping on val data
print(score2 > score1)
| apache-2.0 |
KasperPRasmussen/bokeh | sphinx/source/conf.py | 3 | 8350 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.graphviz',
    'sphinx.ext.ifconfig',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'bokeh.sphinxext.bokeh_autodoc',
    'bokeh.sphinxext.bokeh_gallery',
    'bokeh.sphinxext.bokeh_github',
    'bokeh.sphinxext.bokeh_jinja',
    'bokeh.sphinxext.bokeh_model',
    'bokeh.sphinxext.bokeh_palette',
    'bokeh.sphinxext.bokeh_plot',
    'bokeh.sphinxext.bokeh_prop',
    'bokeh.sphinxext.bokeh_sitemap',
    'bokeh.sphinxext.collapsible_code_block',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015, Continuum Analytics.'

# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__

# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
    version = release = settings.docs_version()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# Sort members by type
autodoc_member_order = 'groupwise'

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']

MAIN_SITE = '//bokehplots.com'

# Variables injected into every page template (used by the custom theme).
html_context = {
    'SITEMAP_BASE_URL': 'http://bokeh.pydata.org/en/', # Trailing slash is needed
    'SITENAME': 'Bokeh Docs',
    'DESCRIPTION': 'Bokeh visualization library, documentation site.',
    'AUTHOR': 'Bokeh contributors',

    # Nav
    'NAV': (
        ('About', MAIN_SITE + '/pages/about-bokeh.html'),
        ('Gallery', '/docs/gallery.html'),
        ('Docs', '//bokeh.pydata.org/en/latest/'),
        ('Github', '//github.com/bokeh/bokeh'),
    ),

    # Links
    'LINKS': (
        ('FAQs', MAIN_SITE + '/pages/faqs.html'),
        ('Technical vision', MAIN_SITE + '/pages/technical-vision.html'),
        ('Roadmap', MAIN_SITE + '/pages/roadmap.html'),
        ('Citation', MAIN_SITE + '/pages/citation.html'),
    ),

    # About Links
    'ABOUT': (
        ('About', MAIN_SITE + '/pages/about-bokeh.html'),
        ('Team', MAIN_SITE + '/pages/team.html'),
        ('Contact', MAIN_SITE + '/pages/contact.html'),
    ),

    # Social links
    'SOCIAL': (
        ('Contribute', MAIN_SITE + '/pages/contribute.html'),
        ('Mailing list', '//groups.google.com/a/continuum.io/forum/#!forum/bokeh'),
        ('Github', '//github.com/bokeh/bokeh'),
        ('Twitter', '//twitter.com/BokehPlots'),
        ('YouTube', '//www.youtube.com/channel/UCK0rSk29mmg4UT4bIOvPYhw')
    ),

    # Links for the docs sub navigation
    'NAV_DOCS': (
        ('Installation', 'installation'),
        ('User Guide', 'user_guide'),
        ('Gallery', 'gallery'),
        ('Reference', 'reference'),
        ('Releases', 'releases/%s' % version),
        ('Developer Guide', 'dev_guide'),
    ),

    # NOTE(review): '0.11.0' appears twice in this list -- possibly one of
    # them was meant to be '0.11.1'; confirm against the release history.
    'ALL_VERSIONS': ['0.11.0', '0.11.0', '0.10.0', '0.9.3', '0.8.2'],

    'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'),
}

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True

# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Continuum Analytics', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'bokeh', u'Bokeh Documentation',
     [u'Continuum Analytics'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Bokeh', u'Bokeh Documentation', u'Continuum Analytics', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# intersphinx settings
intersphinx_mapping = {
    'python': ('https://docs.python.org/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
| bsd-3-clause |
akartik80/8thSemProject | RandomForest.py | 1 | 1876 | import pandas as pd
import numpy as np
import re
import csv
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_auc_score
# Columns of the input CSV; the first five are used as integer features
# and 'state' (row[5]) is used as the class label.
used_cols = ['city','bhk','max_budget',
             'house_type','furnishing','state']


def read_data (f, header = True, test = False):
    """Read a housing-records CSV into (features, labels).

    Parameters:
        f      -- path of the CSV file to read.
        header -- when True, the first row is skipped as a header row.
        test   -- when True no rows are parsed at all and two empty lists
                  are returned.  NOTE(review): this looks unfinished --
                  confirm the intended behaviour for test files.

    Returns a tuple (data, labels) where data is a list of np.array rows
    (columns 0..4 converted to int) and labels is a list of np.int64
    class ids re-based to start at 0.
    """
    data = []
    labels = []
    csv_reader = csv.reader(open(f, "r"), delimiter=",")
    index = 0
    for row in csv_reader:
        index = index + 1
        if header and index == 1:
            continue
        if not test:
            rowx = []
            # label comes from column 5 ('state'), shifted to 0-based ids
            labels.append(np.int64(int(row[5])-1))
            # only the first len(used_cols)-1 == 5 columns become features
            for i in range(0, len(used_cols)-1):
                if row[i] == '':
                    # NOTE(review): this only logs the (1-based) row number;
                    # the int('') conversion below will still raise ValueError.
                    print index
                rowx.append(int(row[i]))
            data.append(np.array(rowx))
    return (data, labels)
# ---------------------------------------------------------------------------
# Train a RandomForest on the housing data and report accuracy / ROC-AUC /
# cross-validation statistics (Python 2 script).
# ---------------------------------------------------------------------------
train, labels = read_data("data/training.csv")
test, test_label = read_data("data/testing.csv")
train_mat = np.mat(train)
test_mat = np.mat(test)
count = 0
rf = RandomForestClassifier(n_estimators=150, max_features=5, min_samples_leaf=3, random_state=5000)
rf.fit(train_mat, labels)
# probability of the second class, fed to roc_auc_score below
rf_predict_labels = rf.predict_proba(test_mat)[:, 1]
predict = rf.predict(test)
# count the correctly classified test rows
# NOTE(review): 'count' is initialised twice (also a few lines above).
count = 0
for i in range(0, len(predict)):
    if predict[i] == test_label[i]:
        count = count+1
# 5-fold cross-validated ROC-AUC on the training data
cv_score = cross_val_score(rf, train_mat, labels, cv=5, scoring='roc_auc')
print "Number of correct labels = ",count
print "The Roc_auc_score is = " , roc_auc_score(test_label, rf_predict_labels)
print "Mean value of Cross_validation score =", np.mean(cv_score)
print "Max value of Cross_validation score =", np.max(cv_score)
print "Standard deviation of Cross_validation score =", np.std(cv_score)
print "Accuracy as per testing data = " ,accuracy_score(test_label, predict)
glennq/scikit-learn | sklearn/datasets/lfw.py | 4 | 19661 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove, rename
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
    """Linearly rescale the pixel values of *face* back onto [0, 1].

    Intended for plotting faces whose values were shifted/scaled by a
    normalization step; the input array itself is left untouched.
    """
    rescaled = face - face.min()
    rescaled /= rescaled.max()
    return rescaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Downloads the metadata files (pair split definitions) and the image
    archive if they are not already cached locally, and extracts the
    archive on first use.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    lfw_home : str
        Path of the LFW cache folder (holds the metadata files).
    data_folder_path : str
        Path of the folder containing the extracted jpeg images.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):

        if not exists(archive_path):
            if download_if_missing:
                # Download to a temporary name first so an interrupted
                # transfer does not leave a truncated archive behind.
                archive_path_temp = archive_path + ".tmp"
                logger.warning("Downloading LFW data (~200MB): %s",
                               archive_url)
                urllib.urlretrieve(archive_url, archive_path_temp)
                rename(archive_path_temp, archive_path)
            else:
                # Bug fix: previously this reported ``target_filepath``
                # (a stale variable left over from the metadata loop)
                # instead of the archive that is actually missing.
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Parameters
    ----------
    file_paths : list of str
        Paths of the jpeg files to decode, in output order.
    slice_ : tuple of two slices, or None
        2D (height, width) slice applied to each 250x250 source frame;
        ``None`` (or a ``None`` element) falls back to the full frame.
    color : bool
        If True keep the 3 RGB channels; otherwise average them into a
        single gray-level channel.
    resize : float or None
        Optional ratio used to rescale each (sliced) face picture.

    Returns
    -------
    faces : ndarray of shape (n_faces, h, w) or (n_faces, h, w, 3)
        Decoded images as float32 values scaled to [0.0, 1.0].
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")

    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)

        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # Bug fix: compare with ``==`` instead of ``is`` -- identity
        # comparison against an int literal only works by accident of
        # CPython's small-int caching and raises a SyntaxWarning on
        # modern Python versions.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)

        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)

        faces[i, ...] = face

    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset.

    This operation is meant to be cached by a joblib wrapper.

    Returns a tuple ``(faces, target, target_names)`` where ``faces`` is
    the decoded image array, ``target`` the integer person id per face
    and ``target_names`` the sorted unique person names.
    """
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)

    # map each person name to a dense integer id via the sorted unique names
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 74.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person : int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the
        shape of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change
        the shape of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # make sure the data is locally available (downloading it if allowed)
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)

    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.

    Parameters
    ----------
    index_file_path : str
        Path to the metadata file (e.g. pairsDevTrain.txt) listing the pairs.
    data_folder_path : str
        Folder containing one sub-folder of jpeg files per person.
    slice_ : optional
        2D slice (height, width) used to crop each decoded image.
    color : boolean, optional
        If True keep the 3 RGB channels instead of a single gray channel.
    resize : float, optional
        Ratio used to resize each face picture.

    Returns
    -------
    pairs : numpy array of shape (n_pairs, 2, h, w[, 3])
    target : numpy array of shape (n_pairs,)
        1 for "same person" pairs, 0 for "different persons" pairs.
    target_names : numpy array with the two class names.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    # lines with fewer than 3 fields are headers / counts, not pair specs
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # FIX: use the builtin ``int``: ``np.int`` was merely a deprecated alias
    # for it and has been removed in NumPy >= 1.24.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # "name idx1 idx2": two pictures of the same person
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # "name1 idx1 name2 idx2": pictures of two different persons
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # name is bytes under Python 3: decode it before joining
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)

    # reshape the flat (2 * n_pairs, ...) image stack into (n_pairs, 2, ...)
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
            "be removed in 0.19."
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Deprecated alias for fetch_lfw_people(download_if_missing=False).

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    # Forward everything to the fetcher, defaulting to "do not download".
    kwargs['download_if_missing'] = download_if_missing
    return fetch_lfw_people(**kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person. In the official `README.txt`_ this task is described
    as the "Restricted" task; the "Unrestricted" variant is unsupported.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 74.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
        Each row corresponds to 2 ravel'd face images of original size
        62 x 47 pixels.

    pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
        ``subset``. Each row has 2 face images corresponding to same or
        different person from the dataset containing 5749 people.

    target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated to each pair of images: same person or
        different persons.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)

    # Memoize the expensive jpeg decoding step; the cached arrays are
    # returned memmapped from disk for optimal memory usage.
    memory = Memory(cachedir=lfw_home, compress=6, verbose=0)
    cached_loader = memory.cache(_fetch_lfw_pairs)

    # Map the requested subset onto the corresponding metadata file.
    index_files = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in index_files:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(index_files.keys()))))
    index_file_path = join(lfw_home, index_files[subset])

    # Load and memoize the pairs as numpy arrays.
    pairs, target, target_names = cached_loader(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)

    # Pack the results as a Bunch; `data` is the flattened view of `pairs`.
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
matk86/pymatgen | pymatgen/io/abinit/tasks.py | 2 | 172712 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""This module provides functions and classes related to Task objects."""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import time
import datetime
import shutil
import collections
import abc
import copy
import ruamel.yaml as yaml
import six
import numpy as np
from pprint import pprint
from itertools import product
from six.moves import map, zip, StringIO
from monty.dev import deprecated
from monty.string import is_string, list_strings
from monty.termcolor import colored, cprint
from monty.collections import AttrDict
from monty.functools import lazy_property, return_none_if_raise
from monty.json import MSONable
from monty.fnmatch import WildCard
from pymatgen.core.units import Memory
from pymatgen.util.serialization import json_pretty_dump, pmg_serialize
from .utils import File, Directory, irdvars_for_ext, abi_splitext, FilepathFixer, Condition, SparseHistogram
from .qadapters import make_qadapter, QueueAdapter, QueueAdapterError
from . import qutils as qu
from .db import DBConnector
from .nodes import Status, Node, NodeError, NodeResults, NodeCorrections, FileNode, check_spectator
from . import abiinspect
from . import events
# Module metadata.
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"

# Names re-exported by ``from pymatgen.io.abinit.tasks import *``.
__all__ = [
    "TaskManager",
    "AbinitBuild",
    "ParalHintsParser",
    "ParalHints",
    "AbinitTask",
    "ScfTask",
    "NscfTask",
    "RelaxTask",
    "DdkTask",
    "PhononTask",
    "SigmaTask",
    "OpticTask",
    "AnaddbTask",
]

import logging

# Module-level logger following the standard ``logging`` convention.
logger = logging.getLogger(__name__)

# Tools and helper functions.
def straceback():
    """Return the current exception traceback formatted as a string."""
    from traceback import format_exc
    return format_exc()
def lennone(PropperOrNone):
    """Return ``len(PropperOrNone)``, treating ``None`` as an empty container."""
    return 0 if PropperOrNone is None else len(PropperOrNone)
def nmltostring(nml):
    """Convert a dictionary representing a Fortran namelist into a string.

    Args:
        nml: dict mapping namelist names to dicts of variable/value pairs.
            List/tuple values are emitted as comma-separated sequences,
            strings are single-quoted, everything else is ``str()``-ified.

    Returns:
        String with one ``&name ... /`` block per namelist.

    Raises:
        ValueError: if nml is not a dict.
    """
    if not isinstance(nml, dict):
        raise ValueError("nml should be a dict !")

    blocks = []
    for key, group in nml.items():
        namelist = ["&" + key]
        for k, v in group.items():
            # Single isinstance call instead of two chained with `or`.
            if isinstance(v, (list, tuple)):
                namelist.append(k + " = " + ",".join(map(str, v)) + ",")
            elif is_string(v):
                namelist.append(k + " = '" + str(v) + "',")
            else:
                namelist.append(k + " = " + str(v) + ",")
        namelist.append("/")
        blocks.append("\n".join(namelist) + "\n")

    # Join once instead of accumulating with += (avoids quadratic behavior).
    return "".join(blocks)
class TaskResults(NodeResults):
    """Dictionary-like object storing the results of an :class:`AbinitTask`."""

    # Extend the parent JSON schema with task-specific entries.
    # NOTE(review): assigning to "properties" replaces the inherited
    # properties dict entirely instead of updating it -- confirm this is
    # the intended behavior.
    JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
    JSON_SCHEMA["properties"] = {
        "executable": {"type": "string", "required": True},
    }

    @classmethod
    def from_node(cls, task):
        """Initialize an instance from an :class:`AbinitTask` instance."""
        new = super(TaskResults, cls).from_node(task)

        # Record the executable and the pseudopotentials used by the task.
        new.update(
            executable=task.executable,
            #executable_version:
            #task_events=
            pseudos=[p.as_dict() for p in task.input.pseudos],
            #input=task.input
        )

        # Attach the main input/output files (stored as text, "t").
        new.register_gridfs_files(
            run_abi=(task.input_file.path, "t"),
            run_abo=(task.output_file.path, "t"),
        )

        return new
class ParalConf(AttrDict):
    """
    This object store the parameters associated to one
    of the possible parallel configurations reported by ABINIT.
    Essentially it is a dictionary whose values can also be accessed
    as attributes. It also provides default values for selected keys
    that might not be present in the ABINIT dictionary.

    Example:

        --- !Autoparal
        info:
            version: 1
            autoparal: 1
            max_ncpus: 108
        configurations:
        - tot_ncpus: 2        # Total number of CPUs
          mpi_ncpus: 2        # Number of MPI processes.
          omp_ncpus: 1        # Number of OMP threads (1 if not present)
          mem_per_cpu: 10     # Estimated memory requirement per MPI processor in Megabytes.
          efficiency: 0.4     # 1.0 corresponds to an "expected" optimal efficiency (strong scaling).
          vars: {             # Dictionary with the variables that should be added to the input.
              varname1: varvalue1
              varname2: varvalue2
              }
        -
        ...

    For paral_kgb we have:
    nproc     npkpt    npspinor    npband    npfft    bandpp    weight
      108       1         1          12        9        2        0.25
      108       1         1         108        1        2       27.00
       96       1         1          24        4        1        1.50
       84       1         1          12        7        2        0.25
    """
    # Default values used for keys that ABINIT may omit from the YAML doc.
    _DEFAULTS = {
        "omp_ncpus": 1,
        "mem_per_cpu": 0.0,
        "vars": {}
    }

    def __init__(self, *args, **kwargs):
        """Build the configuration from dict-like arguments, filling in defaults."""
        super(ParalConf, self).__init__(*args, **kwargs)

        # Add default values if not already in self.
        for k, v in self._DEFAULTS.items():
            if k not in self:
                self[k] = v

    def __str__(self):
        """Pretty-printed representation of the underlying dictionary."""
        stream = StringIO()
        pprint(self, stream=stream)
        return stream.getvalue()

    # TODO: Change name in abinit
    # Remove tot_ncpus from Abinit
    @property
    def num_cores(self):
        """Total number of cores: MPI processes times OpenMP threads."""
        return self.mpi_procs * self.omp_threads

    @property
    def mem_per_proc(self):
        """Estimated memory per MPI process (alias for ``mem_per_cpu``)."""
        return self.mem_per_cpu

    @property
    def mpi_procs(self):
        """Number of MPI processes (alias for ``mpi_ncpus``)."""
        return self.mpi_ncpus

    @property
    def omp_threads(self):
        """Number of OpenMP threads (alias for ``omp_ncpus``)."""
        return self.omp_ncpus

    @property
    def speedup(self):
        """Estimated speedup reported by ABINIT."""
        return self.efficiency * self.num_cores

    @property
    def tot_mem(self):
        """Estimated total memory in Mbs (computed from mem_per_proc)"""
        return self.mem_per_proc * self.mpi_procs
class ParalHintsError(Exception):
    """Base error class for `ParalHints` and the associated parser."""
class ParalHintsParser(object):
    """Parses the YAML ``!Autoparal`` section produced by ABINIT."""

    Error = ParalHintsError

    def __init__(self):
        # Used to push error strings (bounded deque so it cannot grow forever).
        self._errors = collections.deque(maxlen=100)

    def add_error(self, errmsg):
        """Record an error message for later inspection."""
        self._errors.append(errmsg)

    def parse(self, filename):
        """
        Read the `AutoParal` section (YAML format) from filename.
        Assumes the file contains only one section.

        Raises:
            ParalHintsError: if the YAML document cannot be parsed.
        """
        with abiinspect.YamlTokenizer(filename) as r:
            doc = r.next_doc_with_tag("!Autoparal")
            try:
                d = yaml.safe_load(doc.text_notag)
                return ParalHints(info=d["info"], confs=d["configurations"])
            # FIX: catch Exception instead of a bare ``except:`` so that
            # KeyboardInterrupt/SystemExit propagate instead of being
            # converted into a ParalHintsError.
            except Exception:
                import traceback
                sexc = traceback.format_exc()
                err_msg = "Wrong YAML doc:\n%s\n\nException:\n%s" % (doc.text, sexc)
                self.add_error(err_msg)
                logger.critical(err_msg)
                raise self.Error(err_msg)
class ParalHints(collections.Iterable):
    """
    Iterable with the hints for the parallel execution reported by ABINIT.
    """
    # NOTE(review): ``collections.Iterable`` is deprecated in Python 3 and
    # removed in 3.10 (``collections.abc.Iterable``); kept as-is here for
    # Python 2 compatibility with the rest of this module.
    Error = ParalHintsError

    def __init__(self, info, confs):
        """
        Args:
            info: metadata dict from the ``!Autoparal`` YAML section.
            confs: list of dicts, each one wrapped in a :class:`ParalConf`.
        """
        self.info = info
        self._confs = [ParalConf(**d) for d in confs]

    @classmethod
    def from_mpi_omp_lists(cls, mpi_procs, omp_threads):
        """
        Build a list of Parallel configurations from two lists
        containing the number of MPI processes and the number of OpenMP threads
        i.e. product(mpi_procs, omp_threads).
        The configuration have parallel efficiency set to 1.0 and no input variables.
        Mainly used for preparing benchmarks.
        """
        info = {}
        # FIX: the OpenMP count must come from the second element of the
        # product (``t``); the old code passed ``omp_ncpus=p``, i.e. the MPI
        # count, leaving ``t`` unused and contradicting the docstring.
        confs = [ParalConf(mpi_ncpus=p, omp_ncpus=t, efficiency=1.0)
                 for p, t in product(mpi_procs, omp_threads)]

        return cls(info, confs)

    def __getitem__(self, key):
        """Index/slice access to the underlying list of configurations."""
        return self._confs[key]

    def __iter__(self):
        return self._confs.__iter__()

    def __len__(self):
        return self._confs.__len__()

    def __repr__(self):
        return "\n".join(str(conf) for conf in self)

    def __str__(self):
        return repr(self)

    @lazy_property
    def max_cores(self):
        """Maximum number of cores."""
        return max(c.mpi_procs * c.omp_threads for c in self)

    @lazy_property
    def max_mem_per_proc(self):
        """Maximum memory per MPI process."""
        return max(c.mem_per_proc for c in self)

    @lazy_property
    def max_speedup(self):
        """Maximum speedup."""
        return max(c.speedup for c in self)

    @lazy_property
    def max_efficiency(self):
        """Maximum parallel efficiency."""
        return max(c.efficiency for c in self)

    @pmg_serialize
    def as_dict(self, **kwargs):
        """JSON-serializable dict representation."""
        return {"info": self.info, "confs": self._confs}

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its dict representation."""
        return cls(info=d["info"], confs=d["confs"])

    def copy(self):
        """Shallow copy of self."""
        return copy.copy(self)

    def select_with_condition(self, condition, key=None):
        """
        Remove all the configurations that do not satisfy the given condition.

        Args:
            condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax
            key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
                if we have to filter the configurations depending on the values in vars
        """
        condition = Condition.as_condition(condition)
        new_confs = []

        for conf in self:
            # Select the object on which condition is applied
            obj = conf if key is None else AttrDict(conf[key])
            add_it = condition(obj=obj)
            #if key is "vars": print("conf", conf, "added:", add_it)
            if add_it: new_confs.append(conf)

        self._confs = new_confs

    def sort_by_efficiency(self, reverse=True):
        """Sort the configurations in place. items with highest efficiency come first"""
        self._confs.sort(key=lambda c: c.efficiency, reverse=reverse)
        return self

    def sort_by_speedup(self, reverse=True):
        """Sort the configurations in place. items with highest speedup come first"""
        self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
        return self

    def sort_by_mem_per_proc(self, reverse=False):
        """Sort the configurations in place. items with lowest memory per proc come first."""
        # Avoid sorting if mem_per_cpu is not available.
        if any(c.mem_per_proc > 0.0 for c in self):
            self._confs.sort(key=lambda c: c.mem_per_proc, reverse=reverse)
        return self

    def multidimensional_optimization(self, priorities=("speedup", "efficiency")):
        """
        Keep the best configurations according to the (ordered) list of priorities
        by histogramming on each property in turn and retaining the best bin.
        """
        # Mapping property --> options passed to sparse_histogram
        opts = dict(speedup=dict(step=1.0), efficiency=dict(step=0.1), mem_per_proc=dict(memory=1024))
        #opts = dict(zip(priorities, bin_widths))

        opt_confs = self._confs
        for priority in priorities:
            histogram = SparseHistogram(opt_confs, key=lambda c: getattr(c, priority), **opts[priority])
            # For memory we want the lowest bin, otherwise the highest one.
            pos = 0 if priority == "mem_per_proc" else -1
            opt_confs = histogram.values[pos]

        #histogram.plot(show=True, savefig="hello.pdf")
        return self.__class__(info=self.info, confs=opt_confs)

    #def histogram_efficiency(self, step=0.1):
    #    """Returns a :class:`SparseHistogram` with configuration grouped by parallel efficiency."""
    #    return SparseHistogram(self._confs, key=lambda c: c.efficiency, step=step)

    #def histogram_speedup(self, step=1.0):
    #    """Returns a :class:`SparseHistogram` with configuration grouped by parallel speedup."""
    #    return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)

    #def histogram_memory(self, step=1024):
    #    """Returns a :class:`SparseHistogram` with configuration grouped by memory."""
    #    return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)

    #def filter(self, qadapter):
    #    """Return a new list of configurations that can be executed on the `QueueAdapter` qadapter."""
    #    new_confs = [pconf for pconf in self if qadapter.can_run_pconf(pconf)]
    #    return self.__class__(info=self.info, confs=new_confs)

    def get_ordered_with_policy(self, policy, max_ncpus):
        """
        Sort and return a new list of configurations ordered according to the :class:`TaskPolicy` policy.
        """
        # Build new list since we are gonna change the object in place.
        hints = self.__class__(self.info, confs=[c for c in self if c.num_cores <= max_ncpus])

        # First select the configurations satisfying the condition specified by the user (if any)
        bkp_hints = hints.copy()
        if policy.condition:
            logger.info("Applying condition %s" % str(policy.condition))
            hints.select_with_condition(policy.condition)

            # Undo change if no configuration fullfills the requirements.
            if not hints:
                hints = bkp_hints
                logger.warning("Empty list of configurations after policy.condition")

        # Now filter the configurations depending on the values in vars
        bkp_hints = hints.copy()
        if policy.vars_condition:
            logger.info("Applying vars_condition %s" % str(policy.vars_condition))
            hints.select_with_condition(policy.vars_condition, key="vars")

            # Undo change if no configuration fullfills the requirements.
            if not hints:
                hints = bkp_hints
                logger.warning("Empty list of configurations after policy.vars_condition")

        if len(policy.autoparal_priorities) == 1:
            # Example: hints.sort_by_speedup()
            if policy.autoparal_priorities[0] in ['efficiency', 'speedup', 'mem_per_proc']:
                getattr(hints, "sort_by_" + policy.autoparal_priorities[0])()
            elif isinstance(policy.autoparal_priorities[0], collections.Mapping):
                if policy.autoparal_priorities[0]['meta_priority'] == 'highest_speedup_minimum_efficiency_cutoff':
                    min_efficiency = policy.autoparal_priorities[0].get('minimum_efficiency', 1.0)
                    hints.select_with_condition({'efficiency': {'$gte': min_efficiency}})
                    hints.sort_by_speedup()
        else:
            hints = hints.multidimensional_optimization(priorities=policy.autoparal_priorities)
            if len(hints) == 0: raise ValueError("len(hints) == 0")

        #TODO: make sure that num_cores == 1 is never selected when we have more than one configuration
        #if len(hints) > 1:
        #    hints.select_with_condition(dict(num_cores={"$eq": 1)))

        # Return final (orderded ) list of configurations (best first).
        return hints
class TaskPolicy(object):
    """
    Parameters used by the :class:`TaskManager` to create the submission
    script and/or to modify the ABINIT variables governing the parallel
    execution. A `TaskPolicy` stores the options and the conditions used
    to select the optimal configuration for the parallel run.
    """

    @classmethod
    def as_policy(cls, obj):
        """
        Convert ``obj`` into a :class:`TaskPolicy` instance.

        Accepts: None (default policy), TaskPolicy, dict-like object.
        """
        # Guard clauses instead of nested if/else.
        if obj is None:
            return TaskPolicy()
        if isinstance(obj, cls):
            return obj
        if isinstance(obj, collections.Mapping):
            return cls(**obj)
        raise TypeError("Don't know how to convert type %s to %s" % (type(obj), cls))

    @classmethod
    def autodoc(cls):
        """Return a string documenting the options supported by the policy section."""
        return """
    autoparal: # (integer). 0 to disable the autoparal feature (DEFAULT: 1 i.e. autoparal is on)
    condition: # condition used to filter the autoparal configurations (Mongodb-like syntax).
               # DEFAULT: empty i.e. ignored.
    vars_condition: # Condition used to filter the list of ABINIT variables reported by autoparal
                    # (Mongodb-like syntax). DEFAULT: empty i.e. ignored.
    frozen_timeout: # A job is considered frozen and its status is set to ERROR if no change to
                    # the output file has been done for `frozen_timeout` seconds. Accepts int with seconds or
                    # string in slurm form i.e. days-hours:minutes:seconds. DEFAULT: 1 hour.
    precedence: # Under development.
    autoparal_priorities: # Under development.
"""

    def __init__(self, **kwargs):
        """
        See autodoc
        """
        # Pop each recognized option; whatever remains in kwargs is invalid.
        self.autoparal = kwargs.pop("autoparal", 1)
        self.precedence = kwargs.pop("precedence", "autoparal_conf")
        self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup"])
        #self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup", "efficiecy", "memory"]
        self.condition = Condition(kwargs.pop("condition", {}))
        self.vars_condition = Condition(kwargs.pop("vars_condition", {}))
        # TODO frozen_timeout could be computed as a fraction of the timelimit of the qadapter!
        self.frozen_timeout = qu.slurm_parse_timestr(kwargs.pop("frozen_timeout", "0-1:00:00"))

        if kwargs:
            raise ValueError("Found invalid keywords in policy section:\n %s" % str(kwargs.keys()))

        # Consistency check.
        if self.precedence not in ("qadapter", "autoparal_conf"):
            raise ValueError("Wrong value for policy.precedence, should be qadapter or autoparal_conf")

    def __str__(self):
        """One ``name: value`` line per public attribute."""
        public = ((k, v) for k, v in self.__dict__.items() if not k.startswith("_"))
        return "\n".join("%s: %s" % (k, v) for k, v in public)
class ManagerIncreaseError(Exception):
    """
    Exception raised by the manager if the increase request failed
    """


class FixQueueCriticalError(Exception):
    """
    Error raised when an error could not be fixed at the task level
    """


# Global variable used to store the task manager returned by `from_user_config`.
# Acts as a process-wide cache so the YAML file is parsed only once.
_USER_CONFIG_TASKMANAGER = None
class TaskManager(MSONable):
    """
    A `TaskManager` is responsible for the generation of the job script and the submission
    of the task, as well as for the specification of the parameters passed to the resource manager
    (e.g. Slurm, PBS ...) and/or the run-time specification of the ABINIT variables governing the parallel execution.
    A `TaskManager` delegates the generation of the submission script and the submission of the task to the :class:`QueueAdapter`.
    A `TaskManager` has a :class:`TaskPolicy` that governs the specification of the parameters for the parallel executions.
    Ideally, the TaskManager should be the **main entry point** used by the task to deal with job submission/optimization
    """
    # Name of the YAML configuration file searched by `from_user_config`.
    YAML_FILE = "manager.yml"

    # Default directory holding the user configuration: ~/.abinit/abipy
    USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")

    # Keys accepted in the configuration dictionary/file.
    ENTRIES = {"policy", "qadapters", "db_connector", "batch_adapter"}
    @classmethod
    def autodoc(cls):
        """Return a string documenting the entries supported in the manager file (YAML format)."""
        from .db import DBConnector
        s = """
# TaskManager configuration file (YAML Format)

policy:
    # Dictionary with options used to control the execution of the tasks.

qadapters:
    # List of qadapters objects (mandatory)
    -  # qadapter_1
    -  # qadapter_2

db_connector:
    # Connection to MongoDB database (optional)

batch_adapter:
    # Adapter used to submit flows with batch script. (optional)

##########################################
# Individual entries are documented below:
##########################################

"""
        s += "policy: " + TaskPolicy.autodoc() + "\n"
        s += "qadapter: " + QueueAdapter.autodoc() + "\n"
        #s += "db_connector: " + DBConnector.autodoc()
        return s

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`TaskManager` from the YAML file 'manager.yaml'.
        Search first in the working directory and then in the abipy configuration directory.

        Raises:
            RuntimeError if file is not found.
        """
        global _USER_CONFIG_TASKMANAGER
        # Return the cached instance if the file has already been parsed.
        if _USER_CONFIG_TASKMANAGER is not None:
            return _USER_CONFIG_TASKMANAGER

        # Try in the current directory then in user configuration directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)
        if not os.path.exists(path):
            path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if not os.path.exists(path):
            raise RuntimeError(colored(
                "\nCannot locate %s neither in current directory nor in %s\n"
                "!!! PLEASE READ THIS: !!!\n"
                "To use abipy to run jobs this file must be present\n"
                "It provides a description of the cluster/computer you are running on\n"
                "Examples are provided in abipy/data/managers." % (cls.YAML_FILE, path), color="red"))

        # Cache the instance for subsequent calls.
        _USER_CONFIG_TASKMANAGER = cls.from_file(path)
        return _USER_CONFIG_TASKMANAGER
@classmethod
def from_file(cls, filename):
"""Read the configuration parameters from the Yaml file filename."""
try:
with open(filename, "r") as fh:
return cls.from_dict(yaml.safe_load(fh))
except Exception as exc:
print("Error while reading TaskManager parameters from %s\n" % filename)
raise
@classmethod
def from_string(cls, s):
"""Create an instance from string s containing a YAML dictionary."""
return cls.from_dict(yaml.safe_load(s))
@classmethod
def as_manager(cls, obj):
"""
Convert obj into TaskManager instance. Accepts string, filepath, dictionary, `TaskManager` object.
If obj is None, the manager is initialized from the user config file.
"""
if isinstance(obj, cls): return obj
if obj is None: return cls.from_user_config()
if is_string(obj):
if os.path.exists(obj):
return cls.from_file(obj)
else:
return cls.from_string(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s to TaskManager" % type(obj))
    @classmethod
    def from_dict(cls, d):
        """Create an instance from a dictionary. Unknown keys are silently ignored."""
        return cls(**{k: v for k, v in d.items() if k in cls.ENTRIES})

    @pmg_serialize
    def as_dict(self):
        """JSON-serializable dict representation (the original init kwargs)."""
        return self._kwargs
def __init__(self, **kwargs):
"""
Args:
policy:None
qadapters:List of qadapters in YAML format
db_connector:Dictionary with data used to connect to the database (optional)
"""
# Keep a copy of kwargs
self._kwargs = copy.deepcopy(kwargs)
self.policy = TaskPolicy.as_policy(kwargs.pop("policy", None))
# Initialize database connector (if specified)
self.db_connector = DBConnector(**kwargs.pop("db_connector", {}))
# Build list of QAdapters. Neglect entry if priority == 0 or `enabled: no"
qads = []
for d in kwargs.pop("qadapters"):
if d.get("enabled", False): continue
qad = make_qadapter(**d)
if qad.priority > 0:
qads.append(qad)
elif qad.priority < 0:
raise ValueError("qadapter cannot have negative priority:\n %s" % qad)
if not qads:
raise ValueError("Received emtpy list of qadapters")
#if len(qads) != 1:
# raise NotImplementedError("For the time being multiple qadapters are not supported! Please use one adapter")
# Order qdapters according to priority.
qads = sorted(qads, key=lambda q: q.priority)
priorities = [q.priority for q in qads]
if len(priorities) != len(set(priorities)):
raise ValueError("Two or more qadapters have same priority. This is not allowed. Check taskmanager.yml")
self._qads, self._qadpos = tuple(qads), 0
# Initialize the qadapter for batch script submission.
d = kwargs.pop("batch_adapter", None)
self.batch_adapter = None
if d: self.batch_adapter = make_qadapter(**d)
#print("batch_adapter", self.batch_adapter)
if kwargs:
raise ValueError("Found invalid keywords in the taskmanager file:\n %s" % str(list(kwargs.keys())))
    @lazy_property
    def abinit_build(self):
        """:class:`AbinitBuild` object with Abinit version and options used to build the code"""
        return AbinitBuild(manager=self)

    def to_shell_manager(self, mpi_procs=1):
        """
        Returns a new `TaskManager` with the same parameters as self but replace the :class:`QueueAdapter`
        with a :class:`ShellAdapter` with mpi_procs so that we can submit the job without passing through the queue.
        """
        # Work on a deep copy of the original kwargs and disable autoparal.
        my_kwargs = copy.deepcopy(self._kwargs)
        my_kwargs["policy"] = TaskPolicy(autoparal=0)

        # On BlueGene we need at least two qadapters.
        # One for running jobs on the computing nodes and another one
        # for running small jobs on the fronted. These two qadapters
        # will have different enviroments and different executables.
        # If None of the q-adapters has qtype==shell, we change qtype to shell
        # and we return a new Manager for sequential jobs with the same parameters as self.
        # If the list contains a qadapter with qtype == shell, we ignore the remaining qadapters
        # when we build the new Manager.
        has_shell_qad = False
        for d in my_kwargs["qadapters"]:
            if d["queue"]["qtype"] == "shell": has_shell_qad = True
        if has_shell_qad:
            my_kwargs["qadapters"] = [d for d in my_kwargs["qadapters"] if d["queue"]["qtype"] == "shell"]

        # Force every remaining adapter to shell mode with exactly mpi_procs cores.
        for d in my_kwargs["qadapters"]:
            d["queue"]["qtype"] = "shell"
            d["limits"]["min_cores"] = mpi_procs
            d["limits"]["max_cores"] = mpi_procs

            # If shell_runner is specified, replace mpi_runner with shell_runner
            # in the script used to run jobs on the frontend.
            # On same machines based on Slurm, indeed, mpirun/mpiexec is not available
            # and jobs should be executed with `srun -n4 exec` when running on the computing nodes
            # or with `exec` when running in sequential on the frontend.
            if "job" in d and "shell_runner" in d["job"]:
                shell_runner = d["job"]["shell_runner"]
                #print("shell_runner:", shell_runner, type(shell_runner))
                # Treat None (and the literal string "None") as "no runner".
                if not shell_runner or shell_runner == "None": shell_runner = ""
                d["job"]["mpi_runner"] = shell_runner
                #print("shell_runner:", shell_runner)

        #print(my_kwargs)
        new = self.__class__(**my_kwargs)
        new.set_mpi_procs(mpi_procs)

        return new
def new_with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Return a new `TaskManager` in which autoparal has been disabled.
The jobs will be executed with `mpi_procs` MPI processes and `omp_threads` OpenMP threads.
Useful for generating input files for benchmarks.
"""
new = self.deepcopy()
new.policy.autoparal = 0
new.set_mpi_procs(mpi_procs)
new.set_omp_threads(omp_threads)
return new
    @property
    def has_queue(self):
        """True if we are submitting jobs via a queue manager."""
        return self.qadapter.QTYPE.lower() != "shell"

    @property
    def qads(self):
        """List of :class:`QueueAdapter` objects sorted according to priorities (highest comes first)"""
        # NOTE(review): __init__ sorts the adapters by *ascending* priority,
        # so the "highest comes first" claim above looks inverted -- confirm
        # against the call sites before relying on the ordering.
        return self._qads

    @property
    def qadapter(self):
        """The qadapter currently selected (index ``_qadpos``) used to submit jobs."""
        return self._qads[self._qadpos]
def select_qadapter(self, pconfs):
"""
Given a list of parallel configurations, pconfs, this method select an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
:class:`ParallelConf` object with the `optimal` configuration.
"""
# Order the list of configurations according to policy.
policy, max_ncpus = self.policy, self.max_cores
pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)
if policy.precedence == "qadapter":
# Try to run on the qadapter with the highest priority.
for qadpos, qad in enumerate(self.qads):
possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]
if qad.allocation == "nodes":
#if qad.allocation in ["nodes", "force_nodes"]:
# Select the configuration divisible by nodes if possible.
for pconf in possible_pconfs:
if pconf.num_cores % qad.hw.cores_per_node == 0:
return self._use_qadpos_pconf(qadpos, pconf)
# Here we select the first one.
if possible_pconfs:
return self._use_qadpos_pconf(qadpos, possible_pconfs[0])
elif policy.precedence == "autoparal_conf":
# Try to run on the first pconf irrespectively of the priority of the qadapter.
for pconf in pconfs:
for qadpos, qad in enumerate(self.qads):
if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
continue # Ignore it. not very clean
if qad.can_run_pconf(pconf):
return self._use_qadpos_pconf(qadpos, pconf)
else:
raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)
# No qadapter could be found
raise RuntimeError("Cannot find qadapter for this run!")
    def _use_qadpos_pconf(self, qadpos, pconf):
        """
        This function is called when we have accepted the :class:`ParalConf` pconf.
        Activates the qadapter at position ``qadpos`` and copies the MPI/OMP settings
        of ``pconf`` into it. Returns pconf.
        """
        self._qadpos = qadpos
        # Change the number of MPI/OMP cores.
        self.set_mpi_procs(pconf.mpi_procs)
        if self.has_omp: self.set_omp_threads(pconf.omp_threads)
        # Set memory per proc.
        #FIXME: Fixer may have changed the memory per proc and should not be resetted by ParalConf
        #self.set_mem_per_proc(pconf.mem_per_proc)
        return pconf
    def __str__(self):
        """String representation: one section per registered qadapter, the active index, and DB info if any."""
        lines = []
        app = lines.append
        #app("[Task policy]\n%s" % str(self.policy))
        for i, qad in enumerate(self.qads):
            app("[Qadapter %d]\n%s" % (i, str(qad)))
        app("Qadapter selected: %d" % self._qadpos)
        if self.has_db:
            app("[MongoDB database]:")
            app(str(self.db_connector))
        return "\n".join(lines)
    @property
    def has_db(self):
        """True if we are using a MongoDB database (i.e. ``db_connector`` is truthy)."""
        return bool(self.db_connector)
    @property
    def has_omp(self):
        """True if we are using OpenMP parallelization (delegates to the active qadapter)."""
        return self.qadapter.has_omp
    @property
    def num_cores(self):
        """Total number of CPUs used to run the task (delegates to the active qadapter)."""
        return self.qadapter.num_cores
    @property
    def mpi_procs(self):
        """Number of MPI processes (delegates to the active qadapter)."""
        return self.qadapter.mpi_procs
    @property
    def mem_per_proc(self):
        """Memory per MPI process (delegates to the active qadapter)."""
        return self.qadapter.mem_per_proc
    @property
    def omp_threads(self):
        """Number of OpenMP threads (delegates to the active qadapter)."""
        return self.qadapter.omp_threads
    def deepcopy(self):
        """Return a deep copy of self."""
        return copy.deepcopy(self)
    def set_mpi_procs(self, mpi_procs):
        """Set the number of MPI processes to use (on the active qadapter)."""
        self.qadapter.set_mpi_procs(mpi_procs)
    def set_omp_threads(self, omp_threads):
        """Set the number of OpenMP threads to use (on the active qadapter)."""
        self.qadapter.set_omp_threads(omp_threads)
    def set_mem_per_proc(self, mem_mb):
        """Set the memory (in Megabytes) per MPI process (on the active qadapter)."""
        self.qadapter.set_mem_per_proc(mem_mb)
    @property
    def max_cores(self):
        """
        Maximum number of cores that can be used (max of ``hint_cores`` over all qadapters).
        This value is mainly used in the autoparal part to get the list of possible configurations.
        """
        return max(q.hint_cores for q in self.qads)
    def get_njobs_in_queue(self, username=None):
        """
        Returns the number of jobs in the queue,
        returns None when the number of jobs cannot be determined.
        Args:
            username: (str) the username of the jobs to count (default is to autodetect)
        """
        return self.qadapter.get_njobs_in_queue(username=username)
    def cancel(self, job_id):
        """Cancel the job with the given queue id. Returns the exit status of the cancel command."""
        return self.qadapter.cancel(job_id)
    def write_jobfile(self, task, **kwargs):
        """
        Write the submission script for ``task``. Return the path of the script.
        ================  ============================================
        kwargs            Meaning
        ================  ============================================
        exec_args         List of arguments passed to task.executable.
                          Default: no arguments.
        ================  ============================================
        """
        script = self.qadapter.get_script_str(
            job_name=task.name,
            launch_dir=task.workdir,
            executable=task.executable,
            qout_path=task.qout_file.path,
            qerr_path=task.qerr_file.path,
            stdin=task.files_file.path,
            stdout=task.log_file.path,
            stderr=task.stderr_file.path,
            exec_args=kwargs.pop("exec_args", []),
        )
        # Write the script.
        with open(task.job_file.path, "w") as fh:
            fh.write(script)
        # rwxr----- : owner can execute the script, group can read it.
        task.job_file.chmod(0o740)
        return task.job_file.path
    def launch(self, task, **kwargs):
        """
        Build the input files and submit the task via the :class:`Qadapter`
        Args:
            task: :class:`TaskObject`
        Returns:
            Process object.
        Raises:
            ValueError: if the task is locked.
        """
        if task.status == task.S_LOCKED:
            raise ValueError("You shall not submit a locked task!")
        # Build the task
        task.build()
        # Pass information on the time limit to Abinit (we always assume ndtset == 1)
        #if False and isinstance(task, AbinitTask):
        if isinstance(task, AbinitTask):
            args = kwargs.get("exec_args", [])
            if args is None: args = []
            # Copy so the caller's list is not mutated.
            args = args[:]
            args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
            kwargs["exec_args"] = args
            logger.info("Will pass timelimit option to abinit %s:" % args)
        # Write the submission script
        script_file = self.write_jobfile(task, **kwargs)
        # Submit the task and save the queue id.
        try:
            qjob, process = self.qadapter.submit_to_queue(script_file)
            task.set_status(task.S_SUB, msg='Submitted to queue')
            task.set_qjob(qjob)
            return process
        except self.qadapter.MaxNumLaunchesError as exc:
            # TODO: Here we should try to switch to another qadapter
            # 1) Find a new parallel configuration in those stored in task.pconfs
            # 2) Change the input file.
            # 3) Regenerate the submission script
            # 4) Relaunch
            task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
            raise
    def get_collection(self, **kwargs):
        """Return the MongoDB collection used to store the results."""
        return self.db_connector.get_collection(**kwargs)
    def increase_mem(self):
        """Ask the active qadapter for more memory per process; raise ManagerIncreaseError on failure."""
        # OLD
        # with GW calculations in mind with GW mem = 10,
        # the response fuction is in memory and not distributed
        # we need to increase memory if jobs fail ...
        # return self.qadapter.more_mem_per_proc()
        try:
            self.qadapter.more_mem_per_proc()
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to increase mem')
    def increase_ncpus(self):
        """
        Increase the number of cpus: first ask the current qadapter; if that one raises a QueueAdapterError
        switch to the next qadapter. If all fail raise a ManagerIncreaseError.
        """
        try:
            self.qadapter.more_cores()
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to increase ncpu')
def increase_resources(self):
try:
self.qadapter.more_cores()
return
except QueueAdapterError:
pass
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase resources')
    def exclude_nodes(self, nodes):
        """Ask the active qadapter to exclude ``nodes``; raise ManagerIncreaseError on failure."""
        try:
            self.qadapter.exclude_nodes(nodes=nodes)
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to exclude nodes')
    def increase_time(self):
        """Ask the active qadapter for more wall time; raise ManagerIncreaseError on failure."""
        try:
            self.qadapter.more_time()
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to increase time')
class AbinitBuild(object):
    """
    This object stores information on the options used to build Abinit
    .. attribute:: info
        String with build information as produced by `abinit -b`
    .. attribute:: version
        Abinit version number e.g 8.0.1 (string)
    .. attribute:: has_netcdf
        True if netcdf is enabled.
    .. attribute:: has_omp
        True if OpenMP is enabled.
    .. attribute:: has_mpi
        True if MPI is enabled.
    .. attribute:: has_mpiio
        True if MPI-IO is supported.
    """
    def __init__(self, workdir=None, manager=None):
        # Build a simple manager to run the job in a shell subprocess
        manager = TaskManager.as_manager(manager).to_shell_manager(mpi_procs=1)
        import tempfile
        workdir = tempfile.mkdtemp() if workdir is None else workdir
        # Generate a shell script to execute `abinit -b`
        stdout = os.path.join(workdir, "run.abo")
        script = manager.qadapter.get_script_str(
            job_name="abinit_b",
            launch_dir=workdir,
            executable="abinit",
            qout_path=os.path.join(workdir, "queue.qout"),
            qerr_path=os.path.join(workdir, "queue.qerr"),
            #stdin=os.path.join(workdir, "run.files"),
            stdout=stdout,
            stderr=os.path.join(workdir, "run.err"),
            exec_args=["-b"],
        )
        # Execute the script.
        script_file = os.path.join(workdir, "job.sh")
        with open(script_file, "wt") as fh:
            fh.write(script)
        qjob, process = manager.qadapter.submit_to_queue(script_file)
        process.wait()
        # To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
        process.stderr.close()
        if process.returncode != 0:
            logger.critical("Error while executing %s" % script_file)
        with open(stdout, "rt") as fh:
            self.info = fh.read()
        # info string has the following format.
        """
        === Build Information ===
        Version : 8.0.1
        Build target : x86_64_darwin15.0.0_gnu5.3
        Build date : 20160122
        === Compiler Suite ===
        C compiler : gnu
        C++ compiler : gnuApple
        Fortran compiler : gnu5.3
        CFLAGS : -g -O2 -mtune=native -march=native
        CXXFLAGS : -g -O2 -mtune=native -march=native
        FCFLAGS : -g -ffree-line-length-none
        FC_LDFLAGS :
        === Optimizations ===
        Debug level : basic
        Optimization level : standard
        Architecture : unknown_unknown
        === Multicore ===
        Parallel build : yes
        Parallel I/O : yes
        openMP support : no
        GPU support : no
        === Connectors / Fallbacks ===
        Connectors on : yes
        Fallbacks on : yes
        DFT flavor : libxc-fallback+atompaw-fallback+wannier90-fallback
        FFT flavor : none
        LINALG flavor : netlib
        MATH flavor : none
        TIMER flavor : abinit
        TRIO flavor : netcdf+etsf_io-fallback
        === Experimental features ===
        Bindings : @enable_bindings@
        Exports : no
        GW double-precision : yes
        === Bazaar branch information ===
        Branch ID : gmatteo@gmac-20160112110440-lf6exhneqim9082h
        Revision : 1226
        Committed : 0
        """
        self.has_netcdf = False
        self.has_omp = False
        self.has_mpi, self.has_mpiio = False, False
        def yesno2bool(line):
            # Map a trailing "yes"/"no" token to True/False.
            ans = line.split()[-1]
            return dict(yes=True, no=False)[ans]
        # Parse info.
        # NOTE(review): self.version is only set when a "Version" line is found;
        # presumably `abinit -b` always emits one — TODO confirm.
        for line in self.info.splitlines():
            if "Version" in line: self.version = line.split()[-1]
            if "TRIO flavor" in line:
                self.has_netcdf = "netcdf" in line
            if "openMP support" in line: self.has_omp = yesno2bool(line)
            if "Parallel build" in line: self.has_mpi = yesno2bool(line)
            if "Parallel I/O" in line: self.has_mpiio = yesno2bool(line)
    def __str__(self):
        """Human-readable summary of the build options."""
        lines = []
        app = lines.append
        app("Abinit Build Information:")
        app("    Abinit version: %s" % self.version)
        app("    MPI: %s, MPI-IO: %s, OpenMP: %s" % (self.has_mpi, self.has_mpiio, self.has_omp))
        app("    Netcdf: %s" % self.has_netcdf)
        return "\n".join(lines)
    def version_ge(self, version_string):
        """True if Abinit version is >= version_string"""
        return self.compare_version(version_string, ">=")
    def compare_version(self, version_string, op):
        """Compare Abinit version to `version_string` with operator `op`"""
        from pkg_resources import parse_version
        from monty.operator import operator_from_str
        op = operator_from_str(op)
        return op(parse_version(self.version), parse_version(version_string))
class FakeProcess(object):
    """
    Stand-in attached to a :class:`Task` instance when the task has not been submitted.
    It mimics a subprocess that is still running so that task.process can be
    polled safely; any blocking interaction raises RuntimeError.
    """

    def poll(self):
        # A "still running" process has no return code yet.
        return None

    def wait(self):
        raise RuntimeError("Cannot wait a FakeProcess")

    def communicate(self, input=None):
        raise RuntimeError("Cannot communicate with a FakeProcess")

    def kill(self):
        raise RuntimeError("Cannot kill a FakeProcess")

    @property
    def returncode(self):
        # Consistent with poll(): no return code while "running".
        return None
class MyTimedelta(datetime.timedelta):
    """A customized version of timedelta whose __str__ method doesn't print microseconds."""

    def __new__(cls, days, seconds, microseconds):
        return datetime.timedelta.__new__(cls, days, seconds, microseconds)

    def __str__(self):
        """Return the default timedelta string with the trailing .microseconds part removed."""
        text = super(MyTimedelta, self).__str__()
        dot = text.find(".")
        return text if dot == -1 else text[:dot]

    @classmethod
    def as_timedelta(cls, delta):
        """Convert delta into a MyTimedelta object (no-op if it already is one)."""
        # Cannot monkey patch the __class__ and must pass through __new__ as the object is immutable.
        if isinstance(delta, cls):
            return delta
        return cls(delta.days, delta.seconds, delta.microseconds)
class TaskDateTimes(object):
    """
    Small object containing useful :class:`datetime.datetime` objects associated to important events.
    .. attributes:
        init: initialization datetime
        submission: submission datetime
        start: Begin of execution.
        end: End of execution.
    """
    def __init__(self):
        self.init = datetime.datetime.now()
        self.submission, self.start, self.end = None, None, None
    def __str__(self):
        """Multi-line report listing only the events that have occurred."""
        lines = []
        app = lines.append
        app("Initialization done on: %s" % self.init)
        if self.submission is not None: app("Submitted on: %s" % self.submission)
        if self.start is not None: app("Started on: %s" % self.start)
        if self.end is not None: app("Completed on: %s" % self.end)
        return "\n".join(lines)
    def reset(self):
        """Reinitialize the counters."""
        # BUG FIX: the previous implementation did `self = self.__class__()`,
        # which only rebinds the local name and never touches the instance.
        # Reinitialize the attributes in place instead.
        self.init = datetime.datetime.now()
        self.submission, self.start, self.end = None, None, None
    def get_runtime(self):
        """:class:`timedelta` with the run-time, None if the Task is not running"""
        if self.start is None: return None
        if self.end is None:
            delta = datetime.datetime.now() - self.start
        else:
            delta = self.end - self.start
        return MyTimedelta.as_timedelta(delta)
    def get_time_inqueue(self):
        """
        :class:`timedelta` with the time spent in the Queue, None if the Task is not running
        .. note:
            This value is always greater than the real value computed by the resource manager
            as we start to count only when check_status sets the `Task` status to S_RUN.
        """
        if self.submission is None: return None
        if self.start is None:
            delta = datetime.datetime.now() - self.submission
        else:
            delta = self.start - self.submission
            # This happens when we read the exact start datetime from the ABINIT log file.
            if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)
        return MyTimedelta.as_timedelta(delta)
class TaskError(NodeError):
    """Base Exception for :class:`Task` methods."""
class TaskRestartError(TaskError):
    """Exception raised while trying to restart the :class:`Task`."""
class Task(six.with_metaclass(abc.ABCMeta, Node)):
    """A Task is a node that performs some kind of calculation."""
    # Use class attributes for TaskErrors so that we don't have to import them.
    Error = TaskError
    RestartError = TaskRestartError
    # List of `AbinitEvent` subclasses that are tested in the check_status method.
    # Subclasses should provide their own list if they need to check the converge status.
    CRITICAL_EVENTS = []
    # Prefixes for Abinit (input, output, temporary) files.
    Prefix = collections.namedtuple("Prefix", "idata odata tdata")
    pj = os.path.join
    prefix = Prefix(pj("indata", "in"), pj("outdata", "out"), pj("tmpdata", "tmp"))
    # Remove the temporary helpers from the class namespace once prefix is built.
    del Prefix, pj
    def __init__(self, input, workdir=None, manager=None, deps=None):
        """
        Args:
            input: :class:`AbinitInput` object.
            workdir: Path to the working directory.
            manager: :class:`TaskManager` object.
            deps: Dictionary specifying the dependency of this node.
                  None means that this Task has no dependency.
        """
        # Init the node
        super(Task, self).__init__()
        self._input = input
        if workdir is not None:
            self.set_workdir(workdir)
        if manager is not None:
            self.set_manager(manager)
        # Handle possible dependencies.
        if deps:
            self.add_deps(deps)
        # Date-time associated to submission, start and end.
        self.datetimes = TaskDateTimes()
        # Count the number of restarts.
        self.num_restarts = 0
        self._qjob = None
        self.queue_errors = []
        self.abi_errors = []
        # two flags that provide, dynamically, information on the scaling behaviour of a task. If any process of
        # fixing finds non-scaling behaviour, they should be switched. If a task type is clearly not scaling they
        # should be switched.
        self.mem_scales = True
        self.load_scales = True
    def __getstate__(self):
        """
        Return state is pickled as the contents for the instance.
        In this case we just remove the process since Subprocess objects cannot be pickled.
        This is the reason why we have to store the returncode in self._returncode instead
        of using self.process.returncode.
        """
        return {k: v for k, v in self.__dict__.items() if k not in ["_process"]}
    #@check_spectator
    def set_workdir(self, workdir, chroot=False):
        """Set the working directory. Cannot be set more than once unless chroot is True"""
        if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
            raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
        self.workdir = os.path.abspath(workdir)
        # Files required for the execution.
        self.input_file = File(os.path.join(self.workdir, "run.abi"))
        self.output_file = File(os.path.join(self.workdir, "run.abo"))
        self.files_file = File(os.path.join(self.workdir, "run.files"))
        self.job_file = File(os.path.join(self.workdir, "job.sh"))
        self.log_file = File(os.path.join(self.workdir, "run.log"))
        self.stderr_file = File(os.path.join(self.workdir, "run.err"))
        self.start_lockfile = File(os.path.join(self.workdir, "__startlock__"))
        # This file is produced by Abinit if nprocs > 1 and MPI_ABORT.
        self.mpiabort_file = File(os.path.join(self.workdir, "__ABI_MPIABORTFILE__"))
        # Directories with input|output|temporary data.
        self.indir = Directory(os.path.join(self.workdir, "indata"))
        self.outdir = Directory(os.path.join(self.workdir, "outdata"))
        self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
        # stderr and output file of the queue manager. Note extensions.
        self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
        self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
    def set_manager(self, manager):
        """Set the :class:`TaskManager` used to launch the Task (a deep copy is stored)."""
        self.manager = manager.deepcopy()
    @property
    def work(self):
        """The :class:`Work` containing this `Task`."""
        return self._work
    def set_work(self, work):
        """Set the :class:`Work` associated to this `Task`. Can only be set once."""
        if not hasattr(self, "_work"):
            self._work = work
        else:
            if self._work != work:
                raise ValueError("self._work != work")
    @property
    def flow(self):
        """The :class:`Flow` containing this `Task` (via its Work)."""
        return self.work.flow
    @lazy_property
    def pos(self):
        """The position of the task in the :class:`Flow` as the tuple (work position, task index)."""
        for i, task in enumerate(self.work):
            if self == task:
                return self.work.pos, i
        raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
    @property
    def pos_str(self):
        """String representation of self.pos in the form ``w<work>_t<task>``."""
        return "w" + str(self.pos[0]) + "_t" + str(self.pos[1])
    @property
    def num_launches(self):
        """
        Number of launches performed. This number includes both possible ABINIT restarts
        as well as possible launches done due to errors encountered with the resource manager
        or the hardware/software."""
        return sum(q.num_launches for q in self.manager.qads)
    @property
    def input(self):
        """The :class:`AbinitInput` object passed at construction time."""
        return self._input
    def get_inpvar(self, varname, default=None):
        """Return the value of the ABINIT variable varname, ``default`` if not present."""
        return self.input.get(varname, default)
    @deprecated(message="_set_inpvars is deprecated. Use set_vars")
    def _set_inpvars(self, *args, **kwargs):
        # Deprecated alias kept for backward compatibility.
        return self.set_vars(*args, **kwargs)
    def set_vars(self, *args, **kwargs):
        """
        Set the values of the ABINIT variables in the input file. Return dict with old values.
        Accepts the same (key, value) forms as ``dict(*args, **kwargs)``.
        """
        kwargs.update(dict(*args))
        # Record the previous values so the caller can undo the change.
        old_values = {vname: self.input.get(vname) for vname in kwargs}
        self.input.set_vars(**kwargs)
        if kwargs or old_values:
            self.history.info("Setting input variables: %s" % str(kwargs))
            self.history.info("Old values: %s" % str(old_values))
        return old_values
    @property
    def initial_structure(self):
        """Initial structure of the task (taken from the input)."""
        return self.input.structure
    def make_input(self, with_header=False):
        """Construct and return the string with the input file of the calculation."""
        s = str(self.input)
        # Optionally prepend the string representation of the task itself.
        if with_header: s = str(self) + "\n" + s
        return s
def ipath_from_ext(self, ext):
"""
Returns the path of the input file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.idata + "_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.odata + "_" + ext)
    @property
    @abc.abstractmethod
    def executable(self):
        """
        Path to the executable associated to the task (internally stored in self._executable).
        Concrete subclasses must implement this property.
        """
    def set_executable(self, executable):
        """Set the executable associated to this task."""
        self._executable = executable
    @property
    def process(self):
        """The subprocess running the task, or a :class:`FakeProcess` stub if not yet started."""
        try:
            return self._process
        except AttributeError:
            # Attach a fake process so that we can poll it.
            return FakeProcess()
    @property
    def is_completed(self):
        """True if the task has been executed (status >= S_DONE)."""
        return self.status >= self.S_DONE
    @property
    def can_run(self):
        """The task can run if its status is < S_SUB and all the other dependencies (if any) are done!"""
        all_ok = all(stat == self.S_OK for stat in self.deps_status)
        # NOTE(review): the explicit S_LOCKED check is kept although S_LOCKED is
        # presumably below S_SUB in the status ordering — confirm before simplifying.
        return self.status < self.S_SUB and self.status != self.S_LOCKED and all_ok
    #@check_spectator
    def cancel(self):
        """Cancel the job via the manager. Returns 1 if job was cancelled, 0 otherwise."""
        # Nothing to do if the job was never queued or is already done.
        if self.queue_id is None: return 0
        if self.status >= self.S_DONE: return 0
        exit_status = self.manager.cancel(self.queue_id)
        if exit_status != 0:
            logger.warning("manager.cancel returned exit_status: %s" % exit_status)
            return 0
        # Remove output files and reset the status.
        self.history.info("Job %s cancelled by user" % self.queue_id)
        self.reset()
        return 1
    def with_fixed_mpi_omp(self, mpi_procs, omp_threads):
        """
        Disable autoparal and force execution with `mpi_procs` MPI processes
        and `omp_threads` OpenMP threads. Useful for generating benchmarks.
        """
        # Fall back to the flow's manager if this task has none of its own.
        manager = self.manager if hasattr(self, "manager") else self.flow.manager
        self.manager = manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
    #@check_spectator
    def _on_done(self):
        """Callback invoked when the task reaches S_DONE: normalize output file names."""
        self.fix_ofiles()
    #@check_spectator
    def _on_ok(self):
        """Callback invoked when the task reaches S_OK: fix file names, run on_ok, mark finalized."""
        # Fix output file names.
        self.fix_ofiles()
        # Get results
        results = self.on_ok()
        self.finalized = True
        return results
    #@check_spectator
    def on_ok(self):
        """
        This method is called once the `Task` has reached status S_OK.
        Subclasses should provide their own implementation
        Returns:
            Dictionary that must contain at least the following entries:
                returncode:
                    0 on success.
                message:
                    a string that should provide a human-readable description of what has been performed.
        """
        return dict(returncode=0, message="Calling on_all_ok of the base class!")
    #@check_spectator
    def fix_ofiles(self):
        """
        This method is called when the task reaches S_OK.
        It changes the extension of particular output files
        produced by Abinit so that the 'official' extension
        is preserved e.g. out_1WF14 --> out_1WF
        """
        filepaths = self.outdir.list_filepaths()
        logger.info("in fix_ofiles with filepaths %s" % list(filepaths))
        old2new = FilepathFixer().fix_paths(filepaths)
        for old, new in old2new.items():
            self.history.info("will rename old %s to new %s" % (old, new))
            os.rename(old, new)
    #@check_spectator
    def _restart(self, submit=True):
        """
        Called by restart once we have finished preparing the task for restarting.
        Args:
            submit: if True, the task is relaunched immediately.
        Return:
            True if task has been restarted
        """
        self.set_status(self.S_READY, msg="Restarted on %s" % time.asctime())
        # Increase the counter.
        self.num_restarts += 1
        self.history.info("Restarted, num_restarts %d" % self.num_restarts)
        # Reset datetimes
        self.datetimes.reset()
        if submit:
            # Remove the lock file
            self.start_lockfile.remove()
            # Relaunch the task.
            fired = self.start()
            if not fired: self.history.warning("Restart failed")
        else:
            fired = False
        return fired
    #@check_spectator
    def restart(self):
        """
        Restart the calculation. Subclasses should provide a concrete version that
        performs all the actions needed for preparing the restart and then calls self._restart
        to restart the task. The default implementation is empty.
        Returns:
            1 if job was restarted, 0 otherwise.
        """
        logger.debug("Calling the **empty** restart method of the base class")
        return 0
    def poll(self):
        """Check if child process has terminated. Set and return returncode attribute."""
        self._returncode = self.process.poll()
        # A non-None returncode means the process has finished.
        if self._returncode is not None:
            self.set_status(self.S_DONE, "status set to Done")
        return self._returncode
def wait(self):
"""Wait for child process to terminate. Set and return returncode attribute."""
self._returncode = self.process.wait()
try:
self.process.stderr.close()
except:
pass
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
    def communicate(self, input=None):
        """
        Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
        Wait for process to terminate. The optional input argument should be a string to be sent to the
        child process, or None, if no data should be sent to the child.
        communicate() returns a tuple (stdoutdata, stderrdata).
        """
        stdoutdata, stderrdata = self.process.communicate(input=input)
        self._returncode = self.process.returncode
        self.set_status(self.S_DONE, "status set to Done")
        return stdoutdata, stderrdata
    def kill(self):
        """Kill the child process and set the task status to S_ERROR."""
        self.process.kill()
        self.set_status(self.S_ERROR, "status set to Error by task.kill")
        self._returncode = self.process.returncode
    @property
    def returncode(self):
        """
        The child return code, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        Returns 0 if the process has never been started (attribute not yet set).
        """
        try:
            return self._returncode
        except AttributeError:
            return 0
    def reset(self):
        """
        Reset the task status. Mainly used if we made a silly mistake in the initial
        setup of the queue manager and we want to fix it and rerun the task.
        Returns:
            0 on success, 1 if reset failed.
        """
        # Can only reset tasks that are done.
        # One should be able to reset 'Submitted' tasks (sometimes, they are not in the queue
        # and we want to restart them)
        #if self.status != self.S_SUB and self.status < self.S_DONE: return 1
        # NOTE(review): with the check above commented out, this method currently always returns 0.
        # Remove output files otherwise the EventParser will think the job is still running
        self.output_file.remove()
        self.log_file.remove()
        self.stderr_file.remove()
        self.start_lockfile.remove()
        self.qerr_file.remove()
        self.qout_file.remove()
        self.set_status(self.S_INIT, msg="Reset on %s" % time.asctime())
        self.set_qjob(None)
        return 0
    @property
    @return_none_if_raise(AttributeError)
    def queue_id(self):
        """Queue identifier returned by the Queue manager. None if not set"""
        return self.qjob.qid
    @property
    @return_none_if_raise(AttributeError)
    def qname(self):
        """Queue name identifier returned by the Queue manager. None if not set"""
        return self.qjob.qname
    @property
    def qjob(self):
        """Info on the queue job (set after submission), None otherwise."""
        return self._qjob
    def set_qjob(self, qjob):
        """Set info on queue after submission."""
        self._qjob = qjob
    @property
    def has_queue(self):
        """True if we are submitting jobs via a queue manager (i.e. QTYPE is not "shell")."""
        return self.manager.qadapter.QTYPE.lower() != "shell"
    @property
    def num_cores(self):
        """Total number of CPUs used to run the task (delegates to the manager)."""
        return self.manager.num_cores
    @property
    def mpi_procs(self):
        """Number of CPUs used for MPI (delegates to the manager)."""
        return self.manager.mpi_procs
    @property
    def omp_threads(self):
        """Number of CPUs used for OpenMP (delegates to the manager)."""
        return self.manager.omp_threads
    @property
    def mem_per_proc(self):
        """Memory per MPI process, wrapped in a :class:`Memory` object with Mb units."""
        return Memory(self.manager.mem_per_proc, "Mb")
    @property
    def status(self):
        """Gives the status of the task."""
        return self._status
    def lock(self, source_node):
        """Lock the task, source is the :class:`Node` that applies the lock. Only S_INIT tasks can be locked."""
        if self.status != self.S_INIT:
            raise ValueError("Trying to lock a task with status %s" % self.status)
        self._status = self.S_LOCKED
        self.history.info("Locked by node %s", source_node)
    def unlock(self, source_node, check_status=True):
        """
        Unlock the task, set its status to `S_READY` so that the scheduler can submit it.
        source_node is the :class:`Node` that removed the lock
        Call task.check_status if check_status is True.
        Raises:
            RuntimeError: if the task is not in the S_LOCKED state.
        """
        if self.status != self.S_LOCKED:
            raise RuntimeError("Trying to unlock a task with status %s" % self.status)
        self._status = self.S_READY
        if check_status: self.check_status()
        self.history.info("Unlocked by %s", source_node)
    #@check_spectator
    def set_status(self, status, msg):
        """
        Set and return the status of the task.
        Args:
            status: Status object or string representation of the status
            msg: string with human-readable message used in the case of errors.
        Raises:
            RuntimeError: when trying to enter or leave S_LOCKED (locks must be handled via lock/unlock).
        """
        # truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
        if len(msg) > 2000:
            msg = msg[:2000]
            msg += "\n... snip ...\n"
        # Locked files must be explicitly unlocked
        if self.status == self.S_LOCKED or status == self.S_LOCKED:
            err_msg = (
                 "Locked files must be explicitly unlocked before calling set_status but\n"
                 "task.status = %s, input status = %s" % (self.status, status))
            raise RuntimeError(err_msg)
        status = Status.as_status(status)
        changed = True
        if hasattr(self, "_status"):
            changed = (status != self._status)
        self._status = status
        if status == self.S_RUN:
            # Set datetimes.start when the task enters S_RUN
            if self.datetimes.start is None:
                self.datetimes.start = datetime.datetime.now()
        # Add new entry to history only if the status has changed.
        if changed:
            if status == self.S_SUB:
                self.datetimes.submission = datetime.datetime.now()
                self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
                    self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
            elif status == self.S_OK:
                self.history.info("Task completed %s", msg)
            elif status == self.S_ABICRITICAL:
                self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)
            else:
                self.history.info("Status changed to %s. msg: %s", status, msg)
        #######################################################
        # The section belows contains callbacks that should not
        # be executed if we are in spectator_mode
        #######################################################
        if status == self.S_DONE:
            # Execute the callback
            self._on_done()
        if status == self.S_OK:
            # Finalize the task.
            if not self.finalized:
                self._on_ok()
                # here we remove the output files of the task and of its parents.
                if self.gc is not None and self.gc.policy == "task":
                    self.clean_output_files()
            self.send_signal(self.S_OK)
        return status
def check_status(self):
"""
This function checks the status of the task by inspecting the output and the
error files produced by the application and by the queue manager.
"""
# 1) see it the job is blocked
# 2) see if an error occured at submitting the job the job was submitted, TODO these problems can be solved
# 3) see if there is output
# 4) see if abinit reports problems
# 5) see if both err files exist and are empty
# 6) no output and no err files, the job must still be running
# 7) try to find out what caused the problems
# 8) there is a problem but we did not figure out what ...
# 9) the only way of landing here is if there is a output file but no err files...
# 1) A locked task can only be unlocked by calling set_status explicitly.
# an errored task, should not end up here but just to be sure
black_list = (self.S_LOCKED, self.S_ERROR)
#if self.status in black_list: return self.status
# 2) Check the returncode of the process (the process of submitting the job) first.
# this point type of problem should also be handled by the scheduler error parser
if self.returncode != 0:
# The job was not submitted properly
return self.set_status(self.S_QCRITICAL, msg="return code %s" % self.returncode)
# If we have an abort file produced by Abinit
if self.mpiabort_file.exists:
return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file")
# Analyze the stderr file for Fortran runtime errors.
# getsize is 0 if the file is empty or it does not exist.
err_msg = None
if self.stderr_file.getsize() != 0:
#if self.stderr_file.exists:
err_msg = self.stderr_file.read()
# Analyze the stderr file of the resource manager runtime errors.
# TODO: Why are we looking for errors in queue.qerr?
qerr_info = None
if self.qerr_file.getsize() != 0:
#if self.qerr_file.exists:
qerr_info = self.qerr_file.read()
# Analyze the stdout file of the resource manager (needed for PBS !)
qout_info = None
if self.qout_file.getsize():
#if self.qout_file.exists:
qout_info = self.qout_file.read()
# Start to check ABINIT status if the output file has been created.
#if self.output_file.getsize() != 0:
if self.output_file.exists:
try:
report = self.get_event_report()
except Exception as exc:
msg = "%s exception while parsing event_report:\n%s" % (self, exc)
return self.set_status(self.S_ABICRITICAL, msg=msg)
if report is None:
return self.set_status(self.S_ERROR, msg="got None report!")
if report.run_completed:
# Here we set the correct timing data reported by Abinit
self.datetimes.start = report.start_datetime
self.datetimes.end = report.end_datetime
# Check if the calculation converged.
not_ok = report.filter_types(self.CRITICAL_EVENTS)
if not_ok:
return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout')
else:
return self.set_status(self.S_OK, msg="status set to ok based on abiout")
# Calculation still running or errors?
if report.errors:
# Abinit reported problems
logger.debug('Found errors in report')
for error in report.errors:
logger.debug(str(error))
try:
self.abi_errors.append(error)
except AttributeError:
self.abi_errors = [error]
# The job is unfixable due to ABINIT errors
logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self)
msg = "\n".join(map(repr, report.errors))
return self.set_status(self.S_ABICRITICAL, msg=msg)
# 5)
if self.stderr_file.exists and not err_msg:
if self.qerr_file.exists and not qerr_info:
# there is output and no errors
# The job still seems to be running
return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running')
# 6)
if not self.output_file.exists:
logger.debug("output_file does not exists")
if not self.stderr_file.exists and not self.qerr_file.exists:
# No output at allThe job is still in the queue.
return self.status
# 7) Analyze the files of the resource manager and abinit and execution err (mvs)
if qerr_info or qout_info:
from pymatgen.io.abinit.scheduler_error_parsers import get_parser
scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path,
out_file=self.qout_file.path, run_err_file=self.stderr_file.path)
if scheduler_parser is None:
return self.set_status(self.S_QCRITICAL,
msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE)
scheduler_parser.parse()
if scheduler_parser.errors:
# Store the queue errors in the task
self.queue_errors = scheduler_parser.errors
# The job is killed or crashed and we know what happened
msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors)
return self.set_status(self.S_QCRITICAL, msg=msg)
elif lennone(qerr_info) > 0:
# if only qout_info, we are not necessarily in QCRITICAL state,
# since there will always be info in the qout file
self.history.info('found unknown messages in the queue error: %s' % str(qerr_info))
#try:
# rt = self.datetimes.get_runtime().seconds
#except:
# rt = -1.0
#tl = self.manager.qadapter.timelimit
#if rt > tl:
# msg += 'set to error : runtime (%s) exceded walltime (%s)' % (rt, tl)
# print(msg)
# return self.set_status(self.S_ERROR, msg=msg)
# The job may be killed or crashed but we don't know what happened
# It may also be that an innocent message was written to qerr, so we wait for a while
# it is set to QCritical, we will attempt to fix it by running on more resources
# 8) analizing the err files and abinit output did not identify a problem
# but if the files are not empty we do have a problem but no way of solving it:
if lennone(err_msg) > 0:
msg = 'found error message:\n %s' % str(err_msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
# The job is killed or crashed but we don't know what happend
# it is set to QCritical, we will attempt to fix it by running on more resources
# 9) if we still haven't returned there is no indication of any error and the job can only still be running
# but we should actually never land here, or we have delays in the file system ....
# print('the job still seems to be running maybe it is hanging without producing output... ')
# Check time of last modification.
if self.output_file.exists and \
(time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout):
msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout
return self.set_status(self.S_ERROR, msg=msg)
# Handle weird case in which either run.abo, or run.log have not been produced
#if self.status not in (self.S_INIT, self.S_READY) and (not self.output.file.exists or not self.log_file.exits):
# msg = "Task have been submitted but cannot find the log file or the output file"
# return self.set_status(self.S_ERROR, msg)
return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
Should be overwritten by specific tasks.
"""
return False
def speed_up(self):
"""
Method that can be called by the flow to decrease the time needed for a specific task.
Returns True in case of success, False in case of Failure
Should be overwritten by specific tasks.
"""
return False
def out_to_in(self, out_file):
"""
Move an output file to the output data directory of the `Task`
and rename the file so that ABINIT will read it as an input data file.
Returns:
The absolute path of the new file in the indata directory.
"""
in_file = os.path.basename(out_file).replace("out", "in", 1)
dest = os.path.join(self.indir.path, in_file)
if os.path.exists(dest) and not os.path.islink(dest):
logger.warning("Will overwrite %s with %s" % (dest, out_file))
os.rename(out_file, dest)
return dest
def inlink_file(self, filepath):
"""
Create a symbolic link to the specified file in the
directory containing the input files of the task.
"""
if not os.path.exists(filepath):
logger.debug("Creating symbolic link to not existent file %s" % filepath)
# Extract the Abinit extension and add the prefix for input files.
root, abiext = abi_splitext(filepath)
infile = "in_" + abiext
infile = self.indir.path_in(infile)
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
self.history.info("Linking path %s --> %s" % (filepath, infile))
if not os.path.exists(infile):
os.symlink(filepath, infile)
else:
if os.path.realpath(infile) != filepath:
raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
    def make_links(self):
        """
        Create symbolic links to the output files produced by the other tasks.

        .. warning::
            This method should be called only when the calculation is READY because
            it uses a heuristic approach to find the file to link.
        """
        for dep in self.deps:
            filepaths, exts = dep.get_filepaths_and_exts()
            for path, ext in zip(filepaths, exts):
                logger.info("Need path %s with ext %s" % (path, ext))
                # Destination name is derived from the Abinit file extension.
                dest = self.ipath_from_ext(ext)
                if not os.path.exists(path):
                    # Try netcdf file.
                    # TODO: this case should be treated in a cleaner way.
                    path += ".nc"
                    if os.path.exists(path): dest += ".nc"
                if not os.path.exists(path):
                    raise self.Error("%s: %s is needed by this task but it does not exist" % (self, path))
                # Keep source and destination extensions consistent for netcdf files.
                if path.endswith(".nc") and not dest.endswith(".nc"): # NC --> NC file
                    dest += ".nc"
                # Link path to dest if dest link does not exist.
                # else check that it points to the expected file.
                logger.debug("Linking path %s --> %s" % (path, dest))
                if not os.path.exists(dest):
                    os.symlink(path, dest)
                else:
                    # check links but only if we haven't performed the restart.
                    # in this case, indeed we may have replaced the file pointer with the
                    # previous output file of the present task.
                    if os.path.realpath(dest) != path and self.num_restarts == 0:
                        raise self.Error("dest %s does not point to path %s" % (dest, path))
    @abc.abstractmethod
    def setup(self):
        """
        Public method called before submitting the task.
        Subclasses must implement the task-specific preparation here.
        """
def _setup(self):
"""
This method calls self.setup after having performed additional operations
such as the creation of the symbolic links needed to connect different tasks.
"""
self.make_links()
self.setup()
    def get_event_report(self, source="log"):
        """
        Analyzes the main logfile of the calculation for possible Errors or Warnings.
        If the ABINIT abort file is found, the error found in this file are added to
        the output report.

        Args:
            source: "output" for the main output file, "log" for the log file.

        Returns:
            :class:`EventReport` instance or None if the source file does not exist.
        """
        # By default, we inspect the main log file.
        ofile = {
            "output": self.output_file,
            "log": self.log_file}[source]
        parser = events.EventsParser()
        if not ofile.exists:
            if not self.mpiabort_file.exists:
                return None
            else:
                # ABINIT abort file without log!
                abort_report = parser.parse(self.mpiabort_file.path)
                return abort_report
        try:
            report = parser.parse(ofile.path)
            #self._prev_reports[source] = report
            # Add events found in the ABI_MPIABORTFILE.
            if self.mpiabort_file.exists:
                logger.critical("Found ABI_MPIABORTFILE!!!!!")
                abort_report = parser.parse(self.mpiabort_file.path)
                if len(abort_report) != 1:
                    logger.critical("Found more than one event in ABI_MPIABORTFILE")
                # Weird case: empty abort file, let's skip the part
                # below and hope that the log file contains the error message.
                #if not len(abort_report): return report
                # Add it to the initial report only if it differs
                # from the last one found in the main log file.
                last_abort_event = abort_report[-1]
                # NOTE(review): both branches of this conditional append the
                # event, so the "only if it differs" filter described above is
                # not actually enforced -- confirm whether this is intentional.
                if report and last_abort_event != report[-1]:
                    report.append(last_abort_event)
                else:
                    report.append(last_abort_event)
            return report
        #except parser.Error as exc:
        except Exception as exc:
            # Return a report with an error entry with info on the exception.
            msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc))
            self.set_status(self.S_ABICRITICAL, msg=msg)
            return parser.report_exception(ofile.path, exc)
def get_results(self, **kwargs):
"""
Returns :class:`NodeResults` instance.
Subclasses should extend this method (if needed) by adding
specialized code that performs some kind of post-processing.
"""
# Check whether the process completed.
if self.returncode is None:
raise self.Error("return code is None, you should call wait, communitate or poll")
if self.status is None or self.status < self.S_DONE:
raise self.Error("Task is not completed")
return self.Results.from_node(self)
def move(self, dest, is_abspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
Be default, dest is located in the parent directory of self.workdir.
Use is_abspath=True to specify an absolute path.
"""
if not is_abspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def in_files(self):
"""Return all the input data files used."""
return self.indir.list_filepaths()
def out_files(self):
"""Return all the output data files produced."""
return self.outdir.list_filepaths()
    def tmp_files(self):
        """Return all the temporary data files produced in tmpdir."""
        return self.tmpdir.list_filepaths()
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the top-level working directory."""
return os.path.join(self.workdir, filename)
def rename(self, src_basename, dest_basename, datadir="outdir"):
"""
Rename a file located in datadir.
src_basename and dest_basename are the basename of the source file
and of the destination file, respectively.
"""
directory = {
"indir": self.indir,
"outdir": self.outdir,
"tmpdir": self.tmpdir,
}[datadir]
src = directory.path_in(src_basename)
dest = directory.path_in(dest_basename)
os.rename(src, dest)
    #@check_spectator
    def build(self, *args, **kwargs):
        """
        Creates the working directory and the input files of the :class:`Task`.
        It does not overwrite files if they already exist.
        """
        # Create dirs for input, output and tmp data.
        self.indir.makedirs()
        self.outdir.makedirs()
        self.tmpdir.makedirs()
        # Write files file and input file.
        if not self.files_file.exists:
            self.files_file.write(self.filesfile_string)
        # NOTE: unlike the files file, the input file is rewritten on every call.
        self.input_file.write(self.make_input())
        # (Re)generate the submission script via the task manager.
        self.manager.write_jobfile(self)
#@check_spectator
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by |.
Files matching one of the regular expressions will be preserved.
example: exclude_wildcard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
filepath = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(filepath)
def remove_files(self, *filenames):
"""Remove all the files listed in filenames."""
filenames = list_strings(filenames)
for dirpath, dirnames, fnames in os.walk(self.workdir):
for fname in fnames:
if fname in filenames:
filepath = os.path.join(dirpath, fname)
os.remove(filepath)
    def clean_output_files(self, follow_parents=True):
        """
        This method is called when the task reaches S_OK. It removes all the output files
        produced by the task that are not needed by its children as well as the output files
        produced by its parents if no other node needs them.

        Args:
            follow_parents: If true, the output files of the parents nodes will be removed if possible.

        Return:
            list with the absolute paths of the files that have been removed.
        """
        paths = []
        if self.status != self.S_OK:
            logger.warning("Calling task.clean_output_files on a task whose status != S_OK")
        # Remove all files in tmpdir.
        self.tmpdir.clean()
        # Find the file extensions that should be preserved since these files are still
        # needed by the children who haven't reached S_OK
        except_exts = set()
        for child in self.get_children():
            if child.status == self.S_OK: continue
            # Find the position of self in child.deps and add the extensions.
            i = [dep.node for dep in child.deps].index(self)
            except_exts.update(child.deps[i].exts)
        # Remove the files in the outdir of the task but keep except_exts.
        exts = self.gc.exts.difference(except_exts)
        #print("Will remove its extensions: ", exts)
        paths += self.outdir.remove_exts(exts)
        if not follow_parents: return paths
        # Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
        for parent in self.get_parents():
            # Here we build a dictionary file extension --> list of child nodes requiring this file from parent
            # e.g {"WFK": [node1, node2]}
            ext2nodes = collections.defaultdict(list)
            for child in parent.get_children():
                if child.status == child.S_OK: continue
                i = [d.node for d in child.deps].index(parent)
                for ext in child.deps[i].exts:
                    ext2nodes[ext].append(child)
            # Remove extension only if no node depends on it!
            # NOTE(review): every entry of ext2nodes is created by appending, so
            # `lst` is always non-empty and the `if lst` filter never excludes a
            # key -- confirm whether this is the intended behavior.
            except_exts = [k for k, lst in ext2nodes.items() if lst]
            exts = self.gc.exts.difference(except_exts)
            #print("%s removes extensions %s from parent node %s" % (self, exts, parent))
            paths += parent.outdir.remove_exts(exts)
        self.history.info("Removed files: %s" % paths)
        return paths
    def setup(self):
        """Base class does not provide any hook; subclasses may override to add pre-run preparation."""
#@check_spectator
def start(self, **kwargs):
"""
Starts the calculation by performing the following steps:
- build dirs and files
- call the _setup method
- execute the job file by executing/submitting the job script.
Main entry point for the `Launcher`.
============== ==============================================================
kwargs Meaning
============== ==============================================================
autoparal False to skip the autoparal step (default True)
exec_args List of arguments passed to executable.
============== ==============================================================
Returns:
1 if task was started, 0 otherwise.
"""
if self.status >= self.S_SUB:
raise self.Error("Task status: %s" % str(self.status))
if self.start_lockfile.exists:
self.history.warning("Found lock file: %s" % self.start_lockfile.path)
return 0
self.start_lockfile.write("Started on %s" % time.asctime())
self.build()
self._setup()
# Add the variables needed to connect the node.
for d in self.deps:
cvars = d.connecting_vars()
self.history.info("Adding connecting vars %s" % cvars)
self.set_vars(cvars)
# Get (python) data from other nodes
d.apply_getters(self)
# Automatic parallelization
if kwargs.pop("autoparal", True) and hasattr(self, "autoparal_run"):
try:
self.autoparal_run()
except QueueAdapterError as exc:
# If autoparal cannot find a qadapter to run the calculation raises an Exception
self.history.critical(exc)
msg = "Error while trying to run autoparal in task:%s\n%s" % (repr(task), straceback())
cprint(msg, "yellow")
self.set_status(self.S_QCRITICAL, msg=msg)
return 0
except Exception as exc:
# Sometimes autoparal_run fails because Abinit aborts
# at the level of the parser e.g. cannot find the spacegroup
# due to some numerical noise in the structure.
# In this case we call fix_abicritical and then we try to run autoparal again.
self.history.critical("First call to autoparal failed with `%s`. Will try fix_abicritical" % exc)
msg = "autoparal_fake_run raised:\n%s" % straceback()
logger.critical(msg)
fixed = self.fix_abicritical()
if not fixed:
self.set_status(self.S_ABICRITICAL, msg="fix_abicritical could not solve the problem")
return 0
try:
self.autoparal_run()
self.history.info("Second call to autoparal succeeded!")
#cprint("Second call to autoparal succeeded!", "green")
except Exception as exc:
self.history.critical("Second call to autoparal failed with %s. Cannot recover!", exc)
msg = "Tried autoparal again but got:\n%s" % straceback()
cprint(msg, "red")
self.set_status(self.S_ABICRITICAL, msg=msg)
return 0
# Start the calculation in a subprocess and return.
self._process = self.manager.launch(self, **kwargs)
return 1
def start_and_wait(self, *args, **kwargs):
"""
Helper method to start the task and wait for completetion.
Mainly used when we are submitting the task via the shell without passing through a queue manager.
"""
self.start(*args, **kwargs)
retcode = self.wait()
return retcode
class DecreaseDemandsError(Exception):
    """
    Raised by a task when a request to decrease one of its demands
    (load or memory) could not be performed.
    """
class AbinitTask(Task):
"""
Base class defining an ABINIT calculation
"""
Results = TaskResults
    @classmethod
    def from_input(cls, input, workdir=None, manager=None):
        """
        Create an instance of `AbinitTask` from an ABINIT input.

        Args:
            input: `AbinitInput` object.
            workdir: Path to the working directory.
            manager: :class:`TaskManager` object.
        """
        return cls(input, workdir=workdir, manager=manager)
    @classmethod
    def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
        """
        Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
        Mainly used for invoking Abinit to get important parameters needed to prepare the real task.

        Args:
            inp: ABINIT input passed to ``from_input``.
            mpi_procs: Number of MPI processes to use.
            workdir: Path to the working directory; a temporary directory is created if None.
            manager: :class:`TaskManager` object; read from the user configuration if None.
        """
        # Build a simple manager to run the job in a shell subprocess
        import tempfile
        workdir = tempfile.mkdtemp() if workdir is None else workdir
        if manager is None: manager = TaskManager.from_user_config()
        # Construct the task and run it
        task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
        task.set_name('temp_shell_task')
        return task
    def setup(self):
        """
        Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
        to the output file, and this breaks a lot of code that relies of the use of a unique file extension.
        Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
        exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
        """
        def rename_file(afile):
            """Helper function to rename :class:`File` objects. Return string for logging purpose."""
            # Find the index of the last file (if any).
            # TODO: Maybe it's better to use run.abo --> run(1).abo
            fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
            # Keep only the purely numeric "_N" suffixes of previous renames.
            nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
            last = max(nums) if nums else 0
            new_path = afile.path + "_" + str(last+1)
            os.rename(afile.path, new_path)
            return "Will rename %s to %s" % (afile.path, new_path)
        # Rename both the main output file and the log file, then record what happened.
        logs = []
        if self.output_file.exists: logs.append(rename_file(self.output_file))
        if self.log_file.exists: logs.append(rename_file(self.log_file))
        if logs:
            self.history.info("\n".join(logs))
@property
def executable(self):
"""Path to the executable required for running the Task."""
try:
return self._executable
except AttributeError:
return "abinit"
    @property
    def pseudos(self):
        """List of pseudos used in the calculation (taken from the input)."""
        return self.input.pseudos
    @property
    def isnc(self):
        """True if this is a norm-conserving calculation (delegated to the input)."""
        return self.input.isnc
    @property
    def ispaw(self):
        """True if this is a PAW calculation (delegated to the input)."""
        return self.input.ispaw
    @property
    def filesfile_string(self):
        """String with the list of files and prefixes needed to execute ABINIT."""
        lines = []
        app = lines.append
        pj = os.path.join
        app(self.input_file.path) # Path to the input file
        app(self.output_file.path) # Path to the output file
        app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
        app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
        app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
        # Paths to the pseudopotential files.
        # Note that here the pseudos **must** be sorted according to znucl.
        # Here we reorder the pseudos if the order is wrong.
        ord_pseudos = []
        znucl = [specie.number for specie in
                 self.input.structure.types_of_specie]
        for z in znucl:
            # Pick the first pseudo matching this atomic number.
            for p in self.pseudos:
                if p.Z == z:
                    ord_pseudos.append(p)
                    break
            else:
                # for/else: no pseudo matched z at all.
                raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
        for pseudo in ord_pseudos:
            app(pseudo.path)
        return "\n".join(lines)
    def set_pconfs(self, pconfs):
        """Set the list of autoparal configurations (a :class:`ParalHints` object)."""
        self._pconfs = pconfs
@property
def pconfs(self):
"""List of autoparal configurations."""
try:
return self._pconfs
except AttributeError:
return None
    def uses_paral_kgb(self, value=1):
        """
        True if the task is a GS Task and uses paral_kgb with the given value.

        Args:
            value: paral_kgb value to test against (default: 1).
        """
        paral_kgb = self.get_inpvar("paral_kgb", 0)
        # paral_kgb is used only in the GS part.
        return paral_kgb == value and isinstance(self, GsTask)
def _change_structure(self, new_structure):
"""Change the input structure."""
# Compare new and old structure for logging purpose.
# TODO: Write method of structure to compare self and other and return a dictionary
old_structure = self.input.structure
old_lattice = old_structure.lattice
abc_diff = np.array(new_structure.lattice.abc) - np.array(old_lattice.abc)
angles_diff = np.array(new_structure.lattice.angles) - np.array(old_lattice.angles)
cart_diff = new_structure.cart_coords - old_structure.cart_coords
displs = np.array([np.sqrt(np.dot(v, v)) for v in cart_diff])
recs, tol_angle, tol_length = [], 10**-2, 10**-5
if np.any(np.abs(angles_diff) > tol_angle):
recs.append("new_agles - old_angles = %s" % angles_diff)
if np.any(np.abs(abc_diff) > tol_length):
recs.append("new_abc - old_abc = %s" % abc_diff)
if np.any(np.abs(displs) > tol_length):
min_pos, max_pos = displs.argmin(), displs.argmax()
recs.append("Mean displ: %.2E, Max_displ: %.2E (site %d), min_displ: %.2E (site %d)" %
(displs.mean(), displs[max_pos], max_pos, displs[min_pos], min_pos))
self.history.info("Changing structure (only significant diffs are shown):")
if not recs:
self.history.info("Input and output structure seems to be equal within the given tolerances")
else:
for rec in recs:
self.history.info(rec)
self.input.set_structure(new_structure)
#assert self.input.structure == new_structure
    def autoparal_run(self):
        """
        Find an optimal set of parameters for the execution of the task.
        This method can change the ABINIT input variables and/or the
        submission parameters e.g. the number of CPUs for MPI and OpenMP.

        Set:
            self.pconfs where pconfs is a :class:`ParalHints` object with the configuration reported by
            autoparal and optimal is the optimal configuration selected.

        Returns:
            0 on success (or when autoparal is disabled / max_ncpus == 1),
            2 if the autoparal section could not be parsed.
        """
        policy = self.manager.policy
        if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
            logger.info("Nothing to do in autoparal, returning (None, None)")
            return 0
        if policy.autoparal != 1:
            raise NotImplementedError("autoparal != 1")
        ############################################################################
        # Run ABINIT in sequential to get the possible configurations with max_ncpus
        ############################################################################
        # Set the variables for automatic parallelization
        # Will get all the possible configurations up to max_ncpus
        # Return immediately if max_ncpus == 1
        max_ncpus = self.manager.max_cores
        if max_ncpus == 1: return 0
        autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
        self.set_vars(autoparal_vars)
        # Run the job in a shell subprocess with mpi_procs = 1
        # we don't want to make a request to the queue manager for this simple job!
        # Return code is always != 0
        process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
        # Drop the history entry recorded by the fake launch above.
        self.history.pop()
        retcode = process.wait()
        # To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
        process.stderr.close()
        # Remove the variables added for the automatic parallelization
        self.input.remove_vars(list(autoparal_vars.keys()))
        ##############################################################
        # Parse the autoparal configurations from the main output file
        ##############################################################
        parser = ParalHintsParser()
        try:
            pconfs = parser.parse(self.output_file.path)
        except parser.Error:
            logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
            return 2
        ######################################################
        # Select the optimal configuration according to policy
        ######################################################
        optconf = self.find_optconf(pconfs)
        ####################################################
        # Change the input file and/or the submission script
        ####################################################
        self.set_vars(optconf.vars)
        # Write autoparal configurations to JSON file.
        d = pconfs.as_dict()
        d["optimal_conf"] = optconf
        json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
        ##############
        # Finalization
        ##############
        # Reset the status, remove garbage files ...
        self.set_status(self.S_INIT, msg='finished autoparallel run')
        # Remove the output file since Abinit likes to create new files
        # with extension .outA, .outB if the file already exists.
        os.remove(self.output_file.path)
        os.remove(self.log_file.path)
        os.remove(self.stderr_file.path)
        return 0
def find_optconf(self, pconfs):
"""Find the optimal Parallel configuration."""
# Save pconfs for future reference.
self.set_pconfs(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
return optconf
def select_files(self, what="o"):
"""
Helper function used to select the files of a task.
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
"""
choices = collections.OrderedDict([
("i", self.input_file),
("o", self.output_file),
("f", self.files_file),
("j", self.job_file),
("l", self.log_file),
("e", self.stderr_file),
("q", self.qout_file),
])
if what == "all":
return [getattr(v, "path") for v in choices.values()]
selected = []
for c in what:
try:
selected.append(getattr(choices[c], "path"))
except KeyError:
logger.warning("Wrong keyword %s" % c)
return selected
    def restart(self):
        """
        General restart used when scheduler problems have been taken care of.
        Delegates to the task-specific ``_restart`` implementation.
        """
        return self._restart()
    #@check_spectator
    def reset_from_scratch(self):
        """
        Restart from scratch, this is to be used if a job is restarted with more resources after a crash.

        Moves the output files produced in workdir to the _reset directory,
        otherwise check_status continues to see the task as crashed even if the job did not run.

        Returns:
            The value returned by ``_restart(submit=False)``.
        """
        # Create reset directory if not already done.
        reset_dir = os.path.join(self.workdir, "_reset")
        reset_file = os.path.join(reset_dir, "_counter")
        if not os.path.exists(reset_dir):
            os.mkdir(reset_dir)
            num_reset = 1
        else:
            # The counter file records how many resets have been performed.
            with open(reset_file, "rt") as fh:
                num_reset = 1 + int(fh.read())
        # Move files to reset and append digit with reset index.
        def move_file(f):
            # Best-effort move: a failure is logged but does not abort the reset.
            if not f.exists: return
            try:
                f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
            except OSError as exc:
                logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
        for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file"):
            move_file(getattr(self, fname))
        # Persist the updated reset counter for the next call.
        with open(reset_file, "wt") as fh:
            fh.write(str(num_reset))
        self.start_lockfile.remove()
        # Reset datetimes
        self.datetimes.reset()
        return self._restart(submit=False)
    #@check_spectator
    def fix_abicritical(self):
        """
        Method to fix crashes/errors caused by ABINIT by dispatching the
        events of the last report to the registered event handlers.

        Returns:
            1 if task has been fixed else 0.
        """
        event_handlers = self.event_handlers
        if not event_handlers:
            self.set_status(status=self.S_ERROR, msg='Empty list of event handlers. Cannot fix abi_critical errors')
            return 0
        # done[i] counts how many events handler i has fixed so far;
        # each handler is applied at most once (see the `not done[i]` test).
        count, done = 0, len(event_handlers) * [0]
        report = self.get_event_report()
        if report is None:
            self.set_status(status=self.S_ERROR, msg='get_event_report returned None')
            return 0
        # Note we have loop over all possible events (slow, I know)
        # because we can have handlers for Error, Bug or Warning
        # (ideally only for CriticalWarnings but this is not done yet)
        for event in report:
            for i, handler in enumerate(self.event_handlers):
                if handler.can_handle(event) and not done[i]:
                    logger.info("handler %s will try to fix event %s" % (handler, event))
                    try:
                        d = handler.handle_task_event(self, event)
                        if d:
                            done[i] += 1
                            count += 1
                    except Exception as exc:
                        # A broken handler must not prevent the others from running.
                        logger.critical(str(exc))
        if count:
            # At least one handler modified the task: restart from a clean state.
            self.reset_from_scratch()
            return 1
        self.set_status(status=self.S_ERROR, msg='We encountered AbiCritical events that could not be fixed')
        return 0
#@check_spectator
def fix_queue_critical(self):
    """
    This function tries to fix critical events originating from the queue submission system.

    General strategy: first try to increase resources in order to fix the problem,
    if this is not possible, call a task-specific method to attempt to decrease the demands.

    Returns:
        1 if task has been fixed else 0.

    Raises:
        FixQueueCriticalError: when no recovery option is left for the detected error.
    """
    from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
    #assert isinstance(self.manager, TaskManager)

    self.history.info('fixing queue critical')
    ret = "task.fix_queue_critical: "

    if not self.queue_errors:
        # TODO
        # paral_kgb = 1 leads to nasty sigegv that are seen as Qcritical errors!
        # Try to fallback to the conjugate gradient.
        #if self.uses_paral_kgb(1):
        #    logger.critical("QCRITICAL with PARAL_KGB==1. Will try CG!")
        #    self.set_vars(paral_kgb=0)
        #    self.reset_from_scratch()
        #    return
        # queue error but no errors detected, try to solve by increasing ncpus if the task scales
        # if resources are at maximum the task is definitively turned to errored
        if self.mem_scales or self.load_scales:
            try:
                self.manager.increase_resources()  # acts either on the policy or on the qadapter
                self.reset_from_scratch()
                ret += "increased resources"
                return ret
            except ManagerIncreaseError:
                self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
                raise FixQueueCriticalError
        else:
            self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
            raise FixQueueCriticalError

    else:
        print("Fix_qcritical: received %d queue_errors" % len(self.queue_errors))
        print("type_list: %s" % list(type(qe) for qe in self.queue_errors))

        for error in self.queue_errors:
            self.history.info('fixing: %s' % str(error))
            ret += str(error)

            if isinstance(error, NodeFailureError):
                # if the problematic node is known, exclude it
                if error.nodes is not None:
                    try:
                        self.manager.exclude_nodes(error.nodes)
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='excluding nodes')
                    except:
                        raise FixQueueCriticalError
                else:
                    self.set_status(self.S_ERROR, msg='Node error but no node identified.')
                    raise FixQueueCriticalError

            elif isinstance(error, MemoryCancelError):
                # ask the qadapter to provide more resources, i.e. more cpu's so more total memory if the code
                # scales this should fix the memeory problem
                # increase both max and min ncpu of the autoparalel and rerun autoparalel
                if self.mem_scales:
                    try:
                        self.manager.increase_ncpus()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased ncps to solve memory problem')
                        return
                    except ManagerIncreaseError:
                        self.history.warning('increasing ncpus failed')

                # if the max is reached, try to increase the memory per cpu:
                try:
                    self.manager.increase_mem()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='increased mem')
                    return
                except ManagerIncreaseError:
                    self.history.warning('increasing mem failed')

                # if this failed ask the task to provide a method to reduce the memory demand
                try:
                    self.reduce_memory_demand()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='decreased mem demand')
                    return
                except DecreaseDemandsError:
                    self.history.warning('decreasing demands failed')

                msg = ('Memory error detected but the memory could not be increased neigther could the\n'
                       'memory demand be decreased. Unrecoverable error.')
                self.set_status(self.S_ERROR, msg)
                raise FixQueueCriticalError

            elif isinstance(error, TimeCancelError):
                # ask the qadapter to provide more time
                print('trying to increase time')
                try:
                    self.manager.increase_time()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='increased wall time')
                    return
                except ManagerIncreaseError:
                    self.history.warning('increasing the waltime failed')

                # if this fails ask the qadapter to increase the number of cpus
                if self.load_scales:
                    try:
                        self.manager.increase_ncpus()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased number of cpus')
                        return
                    except ManagerIncreaseError:
                        self.history.warning('increase ncpus to speed up the calculation to stay in the walltime failed')

                # if this failed ask the task to provide a method to speed up the task
                try:
                    self.speed_up()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='task speedup')
                    return
                except DecreaseDemandsError:
                    self.history.warning('decreasing demands failed')

                msg = ('Time cancel error detected but the time could not be increased neither could\n'
                       'the time demand be decreased by speedup of increasing the number of cpus.\n'
                       'Unrecoverable error.')
                self.set_status(self.S_ERROR, msg)

            else:
                msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
                self.set_status(self.S_ERROR, msg)

        return 0
def parse_timing(self):
    """
    Parse the timer data in the main output file of Abinit.
    Requires timopt /= 0 in the input file (usually timopt = -1)

    Return: :class:`AbinitTimerParser` instance, None if error.
    """
    from .abitimer import AbinitTimerParser
    timer_parser = AbinitTimerParser()
    # parse() returns a truthy value on success.
    return timer_parser if timer_parser.parse(self.output_file.path) else None
class ProduceHist(object):
    """
    Mixin class for an :class:`AbinitTask` producing a HIST file.
    Provide the method `open_hist` that reads and returns a HIST file.
    """
    @property
    def hist_path(self):
        """Absolute path of the HIST file. Empty string if file is not present."""
        # Cache the result of the directory scan: `_hist_path` is stored only
        # once a file has actually been found, so later lookups are free.
        try:
            return self._hist_path
        except AttributeError:
            found = self.outdir.has_abiext("HIST")
            if found:
                self._hist_path = found
            return found

    def open_hist(self):
        """
        Open the HIST file located in the in self.outdir.
        Returns :class:`HistFile` object, None if file could not be found or file is not readable.
        """
        path = self.hist_path
        if not path:
            if self.status == self.S_OK:
                logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir))
            return None

        # Open the HIST file
        from abipy.dynamics.hist import HistFile
        try:
            return HistFile(path)
        except Exception as exc:
            logger.critical("Exception while reading HIST file at %s:\n%s" % (path, str(exc)))
            return None
class GsTask(AbinitTask):
    """
    Base class for ground-state tasks. A ground state task produces a GSR file.
    Provides the method `open_gsr` that reads and returns a GSR file.
    """
    @property
    def gsr_path(self):
        """Absolute path of the GSR file. Empty string if file is not present."""
        # Cached on the first successful lookup to avoid rescanning the directory.
        try:
            return self._gsr_path
        except AttributeError:
            found = self.outdir.has_abiext("GSR")
            if found:
                self._gsr_path = found
            return found

    def open_gsr(self):
        """
        Open the GSR file located in the in self.outdir.
        Returns :class:`GsrFile` object, None if file could not be found or file is not readable.
        """
        path = self.gsr_path
        if not path:
            # A completed task without a GSR file is suspicious: shout about it.
            if self.status == self.S_OK:
                logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
            return None

        # Open the GSR file.
        from abipy.electrons.gsr import GsrFile
        try:
            return GsrFile(path)
        except Exception as exc:
            logger.critical("Exception while reading GSR file at %s:\n%s" % (path, str(exc)))
            return None
class ScfTask(GsTask):
    """
    Self-consistent ground-state calculations.
    Provide support for in-place restart via (WFK|DEN) files
    """
    CRITICAL_EVENTS = [
        events.ScfConvergenceWarning,
    ]

    color_rgb = np.array((255, 0, 0)) / 255

    def restart(self):
        """SCF calculations can be restarted if we have either the WFK file or the DEN file."""
        # Prefer WFK over DEN files since we can reuse the wavefunctions.
        restart_file, irdvars = None, None
        for ext in ("WFK", "DEN"):
            candidate = self.outdir.has_abiext(ext)
            if candidate:
                restart_file, irdvars = candidate, irdvars_for_ext(ext)
                break

        if restart_file is None:
            raise self.RestartError("%s: Cannot find WFK or DEN file to restart from." % self)

        # Move out --> in and add the appropriate variable for restarting.
        self.out_to_in(restart_file)
        self.set_vars(irdvars)

        # Now we can resubmit the job.
        self.history.info("Will restart from %s", restart_file)
        return self._restart()

    def inspect(self, **kwargs):
        """
        Plot the SCF cycle results with matplotlib.

        Returns
            `matplotlib` figure, None if some error occurred.
        """
        try:
            cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
        except IOError:
            return None

        if cycle is None:
            return None
        kwargs.setdefault("title", str(self))
        return cycle.plot(**kwargs)

    def get_results(self, **kwargs):
        results = super(ScfTask, self).get_results(**kwargs)

        # Open the GSR file, merge its data into results.out and register it in GridFS.
        with self.open_gsr() as gsr:
            results["out"].update(gsr.as_dict())
            results.register_gridfs_files(GSR=gsr.filepath)

        return results
class CollinearThenNonCollinearScfTask(ScfTask):
    """
    A specialized ScfTask that performs an initial SCF run with nsppol = 2.
    The spin polarized WFK file is then used to start a non-collinear SCF run (nspinor == 2)
    initialized from the previous WFK file.
    """
    def __init__(self, input, workdir=None, manager=None, deps=None):
        super(CollinearThenNonCollinearScfTask, self).__init__(input, workdir=workdir, manager=manager, deps=deps)
        # Enforce nspinor = 1, nsppol = 2 and prtwf = 1.
        # Work on a deepcopy so the caller's input object is not mutated.
        self._input = self.input.deepcopy()
        self.input.set_spin_mode("polarized")
        self.input.set_vars(prtwf=1)
        # Set to True once the first (collinear) SCF step has completed.
        self.collinear_done = False

    def _on_ok(self):
        results = super(CollinearThenNonCollinearScfTask, self)._on_ok()
        if not self.collinear_done:
            # First step done: switch to the non-collinear spin mode and
            # restart so the second run reads the spin-polarized WFK file.
            self.input.set_spin_mode("spinor")
            self.collinear_done = True
            self.finalized = False
            self.restart()

        return results
class NscfTask(GsTask):
    """
    Non-Self-consistent GS calculation. Provide in-place restart via WFK files
    """
    CRITICAL_EVENTS = [
        events.NscfConvergenceWarning,
    ]

    color_rgb = np.array((255, 122, 122)) / 255

    def restart(self):
        """NSCF calculations can be restarted only if we have the WFK file."""
        wfk_file = self.outdir.has_abiext("WFK")
        if not wfk_file:
            raise self.RestartError("%s: Cannot find the WFK file to restart from." % self)

        # Move out --> in, then tell Abinit to read the incoming WFK file.
        self.out_to_in(wfk_file)
        self.set_vars(irdvars_for_ext("WFK"))

        # Now we can resubmit the job.
        self.history.info("Will restart from %s", wfk_file)
        return self._restart()

    def get_results(self, **kwargs):
        results = super(NscfTask, self).get_results(**kwargs)

        # Read the GSR file, merge its data into results.out and register it in GridFS.
        with self.open_gsr() as gsr:
            results["out"].update(gsr.as_dict())
            results.register_gridfs_files(GSR=gsr.filepath)

        return results
class RelaxTask(GsTask, ProduceHist):
    """
    Task for structural optimizations.
    """
    # TODO possible ScfConvergenceWarning?
    CRITICAL_EVENTS = [
        events.RelaxConvergenceWarning,
    ]

    color_rgb = np.array((255, 61, 255)) / 255

    def get_final_structure(self):
        """Read the final structure from the GSR file.

        Raises:
            RuntimeError: if the GSR file is not available (open_gsr returns
                None, which has no `structure` attribute -> AttributeError).
        """
        try:
            with self.open_gsr() as gsr:
                return gsr.structure
        except AttributeError:
            raise RuntimeError("Cannot find the GSR file with the final structure to restart from.")

    def restart(self):
        """
        Restart the structural relaxation.

        Structure relaxations can be restarted only if we have the WFK file or the DEN or the GSR file.
        from which we can read the last structure (mandatory) and the wavefunctions (not mandatory but useful).
        Prefer WFK over other files since we can reuse the wavefunctions.

        .. note::

            The problem in the present approach is that some parameters in the input
            are computed from the initial structure and may not be consistent with
            the modification of the structure done during the structure relaxation.
        """
        restart_file = None

        # Try to restart from the WFK file if possible.
        # FIXME: This part has been disabled because WFK=IO is a mess if paral_kgb == 1
        # This is also the reason why I wrote my own MPI-IO code for the GW part!
        wfk_file = self.outdir.has_abiext("WFK")
        if False and wfk_file:
            irdvars = irdvars_for_ext("WFK")
            restart_file = self.out_to_in(wfk_file)

        # Fallback to DEN file. Note that here we look for out_DEN instead of out_TIM?_DEN
        # This happens when the previous run completed and task.on_done has been performed.
        # ********************************************************************************
        # Note that it's possible to have an undected error if we have multiple restarts
        # and the last relax died badly. In this case indeed out_DEN is the file produced
        # by the last run that has executed on_done.
        # ********************************************************************************
        if restart_file is None:
            # Check both the plain-Fortran and the netcdf variant of out_DEN.
            for ext in ("", ".nc"):
                out_den = self.outdir.path_in("out_DEN" + ext)
                if os.path.exists(out_den):
                    irdvars = irdvars_for_ext("DEN")
                    restart_file = self.out_to_in(out_den)
                    break

        if restart_file is None:
            # Try to restart from the last TIM?_DEN file.
            # This should happen if the previous run didn't complete in clean way.
            # Find the last TIM?_DEN file.
            last_timden = self.outdir.find_last_timden_file()
            if last_timden is not None:
                # Rename the intermediate density to out_DEN (keeping the .nc
                # suffix when present) before moving it to the input directory.
                if last_timden.path.endswith(".nc"):
                    ofile = self.outdir.path_in("out_DEN.nc")
                else:
                    ofile = self.outdir.path_in("out_DEN")
                os.rename(last_timden.path, ofile)
                restart_file = self.out_to_in(ofile)
                irdvars = irdvars_for_ext("DEN")

        if restart_file is None:
            # Don't raise RestartError as we can still change the structure.
            self.history.warning("Cannot find the WFK|DEN|TIM?_DEN file to restart from.")
        else:
            # Add the appropriate variable for restarting.
            self.set_vars(irdvars)
            self.history.info("Will restart from %s", restart_file)

        # FIXME Here we should read the HIST file but restartxf if broken!
        #self.set_vars({"restartxf": -1})

        # Read the relaxed structure from the GSR file and change the input.
        self._change_structure(self.get_final_structure())

        # Now we can resubmit the job.
        return self._restart()

    def inspect(self, **kwargs):
        """
        Plot the evolution of the structural relaxation with matplotlib.

        Args:
            what: Either "hist" or "scf". The first option (default) extracts data
                from the HIST file and plot the evolution of the structural
                parameters, forces, pressures and energies.
                The second option, extracts data from the main output file and
                plot the evolution of the SCF cycles (etotal, residuals, etc).

        Returns:
            `matplotlib` figure, None if some error occurred.
        """
        what = kwargs.pop("what", "hist")

        if what == "hist":
            # Read the hist file to get access to the structure.
            with self.open_hist() as hist:
                return hist.plot(**kwargs) if hist else None

        elif what == "scf":
            # Get info on the different SCF cycles
            relaxation = abiinspect.Relaxation.from_file(self.output_file.path)
            if "title" not in kwargs: kwargs["title"] = str(self)
            return relaxation.plot(**kwargs) if relaxation is not None else None

        else:
            raise ValueError("Wrong value for what %s" % what)

    def get_results(self, **kwargs):
        results = super(RelaxTask, self).get_results(**kwargs)

        # Open the GSR file and add its data to results.out
        with self.open_gsr() as gsr:
            results["out"].update(gsr.as_dict())
            # Add files to GridFS
            results.register_gridfs_files(GSR=gsr.filepath)

        return results

    def reduce_dilatmx(self, target=1.01):
        """Reduce the dilatmx input variable towards `target`, moving by at
        most 5% of the current value per call (used when dilatmx errors occur)."""
        actual_dilatmx = self.get_inpvar('dilatmx', 1.)
        new_dilatmx = actual_dilatmx - min((actual_dilatmx-target), actual_dilatmx*0.05)
        self.set_vars(dilatmx=new_dilatmx)

    def fix_ofiles(self):
        """
        Note that ABINIT produces lots of out_TIM1_DEN files for each step.
        Here we list all TIM*_DEN files, we select the last one and we rename it in out_DEN

        This change is needed so that we can specify dependencies with the syntax {node: "DEN"}
        without having to know the number of iterations needed to converge the run in node!
        """
        super(RelaxTask, self).fix_ofiles()

        # Find the last TIM?_DEN file.
        last_timden = self.outdir.find_last_timden_file()
        if last_timden is None:
            logger.warning("Cannot find TIM?_DEN files")
            return

        # Rename last TIMDEN with out_DEN.
        ofile = self.outdir.path_in("out_DEN")
        if last_timden.path.endswith(".nc"): ofile += ".nc"
        self.history.info("Renaming last_denfile %s --> %s" % (last_timden.path, ofile))
        os.rename(last_timden.path, ofile)
class DfptTask(AbinitTask):
    """
    Base class for DFPT tasks (Phonons, ...)
    Mainly used to implement methods that are common to DFPT calculations with Abinit.
    Provide the method `open_ddb` that reads and return a Ddb file.

    .. warning::

        This class should not be instantiated directly.
    """
    @property
    def ddb_path(self):
        """Absolute path of the DDB file. Empty string if file is not present."""
        # Cached on the first successful lookup to avoid rescanning the directory.
        try:
            return self._ddb_path
        except AttributeError:
            found = self.outdir.has_abiext("DDB")
            if found:
                self._ddb_path = found
            return found

    def open_ddb(self):
        """
        Open the DDB file located in the in self.outdir.
        Returns :class:`DdbFile` object, None if file could not be found or file is not readable.
        """
        path = self.ddb_path
        if not path:
            # A completed task without a DDB file is suspicious: shout about it.
            if self.status == self.S_OK:
                logger.critical("%s reached S_OK but didn't produce a DDB file in %s" % (self, self.outdir))
            return None

        # Open the DDB file.
        from abipy.dfpt.ddb import DdbFile
        try:
            return DdbFile(path)
        except Exception as exc:
            logger.critical("Exception while reading DDB file at %s:\n%s" % (path, str(exc)))
            return None
class DdeTask(DfptTask):
    """Task for DDE calculations."""

    def make_links(self):
        """Replace the default behaviour of make_links.

        For each dependency, locate the file produced by the parent node and
        symlink it in self.indir with the name expected by Abinit.
        Raises RuntimeError if a parent did not produce the expected file.
        """
        for dep in self.deps:
            if dep.exts == ["DDK"]:
                ddk_task = dep.node
                out_ddk = ddk_task.outdir.has_abiext("DDK")
                if not out_ddk:
                    raise RuntimeError("%s didn't produce the DDK file" % ddk_task)

                # Get (fortran) idir and costruct the name of the 1WF expected by Abinit
                rfdir = list(ddk_task.input["rfdir"])
                if rfdir.count(1) != 1:
                    raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)

                # Fortran convention: idir is 1-based; the 1WF case index also
                # depends on the number of atoms in the structure.
                idir = rfdir.index(1) + 1
                ddk_case = idir + 3 * len(ddk_task.input.structure)

                infile = self.indir.path_in("in_1WF%d" % ddk_case)
                os.symlink(out_ddk, infile)

            elif dep.exts == ["WFK"]:
                gs_task = dep.node
                out_wfk = gs_task.outdir.has_abiext("WFK")
                if not out_wfk:
                    raise RuntimeError("%s didn't produce the WFK file" % gs_task)

                # Symlink only once: restarts may have created it already.
                if not os.path.exists(self.indir.path_in("in_WFK")):
                    os.symlink(out_wfk, self.indir.path_in("in_WFK"))

            else:
                raise ValueError("Don't know how to handle extension: %s" % dep.exts)

    def get_results(self, **kwargs):
        results = super(DdeTask, self).get_results(**kwargs)
        # NOTE(review): the file registered under the DDB key is looked up with
        # the DDE extension — confirm this is the intended output file.
        return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DteTask(DfptTask):
    """Task for DTE calculations."""

    # @check_spectator
    def start(self, **kwargs):
        """Start the task with autoparal disabled, then delegate to the base class."""
        kwargs['autoparal'] = False
        return super(DteTask, self).start(**kwargs)

    def make_links(self):
        """Replace the default behaviour of make_links.

        For each dependency extension, locate the file produced by the parent
        node and symlink it in self.indir with the name expected by Abinit.

        Raises:
            RuntimeError: if a parent node did not produce the expected file.
            ValueError: if a dependency extension is not supported.
        """
        for dep in self.deps:
            for d in dep.exts:
                if d == "DDK":
                    ddk_task = dep.node
                    out_ddk = ddk_task.outdir.has_abiext("DDK")
                    if not out_ddk:
                        raise RuntimeError("%s didn't produce the DDK file" % ddk_task)

                    # Get (fortran) idir and costruct the name of the 1WF expected by Abinit
                    rfdir = list(ddk_task.input["rfdir"])
                    if rfdir.count(1) != 1:
                        raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)

                    idir = rfdir.index(1) + 1
                    ddk_case = idir + 3 * len(ddk_task.input.structure)

                    infile = self.indir.path_in("in_1WF%d" % ddk_case)
                    os.symlink(out_ddk, infile)

                elif d == "WFK":
                    gs_task = dep.node
                    out_wfk = gs_task.outdir.has_abiext("WFK")
                    if not out_wfk:
                        raise RuntimeError("%s didn't produce the WFK file" % gs_task)

                    # Symlink only once: restarts may have created it already.
                    if not os.path.exists(self.indir.path_in("in_WFK")):
                        os.symlink(out_wfk, self.indir.path_in("in_WFK"))

                elif d == "DEN":
                    gs_task = dep.node
                    out_den = gs_task.outdir.has_abiext("DEN")
                    if not out_den:
                        # Fixed error message: it used to mention the WFK file.
                        raise RuntimeError("%s didn't produce the DEN file" % gs_task)

                    if not os.path.exists(self.indir.path_in("in_DEN")):
                        os.symlink(out_den, self.indir.path_in("in_DEN"))

                elif d == "1WF":
                    dfpt_task = dep.node
                    out_1wf = dfpt_task.outdir.has_abiext("1WF")
                    if not out_1wf:
                        raise RuntimeError("%s didn't produce the 1WF file" % dfpt_task)

                    # Keep the perturbation index carried by the parent's filename.
                    dest = self.indir.path_in("in_" + out_1wf.split("_")[-1])
                    if not os.path.exists(dest):
                        os.symlink(out_1wf, dest)

                elif d == "1DEN":
                    dfpt_task = dep.node
                    out_1den = dfpt_task.outdir.has_abiext("DEN")
                    if not out_1den:
                        # Fixed error message: it used to mention the 1WF file.
                        raise RuntimeError("%s didn't produce the 1DEN file" % dfpt_task)

                    # Keep the perturbation index carried by the parent's filename.
                    dest = self.indir.path_in("in_" + out_1den.split("_")[-1])
                    if not os.path.exists(dest):
                        os.symlink(out_1den, dest)

                else:
                    raise ValueError("Don't know how to handle extension: %s" % dep.exts)

    def get_results(self, **kwargs):
        # BUG FIX: the original called super(DdeTask, self) but DteTask is not a
        # subclass of DdeTask, so that call raised TypeError at runtime.
        results = super(DteTask, self).get_results(**kwargs)
        # NOTE(review): the file registered under the DDB key is looked up with
        # the DDE extension (mirrors DdeTask) — confirm this is intended.
        return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DdkTask(DfptTask):
    """Task for DDK calculations."""

    # RGB color used by the flow visualization tools for this task type.
    color_rgb = np.array((61, 158, 255)) / 255

    #@check_spectator
    def _on_ok(self):
        super(DdkTask, self)._on_ok()
        # Copy instead of removing, otherwise optic tests fail
        # Fixing this problem requires a rationalization of file extensions.
        #if self.outdir.rename_abiext('1WF', 'DDK') > 0:
        #if self.outdir.copy_abiext('1WF', 'DDK') > 0:
        # Expose the 1WF output under the DDK extension so that dependent tasks
        # can declare a {node: "DDK"} dependency.
        self.outdir.symlink_abiext('1WF', 'DDK')

    def get_results(self, **kwargs):
        results = super(DdkTask, self).get_results(**kwargs)
        return results.register_gridfs_file(DDK=(self.outdir.has_abiext("DDK"), "t"))
class BecTask(DfptTask):
    """
    Task for the calculation of Born effective charges.

    Expected dependencies (built by the caller)::

        bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        bec_deps.update({scf_task: "WFK"})
    """

    color_rgb = np.array((122, 122, 255)) / 255

    def make_links(self):
        """Replace the default behaviour of make_links.

        Symlink the DDK files of the parents (renamed to the 1WF name expected
        by Abinit) and the WFK file of the SCF parent in self.indir.
        Raises RuntimeError if a parent did not produce the expected file.
        """
        #print("In BEC make_links")
        for dep in self.deps:
            if dep.exts == ["DDK"]:
                ddk_task = dep.node
                out_ddk = ddk_task.outdir.has_abiext("DDK")
                if not out_ddk:
                    raise RuntimeError("%s didn't produce the DDK file" % ddk_task)

                # Get (fortran) idir and costruct the name of the 1WF expected by Abinit
                rfdir = list(ddk_task.input["rfdir"])
                if rfdir.count(1) != 1:
                    raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)

                # Fortran convention: idir is 1-based; the 1WF case index also
                # depends on the number of atoms in the structure.
                idir = rfdir.index(1) + 1
                ddk_case = idir + 3 * len(ddk_task.input.structure)

                infile = self.indir.path_in("in_1WF%d" % ddk_case)
                os.symlink(out_ddk, infile)

            elif dep.exts == ["WFK"]:
                gs_task = dep.node
                out_wfk = gs_task.outdir.has_abiext("WFK")
                if not out_wfk:
                    raise RuntimeError("%s didn't produce the WFK file" % gs_task)

                os.symlink(out_wfk, self.indir.path_in("in_WFK"))

            else:
                raise ValueError("Don't know how to handle extension: %s" % dep.exts)
class PhononTask(DfptTask):
    """
    DFPT calculations for a single atomic perturbation.
    Provide support for in-place restart via (1WF|1DEN) files
    """
    # TODO:
    # for the time being we don't discern between GS and PhononCalculations.
    CRITICAL_EVENTS = [
        events.ScfConvergenceWarning,
    ]

    color_rgb = np.array((0, 0, 255)) / 255

    def restart(self):
        """
        Phonon calculations can be restarted only if we have the 1WF file or the 1DEN file.
        from which we can read the first-order wavefunctions or the first order density.
        Prefer 1WF over 1DEN since we can reuse the wavefunctions.

        Raises:
            RestartError: if neither an unambiguous 1WF nor 1DEN file is found.
        """
        # Abinit adds the idir-ipert index at the end of the file and this breaks the extension
        # e.g. out_1WF4, out_DEN4. find_1wf_files and find_1den_files returns the list of files found
        restart_file, irdvars = None, None

        # Highest priority to the 1WF file because restart is more efficient.
        wf_files = self.outdir.find_1wf_files()
        if wf_files is not None:
            restart_file = wf_files[0].path
            irdvars = irdvars_for_ext("1WF")
            # More than one 1WF: we cannot tell which one to use -> discard.
            if len(wf_files) != 1:
                restart_file = None
                logger.critical("Found more than one 1WF file. Restart is ambiguous!")

        if restart_file is None:
            den_files = self.outdir.find_1den_files()
            if den_files is not None:
                restart_file = den_files[0].path
                irdvars = {"ird1den": 1}
                # Same ambiguity check for the first-order density files.
                if len(den_files) != 1:
                    restart_file = None
                    logger.critical("Found more than one 1DEN file. Restart is ambiguous!")

        if restart_file is None:
            # Raise because otherwise restart is equivalent to a run from scratch --> infinite loop!
            raise self.RestartError("%s: Cannot find the 1WF|1DEN file to restart from." % self)

        # Move file.
        self.history.info("Will restart from %s", restart_file)
        restart_file = self.out_to_in(restart_file)

        # Add the appropriate variable for restarting.
        self.set_vars(irdvars)

        # Now we can resubmit the job.
        return self._restart()

    def inspect(self, **kwargs):
        """
        Plot the Phonon SCF cycle results with matplotlib.

        Returns:
            `matplotlib` figure, None if some error occurred.
        """
        scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)
        if scf_cycle is not None:
            if "title" not in kwargs: kwargs["title"] = str(self)
            return scf_cycle.plot(**kwargs)

    def get_results(self, **kwargs):
        results = super(PhononTask, self).get_results(**kwargs)
        return results.register_gridfs_files(DDB=(self.outdir.has_abiext("DDB"), "t"))

    def make_links(self):
        super(PhononTask, self).make_links()
        # fix the problem that abinit uses the 1WF extension for the DDK output file but reads it with the irdddk flag
        #if self.indir.has_abiext('DDK'):
        #    self.indir.rename_abiext('DDK', '1WF')
class EphTask(AbinitTask):
    """
    Class for electron-phonon calculations.
    """
    # RGB color used by the flow visualization tools for this task type.
    color_rgb = np.array((255, 128, 0)) / 255
class ManyBodyTask(AbinitTask):
    """
    Base class for Many-body tasks (Screening, Sigma, Bethe-Salpeter)
    Mainly used to implement methods that are common to MBPT calculations with Abinit.

    .. warning::

        This class should not be instantiated directly.
    """
    def reduce_memory_demand(self):
        """
        Method that can be called by the scheduler to decrease the memory demand of a specific task.
        Returns True in case of success, False in case of Failure.
        """
        # The first digit governs the storage of W(q), the second digit the storage of u(r)
        # Try to avoid the storage of u(r) first since reading W(q) from file will lead to a drammatic slowdown.
        prev_gwmem = int(self.get_inpvar("gwmem", default=11))
        wq_digit, ur_digit = divmod(prev_gwmem, 10)

        if ur_digit == 1:
            # Drop the in-memory storage of u(r) first.
            self.set_vars(gwmem="%.2d" % (10 * wq_digit))
            return True

        if wq_digit == 1:
            # Then drop the in-memory storage of W(q) as well.
            self.set_vars(gwmem="%.2d" % 00)
            return True

        # gwmem 00 d'oh!
        return False
class ScrTask(ManyBodyTask):
    """Tasks for SCREENING calculations """

    color_rgb = np.array((255, 128, 0)) / 255

    #def inspect(self, **kwargs):
    #    """Plot graph showing the number of q-points computed and the wall-time used"""

    @property
    def scr_path(self):
        """Absolute path of the SCR file. Empty string if file is not present."""
        # Cached on the first successful lookup to avoid rescanning the directory.
        try:
            return self._scr_path
        except AttributeError:
            found = self.outdir.has_abiext("SCR.nc")
            if found:
                self._scr_path = found
            return found

    def open_scr(self):
        """
        Open the SCR.nc file located in the in self.outdir.
        Returns :class:`ScrFile` object, None if file could not be found or file is not readable.
        """
        path = self.scr_path
        if not path:
            logger.critical("%s didn't produce a SCR.nc file in %s" % (self, self.outdir))
            return None

        # Open the SCR file.
        from abipy.electrons.scr import ScrFile
        try:
            return ScrFile(path)
        except Exception as exc:
            logger.critical("Exception while reading SCR file at %s:\n%s" % (path, str(exc)))
            return None
class SigmaTask(ManyBodyTask):
    """
    Tasks for SIGMA calculations. Provides support for in-place restart via QPS files
    """
    CRITICAL_EVENTS = [
        events.QPSConvergenceWarning,
    ]

    color_rgb = np.array((0, 255, 0)) / 255

    def restart(self):
        """Restart the self-consistent GW run from the QPS file.

        Raises:
            RestartError: if no QPS file is found in self.outdir.
        """
        # G calculations can be restarted only if we have the QPS file
        # from which we can read the results of the previous step.
        ext = "QPS"
        restart_file = self.outdir.has_abiext(ext)
        if not restart_file:
            raise self.RestartError("%s: Cannot find the QPS file to restart from." % self)

        self.out_to_in(restart_file)

        # Add the appropriate variable for restarting.
        irdvars = irdvars_for_ext(ext)
        self.set_vars(irdvars)

        # Now we can resubmit the job.
        self.history.info("Will restart from %s", restart_file)
        return self._restart()

    #def inspect(self, **kwargs):
    #    """Plot graph showing the number of k-points computed and the wall-time used"""

    @property
    def sigres_path(self):
        """Absolute path of the SIGRES file. Empty string if file is not present."""
        # Lazy property to avoid multiple calls to has_abiext.
        try:
            return self._sigres_path
        except AttributeError:
            path = self.outdir.has_abiext("SIGRES")
            if path: self._sigres_path = path
            return path

    def open_sigres(self):
        """
        Open the SIGRES file located in the in self.outdir.
        Returns :class:`SigresFile` object, None if file could not be found or file is not readable.
        """
        sigres_path = self.sigres_path

        if not sigres_path:
            logger.critical("%s didn't produce a SIGRES file in %s" % (self, self.outdir))
            return None

        # Open the SIGRES file and add its data to results.out
        from abipy.electrons.gw import SigresFile
        try:
            return SigresFile(sigres_path)
        except Exception as exc:
            logger.critical("Exception while reading SIGRES file at %s:\n%s" % (sigres_path, str(exc)))
            return None

    def get_scissors_builder(self):
        """
        Returns an instance of :class:`ScissorsBuilder` from the SIGRES file.

        Raise:
            `RuntimeError` if SIGRES file is not found.
        """
        from abipy.electrons.scissors import ScissorsBuilder
        if self.sigres_path:
            return ScissorsBuilder.from_file(self.sigres_path)
        else:
            raise RuntimeError("Cannot find SIGRES file!")

    def get_results(self, **kwargs):
        results = super(SigmaTask, self).get_results(**kwargs)

        # Open the SIGRES file and add its data to results.out
        with self.open_sigres() as sigres:
            #results["out"].update(sigres.as_dict())
            results.register_gridfs_files(SIGRES=sigres.filepath)

        return results
class BseTask(ManyBodyTask):
    """
    Task for Bethe-Salpeter calculations.

    .. note::

        The BSE codes provides both iterative and direct schemes for the computation of the dielectric function.
        The direct diagonalization cannot be restarted whereas Haydock and CG support restarting.
    """
    CRITICAL_EVENTS = [
        events.HaydockConvergenceWarning,
        #events.BseIterativeDiagoConvergenceWarning,
    ]

    color_rgb = np.array((128, 0, 255)) / 255

    def restart(self):
        """
        BSE calculations with Haydock can be restarted only if we have the
        excitonic Hamiltonian and the HAYDR_SAVE file.

        Raises:
            RestartError: if the BSR|BSC files or the HAYDR_SAVE file cannot be found.
        """
        # TODO: This version seems to work but the main output file is truncated
        # TODO: Handle restart if CG method is used
        # TODO: restart should receive a list of critical events
        # the log file is complete though.
        irdvars = {}

        # Move the BSE blocks to indata.
        # This is done only once at the end of the first run.
        # Successive restarts will use the BSR|BSC files in the indir directory
        # to initialize the excitonic Hamiltonian
        count = 0
        for ext in ("BSR", "BSC"):
            ofile = self.outdir.has_abiext(ext)
            if ofile:
                count += 1
                irdvars.update(irdvars_for_ext(ext))
                self.out_to_in(ofile)

        if not count:
            # outdir does not contain the BSR|BSC file.
            # This means that num_restart > 1 and the files should be in task.indir
            count = 0
            for ext in ("BSR", "BSC"):
                ifile = self.indir.has_abiext(ext)
                if ifile:
                    count += 1

            if not count:
                raise self.RestartError("%s: Cannot find BSR|BSC files in %s" % (self, self.indir))

        # Rename HAYDR_SAVE files
        count = 0
        for ext in ("HAYDR_SAVE", "HAYDC_SAVE"):
            ofile = self.outdir.has_abiext(ext)
            if ofile:
                count += 1
                irdvars.update(irdvars_for_ext(ext))
                self.out_to_in(ofile)

        if not count:
            raise self.RestartError("%s: Cannot find the HAYDR_SAVE file to restart from." % self)

        # Add the appropriate variable for restarting.
        self.set_vars(irdvars)

        # Now we can resubmit the job.
        #self.history.info("Will restart from %s", restart_file)
        return self._restart()

    #def inspect(self, **kwargs):
    #    """
    #    Plot the Haydock iterations with matplotlib.
    #
    #    Returns
    #        `matplotlib` figure, None if some error occurred.
    #    """
    #    haydock_cycle = abiinspect.HaydockIterations.from_file(self.output_file.path)
    #    if haydock_cycle is not None:
    #        if "title" not in kwargs: kwargs["title"] = str(self)
    #        return haydock_cycle.plot(**kwargs)

    @property
    def mdf_path(self):
        """Absolute path of the MDF file. Empty string if file is not present."""
        # Lazy property to avoid multiple calls to has_abiext.
        try:
            return self._mdf_path
        except AttributeError:
            path = self.outdir.has_abiext("MDF.nc")
            if path: self._mdf_path = path
            return path

    def open_mdf(self):
        """
        Open the MDF file located in the in self.outdir.
        Returns :class:`MdfFile` object, None if file could not be found or file is not readable.
        """
        mdf_path = self.mdf_path
        if not mdf_path:
            logger.critical("%s didn't produce a MDF file in %s" % (self, self.outdir))
            return None

        # Open the DFF file and add its data to results.out
        from abipy.electrons.bse import MdfFile
        try:
            return MdfFile(mdf_path)
        except Exception as exc:
            logger.critical("Exception while reading MDF file at %s:\n%s" % (mdf_path, str(exc)))
            return None

    def get_results(self, **kwargs):
        results = super(BseTask, self).get_results(**kwargs)

        with self.open_mdf() as mdf:
            #results["out"].update(mdf.as_dict())
            #epsilon_infinity optical_gap
            results.register_gridfs_files(MDF=mdf.filepath)

        return results
class OpticTask(Task):
"""
Task for the computation of optical spectra with optic i.e.
RPA without local-field effects and velocity operator computed from DDK files.
"""
color_rgb = np.array((255, 204, 102)) / 255
def __init__(self, optic_input, nscf_node, ddk_nodes, workdir=None, manager=None):
"""
Create an instance of :class:`OpticTask` from an string containing the input.
Args:
optic_input: string with the optic variables (filepaths will be added at run time).
nscf_node: The NSCF task that will produce thw WFK file or string with the path of the WFK file.
ddk_nodes: List of :class:`DdkTask` nodes that will produce the DDK files or list of DDF paths.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
# Convert paths to FileNodes
self.nscf_node = Node.as_node(nscf_node)
self.ddk_nodes = [Node.as_node(n) for n in ddk_nodes]
assert len(ddk_nodes) == 3
#print(self.nscf_node, self.ddk_nodes)
# Use DDK extension instead of 1WF
deps = {n: "1WF" for n in self.ddk_nodes}
#deps = {n: "DDK" for n in self.ddk_nodes}
deps.update({self.nscf_node: "WFK"})
super(OpticTask, self).__init__(optic_input, workdir=workdir, manager=manager, deps=deps)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory of the task."""
super(OpticTask, self).set_workdir(workdir, chroot=chroot)
# Small hack: the log file of optics is actually the main output file.
self.output_file = self.log_file
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
Optic does not use `get` or `ird` variables hence we should never try
to change the input when we connect this task
"""
kwargs.update(dict(*args))
self.history.info("OpticTask intercepted set_vars with args %s" % kwargs)
if "autoparal" in kwargs: self.input.set_vars(autoparal=kwargs["autoparal"])
if "max_ncpus" in kwargs: self.input.set_vars(max_ncpus=kwargs["max_ncpus"])
@property
def executable(self):
"""Path to the executable required for running the :class:`OpticTask`."""
try:
return self._executable
except AttributeError:
return "optic"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
#optic.in ! Name of input file
#optic.out ! Unused
#optic ! Root name for all files that will be produced
app(self.input_file.path) # Path to the input file
app(os.path.join(self.workdir, "unused")) # Path to the output file
app(os.path.join(self.workdir, self.prefix.odata)) # Prefix for output data
return "\n".join(lines)
@property
def wfk_filepath(self):
"""Returns (at runtime) the absolute path of the WFK file produced by the NSCF run."""
return self.nscf_node.outdir.has_abiext("WFK")
@property
def ddk_filepaths(self):
"""Returns (at runtime) the absolute path of the DDK files produced by the DDK runs."""
return [ddk_task.outdir.has_abiext("1WF") for ddk_task in self.ddk_nodes]
def make_input(self):
"""Construct and write the input file of the calculation."""
# Set the file paths.
all_files ={"ddkfile_"+str(n+1) : ddk for n,ddk in enumerate(self.ddk_filepaths)}
all_files.update({"wfkfile" : self.wfk_filepath})
files_nml = {"FILES" : all_files}
files= nmltostring(files_nml)
# Get the input specified by the user
user_file = nmltostring(self.input.as_dict())
# Join them.
return files + user_file
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
Optic allows the user to specify the paths of the input file.
hence we don't need to create symbolic links.
"""
def get_results(self, **kwargs):
results = super(OpticTask, self).get_results(**kwargs)
#results.update(
#"epsilon_infinity":
#))
return results
def fix_abicritical(self):
"""
Cannot fix abicritical errors for optic
"""
return 0
#@check_spectator
def reset_from_scratch(self):
"""
restart from scratch, this is to be used if a job is restarted with more resources after a crash
"""
# Move output files produced in workdir to _reset otherwise check_status continues
# to see the task as crashed even if the job did not run
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy, first try to increase resources in order to fix the problem,
if this is not possible, call a task specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
if not self.queue_errors:
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
return
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
for error in self.queue_errors:
logger.info('fixing: %s' % str(error))
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
# ask the qadapter to provide more resources, i.e. more cpu's so more total memory if the code
# scales this should fix the memeory problem
# increase both max and min ncpu of the autoparalel and rerun autoparalel
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased ncps to solve memory problem')
return
except ManagerIncreaseError:
logger.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
logger.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
msg = ('Memory error detected but the memory could not be increased neigther could the\n'
'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
logger.warning('increasing the waltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
logger.warning('increase ncpus to speed up the calculation to stay in the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
msg = ('Time cancel error detected but the time could not be increased neither could\n'
'the time demand be decreased by speedup of increasing the number of cpus.\n'
'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the Optic task
This method can change the submission parameters e.g. the number of CPUs for MPI and OpenMp.
Returns 0 if success
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(list(autoparal_vars.keys()))
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
#optconf = self.find_optconf(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished auto paralell')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
#os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
class AnaddbTask(Task):
"""Task for Anaddb runs (post-processing of DFPT calculations)."""
color_rgb = np.array((204, 102, 255)) / 255
def __init__(self, anaddb_input, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Create an instance of :class:`AnaddbTask` from a string containing the input.
Args:
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept :class:`Task`, :class:`Work` or filepath.
md_node: The node that will produce the MD file (optional). Accept `Task`, `Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept `Task`, `Work` or filepath.
workdir: Path to the working directory (optional).
manager: :class:`TaskManager` object (optional).
"""
# Keep a reference to the nodes.
self.ddb_node = Node.as_node(ddb_node)
deps = {self.ddb_node: "DDB"}
self.gkk_node = Node.as_node(gkk_node)
if self.gkk_node is not None:
deps.update({self.gkk_node: "GKK"})
# I never used it!
self.md_node = Node.as_node(md_node)
if self.md_node is not None:
deps.update({self.md_node: "MD"})
self.ddk_node = Node.as_node(ddk_node)
if self.ddk_node is not None:
deps.update({self.ddk_node: "DDK"})
super(AnaddbTask, self).__init__(input=anaddb_input, workdir=workdir, manager=manager, deps=deps)
@classmethod
def temp_shell_task(cls, inp, ddb_node, mpi_procs=1,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
mpi_procs: Number of MPI processes to use.
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
return cls(inp, ddb_node,
gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
@property
def executable(self):
"""Path to the executable required for running the :class:`AnaddbTask`."""
try:
return self._executable
except AttributeError:
return "anaddb"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines)
@property
def ddb_filepath(self):
"""Returns (at runtime) the absolute path of the input DDB file."""
# This is not very elegant! A possible approach could to be path self.ddb_node.outdir!
if isinstance(self.ddb_node, FileNode): return self.ddb_node.filepath
path = self.ddb_node.outdir.has_abiext("DDB")
return path if path else "DDB_FILE_DOES_NOT_EXIST"
@property
def md_filepath(self):
"""Returns (at runtime) the absolute path of the input MD file."""
if self.md_node is None: return "MD_FILE_DOES_NOT_EXIST"
if isinstance(self.md_node, FileNode): return self.md_node.filepath
path = self.md_node.outdir.has_abiext("MD")
return path if path else "MD_FILE_DOES_NOT_EXIST"
@property
def gkk_filepath(self):
"""Returns (at runtime) the absolute path of the input GKK file."""
if self.gkk_node is None: return "GKK_FILE_DOES_NOT_EXIST"
if isinstance(self.gkk_node, FileNode): return self.gkk_node.filepath
path = self.gkk_node.outdir.has_abiext("GKK")
return path if path else "GKK_FILE_DOES_NOT_EXIST"
@property
def ddk_filepath(self):
"""Returns (at runtime) the absolute path of the input DKK file."""
if self.ddk_node is None: return "DDK_FILE_DOES_NOT_EXIST"
if isinstance(self.ddk_node, FileNode): return self.ddk_node.filepath
path = self.ddk_node.outdir.has_abiext("DDK")
return path if path else "DDK_FILE_DOES_NOT_EXIST"
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
Anaddb allows the user to specify the paths of the input file.
hence we don't need to create symbolic links.
"""
def open_phbst(self):
"""Open PHBST file produced by Anaddb and returns :class:`PhbstFile` object."""
from abipy.dfpt.phonons import PhbstFile
phbst_path = os.path.join(self.workdir, "run.abo_PHBST.nc")
if not phbst_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhbstFile(phbst_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phbst_path, str(exc)))
return None
def open_phdos(self):
"""Open PHDOS file produced by Anaddb and returns :class:`PhdosFile` object."""
from abipy.dfpt.phonons import PhdosFile
phdos_path = os.path.join(self.workdir, "run.abo_PHDOS.nc")
if not phdos_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhdosFile(phdos_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phdos_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(AnaddbTask, self).get_results(**kwargs)
return results
| mit |
roofit-dev/parallel-roofit-scripts | profiling/vincemark/analyze_d.py | 1 | 12033 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Patrick Bos
# @Date: 2016-11-16 16:23:55
# @Last Modified by: E. G. Patrick Bos
# @Last Modified time: 2017-06-29 15:46:35
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pathlib import Path
import itertools
import load_timing
pd.set_option("display.width", None)
def savefig(factorplot, fp):
try:
g.savefig(fp)
print("saved figure using pathlib.Path, apparently mpl is now pep 519 compatible! https://github.com/matplotlib/matplotlib/pull/8481")
except TypeError:
g.savefig(fp.__str__())
"""
cd ~/projects/apcocsm/code/profiling/vincemark && rsync --progress --include='*/' --include='*/*/' --include='timing*.json' --exclude='*' -zavr nikhef:project_atlas/apcocsm_code/profiling/vincemark/vincemark_d ./ && cd -
"""
basepath = Path.home() / 'projects/apcocsm/code/profiling/vincemark/vincemark_d'
savefig_dn = basepath / 'analysis'
savefig_dn.mkdir(parents=True, exist_ok=True)
#### LOAD DATA FROM FILES
fpgloblist = [basepath.glob('%i.allier.nikhef.nl/*.json' % i)
for i in range(18553834, 18553917)]
# for i in itertools.chain(range(18445438, 18445581),
# range(18366732, 18367027))]
drop_meta = ['parallel_interleave', 'seed', 'print_level', 'timing_flag',
'optConst', 'workspace_filepath', 'time_num_ints']
skip_on_match = ['timing_RRMPFE_serverloop_p*.json', # skip timing_flag 8 output (contains no data)
]
if Path('df_numints.hdf').exists():
skip_on_match.append('timings_numInts.json')
dfs_sp, dfs_mp_sl, dfs_mp_ma = load_timing.load_dfs_coresplit(fpgloblist, skip_on_match=skip_on_match, drop_meta=drop_meta)
# #### TOTAL TIMINGS (flag 1)
df_totals_real = pd.concat([dfs_sp['full_minimize'], dfs_mp_ma['full_minimize']])
# ### ADD IDEAL TIMING BASED ON SINGLE CORE RUNS
df_totals_ideal = load_timing.estimate_ideal_timing(df_totals_real, groupby=['N_events', 'segment',
'N_chans', 'N_nuisance_parameters', 'N_bins'],
time_col='walltime_s')
df_totals = load_timing.combine_ideal_and_real(df_totals_real, df_totals_ideal)
# remove summed timings, they show nothing new
df_totals = df_totals[df_totals.segment != 'migrad+hesse+minos']
# # add combination of two categories
# df_totals['timeNIs/Nevents'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_events.astype(str)
# df_totals['timeNIs/Nbins'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_bins.astype(str)
# df_totals['timeNIs/Nnps'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_nuisance_parameters.astype(str)
# df_totals['timeNIs/Nchans'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_chans.astype(str)
#### ANALYSIS
# full timings
# g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_bins', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row')
# plt.subplots_adjust(top=0.93)
# g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos')
# savefig(g, savefig_dn / f'total_timing.png')
g = sns.factorplot(x='N_bins', y='walltime_s', col='num_cpu', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row', order=range(1,1001))
plt.subplots_adjust(top=0.93)
g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos')
savefig(g, savefig_dn / f'total_timing_vs_bins.png')
g = sns.factorplot(x='N_chans', y='walltime_s', col='num_cpu', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row')
plt.subplots_adjust(top=0.93)
g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos')
savefig(g, savefig_dn / f'total_timing_vs_chans.png')
# make a plot per unique combination of parameters (looping is too complicated, since the combination space is sparse)
# # https://stackoverflow.com/a/35268906/1199693
# # for name, group in df_totals.groupby([]):
# for chans in df_totals.N_chans.unique():
# for events in df_totals.N_events.unique():
# for nps in df_totals.N_nuisance_parameters.unique():
# data = df_totals[(df_totals.N_chans == chans) & (df_totals.N_events == events) & (df_totals.N_nuisance_parameters == nps)]
# if len(data) > 0:
# g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_bins', hue='timing_type', row='segment', estimator=np.min, data=data, legend_out=False, sharey='row')
# plt.subplots_adjust(top=0.93)
# g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos --- N_channels = {chans}, N_events = {events}, N_nps = {nps}')
# savefig(g, savefig_dn / f'total_timing_chan{chans}_event{events}_np{nps}.png')
print("Something is not going right with the numerical integral added iteration columns... are they structured the way I thought at all?")
raise SystemExit
#### NUMERICAL INTEGRAL TIMINGS
if not Path('df_numints.hdf').exists():
df_numints = dfs_mp_sl['numInts']
df_numints.to_hdf('df_numints.hdf', 'vincemark_a_numint_timings')
else:
print("loading numerical integral timings from HDF file...")
df_numints = pd.read_hdf('df_numints.hdf', 'vincemark_a_numint_timings')
print("...done")
load_timing.add_iteration_column(df_numints)
df_numints_min_by_iteration = df_numints.groupby('iteration').min()
df_numints_max_by_iteration = df_numints.groupby('iteration').max()
"""
#### RooRealMPFE TIMINGS
### MPFE evaluate @ client (single core) (flags 5 and 6)
mpfe_eval = pd.concat([v for k, v in dfs_mp_ma.items() if 'wall_RRMPFE_evaluate_client' in k] +
[v for k, v in dfs_mp_ma.items() if 'cpu_RRMPFE_evaluate_client' in k])
### add MPFE evaluate full timings (flag 4)
mpfe_eval_full = pd.concat([v for k, v in dfs_mp_ma.items() if 'RRMPFE_evaluate_full' in k])
mpfe_eval_full.rename(columns={'RRMPFE_evaluate_wall_s': 'time s'}, inplace=True)
mpfe_eval_full['cpu/wall'] = 'wall+INLINE'
mpfe_eval_full['segment'] = 'all'
mpfe_eval = mpfe_eval.append(mpfe_eval_full)
### total time per run (== per pid, but the other columns are also grouped-by to prevent from summing over them)
mpfe_eval_total = mpfe_eval.groupby(['pid', 'N_events', 'num_cpu', 'cpu/wall', 'segment', 'force_num_int'], as_index=False).sum()
#### ADD mpfe_eval COLUMN OF CPU_ID, ***PROBABLY***, WHICH SEEMS TO EXPLAIN DIFFERENT TIMINGS QUITE WELL
mpfe_eval_cpu_split = pd.DataFrame(columns=mpfe_eval.columns)
for num_cpu in range(2, 9):
mpfe_eval_num_cpu = mpfe_eval[(mpfe_eval.segment == 'all') * (mpfe_eval.num_cpu == num_cpu)]
mpfe_eval_num_cpu['cpu_id'] = None
for cpu_id in range(num_cpu):
mpfe_eval_num_cpu.iloc[cpu_id::num_cpu, mpfe_eval_num_cpu.columns.get_loc('cpu_id')] = cpu_id
mpfe_eval_cpu_split = mpfe_eval_cpu_split.append(mpfe_eval_num_cpu)
mpfe_eval_cpu_split_total = mpfe_eval_cpu_split.groupby(['pid', 'N_events', 'num_cpu', 'cpu/wall', 'segment', 'cpu_id', 'force_num_int'], as_index=False).sum()
### MPFE calculate
mpfe_calc = pd.concat([v for k, v in dfs_mp_ma.items() if 'RRMPFE_calculate_initialize' in k])
mpfe_calc.rename(columns={'RRMPFE_calculate_initialize_wall_s': 'walltime s'}, inplace=True)
mpfe_calc_total = mpfe_calc.groupby(['pid', 'N_events', 'num_cpu', 'force_num_int'], as_index=False).sum()
#### RooAbsTestStatistic TIMINGS
### RATS evaluate full (flag 2)
rats_eval_sp = dfs_sp['RATS_evaluate_full'].dropna()
rats_eval_ma = dfs_mp_ma['RATS_evaluate_full'].dropna()
# rats_eval_sl is not really a multi-process result, it is just the single process runs (the ppid output in RooFit is now set to -1 if it is not really a slave, for later runs)
# rats_eval_sl = dfs_mp_sl['RATS_evaluate_full'].dropna()
rats_eval = pd.concat([rats_eval_sp, rats_eval_ma])
rats_eval_total = rats_eval.groupby(['pid', 'N_events', 'num_cpu', 'mode', 'force_num_int'], as_index=False).sum()
### RATS evaluate per CPU iteration (multi-process only) (flag 3)
rats_eval_itcpu = rats_eval_itcpu_ma = dfs_mp_ma['RATS_evaluate_mpmaster_perCPU'].copy()
rats_eval_itcpu.rename(columns={'RATS_evaluate_mpmaster_it_wall_s': 'walltime s'}, inplace=True)
# rats_eval_itcpu is counted in the master process, the slaves do nothing (the ppid output is now removed from RooFit, for later runs)
# rats_eval_itcpu_sl = dfs_mp_sl['RATS_evaluate_mpmaster_perCPU']
rats_eval_itcpu_total = rats_eval_itcpu.groupby(['pid', 'N_events', 'num_cpu', 'it_nr', 'force_num_int'], as_index=False).sum()
"""
#### ANALYSIS
"""
# RATS evaluate full times
g = sns.factorplot(x='num_cpu', y='RATS_evaluate_wall_s', col='N_events', hue='mode', row='force_num_int', estimator=np.min, data=rats_eval_total, legend_out=False, sharey=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of all calls to RATS::evaluate()')
savefig(g, savefig_dn / 'rats_eval.png')
# RATS evaluate itX times
g = sns.factorplot(x='num_cpu', y='walltime s', hue='it_nr', col='N_events', row='force_num_int', estimator=np.min, data=rats_eval_itcpu_total, legend_out=False, sharey=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of the iterations of the main for-loop in RATS::evaluate()')
savefig(g, savefig_dn / 'rats_eval_itcpu.png')
# MPFE evaluate timings (including "collect" time)
for segment in mpfe_eval_total.segment.unique():
g = sns.factorplot(x='num_cpu', y='time s', hue='cpu/wall', col='N_events', row='force_num_int', estimator=np.min, data=mpfe_eval_total[mpfe_eval_total.segment == segment], legend_out=False, sharey=False)
plt.subplots_adjust(top=0.95)
g.fig.suptitle('total timings of all calls to RRMPFE::evaluate(); "COLLECT"')
savefig(g, savefig_dn / f'mpfe_eval_{segment}.png')
# ... split by cpu id
g = sns.factorplot(x='num_cpu', y='time s', hue='cpu_id', col='N_events', row='force_num_int', estimator=np.min, data=mpfe_eval_cpu_split_total[(mpfe_eval_cpu_split_total['cpu/wall'] == 'wall')], legend_out=False, sharey=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of all calls to RRMPFE::evaluate(); only wallclock and only all-segment timings')
savefig(g, savefig_dn / f'mpfe_eval_cpu_split.png')
# MPFE calculate timings ("dispatch" time)
g = sns.factorplot(x='num_cpu', y='walltime s', col='N_events', row='force_num_int', sharey='row', estimator=np.min, data=mpfe_calc_total, legend_out=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of all calls to RRMPFE::calculate(); "DISPATCH"')
savefig(g, savefig_dn / 'mpfe_calc.png')
"""
# numerical integrals
g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.min, data=df_numints, legend_out=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('wallclock timing of all timed numerical integrals --- minima of all integrations per plotted factor --- vertical bars: variation in different runs and iterations')
savefig(g, savefig_dn / 'numInts_min.png')
g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.max, data=df_numints, legend_out=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('wallclock timing of all timed numerical integrals --- maxima of all integrations per plotted factor --- vertical bars: variation in different runs and iterations')
savefig(g, savefig_dn / 'numInts_max.png')
g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.sum, data=df_numints_max_by_iteration, legend_out=False)
plt.subplots_adjust(top=0.8)
g.fig.suptitle('wallclock timing of all timed numerical integrals --- sum of maximum of each iteration per run $\sum_{\mathrm{it}} \max_{\mathrm{core}}(t_{\mathrm{run,it,core}})$ --- vertical bars: variation in different runs')
savefig(g, savefig_dn / 'numInts_it_sum_max.png')
plt.show()
| apache-2.0 |
leggitta/mne-python | mne/viz/tests/test_evoked.py | 2 | 4306 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import io, read_events, Epochs, pick_types, read_cov
from mne.viz.utils import _fake_click
from mne.utils import slow_test, run_tests_if_main
from mne.channels import read_layout
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
picks[0] = 2 # make sure we have a magnetometer
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
epochs.info['bads'] = [epochs.ch_names[-1]]
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
@slow_test
def test_plot_evoked():
    """Test plotting of evoked data: butterfly plot, interactive projector
    toggling, image plot, topo plot and whitened plots.
    """
    import matplotlib.pyplot as plt
    evoked = _get_epochs().average()
    # Record warnings so deprecation/convergence noise does not fail the test.
    with warnings.catch_warnings(record=True):
        fig = evoked.plot(proj=True, hline=[1], exclude=[])
        # Test a click
        ax = fig.get_axes()[0]
        line = ax.lines[0]
        # Simulate clicks on a data point and on an axes corner.
        _fake_click(fig, ax,
                    [line.get_xdata()[0], line.get_ydata()[0]], 'data')
        _fake_click(fig, ax,
                    [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
        # plot with bad channels excluded
        evoked.plot(exclude='bads')
        evoked.plot(exclude=evoked.info['bads'])  # does the same thing
        # test selective updating of dict keys is working.
        evoked.plot(hline=[1], units=dict(mag='femto foo'))
        evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
        evoked_delayed_ssp.plot(proj='interactive')
        evoked_delayed_ssp.apply_proj()
        # Interactive proj is invalid once projectors are applied / removed.
        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
                      proj='interactive')
        evoked_delayed_ssp.info['projs'] = []
        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
                      proj='interactive')
        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
                      proj='interactive', axes='foo')
        evoked.plot_image(proj=True)
        # plot with bad channels excluded
        evoked.plot_image(exclude='bads')
        evoked.plot_image(exclude=evoked.info['bads'])  # does the same thing
        plt.close('all')
        evoked.plot_topo()  # should auto-find layout
        plt.close('all')
        cov = read_cov(cov_fname)
        cov['method'] = 'empirical'
        # Whitened plots: single covariance and a list of covariances.
        evoked.plot_white(cov)
        evoked.plot_white([cov, cov])
        # Hack to test plotting of maxfiltered data
        evoked_sss = evoked.copy()
        evoked_sss.info['proc_history'] = [dict(max_info=None)]
        evoked_sss.plot_white(cov)
        evoked_sss.plot_white(cov_fname)
        plt.close('all')
run_tests_if_main()
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 28 | 10792 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    """Check shape, stored values, implicit zeros, mutability and labels of
    the classification fixture."""
    X, y = load_svmlight_file(datafile)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)
    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)
    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    """Loading via an OS-level file descriptor must match path loading."""
    # test loading from file descriptor
    X1, y1 = load_svmlight_file(datafile)
    fd = os.open(datafile, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
        assert_array_equal(X1.data, X2.data)
        assert_array_equal(y1, y2)
    finally:
        # Always release the descriptor, even if an assertion failed.
        os.close(fd)
def test_load_svmlight_file_multilabel():
    """Multilabel targets come back as one tuple of labels per sample."""
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
    """Loading the same file several times must give identical data and
    honour the requested dtype for every returned matrix."""
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)
    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    """n_features may widen the matrix but must reject a too-small value."""
    X, y = load_svmlight_file(datafile, n_features=22)
    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)
    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """Loading gzip- and bzip2-compressed copies must match the plain file.

    Fix: the compressed writer handles were never closed, so the gzip/bz2
    trailer was only flushed by CPython's refcounting garbage collector —
    on other interpreters the read-back could see a truncated stream.
    The handles are now closed deterministically via ``with``.
    """
    X, y = load_svmlight_file(datafile)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            # Close the gzip handle so the stream is fully flushed to disk
            # before we try to load it back.
            with gzip.open(tmp.name, "wb") as fout:
                shutil.copyfileobj(f, fout)
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            with BZ2File(tmp.name, "wb") as fout:
                shutil.copyfileobj(f, fout)
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    """A malformed input file must raise ValueError."""
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    """Feature indices given out of ascending order must raise ValueError."""
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    """zero_based=False must reject a file that contains feature id 0."""
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    """zero_based='auto' infers the indexing convention; loading several
    files together must reconcile it across all of them."""
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")
    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))
    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    # query_id=False drops the qid column; query_id=True returns it.
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    """One malformed file in a batch must fail the whole batch."""
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    """A non-existent path must raise IOError."""
    load_svmlight_file("trou pic nic douille")
def test_dump():
    """Round-trip dump/load for dense, sparse and sliced-sparse inputs,
    for both indexing conventions and several dtypes."""
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()
    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]
    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in("scikit-learn %s" % sklearn.__version__, comment)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                # The header must state which indexing convention was used.
                assert_in(["one", "zero"][zero_based] + "-based", comment)
                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)
                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_concise():
    """The writer must emit the shortest exact decimal representation."""
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    """ASCII and unicode comments round-trip; raw UTF-8 bytes and NUL
    characters are rejected."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    # Undecoded byte strings are refused as comments.
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    f = BytesIO()
    # NUL bytes could truncate the file for C consumers, so they are banned.
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """2-D targets and mismatched X/y lengths must be rejected."""
    X, y = load_svmlight_file(datafile)
    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    # test dumping a file with query_id
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
fabiolapozyk/IncrementalFCM | Tesi/FCM/old-fcm.py | 2 | 4479 | '''
Created on 07/mag/2014
@author: Fabio
'''
'''
Created on 03/mar/2014
@author:Sonya
'''
import numpy
import matplotlib.pyplot as plt
from numpy.random.mtrand import np
import pylab as pl
import random
from sklearn import datasets
from sklearn.decomposition import PCA
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/cmeans.py
# Fuzzy C-Means algorithm
################################################################################
# Doc string, reStructuredText formatted:
################################################################################
from numpy import dot, array, sum, zeros, outer, any
################################################################################
# Fuzzy C-Means class
################################################################################
class FuzzyCMeans(object):
    """Fuzzy C-Means clustering (adapted from the Peach library).

    Parameters
    ----------
    training_set : array-like, shape (N, n)
        The data set Y, one sample per row.
    c : int
        Number of clusters.
    m : float, optional (default 2.)
        Fuzziness coefficient (must be > 1).

    Fixes applied to the original code:
    * ``step()`` used ``** 2.) ** 1 / 2.`` which, by operator precedence,
      computed half the squared-change sum instead of its square root.
    * ``initMembership`` used ``9 / c`` and a float accumulator as
      ``random.randint`` bounds, which raises TypeError under Python 3;
      integer arithmetic is now used (with a float literal for the final
      division so the result is also correct under Python 2).
    """
    def __init__(self, training_set, c, m=2.):
        # x is the (N, n) data matrix.
        self.__x = array(training_set)
        # mu is the (N, c) membership matrix U, randomly initialised so
        # that every row sums to 1.
        self.__mu = array(self.initMembership(self.__x.shape[0], c))
        # m is the fuzziness coefficient.
        self.m = m
        # c holds the (c, n) matrix of cluster centroids.
        self.__c = self.centers()
    def __getc(self):
        return self.__c
    def __setc(self, c):
        self.__c = array(c).reshape(self.__c.shape)
    c = property(__getc, __setc)
    def __getmu(self):
        return self.__mu
    mu = property(__getmu, None)
    def __getx(self):
        return self.__x
    x = property(__getx, None)
    def centers(self):
        """Recompute and return the centroids from the current memberships."""
        mm = self.__mu ** self.m
        c = dot(self.__x.T, mm) / sum(mm, axis=0)
        self.__c = c.T
        return self.__c
    def membership(self):
        """Recompute and return the membership matrix from the centroids."""
        x = self.__x
        c = self.__c
        M, _ = x.shape
        C, _ = c.shape
        r = zeros((M, C))
        m1 = 1. / (self.m - 1.)
        for k in range(M):
            den = sum((x[k] - c) ** 2., axis=1)
            if any(den == 0):
                # A sample coincides with a centroid: keep the previous
                # memberships to avoid a division by zero.
                return self.__mu
            frac = outer(den, 1. / den) ** m1
            r[k, :] = 1. / sum(frac, axis=1)
        self.__mu = r
        return self.__mu
    def __call__(self, emax=0.001, imax=30):
        """Iterate until the membership change drops below emax, or imax
        iterations have been performed; return the centroids."""
        error = 1.
        i = 0
        while error > emax and i < imax:
            error = self.step()
            i = i + 1
        print("Numero di iterazioni eseguite: " + str(i))
        return self.c
    def step(self):
        """One update step; return the Euclidean norm of the mu change."""
        old = self.__mu
        self.membership()
        self.centers()
        # ** 0.5 (square root), not "** 1 / 2." which halved the sum.
        return sum((self.__mu - old) ** 2.) ** 0.5
    def initMembership(self, n, c):
        """Random (n, c) membership matrix whose rows each sum to 1."""
        u = zeros((n, c))
        if(c != 0):
            for i in range(n):
                somma = 0
                # Floor division: randint requires integer bounds on Py3.
                numCasuale = random.randint(0, (9 // c) + 1)
                for j in range(c):
                    if(j != c - 1):
                        u[i, j] = round(numCasuale / 10., 2)
                        somma = somma + numCasuale
                        numCasuale = random.randint(0, 9 - somma)
                    else:
                        # Last column absorbs the remainder so rows sum to 1.
                        u[i, j] = round((10 - somma) / 10., 2)
        return u
    def getClusters(self):
        """Hard assignment: index of the most-likely cluster per sample."""
        return np.argmax(self.__mu, axis=1)
################################################################################
# Test.
if __name__ == "__main__":
    # Smoke test: cluster five 2-D points into three fuzzy clusters and
    # show the data together with the found centroids.
    matrix=array(
    [[1 , 3 ],
    [1 , 5 ],
    [1 , 7 ],
    [5 , 3 ],
    [10, 11]]);
    p1 = FuzzyCMeans(matrix, 3)
    centr = p1()
    memberShip = p1.mu
    print("Matrice di membership:\n")
    for i in range(memberShip.shape[0]):
        t = []
        for j in range(memberShip.shape[1]):
            t.append(round(memberShip[i, j] , 3))
        print(t)
    # Data points in the default colour, centroids in red.
    plt.scatter(matrix[:, 0], matrix[:, 1], )
    plt.scatter(centr[:, 0], centr[:, 1], c='Red')
    plt.show()
| cc0-1.0 |
NunoEdgarGub1/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seeks an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
# Spherical -> Cartesian coordinates for the retained points.
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
    np.sin(t[indices]) * np.sin(p[indices]), \
    np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
# Use n_samples rather than a hard-coded 1000 so the title stays correct
# if the sample size is changed above.
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
    # compatibility matplotlib < 1.0 (narrowed from a bare except)
    ax.view_init(40, -10)
except Exception:
    pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
    t0 = time()
    trans_data = manifold\
        .LocallyLinearEmbedding(n_neighbors, 2,
                                method=method).fit_transform(sphere_data).T
    t1 = time()
    print("%s: %.2g sec" % (methods[i], t1 - t0))
    # Panels 2-5 of the 2x5 grid, one per LLE variant.
    ax = fig.add_subplot(252 + i)
    plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
    plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
    .fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
# Panel 7 of the 2x5 grid.
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
# Panel 8 of the 2x5 grid.
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
                                n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
# Panel 9 of the 2x5 grid.
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
# BUG FIX: panel 10 of a 2x5 grid cannot be written as a 3-digit integer;
# add_subplot(250) means (2, 5, 0) and index 0 is invalid in matplotlib.
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# (estimator class, constructor kwargs) pairs exercised by every test below.
ESTIMATORS = [
    (label_propagation.LabelPropagation, {'kernel': 'rbf'}),
    (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
    (label_propagation.LabelSpreading, {'kernel': 'rbf'}),
    (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
    """The unlabeled (-1) third sample must be transduced to class 1."""
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
    """The unlabeled point equidistant from both classes must end up with
    a ~[.5, .5] label distribution (rbf kernel only)."""
    samples = [[1., 0.], [0., 1.], [1., 1.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            # unstable test; changes in k-NN ordering break it
            # (the original left an unreachable assert after this continue)
            continue
        assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                  np.array([.5, .5]), 2)
def test_predict():
    """A point near the unlabeled sample must be predicted as class 1."""
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
    """A point equidistant from both classes must get ~[.5, .5] proba."""
    samples = [[1., 0.], [0., 1.], [1., 2.5]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
                                  np.array([[0.5, 0.5]]))
| bsd-3-clause |
SPJ-AI/lesson | training_python/mlp_text.py | 1 | 3129 | # -*- coding: utf-8 -*-
#! /usr/bin/python
import MeCab # TokenizerとしてMeCabを使用
from sklearn.neural_network import MLPClassifier
mecab = MeCab.Tagger("-Ochasen") # instantiate MeCab (used as the tokenizer)
f = open('text.tsv') # load the training file
lines = f.readlines()
words = [] # vocabulary: every distinct token surface form seen
count = 0
dict = {} # text -> category pairs (NOTE: shadows the built-in dict)
for line in lines:
    count += 1
    if count == 1:
        continue # skip the header row
    split = line.split("\t")
    if len(split) < 2:
        continue
    dict[split[0].strip()] = split[1].strip() # store the text:category pair
    tokens = mecab.parse(split[0].strip()) # morphological analysis of the text
    token = tokens.split("\n")
    for ele in token:
        element = ele.split("\t")
        surface = element[0] # surface form of the token
        if surface == "EOS":
            break
        if surface not in words:
            words.append(surface) # remember unseen surface forms
f.close()
#print(words) # inspect the vocabulary
data_array = [] # vectorised (bag-of-words) training data
target_array = [] # vectorised ground-truth labels
category_array = [] # distinct target categories, without duplicates
for category in dict.values():
    if category not in category_array:
        category_array.append(category)
for text in dict.keys():
    print(text)
    entry_array = [0] * len(words) # zero count vector over the vocabulary
    target_array.append(category_array.index(dict[text])) # store the category index
    tokens = mecab.parse(text) # morphological analysis of the text
    token = tokens.split("\n")
    for ele in token:
        element = ele.split("\t")
        surface = element[0] # surface form of the token
        if surface == "EOS":
            break
        try:
            index = words.index(surface)
            entry_array[index] += 1
        except Exception as e:
            print(str(e))
            continue
    data_array.append(entry_array)
print(data_array)
print(category_array)
print(target_array)
# use a multi-layer perceptron as the learning algorithm
clf = MLPClassifier(max_iter=50,hidden_layer_sizes=(100,))
clf.fit(data_array, target_array) # train on the whole training set
query = "人工知能は人間を近々凌駕する"
query_array = [0] * len(words) # vectorised query
tokens = mecab.parse(query) # morphological analysis of the query
token = tokens.split("\n")
for ele in token:
    element = ele.split("\t")
    surface = element[0] # surface form of the token
    if surface == "EOS":
        break
    try:
        index = words.index(surface)
        query_array[index] += 1
    except Exception as e:
        print(str(e))
        continue
print(query_array)
# NOTE(review): modern scikit-learn expects a 2-D array here, i.e.
# clf.predict([query_array]); a bare 1-D list only worked on old versions.
res = clf.predict(query_array) # predict the category of the query
print(res)
print(category_array[res[0]]) | gpl-3.0 |
lawrencejones/neuro | Exercise_4/neuro/Plotters.py | 1 | 1860 | import matplotlib.pyplot as plt
import numpy as np
def plot_connectivity_matrix(CIJ):
    """
    Scatter-plot the positions of every 1-entry of a connectivity matrix.
    """
    rows, cols = np.where(CIJ == 1)
    # Fix the axes to the matrix dimensions before plotting the points.
    plt.axis([0, len(CIJ), 0, len(CIJ[0])])
    plt.scatter(rows, cols)
    return plt
def plot_module_mean_firing_rate(layer, no_of_modules, resolution=None):
    """
    Plots the mean firing
    no_of_modules -- # of modules to run mean firing rate for
    resolution -- [sample_every_n_steps, window_size_of_sample]
    NOTE(review): resolution has no usable default -- calling without it
    raises TypeError on the unpack below; confirm callers always pass it.
    NOTE(review): the divisions below rely on Python 2 integer semantics.
    """
    n_steps, window_size = resolution
    window_buffer = window_size / 2
    max_spike_time = np.max(layer.firings[:, 0])
    # Round the plot duration up to the next multiple of 100 ms.
    duration = 100 * (1 + max_spike_time / 100)
    firings = layer.firings
    # Sample times, keeping a half-window margin at both ends.
    sampling_ts = range(window_buffer, duration - window_buffer, n_steps)
    firing_rates = np.zeros((len(sampling_ts), no_of_modules))
    module_size = layer.N / no_of_modules
    for i, t in enumerate(sampling_ts):
        # Keep only the spikes inside the window centred on t.
        firings_after_start = firings[firings[:, 0] > t - window_buffer]
        firings_in_window = firings_after_start[firings_after_start[:, 0] < t + window_buffer]
        for module_index, module_base in enumerate(range(0, layer.N, module_size)):
            # Count window entries whose value falls in this module's range.
            firings_from_module = np.where(np.logical_and(
                firings_in_window >= module_base,
                firings_in_window < module_base + module_size))[0]
            firing_rates[i][module_index] = len(firings_from_module)
    plt.ylabel('Mean firing rate')
    plt.xlabel('Time (ms) + 0s')
    plt.plot(sampling_ts, firing_rates)
    return plt
def plot_firings(layer, duration):
    """
    Raster plot of the firing events of every neuron in the given layer.
    """
    spike_times = layer.firings[:, 0]
    neuron_ids = layer.firings[:, 1]
    plt.scatter(spike_times, neuron_ids + 1, marker='.')
    plt.xlim(0, duration)
    plt.ylim(0, layer.N + 1)
    plt.xlabel('Time (ms) + 0s')
    plt.ylabel('Neuron number')
    return plt
| gpl-3.0 |
nhuntwalker/astroML | book_figures/chapter2/fig_sort_scaling.py | 3 | 2889 | """
Sort Algorithm Scaling
----------------------
Figure 2.2.
The scaling of the quicksort algorithm. Plotted for comparison are
lines showing O(N) and O(N log N) scaling. The quicksort algorithm falls along
the O(N log N) line, as expected.
"""
# Author: Jake VanderPlas
# License: BSD
#   The figure produced by this code is published in the textbook
#   "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
#   For more information, see http://astroML.github.com
#   To report a bug or issue, use the following forum:
#    https://groups.google.com/forum/#!forum/astroml-general
from time import time
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX.  This may
# result in an error if LaTeX is not installed on your system.  In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Compute the execution times as a function of array size
# time quick-sort of a numpy array
N_npy = 10 ** np.linspace(5, 7, 10)
time_npy = np.zeros_like(N_npy)
for i in range(len(N_npy)):
    # Array generation happens outside the timed region.
    x = np.random.random(int(N_npy[i]))
    t0 = time()
    x.sort(kind='quicksort')
    t1 = time()
    time_npy[i] = t1 - t0
# time built-in sort of python list
N_list = N_npy[:-3]
time_list = np.zeros_like(N_list)
for i in range(len(N_list)):
    x = list(np.random.random(int(N_list[i])))
    t0 = time()
    x.sort()
    t1 = time()
    time_list[i] = t1 - t0
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(bottom=0.15)
ax = plt.axes(xscale='log', yscale='log')
ax.grid()
# plot the observed times
ax.plot(N_list, time_list, 'sk', color='gray', ms=5, label='list sort')
ax.plot(N_npy, time_npy, 'ok', color='gray', ms=5, label='NumPy sort')
# plot the expected scalings, anchored to the first measured point
scale = np.linspace(N_npy[0] / 2, N_npy[-1] * 2, 100)
scaling_N = scale * time_npy[0] / N_npy[0]
scaling_NlogN = (scale * np.log2(scale) * time_npy[0]
                 / N_npy[0] / np.log2(N_npy[0]))
ax.plot(scale, scaling_NlogN, '--k', label=r'$\mathcal{O}[N \log N]$')
ax.plot(scale, scaling_N, ':k', label=r'$\mathcal{O}[N]$')
scaling_N = scale * time_list[0] / N_list[0]
scaling_NlogN = (scale * np.log2(scale) * time_list[0]
                 / N_list[0] / np.log2(N_list[0]))
ax.plot(scale, scaling_NlogN, '--k')
ax.plot(scale, scaling_N, ':k')
# Create titles and labels
ax.set_title("Scaling of Sort Algorithms")
ax.set_xlabel('Length of Array')
ax.set_ylabel('Relative sort time')
plt.legend(loc='upper left')
ax.set_xlim(scale[0], scale[-1])
plt.show()
| bsd-2-clause |
CDSFinance/zipline | user_scripts/dma.py | 1 | 1162 | from zipline.api import order_target, record, symbol, history, add_history
def initialize(context):
    """Zipline setup hook: register the price histories used by the
    dual-moving-average strategy."""
    # Register 2 histories that track daily prices,
    # one with a 100 window and one with a 300 day window
    add_history(100, '1d', 'price')
    add_history(300, '1d', 'price')
    context.i = 0  # bar counter used to wait for a full 300-day window
def handle_data(context, data):
    """Dual moving average crossover: hold 100 shares of AAPL while the
    100-day mean is above the 300-day mean, go flat when it drops below."""
    print context.portfolio.portfolio_value  # leftover debug output (Python 2)
    # Skip first 300 days to get full windows
    context.i += 1
    if context.i < 300:
        return
    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(100, '1d', 'price').mean()
    long_mavg = history(300, '1d', 'price').mean()
    sym = symbol('AAPL')
    # Trading logic
    if short_mavg[sym] > long_mavg[sym]:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(sym, 100)
    elif short_mavg[sym] < long_mavg[sym]:
        order_target(sym, 0)
    # Save values for later inspection
    record(AAPL=data[sym].price,
           short_mavg=short_mavg[sym],
           long_mavg=long_mavg[sym])
| apache-2.0 |
zooniverse/aggregation | experimental/condor/condorIBCC_3.py | 2 | 5894 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
import random
import datetime
import bisect
def run_ibcc(t):
    """Generate the pyIBCC config module for data set *t* ('gold' or
    'sample'), delete any stale output files, then run IBCC on it."""
    with open(base_directory+"/Databases/condor_ibcc_"+t+".py","wb") as f:
        f.write("import numpy as np\n")
        f.write("scores = np.array([0,1])\n")
        f.write("nScores = len(scores)\n")
        f.write("nClasses = 2\n")
        f.write("inputFile = \""+base_directory+"/Databases/condor_ibcc_"+t+".csv\"\n")
        f.write("outputFile = \""+base_directory+"/Databases/condor_ibcc_"+t+".out\"\n")
        f.write("confMatFile = \""+base_directory+"/Databases/condor_ibcc_"+t+".mat\"\n")
        # Priors: class proportions nu0 and confusion-matrix prior alpha0.
        f.write("nu0 = np.array([30,70])\n")
        f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
    #start by removing all temp files
    try:
        os.remove(base_directory+"/Databases/condor_ibcc_"+t+".out")
    except OSError:
        pass
    try:
        os.remove(base_directory+"/Databases/condor_ibcc_"+t+".mat")
    except OSError:
        pass
    try:
        os.remove(base_directory+"/Databases/condor_ibcc_"+t+".csv.dat")
    except OSError:
        pass
    ibcc.runIbcc(base_directory+"/Databases/condor_ibcc_"+t+".py")
def index(a, x):
    'Locate the leftmost value exactly equal to x'
    pos = bisect.bisect_left(a, x)
    if pos < len(a) and a[pos] == x:
        return pos
    raise ValueError
if os.path.exists("/home/ggdhines"):
    base_directory = "/home/ggdhines"
else:
    base_directory = "/home/greg"
sys.path.append(base_directory+"/github/pyIBCC/python")
import ibcc
client = pymongo.MongoClient()
db = client['condor_2014-11-10']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
classification_record = {}
# CSV headers for the two IBCC input files (user, subject, label).
f_gold = open(base_directory+"/Databases/condor_ibcc_gold.csv","wb")
f_gold.write("a,b,c\n")
f_sample = open(base_directory+"/Databases/condor_ibcc_sample.csv","wb")
f_sample.write("a,b,c\n")
#start by finding all subjects which received at least two classifications before Sept 15th
for classification in classification_collection.find():
    if classification["created_at"] >= datetime.datetime(2014,9,15):
        break
    if classification["subjects"] == []:
        continue
    zooniverse_id = classification["subjects"][0]["zooniverse_id"]
    if not("user_name" in classification):
        continue
    user_ip = classification["user_ip"]
    #make sure this user's annotations have animal types associated with them
    try:
        mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
        markings = classification["annotations"][mark_index].values()[0]
        found_condor = "0"
        for animal in markings.values():
            try:
                animal_type = animal["animal"]
            except KeyError:
                continue
            if animal_type == "condor":
                found_condor = "1"
                break
        #if we got this far, the user does have animal types associated with their markings
        if not(zooniverse_id in classification_record):
            classification_record[zooniverse_id] = [user_ip]
        else:
            classification_record[zooniverse_id].append(user_ip)
    except ValueError:
        pass
# NOTE(review): deleting while iterating keys() is only safe under
# Python 2, where keys() returns a list snapshot.
for zooniverse_id in classification_record.keys():
    if len(classification_record[zooniverse_id]) <= 2:
        del classification_record[zooniverse_id]
to_sample_from = [k for k in classification_record if len(classification_record[k]) >= 2]
print len(to_sample_from)
gold_users = []
sample_users = []
for subject_index,zooniverse_id in enumerate(random.sample(to_sample_from,500)):
    #choose 2 classifications at random
    sampling = random.sample(classification_record[zooniverse_id],3)
    already_done = []
    #["subjects"][0]["zooniverse_id"]
    for classification in classification_collection.find({"subjects.zooniverse_id":zooniverse_id}):
        if classification["created_at"] >= datetime.datetime(2014,9,15):
            continue
        user_ip = classification["user_ip"]
        # Only count each user's first classification of the subject.
        if user_ip in already_done:
            continue
        else:
            already_done.append(user_ip)
        #user_index = index(ip_listing,user_ip)
        #check to see if there are any markings
        try:
            mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
            markings = classification["annotations"][mark_index].values()[0]
        except ValueError:
            continue
        found_condor = "0"
        for animal in markings.values():
            try:
                animal_type = animal["animal"]
            except KeyError:
                continue
            if animal_type == "condor":
                found_condor = "1"
                break
        if user_ip in sampling:
            if not(user_ip in sample_users):
                sample_users.append(user_ip)
            f_sample.write(str(sample_users.index(user_ip))+","+str(subject_index)+","+found_condor+"\n")
        #else:
        if not(user_ip in gold_users):
            gold_users.append(user_ip)
        f_gold.write(str(gold_users.index(user_ip))+","+str(subject_index)+","+found_condor+"\n")
#gold standard first
f_gold.close()
run_ibcc("gold")
f_sample.close()
run_ibcc("sample")
# Compare the per-subject posteriors of the two IBCC runs line by line.
f = open(base_directory+"/Databases/condor_ibcc_gold.out","r")
f2 = open(base_directory+"/Databases/condor_ibcc_sample.out","r")
X = []
Y = []
while True:
    line = f.readline()
    line2 = f2.readline()
    if not line:
        break
    words = line[:-1].split(" ")
    s1 = int(float(words[0]))
    p1 = float(words[2])
    X.append(p1)
    words = line2[:-1].split(" ")
    s1 = int(float(words[0]))
    p1 = float(words[2])
    Y.append(p1)
plt.plot(X,Y,'.')
plt.show()
| apache-2.0 |
smdabdoub/phylotoast | bin/PCoA.py | 2 | 9658 | #!/usr/bin/env python
import argparse
from collections import OrderedDict
import itertools
import sys
from phylotoast import util, graph_util as gu
errors = []
try:
from palettable.colorbrewer.qualitative import Set3_12
except ImportError as ie:
errors.append("No module named palettable")
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
except ImportError as ie:
errors.append(ie)
if len(errors) != 0:
for item in errors:
print("Import Error:", item)
sys.exit()
def handle_program_options():
    """Parse and return the command-line arguments for the PCoA plot script.

    Returns an argparse.Namespace with the plotting options (input file
    paths, grouping category, 2D/3D choice, axis limits, styling, etc.).
    """
    parser = argparse.ArgumentParser(description="Create a 2D or 3D PCoA plot. By default"
                                     ", this script opens a window with the plot "
                                     "displayed if you want to change certain aspects of "
                                     "the plot (such as rotate the view in 3D mode). If "
                                     "the -o option is specified, the plot will be saved "
                                     "directly to an image without the initial display "
                                     "window.")
    parser.add_argument("-i", "--coord_fp", required=True,
                        help="Input principal coordinates filepath (i.e. resulting file "
                             "from principal_coordinates.py) [REQUIRED].")
    parser.add_argument("-m", "--map_fp", required=True,
                        help="Input metadata mapping filepath [REQUIRED].")
    parser.add_argument("-g", "--group_by", required=True,
                        help="Any mapping categories, such as treatment type, that will "
                             "be used to group the data in the output iTol table. For example,"
                             " one category with three types will result in three data columns"
                             " in the final output. Two categories with three types each will "
                             "result in six data columns. Default is no categories and all the"
                             " data will be treated as a single group.")
    parser.add_argument("-d", "--dimensions", default=2, type=int, choices=[2, 3],
                        help="Choose whether to plot 2D or 3D.")
    parser.add_argument("-c", "--colors", default=None,
                        help="A column name in the mapping file containing hexadecimal "
                             "(#FF0000) color values that will be used to color the groups. "
                             "Each sample ID must have a color entry.")
    parser.add_argument("-s", "--point_size", default=100, type=int,
                        help="Specify the size of the circles representing each of the "
                             "samples in the plot")
    parser.add_argument("--pc_order", default=[1, 2], type=int, nargs=2,
                        help="Choose which Principle Coordinates are displayed and in "
                             "which order, for example: 1 2. This option is only used when a "
                             "2D plot is specified.")
    parser.add_argument("--x_limits", type=float, nargs=2,
                        help="Specify limits for the x-axis instead of automatic setting "
                             "based on the data range. Should take the form: --x_limits -0.5 "
                             "0.5")
    parser.add_argument("--y_limits", type=float, nargs=2,
                        help="Specify limits for the y-axis instead of automatic setting "
                             "based on the data range. Should take the form: --y_limits -0.5 "
                             "0.5")
    parser.add_argument("--z_limits", type=float, nargs=2,
                        help="Specify limits for the z-axis instead of automatic setting "
                             "based on the data range. Should take the form: --z_limits -0.5 "
                             "0.5")
    parser.add_argument("--z_angles", type=float, nargs=2, default=[-134.5, 23.],
                        help="Specify the azimuth and elevation angles for a 3D plot.")
    parser.add_argument("-t", "--title", default="", help="Title of the plot.")
    parser.add_argument("--figsize", default=[14, 8], type=int, nargs=2,
                        help="Specify the 'width height' in inches for PCoA plots. "
                             "Default figure size is 14x8 inches")
    parser.add_argument("--font_size", default=12, type=int,
                        help="Sets the font size for text elements in the plot.")
    parser.add_argument("--label_padding", default=15, type=int,
                        help="Sets the spacing in points between the each axis and its "
                             "label.")
    parser.add_argument("--annotate_points", action="store_true",
                        help="If specified, each graphed point will be labeled with its "
                             "sample ID.")
    parser.add_argument("--ggplot2_style", action="store_true",
                        help="Apply ggplot2 styling to the figure.")
    parser.add_argument("-o", "--out_fp", default=None,
                        help="The path and file name to save the plot under. If specified"
                             ", the figure will be saved directly instead of opening a window "
                             "in which the plot can be viewed before saving.")
    return parser.parse_args()
def main():
    """Drive the PCoA plot: parse args, load inputs, group samples, plot.

    Reads principal-coordinate and mapping files, buckets samples by the
    requested mapping category, then renders a 2D or 3D scatter plot.
    """
    args = handle_program_options()
    # fail fast with a friendly message if either input file is unreadable
    try:
        with open(args.coord_fp):
            pass
    except IOError as ioe:
        err_msg = "\nError in input principal coordinates filepath (-i): {}\n"
        sys.exit(err_msg.format(ioe))
    try:
        with open(args.map_fp):
            pass
    except IOError as ioe:
        err_msg = "\nError in input metadata mapping filepath (-m): {}\n"
        sys.exit(err_msg.format(ioe))
    with open(args.coord_fp) as F:
        pcd = F.readlines()
    pcd = [line.split("\t") for line in pcd]
    map_header, imap = util.parse_map_file(args.map_fp)
    data_gather = util.gather_categories(imap, map_header,
                                         args.group_by.split(","))
    categories = OrderedDict([(condition, {"pc1": [], "pc2": [], "pc3": []})
                              for condition in data_gather.keys()])
    bcolors = itertools.cycle(Set3_12.hex_colors)
    if not args.colors:
        # use the next() builtin instead of the Python-2-only .next() method
        # so this works on both Python 2 and 3
        colors = [next(bcolors) for _ in categories]
    else:
        colors = util.color_mapping(imap, map_header,
                                    args.group_by, args.colors)
        # materialize: dict.values() is a non-indexable view on Python 3
        colors = list(colors.values())
    parsed_unifrac = util.parse_unifrac(args.coord_fp)
    pco = args.pc_order
    if args.dimensions == 3:
        pco.append(3)
    pc1v = parsed_unifrac["varexp"][pco[0] - 1]
    pc2v = parsed_unifrac["varexp"][pco[1] - 1]
    if args.dimensions == 3:
        pc3v = parsed_unifrac["varexp"][pco[2] - 1]
    # bucket each sample's coordinates under its mapping category
    for sid, points in parsed_unifrac["pcd"].items():
        for condition, dc in data_gather.items():
            if sid in dc.sids:
                cat = condition
                break
        categories[cat]["pc1"].append((sid, points[pco[0] - 1]))
        categories[cat]["pc2"].append((sid, points[pco[1] - 1]))
        if args.dimensions == 3:
            categories[cat]["pc3"].append((sid, points[pco[2] - 1]))
    axis_str = "PC{} (Percent Explained Variance {:.3f}%)"
    # initialize plot
    fig = plt.figure(figsize=args.figsize)
    if args.dimensions == 3:
        ax = fig.add_subplot(111, projection="3d")
        ax.view_init(elev=args.z_angles[1], azim=args.z_angles[0])
        ax.set_zlabel(axis_str.format(3, pc3v), labelpad=args.label_padding)
        if args.z_limits:
            ax.set_zlim(args.z_limits)
    else:
        ax = fig.add_subplot(111)
    # plot data
    for i, cat in enumerate(categories):
        if args.dimensions == 3:
            ax.scatter(xs=[e[1] for e in categories[cat]["pc1"]],
                       ys=[e[1] for e in categories[cat]["pc2"]],
                       zs=[e[1] for e in categories[cat]["pc3"]],
                       zdir="z", c=colors[i], s=args.point_size, label=cat,
                       edgecolors="k")
        else:
            ax.scatter([e[1] for e in categories[cat]["pc1"]],
                       [e[1] for e in categories[cat]["pc2"]],
                       c=colors[i], s=args.point_size, label=cat, edgecolors="k")
        # Script to annotate PCoA sample points.
        if args.annotate_points:
            for x, y in zip(categories[cat]["pc1"], categories[cat]["pc2"]):
                ax.annotate(
                    x[0], xy=(x[1], y[1]), xytext=(-10, -15),
                    textcoords="offset points", ha="center", va="center",
                )
    # customize plot options
    if args.x_limits:
        ax.set_xlim(args.x_limits)
    if args.y_limits:
        ax.set_ylim(args.y_limits)
    ax.set_xlabel(axis_str.format(pco[0], float(pc1v)), labelpad=args.label_padding)
    ax.set_ylabel(axis_str.format(pco[1], float(pc2v)), labelpad=args.label_padding)
    leg = plt.legend(loc="best", scatterpoints=3, frameon=True, framealpha=1)
    leg.get_frame().set_edgecolor('k')
    # Set the font characteristics
    font = {"family": "normal", "weight": "bold", "size": args.font_size}
    mpl.rc("font", **font)
    if args.title:
        ax.set_title(args.title)
    if args.ggplot2_style and not args.dimensions == 3:
        gu.ggplot2_style(ax)
    # save or display result
    if args.out_fp:
        fig.savefig(args.out_fp, facecolor="white", edgecolor="none", bbox_inches="tight",
                    pad_inches=0.2)
    else:
        plt.show()


if __name__ == "__main__":
    sys.exit(main())
| mit |
Sterncat/opticspy | opticspy/mplot3d/proj3d.py | 9 | 7006 | #!/usr/bin/python
# 3dproj.py
#
"""
Various transforms used for by the 3D code
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle
import numpy as np
import numpy.linalg as linalg
def line2d(p0, p1):
    """
    Return 2D equation of line in the form ax+by+c = 0

    The coefficients satisfy a*x + b*y + c == 0 for every point (x, y)
    on the line through p0 and p1 (only the first two components of each
    point are used).
    """
    x0, y0 = p0[:2]
    x1, y1 = p1[:2]
    if x0 == x1:
        # vertical line: -x + x1 = 0
        a = -1
        b = 0
        c = x1
    elif y0 == y1:
        # horizontal line: y - y1 = 0
        a = 0
        b = 1
        c = -y1
    else:
        # general case; b must be (x1 - x0): the previous (x0 - x1) sign
        # produced an equation that the defining points did not satisfy
        a = (y0 - y1)
        b = (x1 - x0)
        c = (x0 * y1 - x1 * y0)
    return a, b, c
def line2d_dist(l, p):
    """
    Distance from line to point
    line is a tuple of coefficients a,b,c
    """
    a, b, c = l
    px, py = p
    numerator = abs(a * px + b * py + c)
    denominator = np.sqrt(a ** 2 + b ** 2)
    return numerator / denominator
def line2d_seg_dist(p1, p2, p0):
    """distance(s) from line defined by p1 - p2 to point(s) p0

    p0[0] = x(s)
    p0[1] = y(s)

    intersection point p = p1 + u*(p2-p1)
    and intersection point lies within segment if u is between 0 and 1
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    rx = np.asarray(p0[0]) - p1[0]
    ry = np.asarray(p0[1]) - p1[1]
    # projection parameter onto the segment, clipped to [0, 1] so the
    # distance is measured to the segment rather than the infinite line
    u = (rx * dx + ry * dy) / float(abs(dx ** 2 + dy ** 2))
    u = np.clip(u, 0, 1)
    return np.sqrt((rx - u * dx) ** 2 + (ry - u * dy) ** 2)
def test_lines_dists():
    """Visual check: draw circles whose radii are segment distances."""
    import pylab
    ax = pylab.gca()
    # the reference segment
    xs, ys = (0,30), (20,150)
    pylab.plot(xs, ys)
    points = list(zip(xs, ys))
    p0, p1 = points
    # sample points; each gets a circle of radius equal to its distance
    xs, ys = (0,0,20,30), (100,150,30,200)
    pylab.scatter(xs, ys)
    dist = line2d_seg_dist(p0, p1, (xs[0], ys[0]))
    dist = line2d_seg_dist(p0, p1, np.array((xs, ys)))
    for x, y, d in zip(xs, ys, dist):
        c = Circle((x, y), d, fill=0)
        ax.add_patch(c)
    pylab.xlim(-200, 200)
    pylab.ylim(-200, 200)
    pylab.show()
def mod(v):
    """3d vector length"""
    x, y, z = v[0], v[1], v[2]
    return np.sqrt(x * x + y * y + z * z)
def world_transformation(xmin, xmax,
                         ymin, ymax,
                         zmin, zmax):
    """Return the 4x4 affine matrix mapping the world box to the unit cube."""
    M = np.zeros((4, 4))
    for i, (lo, hi) in enumerate(((xmin, xmax), (ymin, ymax), (zmin, zmax))):
        span = hi - lo
        M[i, i] = 1.0 / span        # scale each axis to unit length
        M[i, 3] = -lo / span        # translate the lower bound to 0
    M[3, 3] = 1.0
    return M
def test_world():
    """Smoke test: print the world transform for a sample bounding box."""
    xmin, xmax = 100, 120
    ymin, ymax = -100, 100
    zmin, zmax = 0.1, 0.2
    M = world_transformation(xmin, xmax, ymin, ymax, zmin, zmax)
    print(M)
def view_transformation(E, R, V):
    """Return the 4x4 look-at matrix for eye E, reference point R, up vector V.

    Builds an orthonormal camera basis (u, v, n) from the viewing direction
    and composes rotation (Mr) with translation to the eye point (Mt).
    """
    n = (E - R)
    ## new
    # n /= mod(n)
    # u = np.cross(V,n)
    # u /= mod(u)
    # v = np.cross(n,u)
    # Mr = np.diag([1.]*4)
    # Mt = np.diag([1.]*4)
    # Mr[:3,:3] = u,v,n
    # Mt[:3,-1] = -E
    ## end new
    ## old
    n = n / mod(n)
    u = np.cross(V, n)
    u = u / mod(u)
    v = np.cross(n, u)
    # rotation: rows are the camera basis vectors
    Mr = [[u[0],u[1],u[2],0],
          [v[0],v[1],v[2],0],
          [n[0],n[1],n[2],0],
          [0, 0, 0, 1],
          ]
    #
    # translation: move the eye to the origin
    Mt = [[1, 0, 0, -E[0]],
          [0, 1, 0, -E[1]],
          [0, 0, 1, -E[2]],
          [0, 0, 0, 1]]
    ## end old
    return np.dot(Mr, Mt)
def persp_transformation(zfront, zback):
    """Return the 4x4 perspective projection matrix for the given z planes."""
    depth = zfront - zback
    a = (zfront + zback) / depth
    b = -2 * (zfront * zback) / depth
    return np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, a, b],
                     [0, 0, -1, 0]])
def proj_transform_vec(vec, M):
    """Project homogeneous column vector(s) by M and dehomogenize.

    Returns the (x, y, z) components divided by the resulting w; no
    clipping is performed here.
    """
    projected = np.dot(M, vec)
    w = projected[3]
    # clip here..
    return projected[0] / w, projected[1] / w, projected[2] / w
def proj_transform_vec_clip(vec, M):
    """Project homogeneous vector(s) by M and return a visibility mask.

    Returns (txs, tys, tzs, tis) where tis flags points considered inside
    the view volume.
    """
    vecw = np.dot(M, vec)
    w = vecw[3]
    # clip here..
    txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
    # initial test: pre-divide x and y both inside [0, 1]
    tis = (vecw[0] >= 0) * (vecw[0] <= 1) * (vecw[1] >= 0) * (vecw[1] <= 1)
    # np.sometrue was removed in NumPy 2.0; ndarray.any() is the equivalent.
    # NOTE(review): when any point passes, the mask is replaced by the much
    # looser test vecw[1] < 1 -- kept as-is to preserve existing behavior.
    if tis.any():
        tis = vecw[1] < 1
    return txs, tys, tzs, tis
def inv_transform(xs, ys, zs, M):
    """Apply the inverse of projection matrix M to coordinates xs, ys, zs."""
    iM = linalg.inv(M)
    vec = vec_pad_ones(xs, ys, zs)
    vecr = np.dot(iM, vec)
    try:
        # dehomogenize; on overflow return the raw (unscaled) result
        vecr = vecr/vecr[3]
    except OverflowError:
        pass
    return vecr[0], vecr[1], vecr[2]
def vec_pad_ones(xs, ys, zs):
    """Stack xs, ys, zs into homogeneous form with a trailing row/entry of ones.

    Accepts ndarrays, plain sequences, or scalars.
    """
    try:
        try:
            # ndarray input: pad with a matching row of ones
            padded = np.array([xs, ys, zs, np.ones(xs.shape)])
        except (AttributeError, TypeError):
            # plain sequence input (list/tuple)
            padded = np.array([xs, ys, zs, np.ones((len(xs)))])
    except TypeError:
        # scalar input
        padded = np.array([xs, ys, zs, 1])
    return padded
def proj_transform(xs, ys, zs, M):
    """
    Transform the points by the projection matrix
    """
    # pad to homogeneous coordinates, then project and dehomogenize
    vec = vec_pad_ones(xs, ys, zs)
    return proj_transform_vec(vec, M)
def proj_transform_clip(xs, ys, zs, M):
    """
    Transform the points by the projection matrix
    and return the clipping result
    returns txs,tys,tzs,tis
    """
    vec = vec_pad_ones(xs, ys, zs)
    return proj_transform_vec_clip(vec, M)
# backwards-compatible module-level alias for proj_transform
transform = proj_transform
def proj_points(points, M):
    # project a sequence of (x, y, z) points and regroup into per-point tuples
    return list(zip(*proj_trans_points(points, M)))
def proj_trans_points(points, M):
    # unzip the point sequence into coordinate tuples before projecting
    xs, ys, zs = list(zip(*points))
    return proj_transform(xs, ys, zs, M)
def proj_trans_clip_points(points, M):
    # like proj_trans_points, but also returns the clipping mask
    xs, ys, zs = list(zip(*points))
    return proj_transform_clip(xs, ys, zs, M)
def test_proj_draw_axes(M, s=1):
    """Draw the projected origin and x/y/z axes (length s) under matrix M."""
    import pylab
    xs, ys, zs = [0, s, 0, 0], [0, 0, s, 0], [0, 0, 0, s]
    txs, tys, tzs = proj_transform(xs, ys, zs, M)
    # projected origin and axis endpoints
    o, ax, ay, az = (txs[0], tys[0]), (txs[1], tys[1]), \
        (txs[2], tys[2]), (txs[3], tys[3])
    lines = [(o, ax), (o, ay), (o, az)]
    ax = pylab.gca()
    linec = LineCollection(lines)
    ax.add_collection(linec)
    for x, y, t in zip(txs, tys, ['o', 'x', 'y', 'z']):
        pylab.text(x, y, t)
def test_proj_make_M(E=None):
    """Build a sample projection matrix (perspective * view) for testing.

    E: optional eye-point array; defaults to [1000, -1000, 2000].
    """
    # eye point
    # NOTE: "E = E or default" raises "truth value is ambiguous" when E is
    # a multi-element ndarray, so test explicitly against None instead.
    if E is None:
        E = np.array([1, -1, 2]) * 1000
    #E = np.array([20,10,20])
    R = np.array([1, 1, 1]) * 100
    V = np.array([0, 0, 1])
    viewM = view_transformation(E, R, V)
    perspM = persp_transformation(100, -100)
    M = np.dot(perspM, viewM)
    return M
def test_proj():
    """Visual test: project a labelled unit cube and its inverse transform."""
    import pylab
    M = test_proj_make_M()
    # vertex labels: bottom face 0-3 then top face 4-7 (closed loops)
    ts = ['%d' % i for i in [0,1,2,3,0,4,5,6,7,4]]
    xs, ys, zs = [0,1,1,0,0, 0,1,1,0,0], [0,0,1,1,0, 0,0,1,1,0], \
        [0,0,0,0,0, 1,1,1,1,1]
    xs, ys, zs = [np.array(v)*300 for v in (xs, ys, zs)]
    #
    test_proj_draw_axes(M, s=400)
    txs, tys, tzs = proj_transform(xs, ys, zs, M)
    # round-trip through the inverse transform (not plotted, sanity only)
    ixs, iys, izs = inv_transform(txs, tys, tzs, M)
    pylab.scatter(txs, tys, c=tzs)
    pylab.plot(txs, tys, c='r')
    for x, y, t in zip(txs, tys, ts):
        pylab.text(x, y, t)
    pylab.xlim(-0.2, 0.2)
    pylab.ylim(-0.2, 0.2)
    pylab.show()
def rot_x(V, alpha):
    """Rotate homogeneous vector V about the x-axis by alpha radians.

    Note: the last matrix row is all zeros, so the w component of the
    result is always 0 (matrix kept exactly as in the original).
    """
    c = np.cos(alpha)
    s = np.sin(alpha)
    rot = np.array([[1, 0, 0, 0],
                    [0, c, -s, 0],
                    [0, s, c, 0],
                    [0, 0, 0, 0]])
    return np.dot(rot, V)
def test_rot():
    """Smoke test: print 30-degree x-axis rotations of two basis vectors."""
    V = [1,0,0,1]
    print(rot_x(V, np.pi/6))
    V = [0,1,0,1]
    print(rot_x(V, np.pi/6))
if __name__ == "__main__":
test_proj()
| mit |
plissonf/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
# NOTE(review): sp.misc.lena was removed from SciPy (0.17+); newer versions
# of this example use a different sample image -- verify SciPy version.
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    labels = labels.reshape(lena.shape)
    plt.figure(figsize=(5, 5))
    plt.imshow(lena, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): "contours" is not a documented matplotlib contour()
        # keyword (likely intended as the positional levels argument) -- verify
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
    plt.xticks(())
    plt.yticks(())
    plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
fdns/TSDB-benchmarks | presenter/src/main.py | 1 | 5277 | import matplotlib.pyplot as plt
from loader import load
import logging
def graph_cpu_usage_average(data, label, testname, fig=None, index=0):
    """Plot a 20-sample moving average of per-interval CPU time vs. time.

    data: dict with a 'stats' list of {'timestamp', 'cpu', ...} samples.
    label: legend label for this series; testname: used in the title.
    fig: figure to draw into (created when None); index: unused here.
    Returns the matplotlib figure.
    """
    if fig is None:
        fig = plt.figure()
    data = data['stats']
    plt.figure(fig.number)
    plt.title('{}: Tiempo de CPU utilizado promedio Vs Tiempo'.format(testname))
    plt.xlabel('Tiempo desde inicio de mediciones [minutos]')
    plt.ylabel('Tiempo de CPU utilizado promedio [segundos]')
    baset = data[0]['timestamp']
    time = [(x['timestamp'] - baset)/60 for x in data]
    # Translate to a diff array
    # cpu counter appears cumulative in 1/100 s units (divided by 100) -- verify
    diffs = []
    for i in range(1, len(data)):
        diffs.append((data[i]['cpu'] - data[i-1]['cpu'])/100.)
    time = time[1:]
    # Calculating a moving average
    moving = []
    result = []
    for x in diffs:
        moving.append(x)
        if len(moving) > 20:
            moving.pop(0)
        result.append(sum(moving)/len(moving))
    # Calculate the trend
    #import numpy
    #z = numpy.polyfit(time, result, 1)
    #p = numpy.poly1d(z)
    #plt.plot(time, p(time), label=label)
    plt.plot(time, result, label=label)
    plt.legend()
    return fig
def graph_cpu_usuage(data, label, testname, fig=None, index=0):
    """Plot cumulative CPU time since the first sample vs. elapsed time.

    Returns the matplotlib figure (created when *fig* is None).
    """
    if fig is None:
        fig = plt.figure()
    data = data['stats']
    plt.figure(fig.number)
    plt.title('{}: Tiempo de CPU utilizado Vs Tiempo'.format(testname))
    plt.xlabel('Tiempo desde inicio de mediciones [minutos]')
    plt.ylabel('Tiempo de CPU utilizado desde inicio de las mediciones [segundos]')
    baset = data[0]['timestamp']
    base_cpu = data[0]['cpu']
    # timestamps: seconds -> minutes; cpu counter divided by 100 -> seconds
    plt.plot([(x['timestamp'] - baset)/60 for x in data], [(x['cpu'] - base_cpu)/100 for x in data], label=label)
    plt.legend()
    return fig
def graph_disk_usuage(data, label, testname, fig=None, index=0):
    """Plot secondary-storage usage (megabytes) vs. elapsed time.

    Returns the matplotlib figure (created when *fig* is None).
    """
    if fig is None:
        fig = plt.figure()
    data = data['stats']
    plt.figure(fig.number)
    plt.title('{}: Espacio utilizado en memoria secundaria Vs Tiempo'.format(testname))
    plt.xlabel('Tiempo desde inicio de mediciones [minutos]')
    plt.ylabel('Espacio utilizado en memoria secundaria [megabytes]')
    base = data[0]['timestamp']
    # disk values in bytes -> megabytes
    plt.plot([(x['timestamp'] - base)/60 for x in data], [(x['disk'])/1024/1024 for x in data], label=label)
    plt.legend()
    return fig
def graph_memory_usuage(data, label, testname, fig=None, index=0):
    """Plot primary-memory usage (megabytes) vs. elapsed time.

    Returns the matplotlib figure (created when *fig* is None).
    """
    if fig is None:
        fig = plt.figure()
    data = data['stats']
    plt.figure(fig.number)
    plt.title('{}: Espacio utilizado en memoria primaria Vs Tiempo'.format(testname))
    plt.xlabel('Tiempo desde inicio de mediciones [minutos]')
    plt.ylabel('Espacio utilizado en memoria primaria [megabytes]')
    base = data[0]['timestamp']
    # memory values in bytes -> megabytes
    plt.plot([(x['timestamp'] - base)/60 for x in data], [(x['memory'])/1024/1024 for x in data], label=label)
    plt.legend()
    return fig
def graph_query_time(data, label, testname, fig=None, index=0):
    """Plot query duration vs. elapsed time.

    data['query'] is a list of (timestamp, duration) pairs.
    Returns the matplotlib figure (created when *fig* is None).
    """
    if fig is None:
        fig = plt.figure()
    data = data['query']
    plt.figure(fig.number)
    plt.title('{}: Tiempo de consulta Vs Tiempo'.format(testname))
    plt.xlabel('Tiempo desde inicio de mediciones [minutos]')
    plt.ylabel('Tiempo utilizado en obtener los datos [minutos]')
    base = data[0][0]
    plt.plot([(x[0]-base)/60 for x in data], [x[1] for x in data], label=label)
    plt.legend()
    return fig
# testname -> list of series labels already drawn as bars; mutated across
# calls by graph_bar_query_time to position bars and their x-axis ticks
LABELS={}
def graph_bar_query_time(data, label, testname, fig=None, index=0):
    """Bar-plot the mean query time over the window +4h..+6h after start.

    One bar per database label; returns the matplotlib figure.
    """
    global LABELS
    if fig is None:
        fig = plt.figure()
        LABELS[testname] = []
    data = data['query']
    plt.figure(fig.number)
    plt.title('{}: Tiempo de consulta Vs Tiempo'.format(testname))
    plt.xlabel('Tiempo desde inicio de mediciones [minutos]')
    plt.ylabel('Tiempo utilizado en obtener los datos [minutos]')
    base = data[0][0]
    # only average samples between 4 and 6 hours after the first query
    lowcap = base + 4*60*60
    upcap = lowcap + 2*60*60
    values = [x[1] for x in data if lowcap <= x[0] <= upcap]
    if len(values) > 0:
        LABELS[testname].append(label)
        plt.bar(len(LABELS[testname])*0.6, sum(values) / len(values), width=0.4, label=label)
        plt.xticks([(x+1) * 0.6 for x in range(len(LABELS[testname]))], LABELS[testname])
    plt.legend()
    return fig
def main():
    """Load each database's results for every test and render all graphs.

    For each test, one shared figure per graph function is filled with the
    series of every database that produced an output file.
    """
    graphs = (
        graph_cpu_usuage,
        graph_disk_usuage,
        graph_memory_usuage,
        graph_query_time,
        graph_cpu_usage_average, graph_bar_query_time)
    tests = [
        ('domain', 'Dominio'),
        ('mask', 'Mascara de Red'),
        # fixed display-label typo: was 'Largode paquetes'
        ('length', 'Largo de paquetes')
    ]
    databases = [('clickhouse', 'ClickHouse'),  # Require SSE4.2
                 ('druid', 'Druid'),
                 ('elasticsearch', 'ElasticSearch'),
                 ('influxdb', 'InfluxDB'),
                 ('prometheus', 'Prometheus'),
                 ('opentsdb', 'OpenTSDB')
                 ]
    for test in tests:
        # one figure (and series counter) per graph function
        fig = [None for _ in range(len(graphs))]
        index = [0 for _ in range(len(graphs))]
        for db in databases:
            data = load('../../out/{}_{}_1.out'.format(db[0], test[0]))
            if data:
                for i in range(len(graphs)):
                    fig[i] = graphs[i](data, db[1], test[1], fig[i], index[i])
                    index[i] += 1
        plt.show()


if __name__ == '__main__':
    logging.basicConfig()
    main()
| mit |
aaronzink/tensorflow-visual-inspection | models/autoencoder/VariationalAutoencoderRunner.py | 12 | 1653 | import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
    """Scale both sets to [0, 1] using min/max statistics fitted on X_train."""
    scaler = prep.MinMaxScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of length *batch_size* from *data*.

    The start index is drawn uniformly over every valid position. Note the
    +1: np.random.randint's upper bound is exclusive, so without it the last
    valid start index was unreachable and len(data) == batch_size raised
    ValueError.
    """
    start_index = np.random.randint(0, len(data) - batch_size + 1)
    return data[start_index:(start_index + batch_size)]
# Scale pixel values to [0, 1] using training-set statistics
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
# print the loss once every `display_step` epochs
display_step = 1
autoencoder = VariationalAutoencoder(n_input = 784,
                                     n_hidden = 200,
                                     optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
| apache-2.0 |
kbai/specfem3d | utils/EXTERNAL_CODES_coupled_with_SPECFEM3D/AxiSEM_for_SPECFEM3D/AxiSEM_modif_for_coupling_with_specfem/SOLVER/UTILS/hemispherical_model.py | 3 | 2107 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Python 2 script: writes an AxiSEM .sph model file describing hemispherical
# vp perturbations in a few radial layers, with a linear blend between the
# two hemispheres.
# Define layer boundaries (one more than layers)
layers = [1217.5, 1190., 1160., 1100.]
# Define angles of hemispherical boundaries with a linearly interpolated region in between
angles = [[45., 55.], [50., 60.], [55., 65.]]
vp_in = np.ones((len(layers) - 1, len(angles)))
# define perturbations in each layer and hemisphere
vp_in[0,0] = 0.
vp_in[0,1] = 2.
vp_in[1,0] = 0.
vp_in[1,1] = 5.
vp_in[2,0] = 0.
vp_in[2,1] = 2.
# number of points in theta direction (fine sampling usefull in combination with nearest
# neighbour interpolation)
ntheta = 721
dtheta = 180. / (ntheta - 1)
nlayers = len(layers) - 1
# number of radial point per layer. with nearest neighbour interpolation 2 is fine
nlpl = 2
# distance of points from layer boundaries (e.g. to avoid perturbations on both sides of a
# discontinuity)
dr = .01
f = open('model.sph', 'w')
vp = 0.
vs = 0.
rho = 0.
# total number of points. +1 for the additional zero layer at the bottom
npoints = (nlayers * nlpl + 1) * ntheta
print >> f, npoints
# write model file
for l in np.arange(nlayers):
    for r in np.linspace(layers[l] - dr, layers[l+1] + dr, nlpl):
        for theta in np.linspace(0., 180., ntheta):
            if theta < angles[l][0]:
                vp = vp_in[l,0]
            elif theta > angles[l][1]:
                vp = vp_in[l,1]
            else:
                # linear interpolation in the central region
                vp = vp_in[l,0] \
                    + (vp_in[l,1] - vp_in[l,0]) / (angles[l][1] - angles[l][0]) \
                    * (theta - angles[l][0])
            print >> f, '%7.2f %6.2f %5.2f %5.2f %5.2f ' % (r, theta, vp, vs, rho)
# additional zero (relative perturbation!) layer at the bottom to make sure the last layer
# does not extent to the next element boundary. Same approach might be usefull for the
# first layer, but in this case it is the ICB anyway
vp = 0.
r = layers[-1] - dr
for theta in np.linspace(0., 180., ntheta):
    print >> f, '%7.2f %6.2f %5.2f %5.2f %5.2f ' % (r, theta, vp, vs, rho)
# bug fix: the original ended with a bare "f.close" (method reference, never
# called), leaving the file handle open and possibly unflushed
f.close()
| gpl-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
    # Misc tests on linkage
    rng = np.random.RandomState(42)
    X = rng.normal(size=(5, 5))
    # invalid linkage names and malformed connectivity must raise
    assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
    assert_raises(ValueError, linkage_tree, X, linkage='foo')
    assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
    # Smoke test FeatureAgglomeration
    FeatureAgglomeration().fit(X)
    # test hiearchical clustering on a precomputed distances matrix
    dis = cosine_distances(X)
    res = linkage_tree(dis, affinity="precomputed")
    assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hiearchical clustering on a precomputed distances matrix
    # (callable affinity must match the named equivalent)
    res = linkage_tree(X, affinity=manhattan_distances)
    assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
    # Check that we obtain the correct solution for structured linkage trees.
    rng = np.random.RandomState(0)
    # np.bool alias was removed in NumPy 1.24; the builtin bool is equivalent
    mask = np.ones([10, 10], dtype=bool)
    # Avoiding a mask with only 'True' entries
    mask[4:7, 4:7] = 0
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for tree_builder in _TREE_BUILDERS.values():
        children, n_components, n_leaves, parent = \
            tree_builder(X.T, connectivity)
        # a binary merge tree over F features has 2F-1 nodes in total
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
        # Check that ward_tree raises a ValueError with a connectivity matrix
        # of the wrong shape
        assert_raises(ValueError,
                      tree_builder, X.T, np.ones((4, 4)))
        # Check that fitting with no samples raises an error
        assert_raises(ValueError,
                      tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
    # Check that we obtain the correct solution for unstructured linkage trees.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    for this_X in (X, X[0]):
        # With specified a number of clusters just for the sake of
        # raising a warning and testing the warning code
        with ignore_warnings():
            children, n_nodes, n_leaves, parent = assert_warns(
                UserWarning, ward_tree, this_X.T, n_clusters=10)
        # a binary merge tree over F features has 2F-1 nodes in total
        n_nodes = 2 * X.shape[1] - 1
        assert_equal(len(children) + n_leaves, n_nodes)
    # same check for every registered tree builder
    for tree_builder in _TREE_BUILDERS.values():
        for this_X in (X, X[0]):
            with ignore_warnings():
                children, n_nodes, n_leaves, parent = assert_warns(
                    UserWarning, tree_builder, this_X.T, n_clusters=10)
            n_nodes = 2 * X.shape[1] - 1
            assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
    # Check that the height of the results of linkage tree is sorted.
    rng = np.random.RandomState(0)
    # np.bool alias was removed in NumPy 1.24; the builtin bool is equivalent
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for linkage_func in _TREE_BUILDERS.values():
        children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
        # a binary merge tree over F features has 2F-1 nodes in total
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering.
    rng = np.random.RandomState(0)
    # np.bool alias was removed in NumPy 1.24; the builtin bool is equivalent
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    for linkage in ("ward", "complete", "average"):
        clustering = AgglomerativeClustering(n_clusters=10,
                                             connectivity=connectivity,
                                             linkage=linkage)
        clustering.fit(X)
        # test caching
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10, connectivity=connectivity,
                memory=tempdir,
                linkage=linkage)
            clustering.fit(X)
            labels = clustering.labels_
            assert_true(np.size(np.unique(labels)) == 10)
        finally:
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage)
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                         labels), 1)
        clustering.connectivity = None
        clustering.fit(X)
        assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that we raise a TypeError on dense matrices
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=sparse.lil_matrix(
                connectivity.toarray()[:10, :10]),
            linkage=linkage)
        assert_raises(ValueError, clustering.fit, X)
    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        affinity="manhattan",
        linkage="ward")
    assert_raises(ValueError, clustering.fit, X)
    # Test using another metric than euclidean works with linkage complete
    for affinity in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            affinity=affinity,
            linkage="complete")
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10,
            connectivity=None,
            affinity=affinity,
            linkage="complete")
        clustering2.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
                                                         clustering.labels_),
                            1)
    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(n_clusters=10,
                                         connectivity=connectivity,
                                         linkage="complete")
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(n_clusters=10,
                                          connectivity=connectivity,
                                          affinity='precomputed',
                                          linkage="complete")
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
    """Ward feature agglomeration on a 10x10 grid reduces to 5 clusters."""
    rng = np.random.RandomState(0)
    samples = rng.randn(50, 100)
    grid_mask = np.ones([10, 10], dtype=np.bool)
    connectivity = grid_to_graph(*grid_mask.shape)
    agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
    agglo.fit(samples)
    assert_true(np.unique(agglo.labels_).size == 5)
    reduced = agglo.transform(samples)
    assert_true(reduced.shape[1] == 5)
    restored = agglo.inverse_transform(reduced)
    # Each row of the restored data only carries one value per cluster.
    assert_true(np.unique(restored[0]).size == 5)
    assert_array_almost_equal(agglo.transform(restored), reduced)
    # Fitting with zero samples must raise a ValueError.
    assert_raises(ValueError, agglo.fit, samples[:0])
def assess_same_labelling(cut1, cut2):
    """Util for comparison with scipy: check two cuts co-cluster identically."""
    co_clust = []
    for cut in (cut1, cut2):
        n_points = len(cut)
        n_labels = cut.max() + 1
        # One-hot encode the labelling, then build the co-clustering matrix:
        # entry (i, j) is 1 iff points i and j share a cluster.
        indicator = np.zeros((n_points, n_labels))
        indicator[np.arange(n_points), cut] = 1
        co_clust.append(indicator.dot(indicator.T))
    assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
    """Compare scikit's tree builders (unstructured) against scipy linkage."""
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(0)
    # A dense ndarray (not a lil_matrix) on purpose: non-sparse connectivity
    # matrices must be handled as well.
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for trial in range(5):
            X = .1 * rng.normal(size=(n, p))
            X -= 4. * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]
            scipy_children = hierarchy.linkage(
                X, method=linkage)[:, :2].astype(np.int)
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
            assess_same_labelling(_hc_cut(k, children, n_leaves),
                                  _hc_cut(k, scipy_children, n_leaves))
    # Error management in _hc_cut: more clusters than leaves must raise.
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
    """Connectivity in the ward tree must be propagated correctly on merge."""
    # If changes were not propagated correctly, fit would crash with an
    # IndexError.
    points = [(.014, .120), (.014, .099), (.014, .097),
              (.017, .153), (.017, .153), (.018, .153),
              (.018, .153), (.018, .153), (.018, .153),
              (.018, .153), (.018, .153), (.018, .153),
              (.018, .152), (.018, .149), (.018, .144)]
    X = np.array(points)
    connectivity = kneighbors_graph(X, 10, include_self=False)
    ward = AgglomerativeClustering(
        n_clusters=4, connectivity=connectivity, linkage='ward')
    ward.fit(X)
def test_ward_tree_children_order():
    """Structured and unstructured ward_tree must order children identically."""
    # Checked on five random datasets.
    n, p = 10, 5
    rng = np.random.RandomState(0)
    connectivity = np.ones((n, n))
    for _ in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]
        unstructured_children = ward_tree(X)[0]
        structured_children = ward_tree(X, connectivity=connectivity)[0]
        assert_array_equal(unstructured_children, structured_children)
def test_ward_linkage_tree_return_distance():
    """Check the ``return_distance`` option of ward_tree and linkage_tree.

    First half: structured (with connectivity) and unstructured trees must
    agree on both children and merge distances, for ward, average and
    complete linkage, over five random datasets.
    Second half: exact children/distances are checked against reference
    values taken from scipy's hierarchy test data.
    """
    # Test return_distance option on linkage and ward trees
    # test that return_distance when set true, gives same
    # output on both structured and unstructured clustering.
    n, p = 10, 5
    rng = np.random.RandomState(0)
    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]
        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity,
                                   return_distance=True)
        # get children
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]
        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)
        # check if the distances are the same
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]
        assert_array_almost_equal(dist_unstructured, dist_structured)
        for linkage in ['average', 'complete']:
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage,
                return_distance=True)[-1]
            unstructured_items = linkage_tree(
                X, linkage=linkage, return_distance=True)[-1]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(
                structured_children, unstructured_children)
    # test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array([[1.43054825, -7.5693489],
                  [6.95887839, 6.82293382],
                  [2.87137846, -9.68248579],
                  [7.87974764, -6.05485803],
                  [8.24018364, -6.09495602],
                  [7.39020262, 8.54004355]])
    # truth: columns are (child a, child b, merge distance, cluster size)
    linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
                               [1., 5., 1.77045373, 2.],
                               [0., 2., 2.55760419, 2.],
                               [6., 8., 9.10208346, 4.],
                               [7., 9., 24.7784379, 6.]])
    linkage_X_complete = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.96742194, 4.],
         [7., 9., 18.77445997, 6.]])
    linkage_X_average = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.55832839, 4.],
         [7., 9., 15.44089605, 6.]])
    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))
    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X,
                                 return_distance=True)
    # check that the labels are the same
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
    # check that the distances are correct
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
    linkage_options = ['complete', 'average']
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(
            X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage,
            return_distance=True)
        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])
        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    """Non item-assignable connectivity with several components must warn."""
    # Non-regression test: a UserWarning (not a crash) is expected when the
    # connectivity matrix has to be fixed.
    data = np.array([[0, 0], [1, 1]])
    # A mask with two disconnected components forces connectivity fixing.
    component_mask = np.array([[True, False], [False, True]])
    graph = grid_to_graph(n_x=2, n_y=2, mask=component_mask)
    model = AgglomerativeClustering(connectivity=graph, linkage='ward')
    assert_warns(UserWarning, model.fit, data)
def test_int_float_dict():
    """Smoke-test IntFloatDict lookups plus max_merge / average_merge."""
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))
    d = IntFloatDict(keys, values)
    # Every stored key must map back to its value.
    for key, value in zip(keys, values):
        assert d[key] == value
    other = IntFloatDict(np.arange(50).astype(np.intp)[::2],
                         (0.5 * np.ones(50))[::2])
    # Complete smoke test of the merge helpers.
    max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
    average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
    """A callable connectivity must match an equivalent precomputed one."""
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    precomputed = AgglomerativeClustering(
        connectivity=kneighbors_graph(X, 3, include_self=False))
    from_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3,
                             include_self=False))
    precomputed.fit(X)
    from_callable.fit(X)
    assert_array_equal(precomputed.labels_, from_callable.labels_)
def test_connectivity_ignores_diagonal():
    """Self-edges on the connectivity diagonal must not change the result."""
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    without_self = kneighbors_graph(X, 3, include_self=False)
    with_self = kneighbors_graph(X, 3, include_self=True)
    model_a = AgglomerativeClustering(connectivity=without_self)
    model_b = AgglomerativeClustering(connectivity=with_self)
    model_a.fit(X)
    model_b.fit(X)
    assert_array_equal(model_a.labels_, model_b.labels_)
def test_compute_full_tree():
    """The tree is fully built when n_clusters is small, pruned when large."""
    rng = np.random.RandomState(0)
    # Small n_clusters: the full tree must be built, i.e. the number of
    # merges is n_samples - 1.
    X = rng.randn(10, 2)
    connectivity = kneighbors_graph(X, 5, include_self=False)
    agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
    agc.fit(X)
    assert_equal(agc.children_.shape[0], X.shape[0] - 1)
    # Large n_clusters (greater than max(100, 0.02 * n_samples)): building
    # stops once n_clusters is reached, leaving n_samples - n_clusters merges.
    n_clusters = 101
    X = rng.randn(200, 2)
    connectivity = kneighbors_graph(X, 10, include_self=False)
    agc = AgglomerativeClustering(n_clusters=n_clusters,
                                  connectivity=connectivity)
    agc.fit(X)
    assert_equal(agc.children_.shape[0], X.shape[0] - n_clusters)
def test_n_components():
    """Every tree builder must report 5 components for an identity graph."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    # Identity connectivity -> five fully disconnected components.
    connectivity = np.eye(5)
    for linkage_func in _TREE_BUILDERS.values():
        n_components = ignore_warnings(linkage_func)(X, connectivity)[1]
        assert_equal(n_components, 5)
def test_agg_n_clusters():
    """n_clusters <= 0 must raise a ValueError with an explicit message."""
    rng = np.random.RandomState(0)
    X = rng.rand(20, 10)
    for bad_n_clusters in (-1, 0):
        agc = AgglomerativeClustering(n_clusters=bad_n_clusters)
        msg = ("n_clusters should be an integer greater than 0."
               " %s was provided." % str(agc.n_clusters))
        assert_raise_message(ValueError, msg, agc.fit, X)
| gpl-2.0 |
LiaoPan/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
# Number of colors in the quantized palette.
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow behaves well on float data (needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array of pixels (one RGB row each).
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
# Fit K-Means on only 1000 randomly shuffled pixels, for speed.
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
# Baseline for comparison: a codebook of randomly picked pixels.
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
                                          image_array,
                                          axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels.

    Parameters
    ----------
    codebook : array of shape (n_colors, d)
        The color palette (e.g. K-Means cluster centers).
    labels : array of shape (w * h,)
        Index into ``codebook`` for every pixel, in row-major order.
    w, h : int
        Width and height (first two axes) of the output image.

    Returns
    -------
    image : array of shape (w, h, d)
    """
    codebook = np.asarray(codebook)
    labels = np.asarray(labels)
    # Fancy indexing replaces the original O(w * h) Python double loop with
    # a single vectorized lookup; the row-major reshape reproduces exactly
    # the pixel order of the sequential label_idx traversal.
    return codebook[labels].reshape(w, h, codebook.shape[1])
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
# K-Means quantized rendering (64-color palette learned from the data).
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
# Random-codebook rendering, shown for comparison.
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 11 | 39569 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# held-out points and their expected class
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# module-level RNG shared by several tests below
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
    """Classification on the toy dataset for one (presort, loss) combo."""
    clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
                                     random_state=1, presort=presort)
    # Predicting before fitting must fail.
    assert_raises(ValueError, clf.predict, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(10, len(clf.estimators_))
    # The training score should improve at least once across stages.
    train_score_drop = clf.train_score_[:-1] - clf.train_score_[1:]
    assert_true(np.any(train_score_drop >= 0.0))
    assert_equal(clf.apply(X).shape, (6, 10, 1))
def test_classification_toy():
    """Yield toy-classification checks over presort and loss settings."""
    for presort in ('auto', True, False):
        for loss in ('deviance', 'exponential'):
            yield check_classification_toy, presort, loss
def test_parameter_checks():
    """Invalid constructor parameters must make ``fit`` raise ValueError."""
    # Check input parameter validation.
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=-1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='foobar').fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
                  X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
                  X, y)
    # subsample must lie in (0, 1].
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=1.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=-0.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=0).fit, X, y)
    # init must be an estimator, not a plain object.
    assert_raises(ValueError,
                  GradientBoostingClassifier(init={}).fit, X, y)
    # test fit before feature importance
    assert_raises(ValueError,
                  lambda: GradientBoostingClassifier().feature_importances_)
    # deviance requires ``n_classes >= 2``.
    assert_raises(ValueError,
                  lambda X, y: GradientBoostingClassifier(
                      loss='deviance').fit(X, y),
                  X, [0, 0, 0, 0])
def test_loss_function():
    """Losses of the wrong estimator type must make ``fit`` raise."""
    # Regression losses are invalid for the classifier...
    for regression_loss in ('ls', 'lad', 'quantile', 'huber'):
        assert_raises(ValueError,
                      GradientBoostingClassifier(loss=regression_loss).fit,
                      X, y)
    # ...and classification losses are invalid for the regressor.
    for classification_loss in ('deviance', 'exponential'):
        assert_raises(ValueError,
                      GradientBoostingRegressor(loss=classification_loss).fit,
                      X, y)
def check_classification_synthetic(presort, loss):
    """GradientBoostingClassifier on the Hastie et al. ESLII Ex. 12.7 data."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, y_train = X[:2000], y[:2000]
    X_test, y_test = X[2000:], y[2000:]
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
                                      max_depth=1, loss=loss,
                                      learning_rate=1.0, random_state=0,
                                      presort=presort)
    gbrt.fit(X_train, y_train)
    assert_less(1.0 - gbrt.score(X_test, y_test), 0.09)
    # The subsampled variant must also reach a low test error.
    gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
                                      max_depth=1,
                                      learning_rate=1.0, subsample=0.5,
                                      random_state=0,
                                      presort=presort)
    gbrt.fit(X_train, y_train)
    assert_less(1.0 - gbrt.score(X_test, y_test), 0.08)
def test_classification_synthetic():
    """Yield synthetic-classification checks over presort and loss settings."""
    for presort in ('auto', True, False):
        for loss in ('deviance', 'exponential'):
            yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
    """Consistency on Boston prices for one (presort, loss, subsample).

    Also checks that constant sample weights (of any scale) leave the
    predictions unchanged.
    """
    ones = np.ones(len(boston.target))
    previous_pred = None
    for sample_weight in (None, ones, 2 * ones):
        reg = GradientBoostingRegressor(n_estimators=100,
                                        loss=loss,
                                        max_depth=4,
                                        subsample=subsample,
                                        min_samples_split=1,
                                        random_state=1,
                                        presort=presort)
        # Predicting before fitting must fail.
        assert_raises(ValueError, reg.predict, boston.data)
        reg.fit(boston.data, boston.target,
                sample_weight=sample_weight)
        assert_equal(reg.apply(boston.data).shape, (506, 100))
        y_pred = reg.predict(boston.data)
        assert_less(mean_squared_error(boston.target, y_pred), 6.0)
        # Rescaling uniform weights must not change the fit.
        if previous_pred is not None:
            assert_array_almost_equal(previous_pred, y_pred)
        previous_pred = y_pred
def test_boston():
    """Yield Boston checks over presort, loss and subsample settings."""
    for presort in ('auto', True, False):
        for loss in ('ls', 'lad', 'huber'):
            for subsample in (1.0, 0.5):
                yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
    """Consistency on iris for one (presort, subsample, sample_weight)."""
    clf = GradientBoostingClassifier(n_estimators=100,
                                     loss='deviance',
                                     random_state=1,
                                     subsample=subsample,
                                     presort=presort)
    clf.fit(iris.data, iris.target, sample_weight=sample_weight)
    assert_greater(clf.score(iris.data, iris.target), 0.9)
    # One leaf id per (sample, stage, class).
    assert_equal(clf.apply(iris.data).shape, (150, 100, 3))
def test_iris():
    """Yield iris checks over presort, subsample and sample_weight settings."""
    ones = np.ones(len(iris.target))
    for presort in ('auto', True, False):
        for subsample in (1.0, 0.5):
            for sample_weight in (None, ones):
                yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
    """Check MSE thresholds on the three Friedman synthetic datasets."""
    # Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 1, 'learning_rate': 0.1,
                         'loss': 'ls'}
    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state,
                                   noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        clf = GradientBoostingRegressor(presort=presort)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 5.0)
    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 1700.0)
    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 0.015)
def test_feature_importances():
    """feature_importances_ must exist and drive transform's feature mask."""
    X = np.array(boston.data, dtype=np.float32)
    y = np.array(boston.target, dtype=np.float32)
    for presort in (True, False):
        clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                        min_samples_split=1, random_state=1,
                                        presort=presort)
        clf.fit(X, y)
        assert_true(hasattr(clf, 'feature_importances_'))
        X_new = clf.transform(X, threshold="mean")
        assert_less(X_new.shape[1], X.shape[1])
        # transform keeps exactly the above-average-importance features.
        keep = clf.feature_importances_ > clf.feature_importances_.mean()
        assert_array_almost_equal(X_new, X[:, keep])
def test_probability_log():
    """predict_proba returns valid probabilities consistent with predict."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # Calling predict_proba before fit must fail.
    assert_raises(ValueError, clf.predict_proba, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    y_proba = clf.predict_proba(T)
    # All probabilities lie in [0, 1].
    assert_true(np.all(y_proba >= 0.0))
    assert_true(np.all(y_proba <= 1.0))
    # The argmax of the probabilities reproduces the hard predictions.
    derived = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(derived, true_result)
def test_check_inputs():
    """Mismatched shapes of y and sample_weight must raise ValueError."""
    # y longer than X
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=100,
                                             random_state=1).fit,
                  X, y + [0, 1])
    # sample_weight longer than X
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=100,
                                             random_state=1).fit,
                  X, y, sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
    """predict must reject X with the wrong shape, for both estimators."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, rng.rand(len(X)))
    bad_inputs = (np.array([1.0, 2.0])[:, np.newaxis],   # too few features
                  np.array([[]]),                        # no features
                  np.array([1.0, 2.0, 3.0])[:, np.newaxis])
    for model in (clf, reg):
        for bad_X in bad_inputs:
            assert_raises(ValueError, model.predict, bad_X)
def test_check_max_features():
    """Invalid max_features values must make fit raise ValueError."""
    # zero, more features than available, and a negative fraction
    for bad_max_features in (0, len(X[0]) + 1, -0.1):
        reg = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                        max_features=bad_max_features)
        assert_raises(ValueError, reg.fit, X, y)
def test_max_feature_regression():
    """Non-regression: random_state must be set properly with max_features."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, y_train = X[:2000], y[:2000]
    X_test, y_test = X[2000:], y[2000:]
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
                                      max_depth=2, learning_rate=.1,
                                      max_features=2, random_state=1)
    gbrt.fit(X_train, y_train)
    test_deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
    assert_true(test_deviance < 0.5,
                "GB failed with deviance %.4f" % test_deviance)
def test_max_feature_auto():
    """max_features_ resolution for string and float specifications."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape
    X_train = X[:2000]
    y_train = y[:2000]
    # 'auto' means sqrt(n_features) for the classifier...
    gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
    # ...and resolves differently for the regressor; floats are fractions.
    cases = [('auto', n_features),
             (0.3, int(n_features * 0.3)),
             ('sqrt', int(np.sqrt(n_features))),
             ('log2', int(np.log2(n_features))),
             (0.01 / X.shape[1], 1)]  # tiny fractions are clipped to 1
    for max_features, expected in cases:
        gbrt = GradientBoostingRegressor(n_estimators=1,
                                         max_features=max_features)
        gbrt.fit(X_train, y_train)
        assert_equal(gbrt.max_features_, expected)
def test_staged_predict():
    """staged_predict shapes are consistent and its LAST stage == predict.

    Only the final stage of ``staged_predict`` matches ``predict``; the
    equality assertion therefore belongs after the loop.  Inside the loop
    (as previously written) it would fail for every early stage.
    """
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()
    # test raise ValueError if not fitted
    assert_raises(ValueError, lambda X: np.fromiter(
        clf.staged_predict(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # every stage yields an array of the right shape
    for y in clf.staged_predict(X_test):
        assert_equal(y.shape, y_pred.shape)
    # test if prediction for last stage equals ``predict``
    assert_array_equal(y_pred, y)
def test_staged_predict_proba():
    """staged_predict / staged_predict_proba final stages match predict*.

    The equality-with-``predict``/``predict_proba`` assertions are placed
    AFTER the loops: only the last yielded stage matches the fully fitted
    model, so asserting equality inside the loops would fail on every
    early stage.
    """
    X, y = datasets.make_hastie_10_2(n_samples=1200,
                                     random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)
    # test raise NotFittedError if not fitted
    assert_raises(NotFittedError, lambda X: np.fromiter(
        clf.staged_predict_proba(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    # shape check for every stage of staged_predict
    for y_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, y_pred.shape)
    # test if prediction for last stage equals ``predict``
    assert_array_equal(clf.predict(X_test), y_pred)
    # shape checks for every stage of staged_predict_proba
    for staged_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], staged_proba.shape[0])
        assert_equal(2, staged_proba.shape[1])
    # test if prediction for last stage equals ``predict_proba``
    assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
    """staged_* methods must yield defensive copies, not shared buffers."""
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(10, 3))
    y = (4 * X[:, 0]).astype(np.int) + 1  # don't predict zeros
    for model in [GradientBoostingRegressor(),
                  GradientBoostingClassifier()]:
        model.fit(X, y)
        for method_name in ['predict', 'decision_function', 'predict_proba']:
            staged_method = getattr(model, "staged_" + method_name, None)
            if staged_method is None:
                # regressor has no staged_predict_proba
                continue
            with warnings.catch_warnings(record=True):
                stages = list(staged_method(X))
            # Zeroing one stage's output must not corrupt the others.
            stages[1][:] = 0
            assert_true(np.all(stages[0] != 0))
def test_serialization():
    """A pickled and restored model keeps its predictions and ensemble."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    try:
        # Python 2: prefer the C implementation.
        import cPickle as pickle
    except ImportError:
        import pickle
    payload = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    restored = pickle.loads(payload)
    assert_array_equal(restored.predict(T), true_result)
    assert_equal(100, len(restored.estimators_))
def test_degenerate_targets():
    """All-equal targets: classifier must refuse, regressor must fit."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # A single class is not a valid classification problem.
    assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, np.ones(len(X)))
    reg.predict([rng.rand(2)])
    # The constant target must be predicted everywhere.
    assert_array_equal(np.ones((1,), dtype=np.float64),
                       reg.predict([rng.rand(2)]))
def test_quantile_loss():
    """Quantile loss at alpha=0.5 must coincide with least absolute deviation."""
    median_reg = GradientBoostingRegressor(n_estimators=100, loss='quantile',
                                           max_depth=4, alpha=0.5,
                                           random_state=7)
    median_reg.fit(boston.data, boston.target)
    lad_reg = GradientBoostingRegressor(n_estimators=100, loss='lad',
                                        max_depth=4, random_state=7)
    lad_reg.fit(boston.data, boston.target)
    assert_array_almost_equal(median_reg.predict(boston.data),
                              lad_reg.predict(boston.data), decimal=4)
def test_symbol_labels():
    """String class labels must round-trip through fit/predict."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    str_y = tosequence(map(str, y))
    clf.fit(X, str_y)
    assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
    assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
    """Float class labels must round-trip through fit/predict."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, np.asarray(y, dtype=np.float32))
    expected = np.asarray(true_result, dtype=np.float32)
    assert_array_equal(clf.predict(T), expected)
    assert_equal(100, len(clf.estimators_))
def test_shape_y():
    """A column-vector y triggers DataConversionWarning but still fits."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    column_y = np.asarray(y, dtype=np.int32)[:, np.newaxis]
    # The warning must be raised "always": otherwise it gets ignored in the
    # later tests, making the tests that check for it fail.
    assert_warns(DataConversionWarning, clf.fit, X, column_y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_mem_layout():
    """Fortran/C-contiguous layouts of X and y must all give the same fit."""
    # Different memory layouts of X.
    for X_variant in (np.asfortranarray(X), np.ascontiguousarray(X)):
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X_variant, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))
    # Different memory layouts of an int32 y.
    y32 = np.asarray(y, dtype=np.int32)
    for y_variant in (np.ascontiguousarray(y32), np.asfortranarray(y32)):
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X, y_variant)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    """oob_improvement_ has one entry per stage; regression-test its head."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    assert_equal(clf.oob_improvement_.shape[0], 100)
    # hard-coded regression test - change if modification in OOB computation
    expected_head = np.array([0.19, 0.15, 0.12, -0.12, -0.11])
    assert_array_almost_equal(clf.oob_improvement_[:5], expected_head,
                              decimal=2)
def test_oob_improvement_raise():
    # Without subsampling there is no out-of-bag set, so accessing
    # oob_improvement_ must raise an AttributeError.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                       subsample=1.0)
    model.fit(X, y)
    assert_raises(AttributeError, lambda: model.oob_improvement_)
def test_oob_multilcass_iris():
    # OOB improvement on a multi-class problem (iris).
    model = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                       random_state=1, subsample=0.5)
    model.fit(iris.data, iris.target)
    assert_greater(model.score(iris.data, iris.target), 0.9)
    assert_equal(model.oob_improvement_.shape[0], model.n_estimators)
    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(clf.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
def test_verbose_output():
    # Fitting with verbose=1 must print a well-formed progress report.
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    model = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                       verbose=1, subsample=0.8)
    model.fit(X, y)
    captured = sys.stdout
    sys.stdout = saved_stdout
    captured.seek(0)
    # header line (the OOB column is present because subsample < 1)
    true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
        'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
    assert_equal(true_header, captured.readline().rstrip())
    # one line each for iterations 1-10, then every 10th up to 100
    assert_equal(10 + 9, sum(1 for line in captured.readlines()))
def test_more_verbose_output():
    # Fitting with verbose=2 must print one report line per iteration.
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    model = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                       verbose=2)
    model.fit(X, y)
    captured = sys.stdout
    sys.stdout = saved_stdout
    captured.seek(0)
    # header line (no OOB column: no subsampling here)
    true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
        'Iter', 'Train Loss', 'Remaining Time')
    assert_equal(true_header, captured.readline().rstrip())
    # one report line per estimator
    assert_equal(100, sum(1 for line in captured.readlines()))
def test_warm_start():
    # Growing an ensemble via warm_start must match a single fit.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        reference = Estimator(n_estimators=200, max_depth=1)
        reference.fit(X, y)
        warm = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        warm.fit(X, y)
        warm.set_params(n_estimators=200)
        warm.fit(X, y)
        assert_array_almost_equal(warm.predict(X), reference.predict(X))
def test_warm_start_n_estimators():
    # Growing to a larger n_estimators via warm_start must match a
    # single fit with that n_estimators.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        reference = Estimator(n_estimators=300, max_depth=1)
        reference.fit(X, y)
        warm = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        warm.fit(X, y)
        warm.set_params(n_estimators=300)
        warm.fit(X, y)
        assert_array_almost_equal(warm.predict(X), reference.predict(X))
def test_warm_start_max_depth():
    # Trees added during warm start may use a different max_depth.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=110, max_depth=2)
        model.fit(X, y)
        # original trees keep depth 1; the 10 new ones have depth 2
        assert_equal(model.estimators_[0, 0].max_depth, 1)
        for idx in range(1, 11):
            assert_equal(model.estimators_[-idx, 0].max_depth, 2)
def test_warm_start_clear():
    # Refitting with warm_start switched off must discard old state.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        reference = Estimator(n_estimators=100, max_depth=1)
        reference.fit(X, y)
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)  # initializes warm-start state
        model.set_params(warm_start=False)
        model.fit(X, y)  # clears old state; must equal the reference
        assert_array_almost_equal(model.predict(X), reference.predict(X))
def test_warm_start_zero_n_estimators():
    # Warm starting with n_estimators == 0 is invalid.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=0)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_smaller_n_estimators():
    # Warm starting with a smaller n_estimators than already fit
    # is invalid.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=99)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_equal_n_estimators():
    # Warm starting without increasing n_estimators must be a no-op.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        reference = Estimator(n_estimators=100, max_depth=1)
        reference.fit(X, y)
        model = clone(reference)
        model.set_params(n_estimators=reference.n_estimators,
                         warm_start=True)
        model.fit(X, y)
        assert_array_almost_equal(model.predict(X), reference.predict(X))
def test_warm_start_oob_switch():
    # Subsampling (and hence OOB tracking) can be enabled mid warm start.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=110, subsample=0.5)
        model.fit(X, y)
        # the first 100 stages were fit without any OOB data
        assert_array_equal(model.oob_improvement_[:100], np.zeros(100))
        # the 10 new stages have real (non-zero) OOB improvements
        assert_array_equal(model.oob_improvement_[-10:] == 0.0,
                           np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
    # OOB improvements computed during warm start must match a single fit.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        reference = Estimator(n_estimators=200, max_depth=1, subsample=0.5,
                              random_state=1)
        reference.fit(X, y)
        warm = Estimator(n_estimators=100, max_depth=1, subsample=0.5,
                         random_state=1, warm_start=True)
        warm.fit(X, y)
        warm.set_params(n_estimators=200)
        warm.fit(X, y)
        assert_array_almost_equal(warm.oob_improvement_[:100],
                                  reference.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Monitor callback that requests early stopping on the 10th stage."""
    return i == 9
def test_monitor_early_stopping():
    # A monitor returning True must stop training early.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in [GradientBoostingRegressor, GradientBoostingClassifier]:
        model = Estimator(n_estimators=20, max_depth=1, random_state=1,
                          subsample=0.5)
        model.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(model.n_estimators, 20)  # the parameter is not altered
        assert_equal(model.estimators_.shape[0], 10)
        assert_equal(model.train_score_.shape[0], 10)
        assert_equal(model.oob_improvement_.shape[0], 10)
        # refitting without the monitor trains the full ensemble
        model.set_params(n_estimators=30)
        model.fit(X, y)
        assert_equal(model.n_estimators, 30)
        assert_equal(model.estimators_.shape[0], 30)
        assert_equal(model.train_score_.shape[0], 30)
        # same scenario, but with warm_start enabled
        model = Estimator(n_estimators=20, max_depth=1, random_state=1,
                          subsample=0.5, warm_start=True)
        model.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(model.n_estimators, 20)
        assert_equal(model.estimators_.shape[0], 10)
        assert_equal(model.train_score_.shape[0], 10)
        assert_equal(model.oob_improvement_.shape[0], 10)
        # refit from scratch (warm_start off), again without the monitor
        model.set_params(n_estimators=30, warm_start=False)
        model.fit(X, y)
        assert_equal(model.n_estimators, 30)
        assert_equal(model.train_score_.shape[0], 30)
        assert_equal(model.estimators_.shape[0], 30)
        assert_equal(model.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # With max_leaf_nodes=k+1 and unlimited depth, greedy trees reach
    # depth k and carry exactly k + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    model = GradientBoostingClassifier(n_estimators=20, max_depth=None,
                                       random_state=1, max_leaf_nodes=k + 1)
    model.fit(X, y)
    tree = model.estimators_[0, 0].tree_
    assert_equal(tree.max_depth, k)
    n_leaves = tree.children_left[tree.children_left == TREE_LEAF].shape[0]
    assert_equal(n_leaves, k + 1)
def test_complete_regression():
    # Greedy regression trees with max_leaf_nodes=k+1 have k + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    k = 4
    model = GradientBoostingRegressor(n_estimators=20, max_depth=None,
                                      random_state=1, max_leaf_nodes=k + 1)
    model.fit(boston.data, boston.target)
    tree = model.estimators_[-1, 0].tree_
    n_leaves = tree.children_left[tree.children_left == TREE_LEAF].shape[0]
    assert_equal(n_leaves, k + 1)
def test_zero_estimator_reg():
    # ZeroEstimator (object or its 'zero' alias) as init for regression.
    for init in (ZeroEstimator(), 'zero'):
        model = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                          random_state=1, init=init)
        model.fit(boston.data, boston.target)
        mse = mean_squared_error(boston.target, model.predict(boston.data))
        assert_almost_equal(mse, 33.0, decimal=0)
    # an unknown init string must be rejected at fit time
    model = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                      random_state=1, init='foobar')
    assert_raises(ValueError, model.fit, boston.data, boston.target)
def test_zero_estimator_clf():
    # ZeroEstimator (object or its 'zero' alias) as init for classification.
    X = iris.data
    y = np.array(iris.target)
    model = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                       random_state=1, init=ZeroEstimator())
    model.fit(X, y)
    assert_greater(model.score(X, y), 0.96)
    model = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                       random_state=1, init='zero')
    model.fit(X, y)
    assert_greater(model.score(X, y), 0.96)
    # collapse iris into a binary problem and check again
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    model = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                       random_state=1, init='zero')
    model.fit(X, y)
    assert_greater(model.score(X, y), 0.96)
    # an unknown init string must be rejected at fit time
    model = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                       random_state=1, init='foobar')
    assert_raises(ValueError, model.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # max_leaf_nodes takes precedence over max_depth.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        # with max_leaf_nodes set, trees grow deeper than max_depth=1
        model = Estimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        assert_greater(model.estimators_[0, 0].tree_.max_depth, 1)
        # without it, max_depth is honoured
        model = Estimator(max_depth=1).fit(X, y)
        assert_equal(model.estimators_[0, 0].tree_.max_depth, 1)
def test_warm_start_wo_nestimators_change():
    # warm_start must be a no-op when n_estimators is unchanged.
    # Regression test for #3513.
    model = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    for _ in range(2):
        model.fit([[0, 1], [2, 3]], [0, 1])
        assert_equal(model.estimators_.shape[0], 10)
def test_probability_exponential():
    # Probability estimates with the exponential loss.
    model = GradientBoostingClassifier(loss='exponential',
                                       n_estimators=100, random_state=1)
    # predicting probabilities before fitting is an error
    assert_raises(ValueError, model.predict_proba, T)
    model.fit(X, y)
    assert_array_equal(model.predict(T), true_result)
    y_proba = model.predict_proba(T)
    # probabilities lie in [0, 1]
    assert_true(np.all(y_proba >= 0.0))
    assert_true(np.all(y_proba <= 1.0))
    # P(y=1) is the logistic transform of twice the decision value
    score = model.decision_function(T).ravel()
    assert_array_almost_equal(y_proba[:, 1],
                              1.0 / (1.0 + np.exp(-2 * score)))
    # the argmax of the probabilities reproduces the predictions
    y_pred = model.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
    # Zero sample weights must make the regressor ignore those samples.
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # the first two samples are silenced by a zero weight
    sample_weight = [0, 0, 1, 1]
    for loss in ('huber', 'ls', 'lad', 'quantile'):
        model = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
                                          loss=loss)
        model.fit(X, y, sample_weight=sample_weight)
        assert_greater(model.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
    # Zero sample weights must make the classifier ignore those samples,
    # for every supported loss.
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('deviance', 'exponential'):
        # BUG FIX: `loss` was previously not passed to the estimator, so
        # only the default 'deviance' loss was ever exercised here.
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
    # Dense and sparse inputs (for every `presort` setting) must yield the
    # same leaves, predictions and feature importances.
    dense = EstimatorClass(n_estimators=10, random_state=0,
                           max_depth=2).fit(X, y)
    sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                            presort=False).fit(X_sparse, y)
    auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                          presort='auto').fit(X_sparse, y)
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    assert_array_almost_equal(sparse.predict(X), dense.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              dense.feature_importances_)
    assert_array_almost_equal(sparse.apply(X), auto.apply(X))
    assert_array_almost_equal(sparse.predict(X), auto.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              auto.feature_importances_)
    # BUG FIX: EstimatorClass is a class, not an instance, so the previous
    # `isinstance(EstimatorClass, GradientBoostingClassifier)` check was
    # always False and the probability comparisons below never ran.
    if issubclass(EstimatorClass, GradientBoostingClassifier):
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
        assert_array_almost_equal(sparse.predict_proba(X),
                                  auto.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  auto.predict_log_proba(X))
def test_sparse_input():
    # Run check_sparse_input for both estimators and every sparse format.
    ests = (GradientBoostingClassifier, GradientBoostingRegressor)
    sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
    y, X = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50,
                                                   n_features=1,
                                                   n_classes=20)
    # use a single label column as the target
    y = y[:, 0]
    for Est, to_sparse in product(ests, sparse_matrices):
        yield check_sparse_input, Est, X, to_sparse(X), y
| bsd-3-clause |
yunfeilu/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Return how many columns of the CSR matrix X contain a non-zero."""
    _, col_indices = X.nonzero()
    return len(np.unique(col_indices))
def tokens(doc):
    """Yield lowercased word tokens extracted from doc.

    This uses a simple regex to break strings into tokens. For a more
    principled approach, see CountVectorizer or TfidfVectorizer.
    """
    for word in re.findall(r"\w+", doc):
        yield word.lower()
def token_freqs(doc):
    """Extract a dict mapping tokens from doc to their frequencies."""
    freq = defaultdict(int)
    # inline of tokens(): lowercased \w+ matches
    for word in re.findall(r"\w+", doc):
        freq[word.lower()] += 1
    return freq
# The newsgroups categories to load; a small subset keeps the demo fast.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print("    The default number of features is 2**18.")
print()
# Optional CLI argument: size of the hash space for FeatureHasher.
try:
    n_features = int(sys.argv[1])
except IndexError:
    # no argument supplied: fall back to the default hash-space size
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
# raw text size in MB, used to report vectorization throughput below
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
# Benchmark 1: DictVectorizer over token-frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
# Benchmark 2: FeatureHasher over the same token-frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
# Benchmark 3: FeatureHasher fed raw token streams (no dict building).
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
amolkahat/pandas | asv_bench/benchmarks/indexing_engines.py | 5 | 2223 | import numpy as np
from pandas._libs import index as libindex
def _get_numeric_engines():
    """Return (engine class, numpy dtype) pairs for the numeric index
    engines provided by the installed pandas version.

    Engines missing from ``pandas._libs.index`` (older pandas) are
    silently skipped by the ``hasattr`` filter.
    """
    engine_names = [
        ('Int64Engine', np.int64), ('Int32Engine', np.int32),
        ('Int16Engine', np.int16), ('Int8Engine', np.int8),
        ('UInt64Engine', np.uint64), ('UInt32Engine', np.uint32),
        # BUG FIX: was 'UInt16engine' (lowercase e), which never matched any
        # libindex attribute, so the UInt16 engine was never benchmarked.
        ('UInt16Engine', np.uint16), ('UInt8Engine', np.uint8),
        ('Float64Engine', np.float64), ('Float32Engine', np.float32),
    ]
    return [(getattr(libindex, engine_name), dtype)
            for engine_name, dtype in engine_names
            if hasattr(libindex, engine_name)]
class NumericEngineIndexing(object):
    # Benchmark get_loc on every numeric index engine, over monotonically
    # increasing, monotonically decreasing and non-monotonic data.
    params = [_get_numeric_engines(),
              ['monotonic_incr', 'monotonic_decr', 'non_monotonic'],
              ]
    param_names = ['engine_and_dtype', 'index_type']

    def setup(self, engine_and_dtype, index_type):
        engine, dtype = engine_and_dtype
        N = 10**5
        base = [1] * N + [2] * N + [3] * N
        if index_type == 'monotonic_incr':
            arr = np.array(base, dtype=dtype)
        elif index_type == 'monotonic_decr':
            arr = np.array(base[::-1], dtype=dtype)
        else:  # 'non_monotonic'
            arr = np.array([1, 2, 3] * N, dtype=dtype)
        self.data = engine(lambda: arr, len(arr))
        # populate the engine's mapping up front so that the cost of
        # building it is excluded from the timed call below
        self.data.get_loc(2)

    def time_get_loc(self, engine_and_dtype, index_type):
        self.data.get_loc(2)
class ObjectEngineIndexing(object):
    # Benchmark get_loc on the object-dtype index engine.
    params = [('monotonic_incr', 'monotonic_decr', 'non_monotonic')]
    param_names = ['index_type']

    def setup(self, index_type):
        N = 10**5
        base = list('a' * N + 'b' * N + 'c' * N)
        if index_type == 'monotonic_incr':
            arr = np.array(base, dtype=object)
        elif index_type == 'monotonic_decr':
            arr = np.array(base[::-1], dtype=object)
        else:  # 'non_monotonic'
            arr = np.array(list('abc') * N, dtype=object)
        self.data = libindex.ObjectEngine(lambda: arr, len(arr))
        # populate the engine's mapping up front so that the cost of
        # building it is excluded from the timed call below
        self.data.get_loc('b')

    def time_get_loc(self, index_type):
        self.data.get_loc('b')
| bsd-3-clause |
mehdidc/py-earth | examples/plot_derivatives.py | 4 | 1177 | """
============================================
Plotting derivatives of simple sine function
============================================
A simple example plotting a fit of the sine function and
the derivatives computed by Earth.
"""
import numpy
import matplotlib.pyplot as plt
from pyearth import Earth
# Create some fake data: 10 uniform features on [-10, 10); only column 6
# (the 7th feature) actually drives the target.
numpy.random.seed(2)
m = 10000
n = 10
X = 20 * numpy.random.uniform(size=(m, n)) - 10
# noisy sine of the 7th feature
y = 10*numpy.sin(X[:, 6]) + 0.25*numpy.random.normal(size=m)
# Compute the known true derivative with respect to the predictive variable
y_prime = 10*numpy.cos(X[:, 6])
# Fit an Earth model (smooth=True makes the fit differentiable)
model = Earth(max_degree=2, minspan_alpha=.5, smooth=True)
model.fit(X, y)
# Print the model
print(model.trace())
print(model.summary())
# Get the predicted values and the model's derivative w.r.t. x6
y_hat = model.predict(X)
y_prime_hat = model.predict_deriv(X, 'x6')
# Plot true (red) and predicted (blue) function values and derivatives
# for the predictive variable
plt.subplot(211)
plt.plot(X[:, 6], y, 'r.')
plt.plot(X[:, 6], y_hat, 'b.')
plt.ylabel('function')
plt.subplot(212)
plt.plot(X[:, 6], y_prime, 'r.')
plt.plot(X[:, 6], y_prime_hat[:, 0], 'b.')
plt.ylabel('derivative')
plt.show()
| bsd-3-clause |
jefflyn/buddha | src/mlia/Ch10/kMeans.py | 3 | 6280 | '''
Created on Feb 16, 2011
k Means Clustering for Ch10 of Machine Learning in Action
@author: Peter Harrington
'''
from numpy import *
def loadDataSet(fileName):      # general function to parse tab-delimited floats
    # Parse a tab-delimited text file into a list of float rows.
    # NOTE: Python 2 code -- map() returns a list here; under Python 3 it
    # would return a lazy iterator instead.
    dataMat = []                # each row becomes a list of floats
    fr = open(fileName)
    for line in fr.readlines():
        curLine = line.strip().split('\t')
        fltLine = map(float,curLine)  # map all elements to float()
        dataMat.append(fltLine)
    return dataMat
def distEclud(vecA, vecB):
    """Euclidean distance between two vectors."""
    diff = vecA - vecB
    return sqrt(sum(power(diff, 2)))
def randCent(dataSet, k):
    """Create a (k, n) matrix of random centroids, each drawn uniformly
    within the min/max bounds of the corresponding data dimension."""
    n = shape(dataSet)[1]
    centroids = mat(zeros((k, n)))
    for j in range(n):
        # draw k values uniformly inside [min_j, min_j + range_j]
        minJ = min(dataSet[:, j])
        rangeJ = float(max(dataSet[:, j]) - minJ)
        centroids[:, j] = mat(minJ + rangeJ * random.rand(k, 1))
    return centroids
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    """Classic k-means: assign each point to its nearest centroid,
    recompute the centroids, and repeat until no assignment changes.

    Returns (centroids, clusterAssment) where clusterAssment holds, per
    point, the assigned cluster index and the squared distance to it.
    NOTE: Python 2 code (print statement).
    """
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m,2)))  # per point: [cluster index, squared error]
    centroids = createCent(dataSet, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(m):  # assign every point to its closest centroid
            minDist = inf; minIndex = -1
            for j in range(k):
                distJI = distMeas(centroids[j,:],dataSet[i,:])
                if distJI < minDist:
                    minDist = distJI; minIndex = j
            # any reassignment triggers one more pass
            if clusterAssment[i,0] != minIndex: clusterChanged = True
            clusterAssment[i,:] = minIndex,minDist**2
        print centroids
        for cent in range(k):  # recalculate centroids
            ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]  # all points in this cluster
            centroids[cent,:] = mean(ptsInClust, axis=0)  # centroid = mean of its members
    return centroids, clusterAssment
def biKmeans(dataSet, k, distMeas=distEclud):
    """Bisecting k-means: start with a single cluster and repeatedly
    2-means split the cluster whose split yields the lowest total SSE,
    until k clusters exist.
    NOTE: Python 2 code (print statement).
    """
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m,2)))  # per point: [cluster index, squared error]
    centroid0 = mean(dataSet, axis=0).tolist()[0]
    centList =[centroid0]  # start with one centroid: the global mean
    for j in range(m):  # initial squared error of every point
        clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2
    while (len(centList) < k):
        lowestSSE = inf
        for i in range(len(centList)):
            # candidate split: run 2-means on the points of cluster i
            ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
            sseSplit = sum(splitClustAss[:,1])  # SSE of the split cluster
            sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])
            print "sseSplit, and notSplit: ",sseSplit,sseNotSplit
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat
                bestClustAss = splitClustAss.copy()
                lowestSSE = sseSplit + sseNotSplit
        # relabel the 2-means output: sub-cluster 1 becomes a brand-new
        # index, sub-cluster 0 keeps the index of the split cluster
        bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList)
        bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit
        print 'the bestCentToSplit is: ',bestCentToSplit
        print 'the len of bestClustAss is: ', len(bestClustAss)
        centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]  # replace the split centroid
        centList.append(bestNewCents[1,:].tolist()[0])  # append the second new centroid
        clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss  # reassign labels and SSE
    return mat(centList), clusterAssment
import urllib
import json
def geoGrab(stAddress, city):
    """Geocode a street address + city via the Yahoo PlaceFinder API.

    Returns the decoded JSON response as a dict.
    NOTE: Python 2 code (urllib.urlencode / urllib.urlopen, print
    statement); performs a network request.
    """
    apiStem = 'http://where.yahooapis.com/geocode?'  # API base URL
    params = {}
    params['flags'] = 'J'  # request a JSON response
    params['appid'] = 'aaa0VN6k'
    params['location'] = '%s %s' % (stAddress, city)
    url_params = urllib.urlencode(params)
    yahooApi = apiStem + url_params  # (debug) print url_params
    print yahooApi
    c=urllib.urlopen(yahooApi)
    return json.loads(c.read())
from time import sleep
def massPlaceFind(fileName):
    """Geocode every tab-delimited record in fileName and append the
    resulting latitude/longitude columns to places.txt.

    Sleeps one second between requests to stay under the API rate limit.
    NOTE: Python 2 code (print statement); performs network requests.
    """
    fw = open('places.txt', 'w')
    for line in open(fileName).readlines():
        line = line.strip()
        lineArr = line.split('\t')
        # columns 1 and 2 are assumed to be street address and city
        retDict = geoGrab(lineArr[1], lineArr[2])
        if retDict['ResultSet']['Error'] == 0:
            lat = float(retDict['ResultSet']['Results'][0]['latitude'])
            lng = float(retDict['ResultSet']['Results'][0]['longitude'])
            print "%s\t%f\t%f" % (lineArr[0], lat, lng)
            fw.write('%s\t%f\t%f\n' % (line, lat, lng))
        else: print "error fetching"
        sleep(1)  # rate-limit the API calls
    fw.close()
def distSLC(vecA, vecB):
    """Great-circle distance in km between two (longitude, latitude) row
    vectors, via the spherical law of cosines (earth radius 6371 km)."""
    latA = vecA[0, 1] * pi / 180
    latB = vecB[0, 1] * pi / 180
    dLon = pi * (vecB[0, 0] - vecA[0, 0]) / 180
    a = sin(latA) * sin(latB)
    b = cos(latA) * cos(latB) * cos(dLon)
    return arccos(a + b) * 6371.0
import matplotlib
import matplotlib.pyplot as plt
def clusterClubs(numClust=5):
    """Cluster the geocoded clubs in places.txt with bisecting k-means
    (spherical-law-of-cosines distance) and plot them over Portland.png.
    """
    datList = []
    for line in open('places.txt').readlines():
        lineArr = line.split('\t')
        # columns 4 and 3 are assumed to be longitude and latitude -- TODO confirm
        datList.append([float(lineArr[4]), float(lineArr[3])])
    datMat = mat(datList)
    myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
    fig = plt.figure()
    rect=[0.1,0.1,0.8,0.8]
    # one marker style per cluster (cycled when numClust > 10)
    scatterMarkers=['s', 'o', '^', '8', 'p', \
                    'd', 'v', 'h', '>', '<']
    axprops = dict(xticks=[], yticks=[])
    ax0=fig.add_axes(rect, label='ax0', **axprops)
    imgP = plt.imread('Portland.png')
    ax0.imshow(imgP)  # background map image
    ax1=fig.add_axes(rect, label='ax1', frameon=False)  # overlay axes
    for i in range(numClust):
        ptsInCurrCluster = datMat[nonzero(clustAssing[:,0].A==i)[0],:]
        markerStyle = scatterMarkers[i % len(scatterMarkers)]
        ax1.scatter(ptsInCurrCluster[:,0].flatten().A[0], ptsInCurrCluster[:,1].flatten().A[0], marker=markerStyle, s=90)
    # centroids drawn as large crosses
    ax1.scatter(myCentroids[:,0].flatten().A[0], myCentroids[:,1].flatten().A[0], marker='+', s=300)
    plt.show()
| artistic-2.0 |
bikong2/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This examples shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
                           n_features=10,
                           n_informative=3,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=2,
                           random_state=0,
                           shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
                              random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
# inter-tree variability of each importance (used as error bars below)
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
# feature indices ordered from most to least important
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.

    Subclasses provide ``_build_graph`` to construct the propagation matrix;
    this base class implements the shared iterative fitting loop and the
    inductive ``predict`` / ``predict_proba`` interface.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
        Parameter for rbf kernel
    alpha : float
        Clamping factor
    max_iter : float
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_neighbors : integer > 0
        Parameter for knn kernel
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3):
        self.max_iter = max_iter
        self.tol = tol

        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors

        # clamping factor
        self.alpha = alpha

    def _get_kernel(self, X, y=None):
        # Return the kernel between X and itself (y is None) or between
        # X and y.  For 'knn', fitting state is cached on ``self.nn_fit``.
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
            if y is None:
                # sparse 0/1 connectivity graph over the training points
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                # neighbor indices of y within the training set
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)

    @abstractmethod
    def _build_graph(self):
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")

    def predict(self, X):
        """Performs inductive inference across the model.

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        # hard labels are the argmax of the per-class probabilities
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        if sparse.isspmatrix(X):
            X_2d = X
        else:
            X_2d = np.atleast_2d(X)
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # here weight_matrices is an array of neighbor indices per test
            # point; sum the training label distributions of the neighbors
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            # rbf: weighted combination of all training label distributions
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # renormalize each row to a proper categorical distribution
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities

    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based

        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this

        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X

        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()

        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])
        self.classes_ = classes

        n_samples, n_classes = len(y), len(classes)

        y = np.asarray(y)
        unlabeled = y == -1
        # labeled points get weight 1, unlabeled points are clamped by alpha
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha

        # initialize distributions: one-hot rows for the labeled samples
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        # static term re-injected after each propagation step
        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0

        l_previous = np.zeros((self.X_.shape[0], n_classes))

        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # iterate: propagate, clamp, repeat until converged or out of budget
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1

        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
        Parameter for rbf kernel
    n_neighbors : integer > 0
        Parameter for knn kernel
    alpha : float
        Clamping factor
    max_iter : float
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)

    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf

    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """

    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # BUG FIX: ``sum(axis=0)`` on a sparse matrix returns a
            # (1, n_samples) matrix, so the old expression
            # ``np.diag(np.array(normalizer))`` produced a single scalar
            # (the diagonal of a 1 x n array has length 1) and divided every
            # stored entry by the same value.  Instead divide each CSR row's
            # entries by that row's normalizer, mirroring the dense branch.
            affinity_matrix = affinity_matrix.tocsr()
            entries_per_row = np.diff(affinity_matrix.indptr)
            affinity_matrix.data /= np.repeat(
                np.asarray(normalizer).ravel(), entries_per_row)
        else:
            # divide row i by normalizer[i] (column sums of the affinity)
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning

    This model is similar to the basic Label Propagation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
      parameter for rbf kernel
    n_neighbors : integer > 0
      parameter for knn kernel
    alpha : float
      clamping factor
    max_iter : float
      maximum number of iterations allowed
    tol : float
      Convergence tolerance: threshold to consider the system at steady
      state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)

    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219

    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3):
        # this one has different base parameters (alpha defaults to 0.2,
        # i.e. soft clamping, unlike the hard-clamping base default)
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol)

    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        # propagation uses the negated normalized laplacian with zero diagonal
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            # NOTE(review): relies on graph_laplacian returning a COO matrix
            # here (``.row`` / ``.col`` only exist on COO) -- confirm against
            # the scipy/sklearn version in use
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| bsd-3-clause |
eg-zhang/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

# Create a random dataset: a single sorted feature in [-100, 100) whose two
# targets trace a circle of radius pi
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
# add uniform noise in [-0.5, 0.5) to every fifth sample's targets
y[::5, :] += (0.5 - rng.rand(20, 2))

# Fit regression model at three depths to illustrate under/overfitting
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)

# Predict on a dense grid covering the feature range
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)

# Plot the results: predicted (y0, y1) pairs should approximate the circle
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
lucasb-eyer/pydensecrf | tests/issue26.py | 2 | 2130 |
# coding: utf-8
# In[1]:
# import sys
# sys.path.insert(0,'/home/dlr16/Applications/anaconda2/envs/PyDenseCRF/lib/python2.7/site-packages')
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
# get_ipython().magic(u'matplotlib inline')
plt.rcParams['figure.figsize'] = (20, 20)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral, create_pairwise_gaussian

# ## Start from scratch

# In[3]:

# Build a 512x512 Gaussian bump centered at (256, 256) to serve as the
# foreground probability map.
from scipy.stats import multivariate_normal
x, y = np.mgrid[0:512, 0:512]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
rv = multivariate_normal([256, 256], 128*128)

# In[4]:

# Normalize to [0, 1], squash toward 0.5 so the unary is weakly informative,
# then stack into a 2-class softmax (class 1 = 1 - class 0).
probs = rv.pdf(pos)
probs = (probs-probs.min()) / (probs.max()-probs.min())
probs = 0.2 * (probs-0.5) + 0.5
probs = np.tile(probs[:,:,np.newaxis],(1,1,2))
probs[:,:,1] = 1 - probs[:,:,0]
# plt.plot(probs[256,:,0])

# transpose for graph: (n_classes, height, width) as expected by the utils
probs = np.transpose(probs,(2,0,1))

# In[17]:

# XX:IF NCHANNELS != 3, I GET ERRONEOUS OUTPUT
nchannels=4
U = unary_from_softmax(probs) # note: num classes is first dim
d = dcrf.DenseCRF2D(probs.shape[1],probs.shape[2],probs.shape[0])
d.setUnaryEnergy(U)

# Unary-only inference as a baseline for comparison.
Q_Unary = d.inference(10)
map_soln_Unary = np.argmax(Q_Unary, axis=0).reshape((probs.shape[1],probs.shape[2]))

# Synthetic n-channel "image": a centered square of ones on a zero background.
tmp_img = np.zeros((probs.shape[1],probs.shape[2],nchannels)).astype(np.uint8)
tmp_img[150:362,150:362,:] = 1

# Generic pairwise bilateral energy (works for any channel count),
# unlike addPairwiseBilateral which is RGB-only.
energy = create_pairwise_bilateral(sdims=(10,10), schan=0.01, img=tmp_img, chdim=2)
d.addPairwiseEnergy(energy, compat=10)

# This is wrong and will now raise a ValueError:
#d.addPairwiseBilateral(sxy=(10,10),
#                       srgb=0.01,
#                       rgbim=tmp_img,
#                       compat=10)

Q = d.inference(100)
map_soln = np.argmax(Q, axis=0).reshape((probs.shape[1],probs.shape[2]))

# 2x2 grid: unary probs, unary-only MAP, guide image, full-CRF MAP.
plt.subplot(2,2,1)
plt.imshow(probs[0,:,:])
plt.colorbar()

plt.subplot(2,2,2)
plt.imshow(map_soln_Unary)
plt.colorbar()

plt.subplot(2,2,3)
plt.imshow(tmp_img[:,:,0])
plt.colorbar()

plt.subplot(2,2,4)
plt.imshow(map_soln)
plt.colorbar()
plt.show()
| mit |
sniemi/EuclidVisibleInstrument | fitting/splineFitting.py | 1 | 4247 | """
Example how to spline B-spline to fake data.
:requires: Python 2.5 or later (no 3.x compatible)
:requires: NumPy
:requires: SciPy
TESTED:
Python 2.5.1
NumPy: 1.4.0.dev7576
SciPy: 0.7.1
matplotlib 1.0.svn
HISTORY:
Created on November 26, 2009
:version: 0.1: test release (SMN)
:author: Sami-Matias Niemi
:contact: s.niemi@ucl.ac.uk
"""
import numpy as N
import scipy.signal as SS
import scipy.interpolate as I
import scipy.optimize as O
import pylab as P
import math
__author__ = 'Sami-Matias Niemi (s.niemi@ucl.ac.uk)'
__version__ = '0.1'
class SplineFitting:
    '''
    Fits a B-spline representation of a 1-D curve.

    The node y-values are treated as free parameters and tuned with the
    Levenberg-Marquardt algorithm so that the resulting spline minimizes
    the sum of squared residuals against the data.
    '''

    def __init__(self, xnodes, spline_order=3):
        # x positions of the spline nodes and the B-spline order
        self.xnodes = xnodes
        self.k = spline_order

    def _fakeData(self):
        # Synthetic sample: the analytic curve plus Gaussian noise.
        xvals = N.linspace(1, 1024, 1024)
        clean = self._gety(xvals, 2.5, 1.3, 0.5, 10)
        noisy = clean + 0.25 * N.random.normal(size=len(xvals))
        return xvals, noisy

    def _gety(self, x, a, b, c, d):
        # Underlying analytic curve used to generate the fake data.
        return a * N.exp(-b * x) + c * N.log(d * x ** 2)

    def fitfunc(self, x, ynodes):
        '''
        Function that is fitted.
        This can be changed to whatever function.
        Note that ynodes can then be a list of parameters.

        :return: 1-D B-spline value at each x.
        '''
        tck = I.splrep(self.xnodes, ynodes, k=self.k)
        return I.splev(x, tck)

    def errfunc(self, ynodes, x, y):
        '''
        Error function.

        :return: fit - ydata
        '''
        return self.fitfunc(x, ynodes) - y

    def doFit(self, ynodes, x, y):
        '''
        Return the point which minimizes the sum of squares of M (non-linear)
        equations in N unknowns given a starting estimate, x0, using a
        modification of the Levenberg-Marquardt algorithm.

        :return: fitted parameters, error/success message
        '''
        return O.leastsq(self.errfunc, ynodes, args=(x, y))
if __name__ == '__main__':
    '''
    Executed if ran from a command line.
    '''
    #Initializes the instance with dummy xnodes
    Spline = SplineFitting([0, ])

    #Makes some faked data
    x, y = Spline._fakeData()

    #Median filter the data
    medianFiltered = SS.medfilt(y, 7)

    #Spline nodes and initial guess for y positions from median filtered
    xnods = N.arange(0, 1050, 50)
    ynods = medianFiltered[xnods]

    #Updates dummy xnodes in Spline instance with the real deal
    Spline.xnodes = xnods

    #Do the fitting
    fittedYnodes, success = Spline.doFit(ynods, x, y)

    #We can check how good the fit is
    chi2 = N.sum(N.power(Spline.errfunc(fittedYnodes, x, y), 2))
    dof = len(ynods) - 1.
    crit = (math.sqrt(2 * (dof - 1.)) + 1.635) ** 2  #only valid for large dofs
    #print written as a function call so the script parses under both
    #Python 2 and Python 3 (the original used the py2-only print statement)
    print('Chi**2 %6.2f vs %6.2f' % (chi2, crit))
    #note that there is also chisquare in scipy.stats which
    #could be used to evaluate p-values...

    #Lets plot the data for visual inspection
    fig = P.figure()
    left, width = 0.1, 0.8
    rect1 = [left, 0.3, width, 0.65]
    rect2 = [left, 0.1, width, 0.2]
    ax1 = fig.add_axes(rect2)  #left, bottom, width, height
    ax2 = fig.add_axes(rect1)
    ax2.plot(x, y, label='Noisy data')
    ax2.plot(x, medianFiltered, 'y-', label='Median Filtered', lw=2)
    ax2.plot(x, Spline.fitfunc(x, ynods), 'm-', label='Initial Spline', lw=2)
    ax2.plot(x, Spline.fitfunc(x, fittedYnodes), 'r-', label='Fitted Spline', lw=2)
    ax2.plot(xnods, ynods, 'go', label='Initial Spline nodes')
    ax2.plot(xnods, fittedYnodes, 'gs', label='Fitted Spline nodes')
    ax1.axhline(0)
    ax1.plot(x, SS.medfilt((y - Spline.fitfunc(x, ynods)), 55), 'm-', label='Initial guess residuals')
    ax1.plot(x, SS.medfilt((y - Spline.fitfunc(x, fittedYnodes)), 55), 'r-', label='Fitted residuals')
    ax1.set_xlim(0, 1000)
    ax2.set_xlim(0, 1000)
    ax2.set_xticklabels([])
    ax2.set_yticks(ax2.get_yticks()[1:])
    ax1.set_yticks(ax1.get_yticks()[::2])
    ax1.set_ylabel('Residuals')
    ax2.set_ylabel('Arbitrary Counts')
    ax1.set_xlabel('Pixels')
    try:
        #IRAFDEV has too old matplotlib...
        ax2.legend(numpoints=1, loc='best')
    except:
        ax2.legend(loc='best')
    P.savefig('SplineFitting.pdf')
| bsd-2-clause |
timothyb0912/pylogit | tests/test_construct_estimator.py | 1 | 26497 | """
Tests for the construct_estimator.py file.
"""
import unittest
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, eye
import pylogit.asym_logit as asym
import pylogit.conditional_logit as mnl
import pylogit.clog_log as clog
import pylogit.scobit as scobit
import pylogit.uneven_logit as uneven
import pylogit.mixed_logit_calcs as mlc
import pylogit.mixed_logit as mixed_logit
import pylogit.nested_logit as nested_logit
import pylogit.construct_estimator as constructor
class ConstructorTests(unittest.TestCase):
    """Tests that ``create_estimation_obj`` builds the correct estimator
    for every supported model type, using small hand-built model objects."""

    def make_asym_model(self):
        """Build a tiny, fully-parameterized Asymmetric Logit model object."""
        # The set up being used is one where there are two choice situations,
        # The first having three alternatives, and the second having only two
        # alternatives. There is one generic variable. Two alternative
        # specific constants and all three shape parameters are used.

        # Create the betas to be used during the tests
        fake_betas = np.array([-0.6])

        # Create the fake outside intercepts to be used during the tests
        fake_intercepts = np.array([1, 0.5])

        # Create names for the intercept parameters
        fake_intercept_names = ["ASC 1", "ASC 2"]

        # Record the position of the intercept that is not being estimated
        fake_intercept_ref_pos = 2

        # Create the shape parameters to be used during the tests. Note that
        # these are the reparameterized shape parameters, thus they will be
        # exponentiated in the fit_mle process and various calculations.
        fake_shapes = np.array([-1, 1])

        # Create names for the intercept parameters
        fake_shape_names = ["Shape 1", "Shape 2"]

        # Record the position of the shape parameter that is being constrained
        fake_shape_ref_pos = 2

        # Calculate the 'natural' shape parameters
        natural_shapes = asym._convert_eta_to_c(fake_shapes,
                                                fake_shape_ref_pos)

        # Create an array of all model parameters
        fake_all_params = np.concatenate((fake_shapes,
                                          fake_intercepts,
                                          fake_betas))

        # Get the mapping between rows and observations
        fake_rows_to_obs = csr_matrix(np.array([[1, 0],
                                                [1, 0],
                                                [1, 0],
                                                [0, 1],
                                                [0, 1]]))

        # Create the fake design matrix with columns denoting X
        # The intercepts are not included because they are kept outside the
        # index in the scobit model.
        fake_design = np.array([[1],
                                [2],
                                [3],
                                [1.5],
                                [3.5]])

        # Create the index array for this set of choice situations
        fake_index = fake_design.dot(fake_betas)

        # Create the needed dataframe for the Asymmetric Logit constructor
        fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
                                "alt_id": [1, 2, 3, 1, 3],
                                "choice": [0, 1, 0, 0, 1],
                                "x": fake_design[:, 0],
                                "intercept": [1 for i in range(5)]})

        # Record the various column names
        alt_id_col = "alt_id"
        obs_id_col = "obs_id"
        choice_col = "choice"

        # Create the index specification and name dictionary for the model
        fake_specification = OrderedDict()
        fake_names = OrderedDict()
        fake_specification["x"] = [[1, 2, 3]]
        fake_names["x"] = ["x (generic coefficient)"]

        # Bundle args and kwargs used to construct the Asymmetric Logit model.
        constructor_args = [fake_df,
                            alt_id_col,
                            obs_id_col,
                            choice_col,
                            fake_specification]

        # Create a variable for the kwargs being passed to the constructor
        constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
                              "shape_ref_pos": fake_shape_ref_pos,
                              "names": fake_names,
                              "intercept_names": fake_intercept_names,
                              "shape_names": fake_shape_names}

        # Initialize a basic Asymmetric Logit model whose coefficients will be
        # estimated.
        model_obj = asym.MNAL(*constructor_args, **constructor_kwargs)

        # Attach the parameter series the estimator constructor expects
        model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
        model_obj.intercepts =\
            pd.Series(fake_intercepts, index=fake_intercept_names)
        model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
        model_obj.nests = None
        model_obj.params =\
            pd.concat([model_obj.shapes,
                       model_obj.intercepts,
                       model_obj.coefs],
                      axis=0, ignore_index=False)

        return model_obj

    def make_uneven_and_scobit_models(self):
        """Build tiny Uneven Logit and Scobit model objects from one setup."""
        # The set up being used is one where there are two choice situations,
        # The first having three alternatives, and the second having only two
        # alternatives. There is one generic variable. Two alternative
        # specific constants and all three shape parameters are used.

        # Create the betas to be used during the tests
        fake_betas = np.array([-0.6])

        # Create the fake outside intercepts to be used during the tests
        fake_intercepts = np.array([1, 0.5])

        # Create names for the intercept parameters
        fake_intercept_names = ["ASC 1", "ASC 2"]

        # Record the position of the intercept that is not being estimated
        fake_intercept_ref_pos = 2

        # Create the shape parameters to be used during the tests. Note that
        # these are the reparameterized shape parameters, thus they will be
        # exponentiated in the fit_mle process and various calculations.
        fake_shapes = np.array([-1, 1, 2])

        # Create names for the intercept parameters
        fake_shape_names = ["Shape 1", "Shape 2", "Shape 3"]

        # Create an array of all model parameters
        fake_all_params = np.concatenate((fake_shapes,
                                          fake_intercepts,
                                          fake_betas))

        # Get the mapping between rows and observations
        fake_rows_to_obs = csr_matrix(np.array([[1, 0],
                                                [1, 0],
                                                [1, 0],
                                                [0, 1],
                                                [0, 1]]))

        # Create the fake design matrix with columns denoting X
        # The intercepts are not included because they are kept outside the
        # index in the scobit model.
        fake_design = np.array([[1],
                                [2],
                                [3],
                                [1.5],
                                [3.5]])

        # Create the index array for this set of choice situations
        fake_index = fake_design.dot(fake_betas)

        # Create the needed dataframe for the model constructor
        fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
                                "alt_id": [1, 2, 3, 1, 3],
                                "choice": [0, 1, 0, 0, 1],
                                "x": fake_design[:, 0],
                                "intercept": [1 for i in range(5)]})

        # Record the various column names
        alt_id_col = "alt_id"
        obs_id_col = "obs_id"
        choice_col = "choice"

        # Create the index specification and name dictionary for the model
        fake_specification = OrderedDict()
        fake_names = OrderedDict()
        fake_specification["x"] = [[1, 2, 3]]
        fake_names["x"] = ["x (generic coefficient)"]

        # Bundle args and kwargs used to construct the choice models.
        constructor_args = [fake_df,
                            alt_id_col,
                            obs_id_col,
                            choice_col,
                            fake_specification]

        # Create a variable for the kwargs being passed to the constructor
        constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
                              "names": fake_names,
                              "intercept_names": fake_intercept_names,
                              "shape_names": fake_shape_names}

        # Initialize the various choice models
        uneven_obj = uneven.MNUL(*constructor_args, **constructor_kwargs)
        scobit_obj = scobit.MNSL(*constructor_args, **constructor_kwargs)

        # Both models share the same fake parameter series
        for model_obj in [uneven_obj, scobit_obj]:
            model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
            model_obj.intercepts =\
                pd.Series(fake_intercepts, index=fake_intercept_names)
            model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
            model_obj.nests = None
            model_obj.params =\
                pd.concat([model_obj.shapes,
                           model_obj.intercepts,
                           model_obj.coefs],
                          axis=0, ignore_index=False)

        return uneven_obj, scobit_obj

    def make_clog_and_mnl_models(self):
        """Build tiny Clog-log and MNL model objects from one setup."""
        # The set up being used is one where there are two choice situations,
        # The first having three alternatives, and the second having only two
        # alternatives. There is one generic variable. Two alternative
        # specific constants and all three shape parameters are used.

        # Create the betas to be used during the tests
        fake_betas = np.array([-0.6])

        # Create the fake outside intercepts to be used during the tests
        fake_intercepts = np.array([1, 0.5])

        # Create names for the intercept parameters
        fake_intercept_names = ["ASC 1", "ASC 2"]

        # Record the position of the intercept that is not being estimated
        fake_intercept_ref_pos = 2

        # Create an array of all model parameters
        fake_all_params = np.concatenate((fake_intercepts, fake_betas))

        # Get the mapping between rows and observations
        fake_rows_to_obs = csr_matrix(np.array([[1, 0],
                                                [1, 0],
                                                [1, 0],
                                                [0, 1],
                                                [0, 1]]))

        # Create the fake design matrix with columns denoting X
        # The intercepts are not included because they are kept outside the
        # index in the scobit model.
        fake_design = np.array([[1],
                                [2],
                                [3],
                                [1.5],
                                [3.5]])

        # Create the index array for this set of choice situations
        fake_index = fake_design.dot(fake_betas)

        # Create the needed dataframe for the model constructor
        fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
                                "alt_id": [1, 2, 3, 1, 3],
                                "choice": [0, 1, 0, 0, 1],
                                "x": fake_design[:, 0],
                                "intercept": [1 for i in range(5)]})

        # Record the various column names
        alt_id_col = "alt_id"
        obs_id_col = "obs_id"
        choice_col = "choice"

        # Create the index specification and name dictionary for the model
        fake_specification = OrderedDict()
        fake_names = OrderedDict()
        fake_specification["x"] = [[1, 2, 3]]
        fake_names["x"] = ["x (generic coefficient)"]

        # The MNL keeps the intercepts inside its index specification,
        # unlike the clog-log model
        mnl_spec = OrderedDict()
        mnl_names = OrderedDict()
        mnl_spec["intercept"] = [1, 2]
        mnl_names["intercept"] = fake_intercept_names
        mnl_spec["x"] = fake_specification["x"]
        mnl_names["x"] = fake_names["x"]

        # Bundle args and kwargs used to construct the Asymmetric Logit model.
        clog_args = [fake_df,
                     alt_id_col,
                     obs_id_col,
                     choice_col,
                     fake_specification]
        mnl_args = deepcopy(clog_args)
        mnl_args[-1] = mnl_spec

        # Create a variable for the kwargs being passed to the constructor
        clog_kwargs = {"names": fake_names,
                       "intercept_ref_pos": fake_intercept_ref_pos,
                       "intercept_names": fake_intercept_names}
        mnl_kwargs = {"names": mnl_names}

        # Initialize a basic Asymmetric Logit model whose coefficients will be
        # estimated.
        clog_obj = clog.MNCL(*clog_args, **clog_kwargs)
        mnl_obj = mnl.MNL(*mnl_args, **mnl_kwargs)

        # Create the desired model attributes for the clog log model
        clog_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
        clog_obj.intercepts =\
            pd.Series(fake_intercepts, index=fake_intercept_names)
        clog_obj.shapes = None
        clog_obj.nests = None
        clog_obj.params =\
            pd.concat([clog_obj.intercepts, clog_obj.coefs],
                      axis=0, ignore_index=False)

        # For the MNL, all parameters live in ``coefs``
        mnl_obj.params = clog_obj.params.copy()
        mnl_obj.coefs = mnl_obj.params.copy()
        mnl_obj.intercepts = None
        mnl_obj.shapes = None
        mnl_obj.nests = None

        return clog_obj, mnl_obj

    def make_mixed_model(self):
        """Build a tiny Mixed Logit model object with two simulation draws."""
        # Fake random draws where Row 1 is for observation 1 and row 2 is
        # for observation 2. Column 1 is for draw 1 and column 2 is for draw 2
        fake_draws = mlc.get_normal_draws(2, 2, 1, seed=1)[0]

        # Create the betas to be used during the tests
        fake_betas = np.array([0.3, -0.6, 0.2])
        fake_std = 1
        fake_betas_ext = np.concatenate((fake_betas,
                                         np.array([fake_std])),
                                        axis=0)

        # Create the fake design matrix with columns denoting ASC_1, ASC_2, X
        fake_design = np.array([[1, 0, 1],
                                [0, 1, 2],
                                [0, 0, 3],
                                [1, 0, 1.5],
                                [0, 1, 2.5],
                                [0, 0, 3.5],
                                [1, 0, 0.5],
                                [0, 1, 1.0],
                                [0, 0, 1.5]])

        # Record what positions in the design matrix are being mixed over
        mixing_pos = [2]

        # Create the arrays that specify the choice situation, individual id
        # and alternative ids
        situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
        individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2])
        alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])

        # Create a fake array of choices
        choice_array = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0])

        # Create the 'rows_to_mixers' sparse array for this dataset
        # Denote the rows that correspond to observation 1 and observation 2
        obs_1_rows = np.ones(fake_design.shape[0])
        # Make sure the rows for observation 2 are given a zero in obs_1_rows
        obs_1_rows[-3:] = 0
        obs_2_rows = 1 - obs_1_rows

        # Create the row_to_mixers scipy.sparse matrix
        fake_rows_to_mixers = csr_matrix(obs_1_rows[:, None] ==
                                         np.array([1, 0])[None, :])

        # Create the rows_to_obs scipy.sparse matrix
        fake_rows_to_obs = csr_matrix(situation_ids[:, None] ==
                                      np.arange(1, 4)[None, :])

        # Create the design matrix that we should see for draw 1 and draw 2
        arrays_to_join = (fake_design.copy(),
                          fake_design.copy()[:, -1][:, None])
        fake_design_draw_1 = np.concatenate(arrays_to_join, axis=1)
        fake_design_draw_2 = fake_design_draw_1.copy()

        # Multiply the 'random' coefficient draws by the corresponding variable
        fake_design_draw_1[:, -1] *= (obs_1_rows *
                                      fake_draws[0, 0] +
                                      obs_2_rows *
                                      fake_draws[1, 0])
        fake_design_draw_2[:, -1] *= (obs_1_rows *
                                      fake_draws[0, 1] +
                                      obs_2_rows *
                                      fake_draws[1, 1])

        # Stack the two per-draw design matrices into a 3D design array
        extended_design_draw_1 = fake_design_draw_1[:, None, :]
        extended_design_draw_2 = fake_design_draw_2[:, None, :]
        fake_design_3d = np.concatenate((extended_design_draw_1,
                                         extended_design_draw_2),
                                        axis=1)

        # Create the fake systematic utility values
        sys_utilities_draw_1 = fake_design_draw_1.dot(fake_betas_ext)
        sys_utilities_draw_2 = fake_design_draw_2.dot(fake_betas_ext)

        #####
        # Calculate the probabilities of each alternative in each choice
        # situation
        #####
        long_exp_draw_1 = np.exp(sys_utilities_draw_1)
        long_exp_draw_2 = np.exp(sys_utilities_draw_2)

        ind_exp_sums_draw_1 = fake_rows_to_obs.T.dot(long_exp_draw_1)
        ind_exp_sums_draw_2 = fake_rows_to_obs.T.dot(long_exp_draw_2)

        long_exp_sum_draw_1 = fake_rows_to_obs.dot(ind_exp_sums_draw_1)
        long_exp_sum_draw_2 = fake_rows_to_obs.dot(ind_exp_sums_draw_2)

        long_probs_draw_1 = long_exp_draw_1 / long_exp_sum_draw_1
        long_probs_draw_2 = long_exp_draw_2 / long_exp_sum_draw_2

        prob_array = np.concatenate((long_probs_draw_1[:, None],
                                     long_probs_draw_2[:, None]),
                                    axis=1)

        ###########
        # Create a mixed logit object for later use.
        ##########
        # Create a fake old long format dataframe for mixed logit model object
        alt_id_column = "alt_id"
        situation_id_column = "situation_id"
        obs_id_column = "observation_id"
        choice_column = "choice"

        data = {"x": fake_design[:, 2],
                alt_id_column: alternative_ids,
                situation_id_column: situation_ids,
                obs_id_column: individual_ids,
                choice_column: choice_array}
        fake_old_df = pd.DataFrame(data)
        fake_old_df["intercept"] = 1

        # Create a fake specification
        fake_spec = OrderedDict()
        fake_names = OrderedDict()
        fake_spec["intercept"] = [1, 2]
        fake_names["intercept"] = ["ASC 1", "ASC 2"]
        fake_spec["x"] = [[1, 2, 3]]
        fake_names["x"] = ["beta_x"]

        # Specify the mixing variable
        fake_mixing_vars = ["beta_x"]

        # Create a fake version of a mixed logit model object
        args = [fake_old_df,
                alt_id_column,
                situation_id_column,
                choice_column,
                fake_spec]
        kwargs = {"names": fake_names,
                  "mixing_id_col": obs_id_column,
                  "mixing_vars": fake_mixing_vars}
        mixl_obj = mixed_logit.MixedLogit(*args, **kwargs)

        # Set all the necessary attributes for prediction:
        # design_3d, coefs, intercepts, shapes, nests, mixing_pos
        mixl_obj.design_3d = fake_design_3d
        mixl_obj.ind_var_names += ["Sigma X"]
        mixl_obj.coefs =\
            pd.Series(fake_betas_ext, index=mixl_obj.ind_var_names)
        mixl_obj.intercepts = None
        mixl_obj.shapes = None
        mixl_obj.nests = None
        mixl_obj.params = mixl_obj.coefs.copy()

        return mixl_obj

    def make_nested_model(self):
        """Build a tiny Nested Logit model object with two nests."""
        # Create the betas to be used during the tests
        fake_betas = np.array([0.3, -0.6, 0.2])

        # Create the fake nest coefficients to be used during the tests
        # Note that these are the 'natural' nest coefficients, i.e. the
        # inverse of the scale parameters for each nest. They should be bigger
        # than or equal to 1.
        natural_nest_coefs = np.array([1 - 1e-16, 0.5])

        # Create an array of all model parameters
        fake_all_params = np.concatenate((natural_nest_coefs,
                                          fake_betas))

        # The set up being used is one where there are two choice situations,
        # The first having three alternatives, and the second having only two.
        # The nest memberships of these alternatives are given below.
        fake_rows_to_nests = csr_matrix(np.array([[1, 0],
                                                  [1, 0],
                                                  [0, 1],
                                                  [1, 0],
                                                  [0, 1]]))

        # Create a sparse matrix that maps the rows of the design matrix to the
        # observations
        fake_rows_to_obs = csr_matrix(np.array([[1, 0],
                                                [1, 0],
                                                [1, 0],
                                                [0, 1],
                                                [0, 1]]))

        # Create the fake design matrix with columns denoting ASC_1, ASC_2, X
        fake_design = np.array([[1, 0, 1],
                                [0, 1, 2],
                                [0, 0, 3],
                                [1, 0, 1.5],
                                [0, 0, 3.5]])

        # Create fake versions of the needed arguments for the MNL constructor
        fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
                                "alt_id": [1, 2, 3, 1, 3],
                                "choice": [0, 1, 0, 0, 1],
                                "x": range(5),
                                "intercept": [1 for i in range(5)]})

        # Record the various column names
        alt_id_col = "alt_id"
        obs_id_col = "obs_id"
        choice_col = "choice"

        # Store the choice array
        choice_array = fake_df[choice_col].values

        # Create a sparse matrix that maps the chosen rows of the design
        # matrix to the observations
        fake_chosen_rows_to_obs = csr_matrix(np.array([[0, 0],
                                                       [1, 0],
                                                       [0, 0],
                                                       [0, 0],
                                                       [0, 1]]))

        # Create the index specification and name dictionary for the model
        fake_specification = OrderedDict()
        fake_specification["intercept"] = [1, 2]
        fake_specification["x"] = [[1, 2, 3]]
        fake_names = OrderedDict()
        fake_names["intercept"] = ["ASC 1", "ASC 2"]
        fake_names["x"] = ["x (generic coefficient)"]

        # Create the nesting specification
        fake_nest_spec = OrderedDict()
        fake_nest_spec["Nest 1"] = [1, 2]
        fake_nest_spec["Nest 2"] = [3]

        # Create a nested logit object
        args = [fake_df,
                alt_id_col,
                obs_id_col,
                choice_col,
                fake_specification]
        kwargs = {"names": fake_names,
                  "nest_spec": fake_nest_spec}
        model_obj = nested_logit.NestedLogit(*args, **kwargs)

        model_obj.coefs = pd.Series(fake_betas, index=model_obj.ind_var_names)
        model_obj.intercepts = None
        model_obj.shapes = None

        def logit(x):
            # inverse of the logistic transform applied to the nest coefs
            return np.log(x / (1 - x))

        model_obj.nests =\
            pd.Series(logit(natural_nest_coefs), index=fake_nest_spec.keys())
        model_obj.params =\
            pd.concat([model_obj.nests, model_obj.coefs],
                      axis=0, ignore_index=False)

        return model_obj

    def setUp(self):
        """
        Create the real model objects.
        """
        self.asym_model = self.make_asym_model()
        self.uneven_model, self.scobit_model =\
            self.make_uneven_and_scobit_models()
        self.clog_model, self.mnl_model = self.make_clog_and_mnl_models()
        self.mixed_model = self.make_mixed_model()
        self.nested_model = self.make_nested_model()
        return None

    def test_create_estimation_obj(self):
        """Each model type should map to its estimator class with the
        constructor keyword arguments stored on the resulting object."""
        # Alias the function being tested
        func = constructor.create_estimation_obj

        # Take note of the models that are being used in this test
        models = [self.mnl_model,
                  self.clog_model,
                  self.asym_model,
                  self.scobit_model,
                  self.uneven_model,
                  self.nested_model,
                  self.mixed_model]

        # Perform the desired tests
        for model_obj in models:
            # Get the internal model name
            internal_model_name =\
                constructor.display_name_to_model_type[model_obj.model_type]
            # Get the estimation object class
            estimation_class = (constructor.model_type_to_resources
                                           [internal_model_name]
                                           ['estimator'])
            # Get the function results
            args = [model_obj, model_obj.params.values]
            kwargs = {"mappings": model_obj.get_mappings_for_fit(),
                      "ridge": 0.25,
                      "constrained_pos": [0],
                      "weights": np.ones(model_obj.data.shape[0])}
            # Make sure the function result is of the correct class.
            func_result = func(*args, **kwargs)
            self.assertIsInstance(func_result, estimation_class)
            # The kwargs should be recorded verbatim on the estimator
            for key in ['ridge', 'constrained_pos', 'weights']:
                expected_value = kwargs[key]
                self.assertTrue(hasattr(func_result, key))
                func_value = getattr(func_result, key)
                if isinstance(expected_value, np.ndarray):
                    npt.assert_allclose(expected_value, func_value)
                else:
                    self.assertEqual(expected_value, func_value)
        return None
| bsd-3-clause |
nomadcube/scikit-learn | sklearn/tree/tests/test_tree.py | 72 | 47440 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
# Splitting criteria exercised for classification and regression trees.
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
# Registries mapping a display name (used in failure messages) to an
# estimator factory; the "Presort-" variants pin splitter="presort-best".
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
                                              splitter="presort-best"),
    "ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
                                             splitter="presort-best"),
    "ExtraTreeRegressor": ExtraTreeRegressor,
}
# Union of the two registries, plus the subset whose default splitter
# supports sparse input.
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
                if Tree().splitter in SPARSE_SPLITTERS]
# Small hand-crafted classification/regression dataset (23 samples,
# 14 features) used by the "clf_small" / "reg_small" entries below.
X_small = np.array([
    [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
    [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
    [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
    [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
    [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
    [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
    [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
    [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
    [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
    [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
    [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
    [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
    [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
    [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
    [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
    [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
    [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
# Binary labels and real-valued targets matching X_small row-for-row.
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
           0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
               0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample: six linearly separable points plus held-out queries T with
# known predictions (true_result).
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it (the same RandomState is reused for the later
# boston and digits permutations as well)
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# digits dataset, permuted the same way
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
# Synthetic datasets: multilabel targets plus sparse-friendly feature
# matrices (positive-only, negated, and mixed-sign).
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
    random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
# Central registry of all fixtures; each entry carries "X", "y" and,
# via the loop below, an "X_sparse" CSC copy of "X".
DATASETS = {
    "iris": {"X": iris.data, "y": iris.target},
    "boston": {"X": boston.data, "y": boston.target},
    "digits": {"X": digits.data, "y": digits.target},
    "toy": {"X": X, "y": y},
    "clf_small": {"X": X_small, "y": y_small},
    "reg_small": {"X": X_small, "y": y_small_reg},
    "multilabel": {"X": X_multilabel, "y": y_multilabel},
    "sparse-pos": {"X": X_sparse_pos, "y": y_random},
    "sparse-neg": {"X": - X_sparse_pos, "y": y_random},
    "sparse-mix": {"X": X_sparse_mix, "y": y_random},
    "zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
    DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    """Assert that two fitted Tree structures are equivalent.

    ``d`` and ``s`` are ``tree_`` attributes of fitted estimators (by
    convention the dense- and sparse-trained trees); ``message`` prefixes
    every failure message.  Internal nodes are compared on feature and
    threshold; leaf values are compared separately.
    """
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of node ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": inequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": inequal children_left")
    # Leaves are marked by children_right == TREE_LEAF; everything else
    # is an internal (splitting) node.
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": inequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": inequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": inequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": inequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": inequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": inequal value")
def test_classification_toy():
    """Every classifier must perfectly solve the separable toy problem,
    both with default settings and with max_features=1."""
    for tree_name, TreeCls in CLF_TREES.items():
        for params in ({"random_state": 0},
                       {"max_features": 1, "random_state": 1}):
            model = TreeCls(**params)
            model.fit(X, y)
            assert_array_equal(model.predict(T), true_result,
                               "Failed with {0}".format(tree_name))
def test_weighted_classification_toy():
    """Uniform sample weights (of any positive scale) must not change
    the toy-problem predictions."""
    for tree_name, TreeCls in CLF_TREES.items():
        for weights in (np.ones(len(X)), np.ones(len(X)) * 0.5):
            model = TreeCls(random_state=0)
            model.fit(X, y, sample_weight=weights)
            assert_array_equal(model.predict(T), true_result,
                               "Failed with {0}".format(tree_name))
def test_regression_toy():
    """Check regression on a toy dataset.

    Each regressor is fit twice — once with default settings and once
    with ``max_features=1`` — and both fitted models must reproduce the
    expected predictions.
    """
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))

        # BUG FIX: the original asserted on the first model (``reg``)
        # again, so the max_features=1 fit was never actually checked.
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
    """Classifiers must reach a perfect training score on a 10x10 XOR
    grid, with and without max_features=1."""
    # Build the checkerboard target and the (row, col) feature grid.
    labels = np.zeros((10, 10))
    labels[:5, :5] = 1
    labels[5:, 5:] = 1
    gridx, gridy = np.indices(labels.shape)
    X = np.vstack([gridx.ravel(), gridy.ravel()]).T
    y = labels.ravel()
    for tree_name, TreeCls in CLF_TREES.items():
        for params in ({"random_state": 0},
                       {"random_state": 0, "max_features": 1}):
            model = TreeCls(**params)
            model.fit(X, y)
            assert_equal(model.score(X, y), 1.0,
                         "Failed with {0}".format(tree_name))
def test_iris():
    """Sanity-check classifier training accuracy on iris for every
    classifier/criterion combination."""
    # Check consistency on dataset iris.
    for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
        clf = Tree(criterion=criterion, random_state=0)
        clf.fit(iris.data, iris.target)
        score = accuracy_score(clf.predict(iris.data), iris.target)
        assert_greater(score, 0.9,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))
        # With max_features restricted, only a looser bound is asserted.
        clf = Tree(criterion=criterion, max_features=2, random_state=0)
        clf.fit(iris.data, iris.target)
        score = accuracy_score(clf.predict(iris.data), iris.target)
        assert_greater(score, 0.5,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))
def test_boston():
    """Sanity-check regressor training MSE on boston for every
    regressor/criterion combination."""
    # Check consistency on dataset boston house prices.
    for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
        reg = Tree(criterion=criterion, random_state=0)
        reg.fit(boston.data, boston.target)
        score = mean_squared_error(boston.target, reg.predict(boston.data))
        assert_less(score, 1,
                    "Failed with {0}, criterion = {1} and score = {2}"
                    "".format(name, criterion, score))
        # using fewer features reduces the learning ability of this tree,
        # but reduces training time.
        reg = Tree(criterion=criterion, max_features=6, random_state=0)
        reg.fit(boston.data, boston.target)
        score = mean_squared_error(boston.target, reg.predict(boston.data))
        assert_less(score, 2,
                    "Failed with {0}, criterion = {1} and score = {2}"
                    "".format(name, criterion, score))
def test_probability():
    """predict_proba must be a valid distribution that is consistent
    with predict and predict_log_proba."""
    # Predict probabilities using DecisionTreeClassifier.
    for name, Tree in CLF_TREES.items():
        clf = Tree(max_depth=1, max_features=1, random_state=42)
        clf.fit(iris.data, iris.target)
        prob_predict = clf.predict_proba(iris.data)
        # Each row of probabilities sums to one ...
        assert_array_almost_equal(np.sum(prob_predict, 1),
                                  np.ones(iris.data.shape[0]),
                                  err_msg="Failed with {0}".format(name))
        # ... the argmax matches the hard prediction ...
        assert_array_equal(np.argmax(prob_predict, 1),
                           clf.predict(iris.data),
                           err_msg="Failed with {0}".format(name))
        # ... and exp(log-probabilities) agrees with the probabilities.
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8,
                            err_msg="Failed with {0}".format(name))
def test_arrayrepr():
    """Smoke-test fitting on a large 1-D problem so the internal node
    array has to be resized; only completion (no crash) is checked."""
    big_X = np.arange(10000)[:, np.newaxis]
    big_y = np.arange(10000)
    for tree_name, TreeCls in REG_TREES.items():
        TreeCls(max_depth=None, random_state=0).fit(big_X, big_y)
def test_pure_set():
    """Check fitting when y is pure (a single constant value).

    Both classifiers and regressors must reproduce the constant target
    exactly on the training data.
    """
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]

    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))

    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # BUG FIX: this previously asserted on ``clf`` (the last
        # classifier from the loop above), so the regressors were fit
        # but never actually checked.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
    """Fitting on near-duplicate float data must not trigger any
    floating-point warnings (errstate(all="raise") turns them into
    exceptions)."""
    # Check numerical stability.
    X = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])
    y = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
    with np.errstate(all="raise"):
        for name, Tree in REG_TREES.items():
            reg = Tree(random_state=0)
            # All four sign combinations of (X, y) must fit cleanly.
            reg.fit(X, y)
            reg.fit(X, -y)
            reg.fit(-X, y)
            reg.fit(-X, -y)
def test_importances():
    """feature_importances_ must identify exactly the informative
    features, and importances must not depend on the tree builder."""
    # Check variable importances.
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        importances = clf.feature_importances_
        # Only the 3 informative features should exceed the 0.1 cutoff.
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
        assert_equal(n_important, 3, "Failed with {0}".format(name))
        # transform() with the mean threshold must drop some features
        # while keeping at least one.
        X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
        assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
    # Check on iris that importances are the same for all builders
    # (depth-first vs best-first via max_leaf_nodes).
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(iris.data, iris.target)
    clf2 = DecisionTreeClassifier(random_state=0,
                                  max_leaf_nodes=len(iris.data))
    clf2.fit(iris.data, iris.target)
    assert_array_equal(clf.feature_importances_,
                       clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
    """Accessing feature_importances_ before fit raises ValueError."""
    # The attribute access itself is the assertion: @raises expects it
    # to throw.
    DecisionTreeClassifier().feature_importances_
def test_importances_gini_equal_mse():
    """For a binary target, a gini classifier and an mse regressor must
    build the same tree and report the same importances."""
    # Check that gini is equivalent to mse for binary output variable
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occurs at
    # high tree depth, we restrict this maximal depth.
    clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
                                 random_state=0).fit(X, y)
    reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
                                random_state=0).fit(X, y)
    assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
    # The two trees must also be structurally identical.
    assert_array_equal(clf.tree_.feature, reg.tree_.feature)
    assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
    assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
    assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
    """max_features_ must be resolved correctly for every accepted form
    ("auto"/"sqrt"/"log2", int, float, None) and rejected otherwise."""
    # Check max_features.
    # "auto" means all features for regressors ...
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(max_features="auto")
        reg.fit(boston.data, boston.target)
        assert_equal(reg.max_features_, boston.data.shape[1])
    # ... but sqrt(n_features) for classifiers (iris has 4 features).
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(max_features="auto")
        clf.fit(iris.data, iris.target)
        assert_equal(clf.max_features_, 2)
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_features="sqrt")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.sqrt(iris.data.shape[1])))
        est = TreeEstimator(max_features="log2")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.log2(iris.data.shape[1])))
        # Integers are taken verbatim.
        est = TreeEstimator(max_features=1)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=3)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 3)
        # Floats are fractions of n_features, rounded down but at least 1.
        est = TreeEstimator(max_features=0.01)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=0.5)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(0.5 * iris.data.shape[1]))
        est = TreeEstimator(max_features=1.0)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        # None means all features.
        est = TreeEstimator(max_features=None)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        # use values of max_features that are invalid
        est = TreeEstimator(max_features=10)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=-1)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=0.0)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=1.5)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features="foobar")
        assert_raises(ValueError, est.fit, X, y)
def test_error():
    """Deficient input (bad hyper-parameters, wrong shapes, predict
    before fit) must raise the appropriate exception."""
    # Test that it gives proper exception on deficient input.
    for name, TreeEstimator in CLF_TREES.items():
        # predict before fit
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict_proba, X)

        est.fit(X, y)
        X2 = [-2, -1, 1]  # wrong feature shape for sample
        assert_raises(ValueError, est.predict_proba, X2)

    for name, TreeEstimator in ALL_TREES.items():
        # Invalid values for parameters
        assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=-1).fit,
                      X, y)
        # min_weight_fraction_leaf above 0.5 is also rejected.
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=0.51).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
        assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)

        # Wrong dimensions
        est = TreeEstimator()
        y2 = y[:-1]
        assert_raises(ValueError, est.fit, X, y2)

        # Test with arrays that are non-contiguous.
        Xf = np.asfortranarray(X)
        est = TreeEstimator()
        est.fit(Xf, y)
        assert_almost_equal(est.predict(T), true_result)

        # predict before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict, T)

        # predict on vector with different dims
        est.fit(X, y)
        t = np.asarray(T)
        assert_raises(ValueError, est.predict, t[:, 1:])

        # wrong sample shape
        Xt = np.array(X).T

        est = TreeEstimator()
        est.fit(np.dot(X, Xt), y)
        assert_raises(ValueError, est.predict, X)
        assert_raises(ValueError, est.apply, X)

        clf = TreeEstimator()
        clf.fit(X, y)
        assert_raises(ValueError, clf.predict, Xt)
        assert_raises(ValueError, clf.apply, Xt)

        # apply before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    """Every leaf must contain at least min_samples_leaf training
    samples, for both tree builders."""
    # Test if leaves contain more than leaf_count training examples
    X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
    y = iris.target

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for name, TreeEstimator in ALL_TREES.items():
            est = TreeEstimator(min_samples_leaf=5,
                                max_leaf_nodes=max_leaf_nodes,
                                random_state=0)
            est.fit(X, y)
            out = est.tree_.apply(X)
            node_counts = np.bincount(out)
            # drop inner nodes
            leaf_count = node_counts[node_counts != 0]
            assert_greater(np.min(leaf_count), 4,
                           "Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
    """Test if leaves contain at least min_weight_fraction_leaf of the
    training set

    ``name`` is a key of ALL_TREES; ``datasets`` is a key of DATASETS.
    NOTE(review): the parameter name ``datasets`` shadows the imported
    sklearn ``datasets`` module inside this function.
    """
    if sparse:
        X = DATASETS[datasets]["X_sparse"].astype(np.float32)
    else:
        X = DATASETS[datasets]["X"].astype(np.float32)
    y = DATASETS[datasets]["y"]

    # Random per-sample weights; uses the module-level rng.
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)

    TreeEstimator = ALL_TREES[name]

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
        est = TreeEstimator(min_weight_fraction_leaf=frac,
                            max_leaf_nodes=max_leaf_nodes,
                            random_state=0)
        est.fit(X, y, sample_weight=weights)

        if sparse:
            out = est.tree_.apply(X.tocsr())
        else:
            out = est.tree_.apply(X)

        node_weights = np.bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    # Nose-style generator test: yields one check per estimator.
    # Check on dense input
    for name in ALL_TREES:
        yield check_min_weight_fraction_leaf, name, "iris"

    # Check on sparse input
    for name in SPARSE_TREES:
        yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    """Fitted tree estimators must survive a pickle round-trip with an
    unchanged training score."""
    def _check_roundtrip(est, data, target, task, est_name):
        # Fit, serialize, deserialize, and compare scores/types.
        est.fit(data, target)
        score = est.score(data, target)
        clone = pickle.loads(pickle.dumps(est))
        assert_equal(type(clone), est.__class__)
        score2 = clone.score(data, target)
        assert_equal(score, score2,
                     "Failed to generate same score after pickling "
                     "({0}) with {1}".format(task, est_name))

    for est_name, TreeClassifier in CLF_TREES.items():
        _check_roundtrip(TreeClassifier(random_state=0),
                         iris.data, iris.target,
                         "classification", est_name)

    for est_name, TreeRegressor in REG_TREES.items():
        _check_roundtrip(TreeRegressor(random_state=0),
                         boston.data, boston.target,
                         "regression", est_name)
def test_multioutput():
    """Classifiers and regressors must handle 2-output targets: shapes
    of predictions and (log-)probabilities must match the output count."""
    # Check estimators on multi-output problems.
    X = [[-2, -1],
         [-1, -1],
         [-1, -2],
         [1, 1],
         [1, 2],
         [2, 1],
         [-2, 1],
         [-1, 1],
         [-1, 2],
         [2, -1],
         [1, -1],
         [1, -2]]
    # First output has 2 classes, second output has 4 classes.
    y = [[-1, 0],
         [-1, 0],
         [-1, 0],
         [1, 1],
         [1, 1],
         [1, 1],
         [-1, 2],
         [-1, 2],
         [-1, 2],
         [1, 3],
         [1, 3],
         [1, 3]]
    T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    # toy classification problem
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        y_hat = clf.fit(X, y).predict(T)
        assert_array_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))

        # predict_proba returns one array per output.
        proba = clf.predict_proba(T)
        assert_equal(len(proba), 2)
        assert_equal(proba[0].shape, (4, 2))
        assert_equal(proba[1].shape, (4, 4))

        log_proba = clf.predict_log_proba(T)
        assert_equal(len(log_proba), 2)
        assert_equal(log_proba[0].shape, (4, 2))
        assert_equal(log_proba[1].shape, (4, 4))

    # toy regression problem
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        y_hat = reg.fit(X, y).predict(T)
        assert_almost_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
    """n_classes_ and classes_ must be scalar/1-D for single output and
    lists (one entry per output) for multi-output."""
    # Test that n_classes_ and classes_ have proper shape.
    for name, TreeClassifier in CLF_TREES.items():
        # Classification, single output
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)

        assert_equal(clf.n_classes_, 2)
        assert_array_equal(clf.classes_, [-1, 1])

        # Classification, multi-output
        _y = np.vstack((y, np.array(y) * 2)).T
        clf = TreeClassifier(random_state=0)
        clf.fit(X, _y)
        assert_equal(len(clf.n_classes_), 2)
        assert_equal(len(clf.classes_), 2)
        assert_array_equal(clf.n_classes_, [2, 2])
        assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
    """Balancing weights on a class-truncated iris subset must let each
    classifier reproduce the training labels exactly."""
    sub_X = iris.data[:125]
    sub_y = iris.target[:125]
    balanced = _balance_weights(sub_y)

    for tree_name, TreeCls in CLF_TREES.items():
        model = TreeCls(random_state=0)
        model.fit(sub_X, sub_y, sample_weight=balanced)
        assert_almost_equal(model.predict(sub_X), sub_y)
def test_memory_layout():
    """Fitting/predicting must work regardless of array memory layout
    (C/F order, contiguity, strides) and sparse format."""
    # Check that it works no matter the memory layout
    for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
                                                [np.float64, np.float32]):
        est = TreeEstimator(random_state=0)

        # Nothing
        X = np.asarray(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # C-order
        X = np.asarray(iris.data, order="C", dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # F-order
        X = np.asarray(iris.data, order="F", dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # Contiguous
        X = np.ascontiguousarray(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # Sparse formats only for estimators whose splitter supports them.
        if est.splitter in SPARSE_SPLITTERS:
            # csr matrix
            X = csr_matrix(iris.data, dtype=dtype)
            y = iris.target
            assert_array_equal(est.fit(X, y).predict(X), y)

            # csc_matrix
            X = csc_matrix(iris.data, dtype=dtype)
            y = iris.target
            assert_array_equal(est.fit(X, y).predict(X), y)

        # Strided
        X = np.asarray(iris.data[::3], dtype=dtype)
        y = iris.target[::3]
        assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
    """sample_weight semantics: zero weight removes a sample, small
    weights shift split thresholds, and integer weights are equivalent
    to duplicating samples."""
    # Check sample weighting.
    # Test that zero-weighted samples are not taken into account
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0

    sample_weight = np.ones(100)
    sample_weight[y == 0] = 0.0

    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X), np.ones(100))

    # Test that low weighted samples are not taken into account at low depth
    X = np.arange(200)[:, np.newaxis]
    y = np.zeros(200)
    y[50:100] = 1
    y[100:200] = 2
    X[100:200, 0] = 200

    sample_weight = np.ones(200)

    sample_weight[y == 2] = .51  # Samples of class '2' are still weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_equal(clf.tree_.threshold[0], 149.5)

    sample_weight[y == 2] = .5  # Samples of class '2' are no longer weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_equal(clf.tree_.threshold[0], 49.5)  # Threshold should have moved

    # Test that sample weighting is the same as having duplicates
    X = iris.data
    y = iris.target

    duplicates = rng.randint(0, X.shape[0], 200)

    clf = DecisionTreeClassifier(random_state=1)
    clf.fit(X[duplicates], y[duplicates])

    sample_weight = np.bincount(duplicates, minlength=X.shape[0])
    clf2 = DecisionTreeClassifier(random_state=1)
    clf2.fit(X, y, sample_weight=sample_weight)

    # Compare thresholds only on internal (non-leaf) nodes.
    internal = clf.tree_.children_left != tree._tree.TREE_LEAF
    assert_array_almost_equal(clf.tree_.threshold[internal],
                              clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
    """Malformed sample_weight arguments must raise ValueError."""
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0

    clf = DecisionTreeClassifier(random_state=0)

    # 2-D weights, scalar weights, and wrong-length weights are all
    # invalid for 100 samples.
    bad_weights = (np.random.rand(100, 1),
                   np.array(0),
                   np.ones(101),
                   np.ones(99))
    for sample_weight in bad_weights:
        assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
    """Check class_weights resemble sample_weights behavior."""
    TreeClassifier = CLF_TREES[name]

    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = TreeClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                        {0: 2., 1: 1., 2: 2.},
                                        {0: 1., 1: 2., 2: 2.}],
                          random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "auto" which should also have no effect
    clf4 = TreeClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)

    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Check that sample_weight and class_weight are multiplicative
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    # Nose-style generator test: one class-weight check per classifier.
    for name in CLF_TREES:
        yield check_class_weights, name
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    TreeClassifier = CLF_TREES[name]
    # Two-output target built from the module-level toy labels.
    _y = np.vstack((y, np.array(y) * 2)).T

    # Invalid preset string
    clf = TreeClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)

    # Not a list or preset for multi-output
    clf = TreeClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)

    # Incorrect length list for multi-output
    clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    # Nose-style generator test: one error check per classifier.
    for name in CLF_TREES:
        yield check_class_weight_errors, name
def test_max_leaf_nodes():
    """Greedy best-first growth must produce exactly max_leaf_nodes
    leaves, and values in (0, 1) must be rejected."""
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for est_name, TreeEstimator in ALL_TREES.items():
        fitted = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        leaves = (fitted.tree_.children_left == TREE_LEAF).sum()
        assert_equal(leaves, k + 1)

    # max_leaf_nodes in (0, 1) should raise ValueError
    for bad_value in (0, 1, 0.1):
        est = TreeEstimator(max_depth=None, max_leaf_nodes=bad_value)
        assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    """max_leaf_nodes must take precedence over max_depth: the tree
    grows deeper than max_depth=1 to reach k leaves."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for est_name, TreeEstimator in ALL_TREES.items():
        fitted = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        assert_greater(fitted.tree_.max_depth, 1)
def test_arrays_persist():
    # Ensure property arrays' memory stays alive when tree disappears
    # non-regression for #2726
    for attr in ['n_classes', 'value', 'children_left', 'children_right',
                 'threshold', 'impurity', 'feature', 'n_node_samples']:
        # The fitted estimator (and its tree) is garbage-collectable
        # immediately; the returned array must still own valid memory.
        value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
        # if pointing to freed memory, contents may be arbitrary
        assert_true(-2 <= value.flat[0] < 2,
                    'Array points to arbitrary memory')
def test_only_constant_features():
    """A tree fit on all-constant features must stay a single leaf
    (max_depth == 0)."""
    rs = check_random_state(0)
    const_X = np.zeros((10, 20))
    rand_y = rs.randint(0, 2, (10, ))
    for est_name, TreeEstimator in ALL_TREES.items():
        fitted = TreeEstimator(random_state=0).fit(const_X, rand_y)
        assert_equal(fitted.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
    """With one informative feature among many constants, the tree must
    find it even when max_features=1 (constant draws are retried)."""
    X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
                   np.zeros((4, 1000))])

    # Labels are independent of the informative feature, so the split
    # cannot separate them: predictions end up at 0.5.
    y = np.array([0., 1., 0., 1.0])
    for name, TreeEstimator in CLF_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))

    for name, TreeEstimator in REG_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Inputs too large for float32 should produce a ValueError whose
    # message mentions the dtype the tree code converts to.
    X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
    try:
        DecisionTreeClassifier().fit(X, [0, 1, 0, 1])
    except ValueError as e:
        assert_in("float32", str(e))
def test_realloc():
    # The Cython helper requests an impossibly large reallocation; it must
    # surface as a Python MemoryError rather than crash the interpreter.
    from sklearn.tree._tree import _realloc_test
    assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
    # Pointer width in bits, e.g. 64 on a 64-bit build.
    word_size = int(platform.architecture()[0].rstrip('bit'))
    X = np.random.randn(10, 2)
    y = np.random.randint(0, 2, 10)
    # Sanity check: we cannot request more memory than the size of the
    # address space. Currently raises OverflowError.
    too_big = 2 ** (word_size + 1)
    clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=too_big)
    assert_raises(Exception, clf.fit, X, y)
    # Non-regression test: MemoryError used to be dropped by Cython
    # because of a missing "except *".
    barely_addressable = 2 ** (word_size - 1) - 1
    clf = DecisionTreeClassifier(splitter='best',
                                 max_leaf_nodes=barely_addressable)
    assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
    # Fit the same estimator on dense and sparse versions of one dataset
    # and require identical trees and matching predictions across all three
    # sparse formats (CSR, CSC, COO).
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]
    # Gain testing time
    if dataset in ["digits", "boston"]:
        n_samples = X.shape[0] // 5
        X = X[:n_samples]
        X_sparse = X_sparse[:n_samples]
        y = y[:n_samples]
    for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
        # NOTE: X_sparse is rebound each iteration; the next conversion
        # starts from the previous format, which is harmless since the
        # contents are identical.
        X_sparse = sparse_format(X_sparse)
        # Check the default (depth first search)
        d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
        s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        y_pred = d.predict(X)
        if tree in CLF_TREES:
            y_proba = d.predict_proba(X)
            y_log_proba = d.predict_log_proba(X)
        # Predictions of the sparse-fitted tree on every sparse test format
        # must match the dense-fitted reference.
        for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
            X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
            assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
            if tree in CLF_TREES:
                assert_array_almost_equal(s.predict_proba(X_sparse_test),
                                          y_proba)
                assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
                                          y_log_proba)
def test_sparse_input():
    # Yield one dense-vs-sparse equivalence check per (tree, dataset) pair.
    datasets_to_check = ("clf_small", "toy", "digits", "multilabel",
                         "sparse-pos", "sparse-neg", "sparse-mix", "zeros")
    for tree, dataset in product(SPARSE_TREES, datasets_to_check):
        # digits is comparatively large; cap the depth to keep tests fast
        yield (check_sparse_input, tree, dataset,
               3 if dataset == "digits" else None)
    # Due to numerical instability of MSE and too strict a comparison, the
    # regression datasets are only checked at a very shallow depth.
    for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
        if tree in SPARSE_TREES:
            yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
    # Dense and sparse fits must build the same tree under a variety of
    # hyper-parameter settings: max_features (with max_depth),
    # min_samples_split, min_samples_leaf, and best-first search via
    # max_leaf_nodes.
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]
    half = X_sparse.shape[0] // 2
    configurations = (
        dict(max_features=1, max_depth=2),           # max_features
        dict(max_features=1, min_samples_split=10),  # min_samples_split
        dict(min_samples_leaf=half),                 # min_samples_leaf
        dict(max_leaf_nodes=3),                      # best-first search
    )
    for params in configurations:
        d = TreeEstimator(random_state=0, **params).fit(X, y)
        s = TreeEstimator(random_state=0, **params).fit(X_sparse, y)
        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
    # Run the hyper-parameter equivalence check on every sparse-capable
    # tree and every purely sparse dataset.
    sparse_datasets = ("sparse-pos", "sparse-neg", "sparse-mix", "zeros")
    for tree, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
    # Every split criterion must yield identical trees on dense and sparse
    # versions of the same data.
    TreeEstimator = ALL_TREES[tree]
    data = DATASETS[dataset]
    X, X_sparse, y = data["X"], data["X_sparse"], data["y"]
    criteria = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
    for criterion in criteria:
        dense_est = TreeEstimator(random_state=0, max_depth=3,
                                  criterion=criterion).fit(X, y)
        sparse_est = TreeEstimator(random_state=0, max_depth=3,
                                   criterion=criterion).fit(X_sparse, y)
        assert_tree_equal(dense_est.tree_, sparse_est.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(sparse_est.predict(X),
                                  dense_est.predict(X))
def test_sparse_criterion():
    # Run the criterion equivalence check on all purely sparse datasets.
    sparse_datasets = ("sparse-pos", "sparse-neg", "sparse-mix", "zeros")
    for tree, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
                                n_features=10):
    # Verify that explicitly-stored zero entries in a sparse matrix are
    # handled identically to the equivalent dense data, for both fitting
    # and prediction.
    TreeEstimator = ALL_TREES[tree]
    # n_samples set n_feature to ease construction of a simultaneous
    # construction of a csr and csc matrix
    n_samples = n_features
    samples = np.arange(n_samples)
    # Generate X, y
    random_state = check_random_state(0)
    indices = []
    data = []
    offset = 0
    indptr = [offset]
    for i in range(n_features):
        n_nonzero_i = random_state.binomial(n_samples, 0.5)
        indices_i = random_state.permutation(samples)[:n_nonzero_i]
        indices.append(indices_i)
        # values drawn from {-1, 0, 1, 2}; the zeros end up *stored*
        # explicitly in the sparse structure below
        data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
        data.append(data_i)
        offset += n_nonzero_i
        indptr.append(offset)
    indices = np.concatenate(indices)
    data = np.array(np.concatenate(data), dtype=np.float32)
    # The same (data, indices, indptr) triplet is interpreted as CSC here
    # and CSR below; both are valid because the matrix is square.
    X_sparse = csc_matrix((data, indices, indptr),
                          shape=(n_samples, n_features))
    X = X_sparse.toarray()
    X_sparse_test = csr_matrix((data, indices, indptr),
                               shape=(n_samples, n_features))
    X_test = X_sparse_test.toarray()
    y = random_state.randint(0, 3, size=(n_samples, ))
    # Ensure that X_sparse_test owns its data, indices and indptr array
    X_sparse_test = X_sparse_test.copy()
    # Ensure that we have explicit zeros
    assert_greater((X_sparse.data == 0.).sum(), 0)
    assert_greater((X_sparse_test.data == 0.).sum(), 0)
    # Perform the comparison
    d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
    s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))
    # Every dense/sparse pairing of test inputs must agree.
    Xs = (X_test, X_sparse_test)
    for X1, X2 in product(Xs, Xs):
        assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
        assert_array_almost_equal(s.apply(X1), d.apply(X2))
        assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
        assert_array_almost_equal(s.predict(X1), d.predict(X2))
        if tree in CLF_TREES:
            assert_array_almost_equal(s.predict_proba(X1),
                                      d.predict_proba(X2))
def test_explicit_sparse_zeros():
    # Run the explicit-zeros check for every sparse-capable estimator.
    for tree in SPARSE_TREES:
        yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
    # Trees require 2-D input: both fitting on and predicting from a 1-D
    # array must raise ValueError.
    TreeEstimator = ALL_TREES[name]
    flat = iris.data[:, 0].ravel()
    column = iris.data[:, 0].reshape((-1, 1))
    y = iris.target
    assert_raises(ValueError, TreeEstimator(random_state=0).fit, flat, y)
    est = TreeEstimator(random_state=0)
    est.fit(column, y)
    assert_raises(ValueError, est.predict, flat)
def test_1d_input():
    # Every tree estimator must reject 1-D input arrays.
    for name in ALL_TREES:
        yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
    # Private helper kept separate so nose's yielded tests pretty-print.
    # Without a weight constraint the data admits one split...
    unconstrained = TreeEstimator(random_state=0)
    unconstrained.fit(X, y, sample_weight=sample_weight)
    assert_equal(unconstrained.tree_.max_depth, 1)
    # ...but requiring 40% of total sample weight per leaf forbids it.
    constrained = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
    constrained.fit(X, y, sample_weight=sample_weight)
    assert_equal(constrained.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
    # Five equally-weighted samples, one of which is the lone positive.
    TreeEstimator = ALL_TREES[name]
    X = np.array([[0], [0], [0], [0], [1]])
    y = [0, 0, 0, 0, 1]
    weights = [0.2] * 5
    _check_min_weight_leaf_split_level(TreeEstimator, X, y, weights)
    # Repeat on sparse input when the estimator's splitter supports it.
    if TreeEstimator().splitter in SPARSE_SPLITTERS:
        _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
                                           weights)
def test_min_weight_leaf_split_level():
    # min_weight_fraction_leaf must be able to veto an otherwise-valid split.
    for name in ALL_TREES:
        yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
    # Estimator.apply on float64 input must agree with the low-level
    # Tree.apply on the float32 (DTYPE) data it converts to internally.
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)
    X_small32 = X_small.astype(tree._tree.DTYPE)
    assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
    # Same as check_public_apply, but the low-level tree receives CSR input.
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)
    X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
    assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32))
def test_public_apply():
    # Dense apply() for all trees; sparse apply() where supported.
    for name in ALL_TREES:
        yield (check_public_apply, name)
    for name in SPARSE_TREES:
        yield (check_public_apply_sparse, name)
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """Split ``node`` in two when it has no room for a new subcluster.

    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of most distant subclusters in the node is found.
    3. Every existing subcluster is attached to whichever of the two new
       subclusters/nodes it is closer to.
    4. The two new nodes are set as children of the two new subclusters.

    Returns
    -------
    (new_subcluster1, new_subcluster2) : tuple of _CFSubcluster
        The two subclusters that replace ``node`` in its parent.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2
    # Splice the two new nodes into the doubly linked list of leaves so
    # that leaf traversal (Birch._get_leaves) keeps working.
    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]
    # Seed the split with the two subclusters that are farthest apart.
    farthest_idx1, farthest_idx2 = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # BUGFIX: the previous ``dist[[farthest_idx]]`` indexed with a list
    # containing a tuple, a NumPy behavior deprecated since 1.15 and later
    # removed; index the two distance rows explicitly instead.
    node1_dist = dist[farthest_idx1]
    node2_dist = dist[farthest_idx2]
    node1_closer = node1_dist < node2_dist
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.
    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.
    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.
    branching_factor : int
        Maximum number of CF subclusters in each node.
    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.
    n_features : int
        The number of features.
    Attributes
    ----------
    subclusters_ : array-like
        list of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
        the final subclusters.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_`` .
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
    centroids_ : ndarray
        view of ``init_centroids_``.
    squared_norm_ : ndarray
        view of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features
        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        # One spare row (branching_factor + 1) so an overflowing insert can
        # be stored before the node gets split by the caller.
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None
    def append_subcluster(self, subcluster):
        # Store the new subcluster in the first unused row of the buffers.
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        # Overwrite the old subcluster's slot in place, then append the
        # second half as a fresh entry.
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)
    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node overflowed and must itself be split
        by the caller, False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False
        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # dist = ||c||^2 - 2<c, x>; the constant ||x||^2 term is omitted
        # because it does not affect the argmin.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]
        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False
            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)
                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False
        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False
            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False
            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
    """Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode has its child.
    Parameters
    ----------
    linear_sum : ndarray, shape (n_features,), optional
        Sample. This is kept optional to allow initialization of empty
        subclusters.
    Attributes
    ----------
    n_samples_ : int
        Number of samples that belong to each subcluster.
    linear_sum_ : ndarray
        Linear sum of all the samples in a subcluster. Prevents holding
        all sample data in memory.
    squared_sum_ : float
        Sum of the squared l2 norms of all samples belonging to a subcluster.
    centroid_ : ndarray
        Centroid of the subcluster. Prevent recomputing of centroids when
        ``CFNode.centroids_`` is called.
    child_ : _CFNode
        Child Node of the subcluster. Once a given _CFNode is set as the child
        of the _CFNode, it is set to ``self.child_``.
    sq_norm_ : ndarray
        Squared norm of the subcluster. Used to prevent recomputing when
        pairwise minimum distances are computed.
    """
    def __init__(self, linear_sum=None):
        # An "empty" subcluster (no linear_sum) accumulates statistics via
        # update(); a seeded one represents exactly one sample.
        if linear_sum is None:
            self.n_samples_ = 0
            self.squared_sum_ = 0.0
            self.linear_sum_ = 0
        else:
            self.n_samples_ = 1
            self.centroid_ = self.linear_sum_ = linear_sum
            self.squared_sum_ = self.sq_norm_ = np.dot(
                self.linear_sum_, self.linear_sum_)
        self.child_ = None
    def update(self, subcluster):
        # Absorb another subcluster's sufficient statistics (clustering
        # features are additive), then refresh the derived quantities.
        self.n_samples_ += subcluster.n_samples_
        self.linear_sum_ += subcluster.linear_sum_
        self.squared_sum_ += subcluster.squared_sum_
        self.centroid_ = self.linear_sum_ / self.n_samples_
        self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
    def merge_subcluster(self, nominee_cluster, threshold):
        """Check if a cluster is worthy enough to be merged. If
        yes then merge.
        """
        # Compute candidate merged statistics; they are only committed if
        # the merged squared radius stays within threshold**2.
        new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
        new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
        new_n = self.n_samples_ + nominee_cluster.n_samples_
        new_centroid = (1 / new_n) * new_ls
        new_norm = np.dot(new_centroid, new_centroid)
        dot_product = (-2 * new_n) * new_norm
        # sq_radius = (SS - 2*n*||c||^2)/n + ||c||^2 = SS/n - ||c||^2,
        # the mean squared distance of the merged samples to the centroid.
        sq_radius = (new_ss + dot_product) / new_n + new_norm
        if sq_radius <= threshold ** 2:
            (self.n_samples_, self.linear_sum_, self.squared_sum_,
             self.centroid_, self.sq_norm_) = \
                new_n, new_ls, new_ss, new_centroid, new_norm
            return True
        return False
    @property
    def radius(self):
        """Return radius of the subcluster"""
        # sqrt((SS - 2<ls, c>)/n + ||c||^2): the RMS distance of the
        # subcluster's samples to its centroid.
        dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
        return sqrt(
            ((self.squared_sum_ + dot_product) / self.n_samples_) +
            self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
    """Implements the Birch clustering algorithm.
    Every new sample is inserted into the root of the Clustering Feature
    Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively till it
    ends up at the subcluster of the leaf of the tree has the closest centroid.
    Read more in the :ref:`User Guide <birch>`.
    Parameters
    ----------
    threshold : float, default 0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be lesser than the threshold. Otherwise a new
        subcluster is started.
    branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new samples enters
        such that the number of subclusters exceed the branching_factor then
        the node has to be split. The corresponding parent also has to be
        split and if the number of subclusters in the parent is greater than
        the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model, default None
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. By default, this final
        clustering step is not performed and the subclusters are returned
        as they are. If a model is provided, the model is fit treating
        the subclusters as new samples and the initial data is mapped to the
        label of the closest subcluster. If an int is provided, the model
        fit is AgglomerativeClustering with n_clusters set to the int.
    compute_labels : bool, default True
        Whether or not to compute labels for each fit.
    copy : bool, default True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.
    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.
    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.
    subcluster_centers_ : ndarray,
        Centroids of all subclusters read directly from the leaves.
    subcluster_labels_ : ndarray,
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.
    labels_ : ndarray, shape (n_samples,)
        Array of labels assigned to the input data.
        if partial_fit is used instead of fit, they are assigned to the
        last batch of data.
    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
    ... compute_labels=True)
    >>> brc.fit(X)
    Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
       threshold=0.5)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])
    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Maron Livny
    BIRCH: An efficient data clustering method for large databases.
    http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
    * Roberto Perdisci
    JBirch - Java implementation of BIRCH clustering algorithm
    https://code.google.com/p/jbirch/
    """
    def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy
    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        """
        # fit_/partial_fit_ flags tell _fit whether to rebuild the tree
        # from scratch (fit) or extend an existing one (partial_fit).
        self.fit_, self.partial_fit_ = True, False
        return self._fit(X)
    def _fit(self, X):
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor
        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)
            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                # The root overflowed: split it and grow the tree one level
                # by creating a new root whose children are the two halves.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._global_clustering(X)
        return self
    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.
        Returns
        -------
        leaves: array-like
            List of the leaf nodes.
        """
        # Walk the linked chain of leaves that starts at the dummy leaf.
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves
    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features), None
            Input data. If X is not provided, only the global clustering
            step is done.
        """
        self.partial_fit_, self.fit_ = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            self._check_fit(X)
            return self._fit(X)
    def _check_fit(self, X):
        is_fitted = hasattr(self, 'subcluster_centers_')
        # Called by partial_fit, before fitting.
        has_partial_fit = hasattr(self, 'partial_fit_')
        # Should raise an error if one does not fit before predicting.
        if not (is_fitted or has_partial_fit):
            raise NotFittedError("Fit training data before predicting")
        if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
            raise ValueError(
                "Training data and predicted data do "
                "not have same number of features.")
    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.
        Avoid computation of the row norms of X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        labels: ndarray, shape(n_samples)
            Labelled data.
        """
        X = check_array(X, accept_sparse='csr')
        self._check_fit(X)
        # Compare -2*X.C^T + ||C||^2 per centroid; the omitted ||x||^2 term
        # is constant within each row, so the argmin is unchanged.
        reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
        reduced_distance *= -2
        reduced_distance += self._subcluster_norms
        return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
    def transform(self, X, y=None):
        """
        Transform X into subcluster centroids dimension.
        Each dimension represents the distance from the sample point to each
        cluster centroid.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
            Transformed data.
        """
        check_is_fitted(self, 'subcluster_centers_')
        return euclidean_distances(X, self.subcluster_centers_)
    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels
        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, int):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")
        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)
        if clusterer is None or not_enough_centroids:
            # No global step: each subcluster keeps its own label.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters))
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)
        if compute_labels:
            self.labels_ = self.predict(X)
| bsd-3-clause |
tri-state-epscor/wcwave_adaptors | vwpy/isnobal.py | 2 | 50849 | """
Tools for working with IPW binary data and running the iSNOBAL model.
"""
#
# Copyright (c) 2014, Matthew Turner (maturner01.gmail.com)
#
# For the Tri-state EPSCoR Track II WC-WAVE Project
#
# Acknowledgements to Robert Lew for inspiration in the design of the IPW
# class (see https://github.com/rogerlew/RL_GIS_Sandbox/tree/master/isnobal).
#
import datetime
import logging
import subprocess
import netCDF4
import re
import warnings
import xray
from collections import namedtuple, defaultdict
from copy import deepcopy
from netCDF4 import Dataset
from numpy import (arange, array, zeros, ravel, reshape, fromstring, dtype,
floor, log10)
from numpy import sum as npsum
from numpy import round as npround
from numpy.ma import masked
from os import mkdir, listdir
from os.path import exists, dirname, basename
from os.path import join as osjoin
from pandas import date_range, DataFrame, Series, Timedelta
from progressbar import ProgressBar
from shutil import rmtree
from struct import pack
from .watershed import make_fgdc_metadata, make_watershed_metadata
from .netcdf import ncgen_from_template, utm2latlon
#: IPW standard. assumed unchanging since they've been the same for 20 years
BAND_TYPE_LOC = 1
BAND_INDEX_LOC = 2
#: For converting NetCDF to iSNOBAL, use 2 bytes for all variables except mask
NC_NBYTES = 2
NC_NBITS = 16
NC_MAXINT = pow(2, NC_NBITS) - 1  # largest value representable in NC_NBITS
#: Container for ISNOBAL Global Band information
GlobalBand = namedtuple("GlobalBand", 'byteorder nLines nSamps nBands')
def IsHeaderStart(headerLine):
    """Return True if `headerLine` is the first line of an IPW header.

    Converted from a lambda assignment (PEP 8 E731) to a proper def; the
    name, signature and behavior are unchanged.
    """
    return headerLine.split()[0] == "!<header>"
def AssertISNOBALInput(nc):
    """Validate that *nc* can drive an iSNOBAL run.

    Raises ISNOBALNetcdfError when the required global attributes or any
    expected input variable is missing, and a plain Exception when *nc* is
    neither an xray.Dataset nor a netCDF4.Dataset.
    """
    if type(nc) is xray.Dataset:
        nca = nc.attrs
    elif type(nc) is netCDF4.Dataset:
        nca = nc.ncattrs()
    else:
        raise Exception('NetCDF is not a valid type')
    required_attrs = ('data_tstep', 'nsteps', 'output_frequency')
    if not all(attr in nca for attr in required_attrs):
        raise ISNOBALNetcdfError("Attributes 'data_tstep', 'nsteps', "
                                 "'output_frequency', 'bline', 'bsamp', "
                                 "'dline', and 'dsamp' not all in NetCDF")
    ncv = nc.variables
    expected_variables = ['alt', 'mask', 'time', 'easting', 'northing', 'lat',
                          'lon', 'I_lw', 'T_a', 'e_a', 'u', 'T_g', 'S_n', 'z',
                          'z_0', 'z_s', 'rho', 'T_s_0', 'T_s', 'h2o_sat',
                          'm_pp', 'percent_snow', 'rho_snow']
    # Collect every missing variable so the error names all of them at once.
    not_present = [v for v in expected_variables if v not in ncv]
    if not_present:
        raise ISNOBALNetcdfError(
            "Variables " + ', '.join(not_present) +
            " are missing from input NetCDF")
#: varnames for loading the NetCDF
# Maps each iSNOBAL file type (dem, inputs, precip, mask, init, and the
# em/snow outputs) to the NetCDF variable names it carries.
VARNAME_BY_FILETYPE = \
    {
        'dem': ['alt'],
        'in': ['I_lw', 'T_a', 'e_a', 'u', 'T_g', 'S_n'],
        'precip': ['m_pp', 'percent_snow', 'rho_snow', 'T_pp'],
        'mask': ['mask'],
        'init': ['z', 'z_0', 'z_s', 'rho', 'T_s_0', 'T_s', 'h2o_sat'],
        'em': ['R_n', 'H', 'L_v_E', 'G', 'M', 'delta_Q',
               'E_s', 'melt', 'ro_predict', 'cc_s'],
        'snow': ['z_s', 'rho', 'm_s', 'h2o', 'T_s_0',
                 'T_s_l', 'T_s', 'z_s_l', 'h2o_sat']
    }
#: ISNOBAL variable names to be looked up to make dataframes and write metadata
#: Convert number of bytes to struct package code for unsigned integer type
# (struct format characters: B = 1-byte, H = 2-byte, I = 4-byte unsigned)
PACK_DICT = \
    {
        1: 'B',
        2: 'H',
        4: 'I'
    }
def isnobal(nc_in=None, nc_out_fname=None, data_tstep=60, nsteps=8758,
            init_img="data/init.ipw", precip_file="data/ppt_desc",
            mask_file="data/tl2p5mask.ipw", input_prefix="data/inputs/in",
            output_frequency=1, em_prefix="data/outputs/em",
            snow_prefix="data/outputs/snow", dt='hours', year=2010,
            month=10, day='01', event_emitter=None, **kwargs):
    """ Wrapper for running the ISNOBAL
        (http://cgiss.boisestate.edu/~hpm/software/IPW/man1/isnobal.html)
        model.
        Arguments:
            nc_in (netCDF4.Dataset) Input NetCDF4 dataset. See
                AssertISNOBALInput for requirements.
            nc_out_fname (str) Name of NetCDF file to write to, if desired
            For explanations the rest, see the link above.
        ** Addition: It expects a pyee event emitter in order to emit messages for progress. if not provided it should just work fine
        Returns:
            (netCDF4.Dataset) NetCDF Dataset object of the outputs
    """
    if not nc_in:
        # no NetCDF given: run the external `isnobal` binary directly on the
        # IPW file paths passed in the keyword arguments
        isnobalcmd = " ".join(["isnobal",
                               "-t " + str(data_tstep),
                               "-n " + str(nsteps),
                               "-I " + init_img,
                               "-p " + precip_file,
                               "-m " + mask_file,
                               "-i " + input_prefix,
                               "-O " + str(output_frequency),
                               "-e " + em_prefix,
                               "-s " + snow_prefix])
        # TODO sanitize this isnobalcmd or better yet, avoid shell=True
        logging.debug('Running isnobal')
        kwargs['event_name'] = 'running_isonbal'
        kwargs['event_description'] = 'Running the ISNOBAL model'
        kwargs['progress_value'] = 50
        if event_emitter:
            event_emitter.emit('progress', **kwargs)
        # WARNING: shell=True on a string built from arguments; do not expose
        # this call path to untrusted input (see TODO above)
        output = subprocess.check_output(isnobalcmd, shell=True)
        logging.debug("ISNOBAL process output: " + output)
        logging.debug('done runinig isnobal')
        kwargs['event_name'] = 'running_isonbal'
        kwargs['event_description'] = 'Done Running model'
        kwargs['progress_value'] = 100
        if event_emitter:
            event_emitter.emit('progress',**kwargs)
        # create a NetCDF of the outputs and return it
        nc_out = \
            generate_standard_nc(dirname(em_prefix), nc_out_fname,
                                 data_tstep=data_tstep,
                                 output_frequency=output_frequency, dt=dt,
                                 year=year, month=month, day=day,
                                 event_emitter=event_emitter, **kwargs)
        return nc_out
    else:
        AssertISNOBALInput(nc_in)
        # these are guaranteed to be present by the above assertion
        data_tstep = nc_in.data_tstep
        nsteps = nc_in.nsteps - 1  # isnobal steps are from one step to another
        output_frequency = nc_in.output_frequency
        # create standard IPW data in tmpdir; creates tmpdir
        # (timestamp keeps concurrent runs from colliding)
        tmpdir = '/tmp/isnobalrun' + \
            str(datetime.datetime.now()).replace(' ', '')
        nc_to_standard_ipw(nc_in, tmpdir,event_emitter=event_emitter,**kwargs)
        mkdir(osjoin(tmpdir, 'outputs'))
        # nc_to_standard_ipw is well tested, we know these will be present
        init_img = osjoin(tmpdir, 'init.ipw')
        mask_file = osjoin(tmpdir, 'mask.ipw')
        precip_file = osjoin(tmpdir, 'ppt_desc')
        em_prefix = osjoin(tmpdir, 'outputs/em')
        input_prefix = osjoin(tmpdir, 'inputs/in')
        snow_prefix = osjoin(tmpdir, 'outputs/snow')
        # recursively run isnobal with nc_in=None
        nc_out = isnobal(nc_out_fname=nc_out_fname, data_tstep=data_tstep,
                         nsteps=nsteps, init_img=init_img,
                         precip_file=precip_file, mask_file=mask_file,
                         input_prefix=input_prefix,
                         output_frequency=output_frequency,
                         em_prefix=em_prefix, snow_prefix=snow_prefix,event_emitter=event_emitter,**kwargs)
        # tmpdir is scratch space only; remove it once outputs are captured
        rmtree(tmpdir)
        return nc_out
class IPW(object):
    """
    Represents an IPW file. Provides a data_frame attribute to access the
    variables and their floating point representation as a dataframe. The
    dataframe can be modified, the headers recalculated with
    recalculateHeaders, and then written back to IPW binary with
    writeBinary.

    >>> ipw = IPW("in.0000")
    >>> ipw.data_frame.T_a = ipw.data_frame.T_a + 1.0 # add 1 dg C to each temp
    >>> ipw.writeBinary("in.plusOne.000")
    """
    def __init__(self, input_file=None, config_file=None,
                 water_year=None, dt=None, file_type=None):
        # dt, when given, must be a timedelta; None means "default 1 hour"
        # for time-indexed file types
        assert dt is None or issubclass(type(dt), datetime.timedelta)
        if input_file is not None:
            ipw_lines = IPWLines(input_file)
            input_split = basename(input_file).split('.')
            # by convention the type is the part before the dot
            # (e.g. "in.0000" -> "in"); an explicit file_type overrides it
            file_type = file_type or input_split[0]
            # _make_bands
            try:
                header_dict = \
                    _make_bands(ipw_lines.header_lines,
                                VARNAME_BY_FILETYPE[file_type])
            except (KeyError):
                raise IPWFileError("Provide explicit file type for file %s" %
                                   input_file)
            # extract just bands from the header dictionary
            bands = [band for band in header_dict.values()]
            # get the nonglobal_bands in a list, ordered by band index
            nonglobal_bands =\
                sorted([band for varname, band in header_dict.iteritems()
                        if varname != 'global'],
                       key=lambda b: b.band_idx)
            # the default configuration is used if no config file is given
            if config_file is None:
                config_file = \
                    osjoin(dirname(__file__), '../default.conf')
            if file_type in ['in', 'em', 'snow']:
                # set the water year to default if not given
                if not water_year:
                    water_year = 2010
                # note that we have not generalized for non-hour timestep data
                if dt is None:
                    dt = Timedelta('1 hour')
                # the iSNOBAL file naming scheme puts the integer time step
                # after the dot, really as the extension
                # TODO as Roger pointed out, really this is for
                # a single point in time, so this timing thing is not right
                start_dt = dt * int(input_split[-1])
                # water year starts 1 October (NOTE: `01` is a Python 2
                # integer literal)
                start_datetime = \
                    datetime.datetime(water_year, 10, 01) + start_dt
                end_datetime = start_datetime + dt
            else:
                # time-independent file types (dem/mask/init/precip) carry
                # no timestamps
                start_datetime = None
                end_datetime = None
            # initialized when called for below
            self._data_frame = None
            self.input_file = input_file
            self.file_type = file_type
            self.header_dict = header_dict
            self.binary_data = ipw_lines.binary_data
            self.bands = bands
            self.nonglobal_bands = nonglobal_bands
            # use geo information in band0; all bands have equiv geo info
            band0 = nonglobal_bands[0]
            # GDAL-style geotransform; origin shifted by half a cell so it
            # refers to cell edges rather than centers
            self.geotransform = [band0.bsamp - band0.dsamp / 2.0,
                                 band0.dsamp,
                                 0.0,
                                 band0.bline - band0.dline / 2.0,
                                 0.0,
                                 band0.dline]
            self.config_file = config_file
            self.start_datetime = start_datetime
            self.end_datetime = end_datetime
        else:
            # no input file: build an empty shell to be populated later
            # (see IPW.from_nc and reaggregate_ipws)
            self._data_frame = None
            self.input_file = None
            self.file_type = None
            self.header_dict = None
            self.binary_data = None
            self.bands = None
            self.nonglobal_bands = None
            self.geotransform = None
            self.start_datetime = None
            self.end_datetime = None
        return None
    def recalculate_header(self):
        """
        Recalculate header values (each band's float min/max) from the
        current dataframe, then refresh header_dict to match.
        """
        _recalculate_header(self.nonglobal_bands, self.data_frame())
        for band in self.nonglobal_bands:
            self.header_dict[band.varname] = band
    @classmethod
    def precip_tuple(self, precip_file, sepchar='\t'):
        """Create list of two-lists where each element's elements are the time
           index of the time step when the precipitation happened and an IPW
           of the precipitation data.

           NOTE: classmethod whose first parameter is named `self` (it is
           really the class); callers are unaffected.
        """
        pptlist = map(lambda l: l.strip().split(sepchar),
                      open(precip_file, 'r').readlines())
        return map(lambda l: (l[0], IPW(l[1], file_type='precip')), pptlist)
    @classmethod
    def from_nc(cls, nc_in, tstep=None, file_type=None, variable=None,
                distance_units='m', coord_sys_ID='UTM'):
        """
        Generate an IPW object from a NetCDF file.

        >>> ipw = IPW.from_nc('dataset.nc', tstep='1', file_type='in')
        >>> ipw = IPW.from_nc(nc_in)

        If your data uses units of distance other than meters, set that
        with kwarg `distance_units`. Simliar

        Arguments:
            nc_in (str or NetCDF4.Dataset) NetCDF to convert to IPW
            tstep (int) The time step in whatever units are being used
            file_type (str) file type of NetCDF variable, one of
                'in', 'precip', 'em', 'snow', 'mask', 'init', 'dem'
            variable (str or list) One or many variable names to be
                incorporated into IPW file
            distance_units (str) If you use a measure of distance other
                than meters, put the units here
            coord_sys_ID (str) Coordinate system being used
        Returns:
            (IPW) IPW instance built from NetCDF inputs
        """
        if type(nc_in) is str:
            nc_in = Dataset(nc_in, 'r')
        # check and get variables from netcdf
        if file_type is None and variable is None:
            raise Exception("file_type and variable both 'None': no data to convert!")
        # initialize the IPW and set its some global attributes
        ipw = IPW()
        if file_type is None:
            # single-variable mode: infer the file type from the variable
            if variable == 'alt':
                ipw.file_type = 'dem'
            elif variable == 'mask':
                ipw.file_type = variable
            # this allows same lookup to be used for init or dem/mask
            nc_vars = {variable: nc_in.variables[variable]}
        else:
            nc_vars = {varname: nc_in.variables[varname]
                       for varname in VARNAME_BY_FILETYPE[file_type]}
            ipw.file_type = file_type
        # read header info from nc and generate/assign to new IPW
        # build global dict
        ipw.byteorder = '0123'  # TODO read from file
        ipw.nlines = len(nc_in.dimensions['northing'])
        ipw.nsamps = len(nc_in.dimensions['easting'])
        # if the bands are not part of a group, they are handled individually
        if file_type:
            ipw.nbands = len(nc_vars)
        else:
            ipw.nbands = 1
        globalBand = GlobalBand(ipw.byteorder, ipw.nlines,
                                ipw.nsamps, ipw.nbands)
        # build non-global band(s). Can use recalculate_header so no min/max
        # need be set.
        # setting all values common to all bands
        # use 2 bytes/16 bits for floating point values
        bytes_ = NC_NBYTES
        bits_ = NC_NBITS
        bline = nc_in.bline
        dline = nc_in.dline
        bsamp = nc_in.bsamp
        dsamp = nc_in.dsamp
        geo_units = distance_units
        coord_sys_ID = coord_sys_ID
        # iterate over each item in VARNAME_BY_FILETYPE for the filetype, creating
        # a "Band" for each and corresponding entry in the poorly named
        # header_dict
        varnames = VARNAME_BY_FILETYPE[ipw.file_type]
        header_dict = dict(zip(varnames,
                               [Band() for i in range(len(varnames) + 1)]))
        # create a dataframe with nrows = nlines*nsamps and variable colnames
        df_shape = (ipw.nlines*ipw.nsamps, len(varnames))
        df = DataFrame(zeros(df_shape), columns=varnames)
        for idx, var in enumerate(varnames):
            header_dict[var] = Band(varname=var, band_idx=idx, nBytes=bytes_,
                nBits=bits_, int_max=NC_MAXINT, bline=bline, dline=dline,
                bsamp=bsamp, dsamp=dsamp, units=geo_units,
                coord_sys_ID=coord_sys_ID)
            # insert data to each df column; flatten the 2-D grid row-wise
            if tstep is not None:
                data = ravel(nc_vars[var][tstep])
            else:
                data = ravel(nc_vars[var])
            df[var] = data
        ipw._data_frame = df
        ipw.nonglobal_bands = header_dict.values()
        # include global band in header dictionary
        header_dict.update({'global': globalBand})
        # GDAL-style geotransform (cell-edge origin), same as __init__
        ipw.geotransform = [bsamp - dsamp / 2.0,
                            dsamp,
                            0.0,
                            bline - dline / 2.0,
                            0.0,
                            dline]
        ipw.bands = header_dict.values()
        ipw.header_dict = header_dict
        # recalculate headers
        ipw.recalculate_header()
        return ipw
    def data_frame(self):
        """
        Get the Pandas DataFrame representation of the IPW file.
        Built lazily from the raw binary on first access, then cached.
        """
        if self._data_frame is None:
            self._data_frame = \
                _build_ipw_dataframe(self.nonglobal_bands,
                                     self.binary_data)
        return self._data_frame
    def write(self, fileName):
        """
        Write the IPW data to file: header lines first, then the packed
        binary pixel data.
        """
        last_line = "!<header> image -1 $Revision: 1.5 $"
        with open(fileName, 'wb') as f:
            header_lines = _bands_to_header_lines(self.header_dict)
            for l in header_lines:
                f.write(l + '\n')
            f.write(last_line + '\n')
            _write_floatdf_binstring_to_file(
                self.nonglobal_bands, self._data_frame, f)
        return None
def generate_standard_nc(base_dir, nc_out=None, inputs_dir='inputs',
                         dem_file='tl2p5_dem.ipw', mask_file='tl2p5mask.ipw',
                         init_file='init.ipw', ppt_desc_path='ppt_desc',
                         data_tstep=60,
                         output_frequency=1, dt='hours', year=2010, month=10,
                         day='01',hour='',event_emitter=None,**kwargs):
    """Use the utilities from netcdf.py to convert standard set of either input
       or output files to a NetCDF4 file. A standard set of files means
       for inputs:
           - inputs/ dir with 5/6-band input files named like in.0000, in.0001
           - ppt_desc file with time index of precip file and path to ppt file
           - ppt_images_dist directory with the 4-band files from ppt_desc
           - tl2p5mask.ipw and tl2p5_dem.ipw for mask and DEM images
           - init.ipw 7-band initialization file
       for outputs:
           - an output/ directory with 9-band energy-mass (em) outputs and
             snow outputs in time steps named like em.0000 and snow.0000
       Arguments:
           base_dir (str): base directory of the data
           nc_out (str): path to write data to
       Returns:
           (netCDF4.Dataset) Representation of the data
    """
    # infer inputs vs outputs purely from the directory layout
    if 'outputs' in base_dir.split('/')[-1]:
        ipw_type = 'outputs'
    elif inputs_dir in listdir(base_dir):
        ipw_type = 'inputs'
    else:
        raise IPWFileError("%s does not meet standards" % base_dir)
    if ipw_type == 'inputs':
        input_files = [osjoin(base_dir, inputs_dir, el) for el in
                       listdir(osjoin(base_dir, inputs_dir))]
        # grid/geo info is taken from the first file; all files are assumed
        # to share the same grid
        ipw0 = IPW(input_files[0])
        gt = ipw0.geotransform
        gb = [x for x in ipw0.bands if type(x) is GlobalBand][0]
        # in iSNOBAL speak, literally the number of steps, not number of
        # time index entries
        nsteps = len(input_files) - 1
        template_args = dict(bline=gt[3], bsamp=gt[0], dline=gt[5],
                             dsamp=gt[1], nsamps=gb.nSamps, nlines=gb.nLines,
                             data_tstep=data_tstep, nsteps=nsteps,
                             output_frequency=output_frequency, dt=dt,
                             year=year, month=month, day=day, hour=hour)
        # initialize the nc file
        nc = ncgen_from_template('ipw_in_template.cdl', nc_out, clobber=True,
                                 **template_args)
        # first take care of non-precip files
        with ProgressBar(maxval=len(input_files)) as progress:
            for i, f in enumerate(input_files):
                ipw = IPW(f)
                # the time step is encoded in the extension, e.g. "in.0012"
                tstep = int(basename(ipw.input_file).split('.')[-1])
                _nc_insert_ipw(nc, ipw, tstep, gb.nLines, gb.nSamps)
                progress.update(i)
                kwargs['event_name'] = 'input_ipw_to_nc'
                kwargs['event_description'] = 'creating nc form iw files'
                kwargs['progress_value'] = format((float(i)/len(input_files)) * 100,'.2f')
                if event_emitter:
                    event_emitter.emit('progress',**kwargs)
        # dem, mask may not exist
        dem_mask_init_list = []
        try:
            dem = IPW(osjoin(base_dir, dem_file), file_type='dem')
            dem_mask_init_list.append(dem)
        except:
            # deliberately best-effort: a missing DEM only warns
            warnings.warn("No dem file found in " + base_dir)
            pass
        try:
            mask = IPW(osjoin(base_dir, mask_file), file_type='mask')
            dem_mask_init_list.append(mask)
        except:
            # deliberately best-effort: a missing mask only warns
            warnings.warn("No mask file found in " + base_dir)
            pass
        init = IPW(osjoin(base_dir, init_file))
        dem_mask_init_list.append(init)
        for el in dem_mask_init_list:
            # these layers are time-independent, hence tstep=None
            _nc_insert_ipw(nc, el, None, gb.nLines, gb.nSamps)
        # precipitation files
        # read ppt_desc file and insert to nc with appropriate time step
        # we do not explicitly set any value for zero-precip time steps
        space_regex = re.compile('\s+')
        ppt_pairs = [space_regex.split(ppt_line.strip())  # ppt_line.strip().split('\t')
                     for ppt_line in
                     open(osjoin(base_dir, ppt_desc_path), 'r').readlines()]
        with ProgressBar(maxval=len(ppt_pairs)) as progress:
            for i, ppt_pair in enumerate(ppt_pairs):
                # each pair is (time index, path to 4-band precip IPW)
                tstep = int(ppt_pair[0])
                el = IPW(ppt_pair[1], file_type='precip')
                _nc_insert_ipw(nc, el, tstep, gb.nLines, gb.nSamps)
                progress.update(i)
                kwargs['event_name'] = 'input_ipw_to_nc2'
                kwargs['event_description'] = 'creating nc form iw files 2'
                kwargs['progress_value'] = format((float(i)/len(ppt_pairs)) * 100,'.2f')
                if event_emitter:
                    event_emitter.emit('progress',**kwargs)
        kwargs['event_name'] = 'input_ipw_to_nc2'
        kwargs['event_description'] = 'creating nc form iw files 2'
        kwargs['progress_value'] = 100
        if event_emitter:
            event_emitter.emit('progress',**kwargs)
    else:
        output_files = [osjoin(base_dir, el) for el in listdir(base_dir)]
        ipw0 = IPW(output_files[0])
        gt = ipw0.geotransform
        gb = [x for x in ipw0.bands if type(x) is GlobalBand][0]
        nsteps = len(output_files)
        template_args = dict(bline=gt[3], bsamp=gt[0], dline=gt[5],
                             dsamp=gt[1], nsamps=gb.nSamps, nlines=gb.nLines,
                             data_tstep=data_tstep, nsteps=nsteps,
                             output_frequency=output_frequency, dt=dt,
                             year=year, month=month, day=day)
        # initialize nc file
        nc = ncgen_from_template('ipw_out_template.cdl', nc_out, clobber=True,
                                 **template_args)
        logging.debug('creating output file')
        with ProgressBar(maxval=len(output_files)) as progress:
            for i, f in enumerate(output_files):
                ipw = IPW(f)
                tstep = int(basename(ipw.input_file).split('.')[-1])
                _nc_insert_ipw(nc, ipw, tstep, gb.nLines, gb.nSamps)
                progress.update(i)
                kwargs['event_name'] = 'ouptut_ipw_to_nc'
                kwargs['event_description'] = 'creating output netcdf file from output ipw files'
                kwargs['progress_value'] = format((float(i)/len(output_files)) * 100,'.2f')
                if event_emitter:
                    event_emitter.emit('progress',**kwargs)
        kwargs['event_name'] = 'ouptut_ipw_to_nc'
        kwargs['event_description'] = 'creating output nc file fro moutput ipw'
        kwargs['progress_value'] = 100
        if event_emitter:
            event_emitter.emit('progress',**kwargs)
    # whether inputs or outputs, we need to include the dimensional values
    t = nc.variables['time']
    t[:] = arange(len(t))
    e = nc.variables['easting']
    # eastings are "samples" in IPW
    nsamps = len(e)
    e[:] = array([nc.bsamp + nc.dsamp*i for i in range(nsamps)])
    n = nc.variables['northing']
    # northings are "lines" in IPW
    nlines = len(n)
    n[:] = array([nc.bline + nc.dline*i for i in range(nlines)])
    # get a n_points x 2 array of lat/lon pairs at every point on the grid
    latlon_arr = utm2latlon(nc.bsamp, nc.bline, nc.dsamp,
                            nc.dline, nsamps, nlines)
    # break this out into lat and lon separately at each point on the grid
    lat = nc.variables['lat']
    lat[:] = reshape(latlon_arr[:, 0], (nlines, nsamps))
    # break this out into lat and lon separately at each point on the grid
    lon = nc.variables['lon']
    lon[:] = reshape(latlon_arr[:, 1], (nlines, nsamps))
    # finish setting attributes
    nc.data_tstep = data_tstep
    nc.nsteps = len(t)
    nc.sync()
    return nc
def _nc_insert_ipw(dataset, ipw, tstep, nlines, nsamps):
    """Put IPW data into dataset based on file naming conventions

    Args:
        dataset (NetCDF4.Dataset): Dataset to be populated
        ipw (wcwave_adaptors.isnobal.IPW): source data in IPW format
        tstep (int): Positive integer indicating the current time step;
            ignored for the time-independent types ('dem', 'mask', 'init')
        nlines (int): number of 'lines' in IPW file, aka n_northings
        nsamps (int): number of 'samps' in IPW file, aka n_eastings

    Returns:
        None. `dataset` is populated in-place.
    """
    grid_shape = (nlines, nsamps)
    frame = ipw.data_frame()
    ncvars = dataset.variables
    kind = ipw.file_type
    if kind == 'dem':
        # dem only has 'alt' information, stored in root group
        dataset.variables['alt'][:, :] = reshape(frame['alt'], grid_shape)
    elif kind == 'mask':
        # mask is binary and one-banded; store in root group
        dataset.variables['mask'][:, :] = reshape(frame['mask'], grid_shape)
    elif kind == 'init':
        # initialization fields carry no time dimension
        for name in VARNAME_BY_FILETYPE['init']:
            ncvars[name][:, :] = reshape(frame[name], grid_shape)
    elif kind == 'in':
        for name in VARNAME_BY_FILETYPE['in']:
            # can't just assign b/c if sun is 'down' var is absent from df
            if name in frame.columns:
                ncvars[name][tstep, :, :] = reshape(frame[name], grid_shape)
            else:
                ncvars[name][tstep, :, :] = zeros(grid_shape)
    elif kind in ('precip', 'em', 'snow'):
        # time-indexed types whose bands map one-to-one onto nc variables
        for name in VARNAME_BY_FILETYPE[kind]:
            ncvars[name][tstep, :, :] = reshape(frame[name], grid_shape)
    else:
        raise Exception('File type %s not recognized!' % kind)
def nc_to_standard_ipw(nc_in, ipw_base_dir, clobber=True, type_='inputs',
                       event_emitter=None, **kwargs):
    """Convert an iSNOBAL NetCDF file to an iSNOBAL standard directory structure
       in IPW format. This means that for

       isnobal input nc: all inputs are all in {ipw_base_dir}/inputs and all precip
           files are in {ipw_base_dir}/ppt_images_dist. There is a precip
           description file {ipw_base_dir}/ppt_desc describing what time index
           each precipitation file corresponsds to and the path to the precip
           file in ppt_images_dist. There are three more files, the mask, init,
           and DEM files at {ipw_base_dir}/ tl2p5mask.ipw, tl2p5_dem.ipw, and
           init.ipw

       isnobal output nc: files get output to {ipw_base_dir}/outputs to allow for
           building a directory of both inputs and outputs. Files are like
           em.0000 and snow.0000 for energy-mass and snow outputs, respectively.

       Arguments:
           nc_in (str or netCDF4.Dataset) path to input NetCDF file (or an
               already-open Dataset) to break out
           ipw_base_dir (str) location to store the IPW tree
           clobber (bool) remove an existing ipw_base_dir first; if False
               and the directory exists, IPWFileError is raised
           type_ (str) only 'inputs' is currently implemented

       Returns:
           None
    """
    if type(nc_in) is str:
        nc_in = Dataset(nc_in, 'r')
    else:
        assert isinstance(nc_in, Dataset)
    # every variable an iSNOBAL input NetCDF must provide
    present_vars = set(nc_in.variables.keys())
    expected_vars = set([
        u'time', u'easting', u'northing', u'lat', u'lon',
        u'alt', u'mask', 'I_lw', 'T_a', 'e_a', 'u', 'T_g', 'S_n',
        'm_pp', 'percent_snow', 'rho_snow', 'T_pp',
        'z', 'z_0', 'z_s', 'rho', 'T_s_0', 'T_s', 'h2o_sat']
    )
    assert not expected_vars.difference(present_vars), \
        "%s not a valid input iSNOBAL NetCDF; %s are missing" \
        % (nc_in.filepath(), expected_vars.difference(present_vars))
    if clobber and exists(ipw_base_dir):
        rmtree(ipw_base_dir)
    elif exists(ipw_base_dir):
        raise IPWFileError("clobber=False and %s exists" % ipw_base_dir)
    mkdir(ipw_base_dir)
    time_index = range(len(nc_in.variables['time']))
    if type_ == 'inputs':
        # for each time step create an IPW file
        inputs_dir = osjoin(ipw_base_dir, 'inputs')
        mkdir(inputs_dir)
        tsteps = len(time_index)
        # BUGFIX: floor(log10(...)) returns a float; it must be an int for
        # the "0"*zeropad_factor string repetitions below to be valid
        zeropad_factor = int(floor(log10(tsteps)))
        file_type = 'in'
        logging.debug('creating input ipw files for each timestep from the input netcdf file (stage 1)')
        with ProgressBar(maxval=time_index[-1]) as progress:
            if len(time_index) > 1:
                for i, idx in enumerate(time_index):
                    # zero-pad the extension so file names sort correctly
                    if idx < 10:
                        idxstr = "0"*zeropad_factor + str(idx)
                    elif idx < 100:
                        idxstr = "0"*(zeropad_factor - 1) + str(idx)
                    elif idx < 1000:
                        idxstr = "0"*(zeropad_factor - 2) + str(idx)
                    else:
                        idxstr = str(idx)
                    IPW.from_nc(nc_in, tstep=idx, file_type=file_type,
                                ).write(osjoin(inputs_dir, 'in.' + idxstr))
                    progress.update(i)
                    kwargs['event_name'] = 'processing_input'
                    kwargs['event_description'] = 'creating input ipw files for ' \
                        'each timestep from the input netcdf file (stage 1)'
                    kwargs['progress_value'] = format(
                        (float(i)/time_index[-1]) * 100, '.2f')
                    if event_emitter:
                        event_emitter.emit('progress', **kwargs)
            else:
                # single time step: write one un-suffixed input file.
                # BUGFIX: this branch previously referenced the loop
                # variable `i` (never bound here) and divided by
                # time_index[-1] == 0, so it always crashed at runtime
                IPW.from_nc(nc_in, tstep=time_index[0], file_type=file_type,
                            ).write(osjoin(inputs_dir, 'in'))
                progress.update(0)
                kwargs['event_name'] = 'processing_input'
                kwargs['event_description'] = 'creating input ipw files for ' \
                    'each timestep from the input netcdf file (stage 1)'
                kwargs['progress_value'] = format(100.0, '.2f')
                if event_emitter:
                    event_emitter.emit('progress', **kwargs)
        kwargs['event_name'] = 'processing_input'
        kwargs['event_description'] = \
            'creating input ipw for each timestep form nc'
        kwargs['progress_value'] = 100
        if event_emitter:
            event_emitter.emit('progress', **kwargs)
        file_type = 'init'
        IPW.from_nc(nc_in, file_type=file_type
                    ).write(osjoin(ipw_base_dir, 'init.ipw'))
        IPW.from_nc(nc_in, variable='alt'
                    ).write(osjoin(ipw_base_dir, 'dem.ipw'))
        IPW.from_nc(nc_in, variable='mask'
                    ).write(osjoin(ipw_base_dir, 'mask.ipw'))
        # precip is weird. for no precip tsteps, no IPW exists
        # list of tsteps that had precip and associated
        # files stored in ppt_desc
        file_type = 'precip'
        ppt_images_dir = osjoin(ipw_base_dir, 'ppt_images_dist')
        mkdir(ppt_images_dir)
        # can use just one variable (precip mass) to see which
        mpp = nc_in.variables['m_pp'][:]
        pctsnow = nc_in.variables['percent_snow'][:]
        rhosnow = nc_in.variables['rho_snow'][:]
        precip_temp = nc_in.variables['T_pp'][:]
        # if no precip at a tstep, variable type is numpy.ma.core.MaskedArray
        # keep a tstep unless every precip variable is fully masked or is
        # entirely fill values (> 1e6)
        time_indexes = [i for i, el in enumerate(mpp)
                        if not (
                            (
                                (mpp[i].all() is masked) and
                                (pctsnow[i].all() is masked) and
                                (rhosnow[i].all() is masked) and
                                (precip_temp[i].all() is masked)
                            )
                            or
                            (
                                (mpp[i] > 1e6).all() and
                                (pctsnow[i] > 1e6).all() and
                                (rhosnow[i] > 1e6).all() and
                                (precip_temp[i] > 1e6).all()
                            )
                        )
                        ]
        # this should be mostly right except for ppt_desc and ppt data dir
        with open(osjoin(ipw_base_dir, 'ppt_desc'), 'w') as ppt_desc:
            logging.debug('creating input ipw files for each timestep from the input netcdf file (stage 2)')
            with ProgressBar(maxval=len(time_indexes)) as progress:
                for i, idx in enumerate(time_indexes):
                    ppt_desc.write("%s\t%s\n" % (idx,
                                   osjoin(ppt_images_dir,
                                          'ppt_' + str(idx) + '.ipw')))
                    ipw = IPW.from_nc(nc_in, tstep=idx, file_type=file_type)
                    ipw.write(osjoin(ppt_images_dir,
                                     'ppt_' + str(idx) + '.ipw'))
                    progress.update(i)
                    kwargs['event_name'] = 'processing_input2'
                    kwargs['event_description'] = 'creating input ipw files for each timestep from the input netcdf file (stage 2)'
                    kwargs['progress_value'] = format((float(i)/len(time_indexes)) * 100, '.2f')
                    if event_emitter:
                        event_emitter.emit('progress',**kwargs)
        kwargs['event_name'] = 'processing_input2'
        kwargs['event_description'] = 'creating input ipw for each timestep form nc 2'
        kwargs['progress_value'] = 100
        if event_emitter:
            event_emitter.emit('progress',**kwargs)
    else:
        raise Exception("NetCDF to IPW converter not implemented for type %s" %
                        type_)
def metadata_from_ipw(ipw, output_file, parent_model_run_uuid, model_run_uuid,
                      description, model_set=None):
    """
    Create the metadata for the IPW object, even if it doesn't exist as
    a file on disk.

    WARNING: Does not check that output_file exists. Should be used when,
    e.g., a re-sampled IPW file or geotiff is being created and saved, and
    the metadata also needs to be created and either saved or sent to the
    watershed.

    Returns: the metadata produced by make_watershed_metadata
    """
    fgdc_metadata = make_fgdc_metadata(output_file,
                                       ipw.config, model_run_uuid)
    if model_set is None:
        # files whose name starts with "in." are model inputs; everything
        # else is treated as an output
        prefix = output_file.split('.')[0]
        model_set = "inputs" if prefix == "in" else "outputs"
    return make_watershed_metadata(output_file,
                                   ipw.config,
                                   parent_model_run_uuid,
                                   model_run_uuid,
                                   model_set,
                                   description,
                                   ipw.model_vars,
                                   fgdc_metadata,
                                   ipw.start_datetime,
                                   ipw.end_datetime)
def reaggregate_ipws(ipws, fun=npsum, freq='H', rule='D'):
    """
    Resample IPWs using the function fun, but only sum is supported.
    `freq` corresponds to the actual frequency of the ipws; rule corresponds to
    one of the resampling 'rules' given here:
    http://pandas.pydata.org/pandas-docs/dev/timeseries.html#time-date-components

    Arguments:
        ipws: consecutive (see _is_consecutive) list of IPW objects
        fun: aggregation function; must be numpy.sum
        freq (str): pandas frequency string of the input series
        rule (str): pandas resampling rule for the output series

    Returns: list of new IPW objects, one per resampled period
    """
    assert fun is npsum, "Cannot use " + fun.func_name + \
        ", only sum has been implemented"
    assert _is_consecutive(ipws)
    ipw0 = ipws[0]
    start_datetime = ipw0.start_datetime
    # build a time-indexed Series of dataframes so pandas can resample them
    idx = date_range(start=start_datetime, periods=len(ipws), freq=freq)
    series = Series(map(lambda ipw: ipw.data_frame(), ipws), index=idx)
    # NOTE: resample(..., how=...) is the legacy pandas resampling API
    resampled = series.resample(rule, how=npsum)
    resampled_idx = resampled.index
    # length of one resampled period; used to derive each new end_datetime
    resampled_dt = resampled_idx[1] - resampled_idx[0]
    resampled_ipws = [IPW() for el in resampled]
    header_dict = deepcopy(ipw0.header_dict)
    file_type = ipw0.file_type
    # bands = deepcopy(ipw0.bands)
    bands = ipw0.bands
    # nonglobal_bands = deepcopy(ipw0.nonglobal_bands)
    nonglobal_bands = ipw0.nonglobal_bands
    geotransform = ipw0.geotransform
    for ipw_idx, ipw in enumerate(resampled_ipws):
        # populate each empty IPW shell with the summed data plus deep
        # copies of band metadata so the shells stay independent
        ipw._data_frame = resampled[ipw_idx]
        ipw.start_datetime = resampled_idx[ipw_idx]
        ipw.end_datetime = resampled_idx[ipw_idx] + resampled_dt
        ipw.header_dict = deepcopy(header_dict)
        ipw.file_type = file_type
        ipw.bands = deepcopy(bands)
        ipw.nonglobal_bands = deepcopy(nonglobal_bands)
        ipw.geotransform = geotransform
        # header min/max must reflect the newly aggregated data
        ipw.recalculate_header()
    return resampled_ipws
def _is_consecutive(ipws):
"""
Check that a list of ipws is consecutive
"""
ret = True
ipw_prev = ipws[0]
for ipw in ipws[1:]:
ret &= ipw_prev.end_datetime == ipw.start_datetime
ipw_prev = ipw
return ret
def _build_ipw_dataframe(nonglobal_bands, binary_data):
    """
    Build a pandas DataFrame using header info to assign column names.

    Arguments:
        nonglobal_bands: list of Band objects in band-index order; their
            byte counts define the unsigned-int record layout of the binary
        binary_data: raw pixel bytes from the IPW file

    Returns: (pandas.DataFrame) one column per band with the quantized
        integers mapped back to floats via each band's lq parameters
    """
    from numpy import frombuffer  # not part of the module-level numpy imports
    colnames = [b.varname for b in nonglobal_bands]
    dtype = _bands_to_dtype(nonglobal_bands)
    # frombuffer replaces the deprecated numpy.fromstring; .copy() restores
    # fromstring's writable-array behavior
    intData = frombuffer(binary_data, dtype=dtype).copy()
    df = DataFrame(intData, columns=colnames)
    for b in nonglobal_bands:
        # map the stored unsigned ints back to floats band by band
        df[b.varname] = _calc_float_value(b, df[b.varname])
    return df
def _make_bands(header_lines, varnames):
    """
    Make a header dictionary that points to Band objects for each variable
    name.

    Arguments:
        header_lines: raw IPW header lines, starting with a "!<header>" line
        varnames: band variable names in band-index order for this file type

    Returns: dict mapping 'global' and each variable name to its Band
    """
    globalEndIdx = 0
    # parse global information from global header
    # (scan for the first band-section start after the opening line)
    for i, l in enumerate(header_lines[1:-1]):
        if IsHeaderStart(l):
            globalEndIdx = i
            break
    global_header_lines = header_lines[1:globalEndIdx+1]
    # tried a prettier dictionary comprehension, but wouldn't fly
    global_band_dict = defaultdict(int)
    for l in global_header_lines:
        if l:
            # lines look like "key = value"; split yields [key, '=', value]
            spl = l.strip().split()
            if spl[0] == 'byteorder':
                # byteorder is the one string-valued global field
                global_band_dict[spl[0]] = spl[2]
            else:
                global_band_dict[spl[0]] = int(spl[2])
    # these are the standard names in an ISNOBAL header file
    byteorder = global_band_dict['byteorder']
    nLines = global_band_dict['nlines']
    nSamps = global_band_dict['nsamps']
    nBands = global_band_dict['nbands']
    # this will be put into the return dictionary at the return statement
    globalBand = GlobalBand(byteorder, nLines, nSamps, nBands)
    # initialize a list of bands to put parsed information into
    bands = [Band() for i in range(nBands)]
    for i, b in enumerate(bands):
        b.varname = varnames[i]
        b.band_idx = i
    band_type = None
    band_idx = None
    geo_parsed = False
    ref_band = Band()
    geo_count = 0
    for line in header_lines[globalEndIdx:]:
        spl = line.strip().split()
        attr = spl[0]
        if IsHeaderStart(line):
            band_type = spl[BAND_TYPE_LOC]
            band_idx = int(spl[BAND_INDEX_LOC])
            # reset the per-section counter of lq "map" lines
            lqCounter = 0
            if band_type == 'geo':
                geo_count += 1
                # geo info is assumed redundant across bands; from the
                # second geo section on we only verify agreement
                if geo_count == 2:
                    geo_parsed = True
        elif band_type == 'basic_image':
            # assign byte and bits info that's stored here
            if attr in ['bits', 'bytes']:
                setattr(bands[band_idx], attr + "_", int(spl[2]))
        elif band_type == 'lq':
            # assign integer and float min and max. ignore non-"map" fields
            if attr == "map":
                # minimum values are listed first by IPW
                if lqCounter == 0:
                    bands[band_idx].int_min = float(spl[2])
                    bands[band_idx].float_min = float(spl[3])
                    lqCounter += 1
                elif lqCounter == 1:
                    bands[band_idx].int_max = float(spl[2])
                    bands[band_idx].float_max = float(spl[3])
        elif band_type == 'geo':
            # Not all bands have geo information. The ones that do are
            # expected to be redundant. Check that all available are equal
            # and for any that don't have geo information, set them to the
            # geo information
            if not geo_parsed:
                if attr in ["bline", "bsamp", "dline", "dsamp"]:
                    setattr(ref_band, attr, float(spl[2]))
                    # setattr(bands[band_idx], attr, float(spl[2]))
                elif attr in ["units", "coord_sys_ID"]:
                    if attr == "units":
                        attr = "geo_units"
                    setattr(ref_band, attr, spl[2])
                    # setattr(bands[band_idx], attr, spl[2])
                else:
                    raise Exception(
                        "'geo' attribute %s from IPW file not recognized!" %
                        attr)
            else:
                if attr == "units":
                    attr = "geo_units"
                # NOTE(review): per-band geo assignment above is commented
                # out, so this compares ref_band against Band() defaults --
                # verify this assertion behaves as intended
                assert\
                    getattr(ref_band, attr) == getattr(bands[band_idx], attr)
    # now set all bands to the reference band
    for band in bands:
        band.bline = ref_band.bline
        band.bsamp = ref_band.bsamp
        band.dline = ref_band.dline
        band.dsamp = ref_band.dsamp
        band.geo_units = ref_band.geo_units
        band.coord_sys_ID = ref_band.coord_sys_ID
    return dict(zip(['global']+varnames[:nBands], [globalBand]+bands))
def _calc_float_value(band, integerValue):
"""
Calculate a floating point value for the integer int_ given the min/max int
and min/max floats in the given bandObj
Returns: Floating point value of the mapped int_
"""
floatRange = band.float_max - band.float_min
return integerValue * (floatRange / band.int_max) + band.float_min
def _bands_to_dtype(bands):
"""
Given a list of Bands, convert them to a numpy.dtype for use in creating
the IPW dataframe.
"""
return dtype([(b.varname, 'uint' + str(b.bytes_ * 8)) for b in bands])
def _bands_to_header_lines(bands_dict):
    """
    Convert the bands to a new header assuming the float ranges are up to date
    for the current dataframe, df.

    Arguments:
        bands_dict: dict mapping 'global' to a GlobalBand and each variable
            name to its Band

    Returns: list of header lines ready for writing (no trailing newlines)
    """
    firstLine = "!<header> basic_image_i -1 $Revision: 1.11 $"
    global_ = bands_dict['global']
    firstLines = [firstLine,
                  "byteorder = {0} ".format(global_.byteorder),
                  "nlines = {0} ".format(global_.nLines),
                  "nsamps = {0} ".format(global_.nSamps),
                  "nbands = {0} ".format(global_.nBands)]
    other_lines = []
    bands = [b for varname, b in bands_dict.iteritems() if varname != 'global']
    # header sections must be emitted in band-index order
    bands = sorted(bands, key=lambda b: b.band_idx)
    # for some reason IPW has a space at the end of data lines
    for i, b in enumerate(bands):
        other_lines += ["!<header> basic_image {0} $Revision: 1.11 $".format(i),
                        "bytes = {0} ".format(b.bytes_),
                        "bits = {0} ".format(b.bits_)]
    # build the linear quantization (lq) headers
    for i, b in enumerate(bands):
        int_min = int(b.int_min)
        int_max = int(b.int_max)
        # IPW writes integer floats without a dec point, so remove if necessary
        float_min = \
            (b.float_min, int(b.float_min))[b.float_min == int(b.float_min)]
        float_max = \
            (b.float_max, int(b.float_max))[b.float_max == int(b.float_max)]
        other_lines += ["!<header> lq {0} $Revision: 1.6 $".format(i),
                        "map = {0} {1} ".format(int_min, float_min),
                        "map = {0} {1} ".format(int_max, float_max)]
    # import ipdb; ipdb.set_trace()
    # build the geographic header
    for i, b in enumerate(bands):
        bline = b.bline
        bsamp = b.bsamp
        dline = b.dline
        dsamp = b.dsamp
        units = b.geo_units
        coord_sys_ID = b.coord_sys_ID
        other_lines += ["!<header> geo {0} $Revision: 1.7 $".format(i),
                        "bline = {0} ".format(bline),
                        "bsamp = {0} ".format(bsamp),
                        "dline = {0} ".format(dline),
                        "dsamp = {0} ".format(dsamp),
                        "units = {0} ".format(units),
                        "coord_sys_ID = {0} ".format(coord_sys_ID)]
    return firstLines + other_lines
def _write_floatdf_binstring_to_file(bands, df, write_file):
    """
    Convert the dataframe floating point data to a binary string and write it.

    Arguments:
        bands: list of Band objects
        df: dataframe to be written
        write_file: File object ready for writing to

    Raises:
        AssertionError: if a band's float_min/float_max does not actually
            bound the corresponding dataframe column.
    """
    # first convert df to an integer dataframe
    int_df = DataFrame(dtype='uint64')

    for b in sorted(bands, key=lambda b: b.band_idx):
        # check that bands are appropriately made, that b.Max/Min really are
        assert df[b.varname].le(b.float_max).all(), \
            "Bad band: max not really max.\nb.float_max = %2.10f\n \
            df[b.varname].max() = %s" % (b.float_max, df[b.varname].max())

        assert df[b.varname].ge(b.float_min).all(), \
            "Bad band: min not really min.\nb.float_min = %s\n \
            df[b.varname].min() = %2.10f" % (b.float_min, df[b.varname].min())

        def _map_fn(x):
            # Constant-valued band: every value quantizes to zero.
            if b.float_max - b.float_min == 0.0:
                return 0.0
            else:
                # Linearly rescale [float_min, float_max] -> [0, int_max],
                # rounding to the nearest integer code.
                return floor(npround(
                    ((x - b.float_min) * b.int_max)/(b.float_max - b.float_min)
                ))

        int_df[b.varname] = _map_fn(df[b.varname])

    # use the struct package to pack ints to bytes; use '=' to prevent padding
    # that causes problems with the IPW scheme
    # BUG FIX: DataFrame.as_matrix() was deprecated and removed from pandas;
    # .values is the long-supported equivalent.
    int_mat = int_df.values

    pack_str = "=" + "".join([PACK_DICT[b.bytes_] for b in bands])*len(int_mat)

    flat_mat = int_mat.flatten()
    write_file.write(pack(pack_str, *flat_mat))
def _recalculate_header(bands, dataframe):
"""
Recalculate the minimum and maximum of each band in bands given a dataframe
that contains data for each band.
Returns: None
"""
assert set(list(dataframe.columns)) == set([b.varname for b in bands]), \
"DataFrame column names do not match bands' variable names!"
for band in bands:
band.float_min = dataframe[band.varname].min()
band.float_max = dataframe[band.varname].max()
if band.float_min == band.float_max:
band.float_max = band.float_min + 1.0
return None
class Band(object):
    """
    Container for band information
    """
    def __init__(self, varname="", band_idx=0, nBytes=0, nBits=0, int_min=0.0,
                 int_max=0.0, float_min=0.0, float_max=0.0,
                 bline=0.0, bsamp=0.0, dline=0.0, dsamp=0.0,
                 units="meters", coord_sys_ID="UTM"):
        """
        Can either pass this information or create an all-None Band.
        """
        self.varname = varname      # variable/column name for this band
        self.band_idx = band_idx    # position of the band within the file

        self.bytes_ = nBytes        # storage width in bytes
        self.bits_ = nBits          # storage width in bits

        # quantized (integer) and physical (float) value ranges
        self.int_min = float(int_min)
        self.int_max = float(int_max)
        self.float_min = float(float_min)
        self.float_max = float(float_max)

        # values written into the "geo" header section
        self.bline = float(bline)
        self.bsamp = float(bsamp)
        self.dline = float(dline)
        self.dsamp = float(dsamp)

        assert type(units) is str
        self.geo_units = units

        assert type(coord_sys_ID) is str
        self.coord_sys_ID = coord_sys_ID

    def __str__(self):
        # BUG FIX: use .items() instead of the Python-2-only .iteritems() so
        # repr-style printing also works under Python 3.
        return "-- " + self.varname + " --\n" +\
            "".join([attr + ": " + str(value) + "\n"
                     for attr, value in
                     self.__dict__.items()])
class IPWLines(object):
    """
    Data structure to wrap header and binary parts of an IPW file.

    Arguments: ipw_file -- file name pointing to an IPW file

    NOTE(review): this class relies on Python-2 string semantics
    ("".join over lines read in 'rb' mode); under Python 3 readlines()
    yields bytes and the joins/containment tests below need b'' literals.
    """
    def __init__(self, ipw_file):

        with open(ipw_file, 'rb') as f:
            lines = f.readlines()

        # The IPW header is terminated by a line containing an ASCII form
        # feed; everything after that line is raw binary image data.
        # BUG FIX: the predicate previously tested `"" in l`, which is true
        # for every string, so the split always happened after the very
        # first line regardless of the real header length.
        last_header_idx = \
            [(i, l) for i, l in enumerate(lines) if "\f" in l][0][0]

        split_idx = last_header_idx + 1

        self.header_lines = lines[:split_idx]

        self.binary_data = "".join(lines[split_idx:])
class IPWFileError(Exception):
    """Module-specific exception for IPW file handling errors.

    (Raised by code outside this chunk; defined here as a catchable type.)
    """
    pass
class ISNOBALNetcdfError(Exception):
    """Module-specific exception for iSNOBAL NetCDF handling errors.

    (Raised by code outside this chunk; defined here as a catchable type.)
    """
    pass
| bsd-2-clause |
grehx/spark-tk | regression-tests/sparktkregtests/testcases/frames/unflatten_test.py | 1 | 6554 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests unflatten functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
class Unflatten(sparktk_test.SparkTKTestCase):
    """Tests for frame.unflatten_columns on single and composite keys."""

    def setUp(self):
        """Locate the csv fixture and define its schema."""
        super(Unflatten, self).setUp()
        self.datafile_unflatten = self.get_file("unflatten_data_no_spaces.csv")
        self.schema_unflatten = [("user", str),
                                 ("day", str),
                                 ("time", str),
                                 ("reading", int)]

    def test_unflatten_one_column(self):
        """ test for unflatten comma-separated rows """
        frame = self.context.frame.import_csv(
            self.datafile_unflatten, schema=self.schema_unflatten)

        # get as a pandas frame to access data
        pandas_frame = frame.to_pandas(frame.count())

        # use this dictionary to store expected results by name
        name_lookup = {}

        # generate our expected results by unflattening
        # each row ourselves and storing it by name
        for index, row in pandas_frame.iterrows():
            # if the name is not already in name lookup
            # index 0 refers to user name (see schema above)
            if row['user'] not in name_lookup:
                name_lookup[row['user']] = row
            else:
                row_copy = name_lookup[row['user']]
                # append each item in the row to a comma
                # delineated string
                # (col_idx, not index, so the iterrows() index above is
                # not shadowed)
                for col_idx in range(1, len(row_copy)):
                    row_copy[col_idx] = str(
                        row_copy[col_idx]) + "," + str(row[col_idx])
                name_lookup[row['user']] = row_copy

        # now we unflatten the columns using sparktk
        frame.unflatten_columns(['user'])

        # finally we iterate through what we got
        # from sparktk unflatten and compare
        # it to the expected results we created
        unflatten_pandas = frame.to_pandas()
        for index, row in unflatten_pandas.iterrows():
            self.assertEqual(row['user'], name_lookup[row['user']]['user'])
            self.assertEqual(row['day'], name_lookup[row['user']]['day'])
            self.assertItemsEqual(
                row['time'].split(','),
                name_lookup[row['user']]['time'].split(','))
            self.assertItemsEqual(
                row['reading'].split(','),
                name_lookup[row['user']]['reading'].split(','))
        self.assertEqual(frame.count(), 5)

    def test_unflatten_multiple_cols(self):
        """Unflatten with both user and day as key columns."""
        frame = self.context.frame.import_csv(
            self.datafile_unflatten, schema=self.schema_unflatten)

        # get a pandas frame of the data
        pandas_frame = frame.to_pandas(frame.count())
        name_lookup = {}

        # same logic as for unflatten_one_column,
        # generate our expected results by unflattening
        for index, row in pandas_frame.iterrows():
            if row['user'] not in name_lookup:
                name_lookup[row['user']] = row
            else:
                row_copy = name_lookup[row['user']]
                for col_idx in range(1, len(row_copy)):
                    # then only difference between multi col and single col
                    # is that we will expect any data in the column at index 1
                    # (which is day, see the schema above)
                    # to also be flattened
                    # so we only append data if it isn't already there
                    # BUG FIX: compare with != rather than the identity
                    # test "is not" against the int literal 1 (relies on
                    # CPython small-int caching and warns on Python 3.8+).
                    if col_idx != 1 or str(
                            row[col_idx]) not in str(row_copy[col_idx]):
                        row_copy[col_idx] = str(
                            row_copy[col_idx]) + "," + str(row[col_idx])
                name_lookup[row['user']] = row_copy

        # now we unflatten using sparktk
        frame.unflatten_columns(['user', 'day'])

        # and compare our expected data with sparktk results
        # which we have taken as a pandas frame
        unflatten_pandas = frame.to_pandas()
        for index, row in unflatten_pandas.iterrows():
            self.assertEqual(row['user'], name_lookup[row['user']]['user'])
            self.assertEqual(row['day'], name_lookup[row['user']]['day'])
            self.assertItemsEqual(
                row['time'].split(','),
                name_lookup[row['user']]['time'].split(','))
            self.assertItemsEqual(
                row['reading'].split(','),
                name_lookup[row['user']]['reading'].split(','))

    # same logic as single column but with sparse data
    # because there are so many rows in this data
    # (datafile contains thousands and thousands of lines)
    # we will not compare the content of the unflatten frame
    # as this would require us to iterate multiple times
    def test_unflatten_sparse_data(self):
        datafile_unflatten_sparse = self.get_file("unflatten_data_sparse.csv")
        schema_unflatten_sparse = [("user", int),
                                   ("day", str),
                                   ("time", str),
                                   ("reading", str)]
        frame_sparse = self.context.frame.import_csv(
            datafile_unflatten_sparse, schema=schema_unflatten_sparse)
        frame_sparse.unflatten_columns(['user'])
        pandas_frame_sparse = frame_sparse.to_pandas()

        # since this data set is huge we will just iterate once
        # to make sure the data has been appended for time and
        # reading for each row and that there are the correct
        # number of items that we would expect for the
        # unflattened frame
        for index, row in pandas_frame_sparse.iterrows():
            self.assertEqual(
                len(str(row['time']).split(",")),
                len(str(row['reading']).split(",")))
        self.assertEqual(frame_sparse.count(), 100)
# Allow running this test module directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
oemof/demandlib | demandlib/examples/power_demand_example.py | 1 | 3770 | # -*- coding: utf-8 -*-
"""
Creating power demand profiles using bdew profiles.
Installation requirements
-------------------------
This example requires at least version v0.1.4 of the oemof demandlib. Install
by:
pip install 'demandlib>=0.1.4,<0.2'
Optional:
pip install matplotlib
SPDX-FileCopyrightText: Birgit Schachler
SPDX-FileCopyrightText: Uwe Krien <krien@uni-bremen.de>
SPDX-FileCopyrightText: Stephen Bosch
SPDX-License-Identifier: MIT
"""
import datetime
import demandlib.bdew as bdew
import demandlib.particular_profiles as profiles
from datetime import time as settime
# matplotlib is optional: the example still runs (without plotting) when it
# is not installed.
try:
    import matplotlib.pyplot as plt
except ImportError:
    print("Install the matplotlib to see the plots.")
    plt = None

# The following dictionary is create by "workalendar"
# pip3 install workalendar
# >>> from workalendar.europe import Germany
# >>> cal = Germany()
# >>> holidays = dict(cal.holidays(2010))
# German public holidays of 2010, keyed by date; passed to the profile
# generators below.
holidays = {
    datetime.date(2010, 5, 24): 'Whit Monday',
    datetime.date(2010, 4, 5): 'Easter Monday',
    datetime.date(2010, 5, 13): 'Ascension Thursday',
    datetime.date(2010, 1, 1): 'New year',
    datetime.date(2010, 10, 3): 'Day of German Unity',
    datetime.date(2010, 12, 25): 'Christmas Day',
    datetime.date(2010, 5, 1): 'Labour Day',
    datetime.date(2010, 4, 2): 'Good Friday',
    datetime.date(2010, 12, 26): 'Second Christmas Day'}
def power_example(
        ann_el_demand_per_sector=None,
        testmode=False):
    """Create 2010 electrical demand profiles from BDEW standard load
    profiles plus three industrial profile variants.

    Parameters
    ----------
    ann_el_demand_per_sector : dict or None
        Annual demand (kWh) per BDEW sector code; a default set is used
        when None.
    testmode : bool
        When True, suppress all printing/plotting and just return the
        15-minute demand DataFrame.
    """
    if ann_el_demand_per_sector is None:
        ann_el_demand_per_sector = {
            'g0': 3000,
            'h0': 3000,
            'i0': 3000,
            'i1': 5000,
            'i2': 6000,
            'g6': 5000}
    year = 2010

    # Standard load profiles for the given year, holiday-aware.
    slp = bdew.ElecSlp(year, holidays=holidays)

    # Scale the normalized profiles by the annual demand of each sector.
    demand = slp.get_profile(ann_el_demand_per_sector,
                             dyn_function_h0=False)

    # Industrial profiles share the SLP's 15-minute date/time index.
    industrial = profiles.IndustrialLoadProfile(slp.date_time_index,
                                                holidays=holidays)

    # i0: default workday bounds and scaling factors.
    if 'i0' in ann_el_demand_per_sector:
        demand['i0'] = industrial.simple_profile(
            ann_el_demand_per_sector['i0'])

    # i1: workday beginning at 9 am.
    if 'i1' in ann_el_demand_per_sector:
        demand['i1'] = industrial.simple_profile(
            ann_el_demand_per_sector['i1'],
            am=settime(9, 0, 0))

    # i2: custom day/night scaling factors.
    if 'i2' in ann_el_demand_per_sector:
        demand['i2'] = industrial.simple_profile(
            ann_el_demand_per_sector['i2'],
            profile_factors={'week': {'day': 1.0, 'night': 0.8},
                             'weekend': {'day': 0.8, 'night': 0.6}})

    if not testmode:
        print("Be aware that the values in the DataFrame are 15 minute values"
              + "with a power unit. If you sum up a table with 15min values"
              + "the result will be of the unit 'kW15minutes'.")
        print(demand.sum())

        print("You will have to divide the result by 4 to get kWh.")
        print(demand.sum() / 4)

        print("Or resample the DataFrame to hourly values using the mean() "
              "method.")

        # Resample 15-minute values to hourly values.
        demand = demand.resample('H').mean()
        print(demand.sum())

        if plt is not None:
            # Plot demand
            ax = demand.plot()
            ax.set_xlabel("Date")
            ax.set_ylabel("Power demand")
            plt.show()

    return demand
# Run the example and print the resulting demand DataFrame when executed
# as a script.
if __name__ == '__main__':
    print(power_example())
| mit |
zuku1985/scikit-learn | sklearn/model_selection/tests/test_split.py | 12 | 47658 | """Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from scipy.misc import comb
from itertools import combinations
from itertools import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.svm import SVC
# Shared toy fixtures used throughout the tests below.
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
# Group labels in several container types/dtypes for the group-based
# splitters (arrays, plain lists, and string labels).
test_groups = (
    np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
    np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
    np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
    np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
    [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
    ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: dummy hyper-parameter; allow_nd: accept >2d X by flattening it.
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        # Each extra fit_param must have been sliced consistently with X.
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                       X.shape[0]))
        if class_prior is not None:
            # NOTE: this compares against the module-level `y`, not the `Y`
            # argument of fit.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # Mirror fit's flattening, then "predict" the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Deterministic score in (0, 1] that depends only on parameter a.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
@ignore_warnings
def test_cross_validator_with_default_params():
    """Smoke-test the basic splitters with default construction.

    Checks get_n_splits, 1d/2d input equivalence, integer index dtype, and
    repr for each cross-validator.
    """
    n_samples = 4
    n_unique_groups = 4
    n_splits = 2
    p = 2
    n_shuffle_splits = 10  # (the default value)

    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    groups = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_splits)
    skf = StratifiedKFold(n_splits)
    lolo = LeaveOneGroupOut()
    lopo = LeavePGroupsOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = np of unique folds = 2

    loo_repr = "LeaveOneOut()"
    lpo_repr = "LeavePOut(p=2)"
    kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
    skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
    lolo_repr = "LeaveOneGroupOut()"
    lopo_repr = "LeavePGroupsOut(n_groups=2)"
    ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, test_size=0.1, "
               "train_size=None)")
    ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"

    n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
                         n_unique_groups, comb(n_unique_groups, p),
                         n_shuffle_splits, 2]

    for i, (cv, cv_repr) in enumerate(zip(
            [loo, lpo, kf, skf, lolo, lopo, ss, ps],
            [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
             ss_repr, ps_repr])):
        # Test if get_n_splits works correctly
        assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))

        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(list(cv.split(X, y, groups)),
                                list(cv.split(X_1d, y, groups)))
        # Test that train, test indices returned are integers
        for train, test in cv.split(X, y, groups):
            assert_equal(np.asarray(train).dtype.kind, 'i')
            # BUG FIX: the second check previously re-tested `train`;
            # it must validate the test indices.
            assert_equal(np.asarray(test).dtype.kind, 'i')

        # Test if the repr works without any errors
        assert_equal(cv_repr, repr(cv))
def check_valid_split(train, test, n_samples=None):
    """Assert a train/test split is disjoint and, when n_samples is given,
    that it covers every index."""
    # Use python sets to get more informative assertion failure messages
    train_set = set(train)
    test_set = set(test)

    # Train and test split should not overlap
    assert_equal(train_set.intersection(test_set), set())

    if n_samples is not None:
        # Check that the union of train an test split cover all the indices
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
    """Check that cv yields the expected number of valid splits and that
    the union of all test folds covers the whole dataset."""
    n_samples = _num_samples(X)
    # Check that a all the samples appear at least once in a test fold
    if expected_n_splits is not None:
        assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
    else:
        expected_n_splits = cv.get_n_splits(X, y, groups)

    collected_test_samples = set()
    iterations = 0
    for train, test in cv.split(X, y, groups):
        check_valid_split(train, test, n_samples=n_samples)
        iterations += 1
        collected_test_samples.update(test)

    # Check that the accumulated test samples cover the whole dataset
    assert_equal(iterations, expected_n_splits)
    if n_samples is not None:
        assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    """Invalid arguments to (Stratified)KFold must raise or warn."""
    X1 = np.array([[1, 2], [3, 4], [5, 6]])
    X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, next, KFold(4).split(X1))

    # Check that a warning is raised if the least populated class has too few
    # members.
    y = np.array([3, 3, -1, -1, 3])

    skf_3 = StratifiedKFold(3)
    assert_warns_message(Warning, "The least populated class",
                         next, skf_3.split(X2, y))

    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)

    # Check that errors are raised if all n_groups for individual
    # classes are less than n_splits.
    y = np.array([3, 3, -1, -1, 2])

    assert_raises(ValueError, next, skf_3.split(X2, y))

    # Error when number of folds is <= 1
    assert_raises(ValueError, KFold, 0)
    assert_raises(ValueError, KFold, 1)
    error_string = ("k-fold cross-validation requires at least one"
                    " train/test split")
    assert_raise_message(ValueError, error_string,
                         StratifiedKFold, 0)
    assert_raise_message(ValueError, error_string,
                         StratifiedKFold, 1)

    # When n_splits is not integer:
    assert_raises(ValueError, KFold, 1.5)
    assert_raises(ValueError, KFold, 2.0)
    assert_raises(ValueError, StratifiedKFold, 1.5)
    assert_raises(ValueError, StratifiedKFold, 2.0)

    # When shuffle is not  a bool:
    assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
    """KFold test folds must collectively cover every sample index."""
    # Check all indices are returned in the test folds
    X1 = np.ones(18)
    kf = KFold(3)
    check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)

    # Check all indices are returned in the test folds even when equal-sized
    # folds are not possible
    X2 = np.ones(17)
    kf = KFold(3)
    check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)

    # Check if get_n_splits returns the number of folds
    assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
    """Unshuffled KFold must produce contiguous, ordered folds."""
    # Manually check that KFold preserves the data ordering on toy datasets
    X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]

    splits = KFold(2).split(X2[:-1])
    train, test = next(splits)
    assert_array_equal(test, [0, 1])
    assert_array_equal(train, [2, 3])

    train, test = next(splits)
    assert_array_equal(test, [2, 3])
    assert_array_equal(train, [0, 1])

    splits = KFold(2).split(X2)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 2])
    assert_array_equal(train, [3, 4])

    train, test = next(splits)
    assert_array_equal(test, [3, 4])
    assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    """Unshuffled StratifiedKFold must keep data ordering where possible."""
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    X, y = np.ones(4), [1, 1, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])

    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])

    X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])

    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])

    # Check if get_n_splits returns the number of folds
    assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))

    # Make sure string labels are also supported
    X = np.ones(7)
    y1 = ['1', '1', '1', '0', '0', '0', '0']
    y2 = [1, 1, 1, 0, 0, 0, 0]
    np.testing.assert_equal(
        list(StratifiedKFold(2).split(X, y1)),
        list(StratifiedKFold(2).split(X, y2)))
def test_stratified_kfold_ratios():
    """Each stratified fold must approximately preserve class ratios."""
    # Check that stratified kfold preserves class ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    X = np.ones(n_samples)
    y = np.array([4] * int(0.10 * n_samples) +
                 [0] * int(0.89 * n_samples) +
                 [1] * int(0.01 * n_samples))

    for shuffle in (False, True):
        for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
            assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
            assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
            assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
            assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    # Check that KFold returns folds with balanced sizes
    for n in range(11, 17):
        fold_sizes = [len(test)
                      for _, test in KFold(5).split(X=np.ones(n))]

        assert_true((np.max(fold_sizes) - np.min(fold_sizes)) <= 1)
        assert_equal(np.sum(fold_sizes), n)
def test_stratifiedkfold_balance():
    """Stratified fold sizes must differ by at most one sample."""
    # Check that KFold returns folds with balanced sizes (only when
    # stratification is possible)
    # Repeat with shuffling turned off and on
    X = np.ones(17)
    y = [0] * 3 + [1] * 14

    for shuffle in (True, False):
        cv = StratifiedKFold(3, shuffle=shuffle)
        for i in range(11, 17):
            skf = cv.split(X[:i], y[:i])
            sizes = []
            for _, test in skf:
                sizes.append(len(test))

            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
    """Shuffled KFold must really shuffle yet still cover every index."""
    # Check the indices are shuffled properly
    kf = KFold(3)
    kf2 = KFold(3, shuffle=True, random_state=0)
    kf3 = KFold(3, shuffle=True, random_state=1)

    X = np.ones(300)

    all_folds = np.zeros(300)
    for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
            kf.split(X), kf2.split(X), kf3.split(X)):
        for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
            # Assert that there is no complete overlap
            assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))

        # Set all test indices in successive iterations of kf2 to 1
        all_folds[te2] = 1

    # Check that all indices are returned in the different test folds
    assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
    """With a fixed random_state, repeated split() calls must agree; without
    one, they must differ."""
    # Check that when the shuffle is True multiple split calls produce the
    # same split when random_state is set
    X = np.ones(15)  # Divisible by 3
    y = [0] * 7 + [1] * 8
    X2 = np.ones(16)  # Not divisible by 3
    y2 = [0] * 8 + [1] * 8

    kf = KFold(3, shuffle=True, random_state=0)
    skf = StratifiedKFold(3, shuffle=True, random_state=0)

    for cv in (kf, skf):
        np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
        np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))

    kf = KFold(3, shuffle=True)
    skf = StratifiedKFold(3, shuffle=True)

    for cv in (kf, skf):
        for data in zip((X, X2), (y, y2)):
            # The try/except inverts the assertion: equal splits here mean
            # the splitter failed to reshuffle between calls.
            try:
                np.testing.assert_equal(list(cv.split(*data)),
                                        list(cv.split(*data)))
            except AssertionError:
                pass
            else:
                raise AssertionError("The splits for data, %s, are same even "
                                     "when random state is not set" % data)
def test_shuffle_stratifiedkfold():
    """Different seeds must yield different folds, with full coverage."""
    # Check that shuffling is happening when requested, and for proper
    # sample coverage
    X_40 = np.ones(40)
    y = [0] * 20 + [1] * 20
    kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
    kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
    for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
                                      kf1.split(X_40, y)):
        assert_not_equal(set(test0), set(test1))
    check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Shuffling hides sample dependency in digits; unshuffled (Stratified)
    KFold must not overestimate accuracy."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.93) than that the non
    # shuffling variant (around 0.81).

    X, y = digits.data[:600], digits.target[:600]
    model = SVC(C=10, gamma=0.005)

    n_splits = 3

    cv = KFold(n_splits=n_splits, shuffle=False)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.92, mean_score)
    assert_greater(mean_score, 0.80)

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:

    cv = KFold(n_splits, shuffle=True, random_state=0)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.92)

    cv = KFold(n_splits, shuffle=True, random_state=1)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.92)

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold

    cv = StratifiedKFold(n_splits)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.93, mean_score)
    assert_greater(mean_score, 0.80)
def test_shuffle_split():
    """Equivalent test_size specs (float, int, numpy int, native int types)
    must produce identical splits for the same random_state."""
    ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
    ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
    ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
    for typ in six.integer_types:
        ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
    for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
        assert_array_equal(t1[0], t2[0])
        assert_array_equal(t2[0], t3[0])
        assert_array_equal(t3[0], t4[0])
        assert_array_equal(t1[1], t2[1])
        assert_array_equal(t2[1], t3[1])
        assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    """Degenerate class counts or train/test sizes must raise ValueError."""
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.2).split(X, y))

    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 3, 2).split(X, y))

    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])

    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.6, 8).split(X, y))

    # Train size or test size too small
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(train_size=2).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
    """Every generated split has exactly the requested train/test sizes."""
    y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
    test_size = 5
    train_size = 10
    splitter = StratifiedShuffleSplit(6, test_size=test_size,
                                      train_size=train_size,
                                      random_state=0)
    for train, test in splitter.split(np.ones(len(y)), y):
        assert_equal(len(train), train_size)
        assert_equal(len(test), test_size)
def test_stratified_shuffle_split_iter():
    """Each split must keep class proportions, have disjoint train/test
    sets of the computed sizes, for a variety of label arrays."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50),
          np.concatenate([[i] * (100 + i) for i in range(11)]),
          [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
          ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'],
          ]

    for y in ys:
        sss = StratifiedShuffleSplit(6, test_size=0.33,
                                     random_state=0).split(np.ones(len(y)), y)
        y = np.asanyarray(y)  # To make it indexable for y[train]
        # this is how test-size is computed internally
        # in _validate_shuffle_split
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(len(train) + len(test), y.size)
            assert_equal(len(train), train_size)
            assert_equal(len(test), test_size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """Index selection frequencies must be consistent with uniform
    (binomial) draws across many splits."""
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_splits = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            prob = bf.pmf(count)
            assert_true(prob > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        groups = np.array((n_samples // 2) * [0, 1])
        splits = StratifiedShuffleSplit(n_splits=n_splits,
                                        test_size=1. / n_folds,
                                        random_state=0)

        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits_actual = 0
        for train, test in splits.split(X=np.ones(n_samples), y=groups):
            n_splits_actual += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits_actual, n_splits)

        n_train, n_test = _validate_shuffle_split(
            n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))

        # These checks use the train/test pair from the final iteration.
        assert_equal(len(train), n_train)
        assert_equal(len(test), n_test)
        assert_equal(len(set(train).intersection(test)), 0)

        group_counts = np.unique(groups)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(n_train + n_test, len(groups))
        assert_equal(len(group_counts), 2)
        ex_test_p = float(n_test) / n_samples
        ex_train_p = float(n_train) / n_samples

        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
    # Non-regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/6121
    # (the original bug report): train and test must never overlap.
    y = [0, 1, 2, 3] * 3 + [4, 5] * 5
    X = np.ones_like(y)

    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.5,
                                      random_state=0)
    train, test = next(iter(splitter.split(X=X, y=y)))

    assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    """A PredefinedSplit built from KFold's fold labels must reproduce
    exactly the splits KFold generated."""
    folds = -1 * np.ones(10)
    kf_train, kf_test = [], []
    for fold_idx, (train_ind, test_ind) in enumerate(
            KFold(5, shuffle=True).split(X)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        folds[test_ind] = fold_idx

    ps = PredefinedSplit(folds)
    # n_splits is simply the number of distinct fold labels
    assert_equal(len(np.unique(folds)), ps.get_n_splits())

    ps_train, ps_test = [], []
    for train_ind, test_ind in ps.split():
        ps_train.append(train_ind)
        ps_test.append(test_ind)

    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
    """GroupShuffleSplit: groups never straddle train/test, splits cover
    all samples, and group counts respect test_size (within rounding)."""
    for groups_i in test_groups:
        X = y = np.ones(len(groups_i))
        n_splits = 6
        test_size = 1./3
        slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)

        # Make sure the repr works
        repr(slo)

        # Test that the length is correct
        assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)

        l_unique = np.unique(groups_i)
        l = np.asarray(groups_i)  # group label per sample

        for train, test in slo.split(X, y, groups=groups_i):
            # First test: no train group is in the test set and vice versa
            l_train_unique = np.unique(l[train])
            l_test_unique = np.unique(l[test])
            assert_false(np.any(np.in1d(l[train], l_test_unique)))
            assert_false(np.any(np.in1d(l[test], l_train_unique)))

            # Second test: train and test add up to all the data
            assert_equal(l[train].size + l[test].size, l.size)

            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])

            # Fourth test:
            # unique train and test groups are correct, +- 1 for rounding error
            assert_true(abs(len(l_test_unique) -
                            round(test_size * len(l_unique))) <= 1)
            assert_true(abs(len(l_train_unique) -
                            round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_one_p_group_out():
    """LeaveOneGroupOut / LeavePGroupsOut must produce complete, disjoint
    splits that hold out exactly p groups per iteration."""
    logo = LeaveOneGroupOut()
    lpgo_1 = LeavePGroupsOut(n_groups=1)
    lpgo_2 = LeavePGroupsOut(n_groups=2)

    # Make sure the repr works
    assert_equal(repr(logo), 'LeaveOneGroupOut()')
    assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)')
    assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)')
    assert_equal(repr(LeavePGroupsOut(n_groups=3)),
                 'LeavePGroupsOut(n_groups=3)')

    for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1),
                                            (lpgo_2, 2))):
        for i, groups_i in enumerate(test_groups):
            n_groups = len(np.unique(groups_i))
            # C(n, 1) = n for one group out, C(n, 2) for two groups out
            n_splits = (n_groups if p_groups_out == 1
                        else n_groups * (n_groups - 1) / 2)
            X = y = np.ones(len(groups_i))

            # Test that the length is correct
            assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits)

            groups_arr = np.asarray(groups_i)

            # Split using the original list / array / list of string groups_i
            for train, test in cv.split(X, y, groups=groups_i):
                # First test: no train group is in the test set and vice versa
                assert_array_equal(np.intersect1d(groups_arr[train],
                                                  groups_arr[test]).tolist(),
                                   [])

                # Second test: train and test add up to all the data
                assert_equal(len(train) + len(test), len(groups_i))

                # Third test:
                # The number of groups in test must be equal to p_groups_out
                # BUG FIX: this previously used
                # assert_true(np.unique(...).shape[0], p_groups_out), which
                # treats the second argument as a *message* and passes for
                # any non-empty test set; assert_equal actually compares.
                assert_equal(np.unique(groups_arr[test]).shape[0],
                             p_groups_out)
def test_leave_group_out_changing_groups():
    # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
    # the groups variable is changed before calling split
    groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    X = np.ones(len(groups))
    # NOTE(review): groups_changing appears vestigial here -- both members
    # of each pair below are built from the *same* `groups` array. Because
    # split() is lazy, feeding it the mutated copy would change the splits,
    # so only the mutation of groups_changing exercises anything; confirm
    # this matches the upstream test's intent.
    groups_changing = np.array(groups, copy=True)
    lolo = LeaveOneGroupOut().split(X, groups=groups)
    lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
    lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
    lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
    groups_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)

    # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
    assert_equal(
        3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X,
                                                    groups=groups))
    # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
    assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X,
                                                    groups=groups))
def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
    """Both group splitters must raise informative errors when there are
    too few samples or too few unique groups."""
    # empty input: rejected by input validation, not by the group logic
    X = y = groups = np.ones(0)
    assert_raise_message(ValueError, "Found array with 0 sample(s)", next,
                         LeaveOneGroupOut().split(X, y, groups))
    X = y = groups = np.ones(1)
    # NOTE(review): "[ 1.]" is the legacy numpy array repr -- this message
    # comparison is fragile across numpy print-option changes.
    msg = ("The groups parameter contains fewer than 2 unique groups ([ 1.]). "
           "LeaveOneGroupOut expects at least 2.")
    assert_raise_message(ValueError, msg, next,
                         LeaveOneGroupOut().split(X, y, groups))
    X = y = groups = np.ones(1)
    msg = ("The groups parameter contains fewer than (or equal to) n_groups "
           "(3) numbers of unique groups ([ 1.]). LeavePGroupsOut expects "
           "that at least n_groups + 1 (4) unique groups be present")
    assert_raise_message(ValueError, msg, next,
                         LeavePGroupsOut(n_groups=3).split(X, y, groups))
    X = y = groups = np.arange(3)
    msg = ("The groups parameter contains fewer than (or equal to) n_groups "
           "(3) numbers of unique groups ([0 1 2]). LeavePGroupsOut expects "
           "that at least n_groups + 1 (4) unique groups be present")
    assert_raise_message(ValueError, msg, next,
                         LeavePGroupsOut(n_groups=3).split(X, y, groups))
def test_train_test_split_errors():
    """train_test_split must reject invalid or inconsistent size arguments."""
    assert_raises(ValueError, train_test_split)  # no arrays at all
    assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
    # fractions summing past 1.0
    assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, train_test_split, range(3),
                  test_size="wrong_type")
    # integer sizes exceeding n_samples
    assert_raises(ValueError, train_test_split, range(3), test_size=2,
                  train_size=4)
    assert_raises(TypeError, train_test_split, range(3),
                  some_argument=1.1)
    # arrays of mismatched first dimension
    assert_raises(ValueError, train_test_split, range(3), range(42))
def test_train_test_split():
    """End-to-end checks of train_test_split: alignment of arrays, list
    pass-through, nd-array support and the stratify option."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)

    # simple test
    split = train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y: row i of X starts with 10 * y[i]
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)

    # don't convert lists to anything else by default
    split = train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))

    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))

    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                       [2, 4, 2, 4, 6]):
        train, test = train_test_split(y, test_size=test_size,
                                       stratify=y,
                                       random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
@ignore_warnings
def test_train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
    # BUG FIX: renamed from `train_test_split_pandas` -- without the
    # `test_` prefix this function was never collected or run by pytest
    # (no other code references the old name).
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass

    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_sparse():
    # check that train_test_split converts scipy sparse matrices
    # to csr, as stated in the documentation
    # BUG FIX: renamed from `train_test_split_sparse` -- without the
    # `test_` prefix this function was never collected or run by pytest.
    X = np.arange(100).reshape((10, 10))
    sparse_types = [csr_matrix, csc_matrix, coo_matrix]
    for InputFeatureType in sparse_types:
        X_s = InputFeatureType(X)
        X_train, X_test = train_test_split(X_s)
        assert_true(isinstance(X_train, csr_matrix))
        assert_true(isinstance(X_test, csr_matrix))
def test_train_test_split_mock_pandas():
    # BUG FIX: renamed from `train_test_split_mock_pandas` -- without the
    # `test_` prefix this function was never collected or run by pytest.
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    # NOTE(review): this second split's outputs were never asserted on in
    # the original; kept as a smoke check.
    X_train_arr, X_test_arr = train_test_split(X_df)
def test_train_test_split_list_input():
    # Check that when y is a list / list of string labels, it works.
    # BUG FIX: renamed from `train_test_split_list_input` -- without the
    # `test_` prefix this function was never collected or run by pytest.
    X = np.ones(7)
    y1 = ['1'] * 4 + ['0'] * 3
    y2 = np.hstack((np.ones(4), np.zeros(3)))
    y3 = y2.tolist()

    for stratify in (True, False):
        X_train1, X_test1, y_train1, y_test1 = train_test_split(
            X, y1, stratify=y1 if stratify else None, random_state=0)
        X_train2, X_test2, y_train2, y_test2 = train_test_split(
            X, y2, stratify=y2 if stratify else None, random_state=0)
        X_train3, X_test3, y_train3, y_test3 = train_test_split(
            X, y3, stratify=y3 if stratify else None, random_state=0)

        # all three label representations must yield identical splits
        np.testing.assert_equal(X_train1, X_train2)
        np.testing.assert_equal(y_train2, y_train3)
        np.testing.assert_equal(X_test1, X_test3)
        np.testing.assert_equal(y_test3, y_test2)
def test_shufflesplit_errors():
    # When the {test|train}_size is a float/invalid, error is raised at init
    assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
    assert_raises(ValueError, ShuffleSplit, test_size=2.0)
    assert_raises(ValueError, ShuffleSplit, test_size=1.0)
    assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
    assert_raises(ValueError, ShuffleSplit, train_size=1j)

    # When the {test|train}_size is an int, validation is based on the input X
    # and happens at split(...)
    assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
    assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
    # sizes that together exceed n_samples
    assert_raises(ValueError, next, ShuffleSplit(test_size=8,
                                                 train_size=3).split(X))
def test_shufflesplit_reproducible():
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test when the random_state is given
    ss = ShuffleSplit(random_state=21)
    first_pass = [train for train, _ in ss.split(X)]
    second_pass = [train for train, _ in ss.split(X)]
    assert_array_equal(first_pass, second_pass)
def test_stratifiedshufflesplit_list_input():
    # Check that when y is a list / list of string labels, it works.
    # String-list, float-array and float-list labels must all produce
    # identical splits for a fixed random_state.
    sss = StratifiedShuffleSplit(test_size=2, random_state=42)
    X = np.ones(7)
    y1 = ['1'] * 4 + ['0'] * 3
    y2 = np.hstack((np.ones(4), np.zeros(3)))
    y3 = y2.tolist()

    reference = list(sss.split(X, y2))
    np.testing.assert_equal(list(sss.split(X, y1)), reference)
    np.testing.assert_equal(list(sss.split(X, y3)), reference)
def test_train_test_split_allow_nans():
    # Check that train_test_split allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: use floor division so the `repeats` argument stays an int
    # under Python 3 (true division yields a float, which np.repeat
    # rejects).
    y = np.repeat([0, 1], X.shape[0] // 2)
    train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
    """check_cv must pick KFold for regression/multilabel/multioutput
    targets, StratifiedKFold for classification targets, and wrap
    legacy cross_validation splitters with a split() method."""
    X = np.ones(9)
    cv = check_cv(3, classifier=False)
    # Use numpy.testing.assert_equal which recursively compares
    # lists of lists
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))

    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = check_cv(3, y_binary, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
                            list(cv.split(X, y_binary)))

    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = check_cv(3, y_multiclass, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
                            list(cv.split(X, y_multiclass)))

    X = np.ones(5)
    y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
                             [1, 1, 0, 1], [0, 0, 1, 0]])
    # multilabel targets cannot be stratified -> plain KFold
    cv = check_cv(3, y_multilabel, classifier=True)
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))

    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = check_cv(3, y_multioutput, classifier=True)
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))

    # Check if the old style classes are wrapped to have a split method
    X = np.ones(9)
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv1 = check_cv(3, y_multiclass, classifier=True)

    with warnings.catch_warnings(record=True):
        from sklearn.cross_validation import StratifiedKFold as OldSKF

    cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
    np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
                            list(cv2.split()))

    assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
    """_CVIterableWrapper must memoize an iterable of splits so that
    split() can be called repeatedly with consistent results."""
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])

    with warnings.catch_warnings(record=True):
        from sklearn.cross_validation import StratifiedKFold as OldSKF

    cv = OldSKF(y_multiclass, n_folds=3)
    wrapped_old_skf = _CVIterableWrapper(cv)

    # Check if split works correctly
    np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))

    # Check if get_n_splits works correctly
    assert_equal(len(cv), wrapped_old_skf.get_n_splits())

    kf_iter = KFold(n_splits=5).split(X, y)
    kf_iter_wrapped = check_cv(kf_iter)
    # Since the wrapped iterable is enlisted and stored,
    # split can be called any number of times to produce
    # consistent results.
    np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                            list(kf_iter_wrapped.split(X, y)))
    # If the splits are randomized, successive calls to split yields different
    # results
    kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y)
    kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
    np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)),
                            list(kf_randomized_iter_wrapped.split(X, y)))

    # the shuffled wrapper should (almost surely) disagree with the
    # unshuffled one; assert via a sentinel flag
    try:
        np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                                list(kf_randomized_iter_wrapped.split(X, y)))
        splits_are_equal = True
    except AssertionError:
        splits_are_equal = False
    assert_false(splits_are_equal, "If the splits are randomized, "
                 "successive calls to split should yield different results")
def test_group_kfold():
    """GroupKFold must balance fold sizes (within tolerance) while keeping
    every group inside exactly one fold."""
    rng = np.random.RandomState(0)

    # Parameters of the test
    n_groups = 15
    n_samples = 1000
    n_splits = 5

    X = y = np.ones(n_samples)

    # Construct the test data
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    groups = rng.randint(0, n_groups, n_samples)

    ideal_n_groups_per_fold = n_samples // n_splits
    # FIX: removed a stray no-op statement `len(np.unique(groups))` whose
    # result was discarded.

    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    lkf = GroupKFold(n_splits=n_splits)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))

    # Check that each group appears only in 1 fold
    for group in np.unique(groups):
        assert_equal(len(np.unique(folds[groups == group])), 1)

    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)

    # Construct the test data
    groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
                       'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
                       'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
                       'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
                       'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
                       'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
                       'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])

    n_groups = len(np.unique(groups))
    n_samples = len(groups)
    n_splits = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    ideal_n_groups_per_fold = n_samples // n_splits

    X = y = np.ones(n_samples)

    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))

    # Check that each group appears only in 1 fold
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        for group in np.unique(groups):
            assert_equal(len(np.unique(folds[groups == group])), 1)

    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)

    # groups can also be a list
    cv_iter = list(lkf.split(X, y, groups.tolist()))
    for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
                                                cv_iter):
        assert_array_equal(train1, train2)
        assert_array_equal(test1, test2)

    # Should fail if there are more folds than groups
    groups = np.array([1, 1, 1, 2, 2])
    X = y = np.ones(len(groups))
    assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
                         next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
    """TimeSeriesSplit must produce growing train windows followed by the
    next contiguous test window, preserving temporal order."""
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]

    # Should fail if there are more folds than samples
    assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
                         next,
                         TimeSeriesSplit(n_splits=7).split(X))

    tscv = TimeSeriesSplit(2)

    # Manually check that Time Series CV preserves the data
    # ordering on toy datasets
    splits = tscv.split(X[:-1])  # 6 samples -> 2 folds of 2 test samples
    train, test = next(splits)
    assert_array_equal(train, [0, 1])
    assert_array_equal(test, [2, 3])

    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2, 3])
    assert_array_equal(test, [4, 5])

    splits = TimeSeriesSplit(2).split(X)  # all 7 samples

    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2])
    assert_array_equal(test, [3, 4])

    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2, 3, 4])
    assert_array_equal(test, [5, 6])

    # Check get_n_splits returns the correct number of splits
    splits = TimeSeriesSplit(2).split(X)
    n_splits_actual = len(list(splits))
    assert_equal(n_splits_actual, tscv.get_n_splits())
    assert_equal(n_splits_actual, 2)
def test_nested_cv():
    # Test if nested cross validation works with different combinations of cv
    rng = np.random.RandomState(0)

    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 5, 15)

    cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
           StratifiedShuffleSplit(n_splits=3, random_state=0)]

    # smoke test: every (inner, outer) splitter pair must run end-to-end,
    # with groups forwarded to both levels via fit_params
    for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
        gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
                          cv=inner_cv)
        cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
                        fit_params={'groups': groups})
def test_build_repr():
    """_build_repr must render constructor parameters by name in sorted
    order, including defaulted ones."""
    class MockSplitter:
        def __init__(self, a, b=0, c=None):
            self.a = a
            self.b = b
            self.c = c

        def __repr__(self):
            return _build_repr(self)

    expected = "MockSplitter(a=5, b=6, c=None)"
    assert_equal(repr(MockSplitter(5, 6)), expected)
| bsd-3-clause |
huzq/scikit-learn | sklearn/tests/test_common.py | 3 | 7611 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import os
import warnings
import sys
import re
import pkgutil
from inspect import isgenerator
from functools import partial
import pytest
from sklearn.utils import all_estimators
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.estimator_checks import check_estimator
import sklearn
from sklearn.base import BiclusterMixin
from sklearn.linear_model._base import LinearClassifierMixin
from sklearn.linear_model import LogisticRegression
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import SkipTest
from sklearn.utils.estimator_checks import (
_construct_instance,
_set_checking_parameters,
_set_check_estimator_ids,
check_class_weight_balanced_linear_classifier,
parametrize_with_checks)
def test_all_estimator_no_base_class():
    # test that all_estimators doesn't find abstract classes.
    for name, _ in all_estimators():
        msg = ("Base estimators such as {0} should not be included"
               " in all_estimators").format(name)
        is_base = name.lower().startswith('base')
        assert not is_base, msg
def _sample_func(x, y=1):
pass
@pytest.mark.parametrize("val, expected", [
    (partial(_sample_func, y=1), "_sample_func(y=1)"),
    (_sample_func, "_sample_func"),
    (partial(_sample_func, 'world'), "_sample_func"),
    (LogisticRegression(C=2.0), "LogisticRegression(C=2.0)"),
    (LogisticRegression(random_state=1, solver='newton-cg',
                        class_weight='balanced', warm_start=True),
     "LogisticRegression(class_weight='balanced',random_state=1,"
     "solver='newton-cg',warm_start=True)")
])
def test_set_check_estimator_ids(val, expected):
    """_set_check_estimator_ids must build stable pytest ids: only
    explicitly-set (keyword) parameters appear, sorted by name."""
    assert _set_check_estimator_ids(val) == expected
def _tested_estimators():
    """Yield one constructed instance per testable estimator, skipping
    biclustering estimators and those that refuse construction."""
    for _, Estimator in all_estimators():
        if issubclass(Estimator, BiclusterMixin):
            continue
        try:
            instance = _construct_instance(Estimator)
        except SkipTest:
            continue
        yield instance
@parametrize_with_checks(list(_tested_estimators()))
def test_estimators(estimator, check, request):
    # Common tests for estimator instances
    # FIX: FutureWarning was listed twice in the ignored categories;
    # deduplicated (behaviour unchanged).
    with ignore_warnings(category=(FutureWarning,
                                   ConvergenceWarning,
                                   UserWarning)):
        _set_checking_parameters(estimator)
        check(estimator)
def test_check_estimator_generate_only():
    # With generate_only=True, check_estimator must return a lazy
    # generator of checks instead of executing them.
    checks = check_estimator(LogisticRegression(), generate_only=True)
    assert isgenerator(checks)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
# ignore deprecated open(.., 'U') in numpy distutils
def test_configure():
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in scikit-learn
    # This test requires Cython which is not necessarily there when running
    # the tests of an installed version of scikit-learn or when scikit-learn
    # is installed in editable mode by pip build isolation enabled.
    pytest.importorskip("Cython")
    cwd = os.getcwd()
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    if not os.path.exists(setup_filename):
        pytest.skip('setup.py not available')
    # XXX unreached code as of v0.22
    try:
        os.chdir(setup_path)
        old_argv = sys.argv
        # simulate `python setup.py config`
        sys.argv = ['setup.py', 'config']

        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            with open('setup.py') as f:
                exec(f.read(), dict(__name__='__main__'))
    finally:
        # always restore argv and the working directory
        sys.argv = old_argv
        os.chdir(cwd)
def _tested_linear_classifiers():
    """Yield (name, class) for every linear classifier that supports
    class_weight and can be constructed without required parameters."""
    classifiers = all_estimators(type_filter='classifier')

    with warnings.catch_warnings(record=True):
        for name, clazz in classifiers:
            required_parameters = getattr(clazz, "_required_parameters", [])
            if len(required_parameters):
                # FIXME
                continue

            if ('class_weight' in clazz().get_params().keys() and
                    issubclass(clazz, LinearClassifierMixin)):
                yield name, clazz
@pytest.mark.parametrize("name, Classifier",
                         _tested_linear_classifiers())
def test_class_weight_balanced_linear_classifiers(name, Classifier):
    """Run the class_weight='balanced' common check on each linear
    classifier discovered by _tested_linear_classifiers."""
    check_class_weight_balanced_linear_classifier(name, Classifier)
@ignore_warnings
def test_import_all_consistency():
    # Smoke test to check that any name in a __all__ list is actually defined
    # in the namespace of the module or package.
    pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                 onerror=lambda _: None)
    submods = [modname for _, modname, _ in pkgs]
    for modname in submods + ['sklearn']:
        if ".tests." in modname:
            continue
        # skip compiled extension modules unavailable on PyPy
        if IS_PYPY and ('_svmlight_format_io' in modname or
                        'feature_extraction._hashing_fast' in modname):
            continue
        package = __import__(modname, fromlist="dummy")
        for name in getattr(package, '__all__', ()):
            assert hasattr(package, name),\
                "Module '{0}' has no attribute '{1}'".format(modname, name)
def test_root_import_all_completeness():
    # Every public top-level sklearn submodule must be listed in
    # sklearn.__all__.
    EXCEPTIONS = ('utils', 'tests', 'base', 'setup', 'conftest')
    for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
                                               onerror=lambda _: None):
        if '.' in modname or modname.startswith('_'):
            continue
        if modname in EXCEPTIONS:
            continue
        assert modname in sklearn.__all__
def test_all_tests_are_importable():
    # Ensure that for each contentful subpackage, there is a test directory
    # within it that is also a subpackage (i.e. a directory with __init__.py)

    # packages exempt from the rule: externals, test dirs themselves,
    # and private subpackages
    HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
                                      \.externals(\.|$)|
                                      \.tests(\.|$)|
                                      \._
                                      ''')
    lookup = {name: ispkg
              for _, name, ispkg
              in pkgutil.walk_packages(sklearn.__path__, prefix='sklearn.')}
    missing_tests = [name for name, ispkg in lookup.items()
                     if ispkg
                     and not HAS_TESTS_EXCEPTIONS.search(name)
                     and name + '.tests' not in lookup]
    assert missing_tests == [], ('{0} do not have `tests` subpackages. '
                                 'Perhaps they require '
                                 '__init__.py or an add_subpackage directive '
                                 'in the parent '
                                 'setup.py'.format(missing_tests))
def test_class_support_removed():
    # Make sure passing classes to check_estimator or parametrize_with_checks
    # raises an error
    msg = "Passing a class was deprecated.* isn't supported anymore"

    for func, arg in ((check_estimator, LogisticRegression),
                      (parametrize_with_checks, [LogisticRegression])):
        with pytest.raises(TypeError, match=msg):
            func(arg)
| bsd-3-clause |
PapaCharlie/SteamyReviews | app/models/game.py | 1 | 18549 | from __future__ import print_function, division
import csv
import base64
import json
import logging
import numpy as np
import os
import requests
import re
import sys
import time
from . import Review
from app import app
from app.dynamodb import db, utils
from app.utils import data_file, mallet_file
from bs4 import BeautifulSoup
from boto3.dynamodb.conditions import Key, Attr
from decimal import Decimal
from datetime import datetime
from functools import partial
from itertools import islice, imap
from Levenshtein import distance
from sklearn.preprocessing import normalize as normalize_matrix
# Extracts the review count from Steam's "(1,234 reviews)" text.
reviews_re = re.compile(r"\(([0-9,]+) reviews?\)")

# Ordinal encoding of Steam's verbal userscore labels (higher = better).
userscore_to_digit = {
    "Overwhelmingly Positive": 8,
    "Very Positive": 7,
    "Positive": 6,
    "Mostly Positive": 5,
    "Mixed": 4,
    "Mostly Negative": 3,
    "Negative": 2,
    "Very Negative": 1,
    "Overwhelmingly Negative": 0
}
# Reverse lookup: ordinal digit -> verbal label. (Python 2 `iteritems`;
# the tuple unpacks as (label, digit) despite the `r, score` names.)
digit_to_userscore = {score: r for r,score in userscore_to_digit.iteritems()}

# Maximum number of feature axes rendered on the spider/radar chart.
MAX_SPIDER_FEATURES = app.config["MAX_SPIDER_FEATURES"]
class GameNotFoundException(Exception):
    """Raised when no game exists for the requested Steam app id."""

    def __init__(self, app_id):
        message = "Game %s does not exist!"%app_id
        super(GameNotFoundException, self).__init__(message)
class Game(object):
    """DynamoDB-backed model of a Steam game plus class-level similarity
    caches shared across all instances (populated by _load_caches)."""

    table_name = "apps"
    table = db.Table(table_name)
    hash_key = ("app_id", utils.NUMBER)  # Dynamo partition key (numeric)
    sorting_key = None  # the table has no sort key

    # Class-level caches, all filled in by _load_caches():
    __app_ids = None                 # row order of the feature matrix
    __app_id_to_index = None         # app_id -> matrix row index
    __compressed_matrix = None       # game x topic feature matrix
    __ranking = None                 # precomputed per-game similarity ranking
    __game_cache = None              # app_id -> Game instance
    __name_inverted_index = None     # normalized name -> app_id
    __dimensions = None              # feature (topic) names, column order
    __dimensions_inverted_index = None  # feature name -> column index
    @classmethod
    def _load_caches(cls):
        """Populate the class-level matrix, ranking and lookup caches.

        Must run once at startup before any query helper is used.
        """
        # cls.__app_ids, cls.__compressed_matrix = load_compressed_matrix()
        cls.__app_ids, cls.__compressed_matrix = load_mallet_matrix()
        # So we can pre-compute the ranking for every single game, since the compressed matrix is
        # static per instance. Saves us a couple cycles
        cls.__similarities = cls.__compressed_matrix.dot(cls.__compressed_matrix.T)
        cls.__ranking = np.array([np.argsort(row)[::-1] for row in cls.__similarities])
        cls.__app_id_to_index = {app_id: i for i, app_id in enumerate(cls.__app_ids)}
        # only cache games that actually have a row in the feature matrix
        cls.__game_cache = {game.app_id: game
                            for game in iter_all_games()
                            if game.app_id in cls.__app_id_to_index}
        cls.__name_inverted_index = {game.normalized_name: game.app_id
                                     for game in cls.__game_cache.itervalues()}
        cls.__dimensions = load_feature_names()
        cls.__dimensions_inverted_index = {dim: i for i, dim in enumerate(cls.__dimensions)}
@classmethod
def get_from_steamspy(cls, app_id):
res = requests.get("http://steamspy.com/api.php?request=appdetails&appid=%s"%app_id)
if not 200 <= res.status_code < 300:
raise GameNotFoundException(app_id)
else:
return cls.from_steampspy_json(res.json())
    @classmethod
    def from_steampspy_json(cls, game):
        """Build a Game from a raw SteamSpy appdetails payload.

        NOTE(review): "steampspy" is a typo for "steamspy", but the name
        is referenced by get_from_steamspy -- renaming needs both sites.
        """
        # We don"t use any of these guys so we have to delete them
        game.pop("owners_variance", None)
        game.pop("players_forever", None)
        game.pop("players_forever_variance", None)
        game.pop("players_2weeks", None)
        game.pop("players_2weeks_variance", None)
        game.pop("average_forever", None)
        game.pop("average_2weeks", None)
        game.pop("median_forever", None)
        game.pop("median_2weeks", None)
        game.pop("ccu", None)

        game["app_id"] = int(game.pop("appid"))
        game["price"] = float(game["price"] or 0) / 100  # price is in cents
        game["developer"] = game.get("developer", "") or ""
        game["publisher"] = game.get("publisher", "") or ""

        # normalize tag keys; SteamSpy returns [] instead of {} when empty
        if len(game["tags"]) > 0 and isinstance(game["tags"], dict):
            tags = {k.lower().strip(): v for k, v in game["tags"].iteritems()}
        else:
            tags = dict()
        game["tags"] = tags

        # we have to set the actual userscore and num_reviews to None because this API doesn"t
        # return those values
        game["userscore"] = None
        game["num_reviews"] = None

        # map empty-string rank to None; NOTE(review): the else branch is a
        # no-op reassignment kept for symmetry
        if game["score_rank"] == "":
            game["score_rank"] = None
        else:
            game["score_rank"] = game["score_rank"]

        game["last_updated"] = datetime.utcnow()
        return cls(**game)
@classmethod
def from_json(cls, game_json):
game_json["last_updated"] = datetime.utcfromtimestamp(int(game_json["last_updated"]))
return cls(**game_json)
    @classmethod
    def from_dynamo_json(cls, dynamo_json):
        """Build a Game from a Dynamo item, coercing Dynamo's Decimal and
        None-able fields back to native Python types."""
        dynamo_json["name"] = dynamo_json.get("name") or ""
        dynamo_json["normalized_name"] = dynamo_json.get("normalized_name") or ""
        dynamo_json["developer"] = dynamo_json.get("developer") or ""
        dynamo_json["publisher"] = dynamo_json.get("publisher") or ""
        dynamo_json["price"] = float(dynamo_json["price"])
        # tag votes come back as Decimals; coerce to int
        if dynamo_json["tags"] is not None and len(dynamo_json["tags"]) > 0:
            dynamo_json["tags"] = {k: int(v) for k, v in dynamo_json["tags"].iteritems()}
        else:
            dynamo_json["tags"] = dict()
        dynamo_json["last_updated"] = int(dynamo_json["last_updated"])
        # from_json converts last_updated to a datetime
        return cls.from_json(dynamo_json)
    @classmethod
    def batch_save(cls, games):
        """Persist an iterable of Game objects to Dynamo in batches."""
        return utils.batch_save(cls, games)
    @classmethod
    def find_by_name(cls, name):
        """Look up a game by exact normalized name.

        Returns the cached Game, or None (implicitly) when no name matches.
        """
        game = cls.__name_inverted_index.get(normalize(name))
        if game is not None:
            return cls.get(game)
    @classmethod
    def correct_game_name(cls, game_name, max_results=2):
        """Return the games whose names are closest to ``game_name`` by
        Levenshtein distance (best-first, up to ``max_results``)."""
        game_name = normalize(game_name)
        # NOTE(review): sorts the full name index on every call -- O(n log n)
        # per lookup; fine for small catalogs.
        matches = sorted(cls.__name_inverted_index.keys(), key=partial(distance, game_name))
        return [cls.get(cls.__name_inverted_index[match]) for match in matches[:max_results]]
    @classmethod
    def get(cls, to_get):
        """Fetch from the in-memory cache.

        ``to_get`` may be a single int app_id (returns a Game or None) or a
        non-empty set of app_ids (returns {app_id: Game}).
        """
        if isinstance(to_get, int):
            return cls.__game_cache.get(to_get)
        if not (isinstance(to_get, set) and len(to_get) > 0):
            raise ValueError("`to_get` must be an int or a non-empty set! (got %s)"%type(to_get))
        results = dict()
        for app_id in to_get:
            # this is a little funky, but it just standardizes how we get a single game, since
            # we can"t really to actual multi-gets from Dynamo.
            results[app_id] = cls.get(app_id)
        return results
@classmethod
def get_all(cls):
    """Iterate over every cached game (Python 2 lazy dict iterator)."""
    return cls.__game_cache.itervalues()
@classmethod
def get_from_dynamo(cls, to_get):
    """Fetch one game (int app id) or several (non-empty set of ids) from DynamoDB.

    Returns the raw query result for a single id, or a dict mapping each
    app id to its individual fetch for a set. Raises ValueError otherwise.
    """
    multi = isinstance(to_get, set) and len(to_get) > 0
    if not (multi or isinstance(to_get, int)):
        raise ValueError("`to_get` must be an int or a non-empty set! (got %s)"%type(to_get))
    if multi:
        results = dict()
        for app_id in to_get:
            # this is a little funky, but it just standardizes how we get a single game, since
            # we can"t really to actual multi-gets from Dynamo
            # BUG FIX: the original delegated to cls.get(), which reads the
            # in-memory cache instead of Dynamo, contradicting this method's
            # name and the comment above.
            results[app_id] = cls.get_from_dynamo(app_id)
        return results
    else:
        return cls.table.query(KeyConditionExpression=Key(cls.hash_key[0]).eq(to_get))
@classmethod
def get_all_from_dynamo(cls):
    """Lazily scan the whole Dynamo table, deserializing each item to a Game."""
    return imap(cls.from_dynamo_json, utils.table_scan(cls))
@classmethod
def get_unscored(cls):
    """Yield cached games whose userscore has not been determined yet.

    Mirrors get_unscored_from_dynamo(), which filters on
    Attr("userscore").eq(None) — so the in-memory variant must select
    games whose userscore is still None.
    """
    # BUG FIX: the original used `is not None`, which returned every
    # *scored* game despite the method's name and its Dynamo twin's filter.
    return (game for game in cls.__game_cache.itervalues() if game.userscore is None)
@classmethod
def get_unscored_from_dynamo(cls, limit=1000):
    """Scan Dynamo for games whose userscore is still unset, up to `limit` items."""
    attr_cond = Attr("userscore").eq(None)
    # islice caps the deserialized results; Limit caps each scan page.
    return imap(cls.from_dynamo_json,
                islice(utils.table_scan(cls, FilterExpression=attr_cond, Limit=limit), limit))

@classmethod
def get_feature_indices(cls, features):
    """Map feature names to their matrix column indices, silently dropping
    names that are not in the inverted index."""
    # FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # documented, behaviorally identical replacement.
    return np.array([cls.__dimensions_inverted_index[feature]
                     for feature in features
                     if feature in cls.__dimensions_inverted_index], dtype=int)
@classmethod
def compute_library_vector(cls, app_id_list, playtimes):
    """Aggregate a user's library into a single feature vector.

    Each owned game's vector is weighted by log(playtime + 1), so heavily
    played titles dominate; the sum is then normalized. Games absent from
    the feature matrix are skipped.
    """
    library_vector = np.zeros(Game.__compressed_matrix.shape[1])
    for app_id, pt in zip(app_id_list, playtimes):
        if app_id in Game.__app_id_to_index:
            library_vector += cls.__game_cache[app_id].vector() * np.log(pt + 1)
    library_vector = normalize_matrix(library_vector)[0]
    return library_vector

@classmethod
def compute_ranking_for_vector(cls, query_vector, removed_features, app_id=None):
    """Rank all games by dot-product similarity to `query_vector`.

    Indices in `removed_features` are zeroed before normalization; `app_id`
    (if given) is excluded so a game never ranks against itself.
    """
    new_vector = np.copy(query_vector)
    new_vector[removed_features] = 0
    new_vector = normalize_matrix(new_vector)[0]
    scores = cls.__compressed_matrix.dot(new_vector)
    return [(scores[index], cls.get(cls.__app_ids[index]))
            for index in np.argsort(scores)[::-1]
            if cls.__app_ids[index] != app_id]

@classmethod
def get_vector_best_features(cls, vector, json_format=False):
    """Return the top MAX_SPIDER_FEATURES (weights, names) of `vector`,
    optionally as a pair of JSON strings."""
    best_features = np.argsort(vector)[::-1][:MAX_SPIDER_FEATURES]
    # Re-sorted by index so the spider chart keeps a stable axis order.
    best_features.sort()
    if json_format:
        return (json.dumps(vector[best_features].tolist()),
                json.dumps(cls.__dimensions[best_features].tolist()))
    else:
        return vector[best_features], cls.__dimensions[best_features]
def __init__(self, app_id, name, developer, publisher, owners, userscore, num_reviews,
             score_rank, price, tags, last_updated, **kwargs):
    """Plain attribute assignment plus (when possible) feature-matrix hookup.

    `normalized_name` may be supplied via kwargs (e.g. when loading from
    storage); otherwise it is derived from `name`.
    """
    self.app_id = app_id
    self.name = name
    # this one is in the kwargs because it"s optional but depends on self.name
    self.normalized_name = kwargs.get("normalized_name") or normalize(self.name)
    self.normalized_name = self.normalized_name.encode("ascii")
    self.developer = developer
    self.publisher = publisher
    self.owners = owners
    self.userscore = userscore
    self.num_reviews = num_reviews
    self.score_rank = score_rank
    self.price = price
    self.tags = tags
    self.last_updated = last_updated
    # Games absent from the feature matrix stay usable but get no vector
    # or precomputed best-feature indices.
    if self.app_id not in Game.__app_id_to_index:
        return
    self.__app_index = Game.__app_id_to_index[self.app_id]
    self.__vector = Game.__compressed_matrix[self.__app_index]
    self.__best_feature_indices = np.argsort(self.vector())[::-1][:MAX_SPIDER_FEATURES]
    # This is just so that the spider chart doesn't look so *regular*
    self.__best_feature_indices.sort()
def __repr__(self):
    """Debug form: app id plus the normalized title."""
    return "Game(app_id=%d,name=%s)"%(self.app_id, self.normalized_name)

def __str__(self):
    # str and repr are deliberately identical.
    return repr(self)

def vector(self):
    """Return this game's row of the compressed feature matrix.

    Raises GameNotFoundException when the game has no matrix entry.
    """
    if self.app_id in Game.__app_id_to_index:
        return self.__vector
    raise GameNotFoundException(self.app_id)

def vector_parsable(self):
    """Comma-separated string form of the feature vector."""
    return ",".join(str(component) for component in self.vector())

def steam_url(self):
    """Steam store page for this game."""
    return "http://store.steampowered.com/app/{}".format(self.app_id)

def steam_image_url(self):
    """Steam CDN header image for this game."""
    return "http://cdn.akamai.steamstatic.com/steam/apps/{}/header.jpg".format(self.app_id)
def tags_json(self, just_keys=False, encoded=False):
    """Serialize this game's tags to JSON.

    With just_keys=True only the tag names are emitted, ordered by vote
    count (most popular first); otherwise the full {tag: votes} dict is
    dumped. With encoded=True the JSON is additionally base64-encoded.
    """
    if just_keys:
        payload = json.dumps(sorted(self.tags.keys(), key=self.tags.get, reverse=True))
    else:
        payload = json.dumps(self.tags)
    if encoded:
        return base64.b64encode(payload)
    return payload
def get_ranking(self, library_vector, removed_features, bias_weight=0.3):
    """Rank other games by similarity to this one.

    With no library bias and no removed features, the precomputed global
    ranking is reused; otherwise the library vector is blended in (scaled
    by `bias_weight`) and the ranking is computed on the fly.

    Raises GameNotFoundException when this game has no feature vector.
    """
    if self.app_id not in Game.__app_id_to_index:
        raise GameNotFoundException(self.app_id)
    if library_vector is None and len(removed_features) == 0:
        # Fast path: precomputed similarity ranking, minus this game itself.
        ranking = Game.__ranking[self.__app_index]
        scores = Game.__similarities[self.__app_index, ranking]
        return [(score, Game.get(Game.__app_ids[app_index]))
                for score, app_index in zip(scores, ranking)
                if app_index != self.__app_index]
    else:
        new_vector = self.vector().copy()
        if library_vector is not None:
            new_vector += library_vector * bias_weight
        return Game.compute_ranking_for_vector(new_vector,
                                               removed_features=removed_features,
                                               app_id=self.app_id)
def best_features(self, json_format=False):
    """This game's weights at its own top feature indices."""
    features = self.__vector[self.__best_feature_indices]
    if json_format:
        return json.dumps(features.tolist())
    else:
        return features

def best_feature_names(self, json_format=False):
    """Names of this game's top features (same order as best_features)."""
    dimensions = Game.__dimensions[self.__best_feature_indices]
    if json_format:
        return json.dumps(dimensions.tolist())
    else:
        return dimensions

def intersect_features(self, other_game, json_format=False):
    """This game's weights sampled at *another* game's top feature indices."""
    features = self.__vector[other_game.__best_feature_indices]
    if json_format:
        return json.dumps(features.tolist())
    else:
        return features

def compare_features(self, library_vector, json_format=False):
    """This game's weights at the library vector's top feature indices."""
    features = self.__vector[np.argsort(library_vector)[::-1][:MAX_SPIDER_FEATURES]]
    if json_format:
        return json.dumps(features.tolist())
    else:
        return features
def to_json(self):
    """Plain-dict snapshot of this game, suitable for JSON serialization.

    `tags` becomes None when empty; `last_updated` is flattened to an
    integer local-epoch timestamp.
    """
    tag_dict = self.tags if len(self.tags) > 0 else None
    timestamp = int(time.mktime(self.last_updated.timetuple()))
    return dict(
        app_id=self.app_id,
        name=self.name,
        normalized_name=self.normalized_name,
        developer=self.developer,
        publisher=self.publisher,
        owners=self.owners,
        userscore=self.userscore,
        num_reviews=self.num_reviews,
        score_rank=self.score_rank,
        price=self.price,
        tags=tag_dict,
        last_updated=timestamp,
    )
def to_dynamo_json(self):
    """JSON dict adjusted for DynamoDB: Decimal price, empty strings -> None."""
    dynamo_json = self.to_json()
    # Dynamo requires Decimal for numbers (routed through str to avoid float
    # representation artifacts) and rejects empty-string attributes.
    dynamo_json["price"] = Decimal(str(self.price))
    dynamo_json["name"] = self.name or None
    dynamo_json["normalized_name"] = self.normalized_name or None
    dynamo_json["developer"] = self.developer or None
    dynamo_json["publisher"] = self.publisher or None
    return dynamo_json
def save(self):
    """Write this game to the Dynamo table."""
    Game.table.put_item(Item=self.to_dynamo_json())

def fetch_more_reviews(self, limit=1000, save=False):
    """Pull up to `limit` reviews for this game from Steam, optionally
    persisting them before returning."""
    reviews = Review.get_reviews_from_steam(self.app_id, max_reviews=limit)
    if save:
        Review.batch_save(reviews)
    return reviews

def get_saved_reviews(self, key_condition, filter_expression, max_items):
    """Query stored reviews for this game, AND-ing any extra key condition."""
    primary_condition = Key(Review.hash_key[0]).eq(self.app_id)
    if key_condition is not None:
        primary_condition = primary_condition & key_condition
    return Review.get(primary_condition, filter_expression, max_items)

def get_recent_reviews(self, max_reviews=100):
    """Convenience wrapper: stored reviews with no extra conditions."""
    return self.get_saved_reviews(None, None, max_reviews)
def update_and_save(self):
    """Refresh SteamSpy data and the scraped userscore, then persist."""
    self.update_steamspy_attributes()
    self.update_userscore()
    self.last_updated = datetime.utcnow()
    self.save()

def update_steamspy_attributes(self):
    """Re-fetch this game from SteamSpy and copy its fields over ours."""
    new_game = Game.get_from_steamspy(self.app_id)
    self.name = new_game.name
    self.developer = new_game.developer
    self.publisher = new_game.publisher
    self.owners = new_game.owners
    self.userscore = new_game.userscore
    self.num_reviews = new_game.num_reviews
    self.score_rank = new_game.score_rank
    self.price = new_game.price
    self.tags = new_game.tags
def update_userscore(self):
    """Scrape the Steam store page for the 'overall' review summary.

    On success, sets userscore and num_reviews from the page. On failure
    both are set to -2 so unscorable games (e.g. unreleased) are not
    retried automatically.
    """
    page = requests.get("http://store.steampowered.com/app/%s"%self.app_id)
    soup = BeautifulSoup(page.text, "lxml")
    summary_section = soup.find_all("div", class_="summary_section")
    for sec in summary_section:
        # Each summary section carries exactly three text chunks.
        title, score, num_reviews = sec.stripped_strings
        if "overall" in title.lower():
            matches = reviews_re.match(num_reviews)
            if score in userscore_to_digit and matches is not None:
                self.userscore = userscore_to_digit[score]
                # Review count arrives as e.g. "1,234"; strip the commas.
                num_reviews, = matches.groups()
                self.num_reviews = int(num_reviews.replace(",", ""))
                print("Succesfully updated userscore for", self.app_id)
                return
    # This is just so that we don"t retry any games that can"t be scored (maybe because they
    # haven"t come out yet) automatically.
    print("Could not update userscore for", self.app_id)
    self.userscore = -2
    self.num_reviews = -2
# On-disk cache of the full SteamSpy dump so repeat runs skip the network.
STEAMSPY_GAMES_JSON = data_file("steamspy_games.json")

def iter_all_games():
    """Yield a Game for every entry in the SteamSpy 'all' dump (cached on disk)."""
    if os.path.exists(STEAMSPY_GAMES_JSON):
        with open(STEAMSPY_GAMES_JSON) as f:
            games_json = json.load(f)
    else:
        games_json = requests.get("http://steamspy.com/api.php?request=all").json()
        with open(STEAMSPY_GAMES_JSON, "w") as f:
            json.dump(games_json, f, default=lambda o: o.__dict__, indent=2)
    for app_id, game in games_json.iteritems():
        # 999999 is SteamSpy's sentinel pseudo-app; skip it.
        if app_id == "999999":
            continue
        # NOTE(review): "from_steampspy_json" spelling must match the actual
        # classmethod name on Game -- verify against the class definition.
        yield Game.from_steampspy_json(game)
def normalize(game_name):
    """Canonicalize a game title for index lookups.

    Lower-cases the title, drops any non-ASCII characters, and trims
    surrounding whitespace; the result is an ASCII byte string.
    """
    lowered = game_name.lower()
    ascii_only = lowered.encode("ascii", "ignore")
    return ascii_only.strip()
def save_compressed_matrix(app_ids,
                           compressed_matrix,
                           filename=data_file("compressed_matrix.npy")):
    """Persist app ids and their feature vectors as one array.

    Column 0 holds the app id; the remaining columns are the vector.
    """
    with open(filename, "wb") as f:
        np.save(f, np.column_stack((app_ids, compressed_matrix)))
def load_compressed_matrix(filename=data_file("compressed_matrix.npy")):
    """Load (app_ids, matrix) saved by save_compressed_matrix().

    Column 0 of the stored array is the integer app id; the rest is the
    feature matrix.
    """
    with open(filename, "rb") as f:
        arr = np.load(f)
    # FIX: np.int was removed in NumPy 1.24; builtin int is the documented,
    # behaviorally identical replacement.
    return arr[:, 0].astype(int), arr[:, 1:]
def load_mallet_matrix(filename=mallet_file("40_features", "doc_matrix.tsv")):
    """Load a MALLET doc-topic TSV into (app_ids, normalized matrix).

    Column 1 of each row is a path whose final component is the app id;
    columns 2+ are the topic weights.
    """
    app_ids = list()
    vectors = list()
    with open(filename, "r") as f:
        reader = csv.reader(f, delimiter="\t")
        for line in reader:
            # FIX: np.int / np.float aliases were removed in NumPy 1.24.
            app_ids.append(int(line[1].split("/")[-1]))
            # FIX: under Python 3, np.array(map(...)) wraps the lazy map
            # object in a 0-d object array; build a real float list instead.
            vectors.append(np.array([float(value) for value in line[2:]]))
    return np.array(app_ids), normalize_matrix(np.array(vectors))
def load_feature_names(filename=mallet_file("40_features", "feature_names.csv")):
    """Read one feature name per line (binary mode) into a numpy array.

    Names are stripped of surrounding whitespace/newlines.
    """
    with open(filename, "rb") as f:
        names = [line.strip() for line in f if len(line) > 0]
    return np.array(names)
| mit |
MadManRises/Madgine | shared/bullet3-2.89/examples/pybullet/examples/projective_texture.py | 2 | 1415 | import pybullet as p
from time import sleep
import matplotlib.pyplot as plt
import numpy as np

# GUI connection is required so getDebugVisualizerCamera() below returns
# live view/projection matrices.
physicsClient = p.connect(p.GUI)
p.setGravity(0, 0, 0)

# A ground plane and a large teddy bear, both upright at the origin area.
bearStartPos1 = [-3.3, 0, 0]
bearStartOrientation1 = p.getQuaternionFromEuler([0, 0, 0])
bearId1 = p.loadURDF("plane.urdf", bearStartPos1, bearStartOrientation1)
bearStartPos2 = [0, 0, 0]
bearStartOrientation2 = p.getQuaternionFromEuler([0, 0, 0])
bearId2 = p.loadURDF("teddy_large.urdf", bearStartPos2, bearStartOrientation2)
textureId = p.loadTexture("checker_grid.jpg")
#p.changeVisualShape(objectUniqueId=0, linkIndex=-1, textureUniqueId=textureId)
#p.changeVisualShape(objectUniqueId=1, linkIndex=-1, textureUniqueId=textureId)

useRealTimeSimulation = 1

if (useRealTimeSimulation):
    p.setRealTimeSimulation(1)

# Render forever, projecting the texture from the debug camera's viewpoint.
while 1:
    if (useRealTimeSimulation):
        camera = p.getDebugVisualizerCamera()
        viewMat = camera[2]
        projMat = camera[3]
        #An example of setting the view matrix for the projective texture.
        #viewMat = p.computeViewMatrix(cameraEyePosition=[7,0,0], cameraTargetPosition=[0,0,0], cameraUpVector=[0,0,1])
        p.getCameraImage(300,
                         300,
                         renderer=p.ER_BULLET_HARDWARE_OPENGL,
                         flags=p.ER_USE_PROJECTIVE_TEXTURE,
                         projectiveTextureView=viewMat,
                         projectiveTextureProj=projMat)
        p.setGravity(0, 0, 0)
    else:
        p.stepSimulation()
| mit |
lin-credible/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score

np.random.seed(42)


def sparsity_ratio(X):
    """Fraction of non-zero entries relative to the full n_samples x n_features grid."""
    return np.count_nonzero(X) / float(n_samples * n_features)

n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
# NOTE(review): zeroing rows from index int(n_features / 1.2) == 250 onward
# looks like it was meant to use n_samples; it still yields a sparse X, so
# the original behavior is kept.
X[inds[int(n_features / 1.2):]] = 0  # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
# FIX: floor division -- a float index raises TypeError under Python 3.
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)

# add noise
# FIX: the original passed (n_samples,) as `loc`, which draws ONE sample with
# mean ~n_samples and adds that constant everywhere; `size=` gives the
# intended per-sample noise.
y += 0.01 * np.random.normal(size=(n_samples,))

# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))

###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))


def benchmark_dense_predict():
    """Time repeated predictions with dense coefficients (profiled via kernprof)."""
    for _ in range(300):
        clf.predict(X_test)


def benchmark_sparse_predict():
    """Time repeated predictions on a sparse test matrix."""
    X_test_sparse = csr_matrix(X_test)
    for _ in range(300):
        clf.predict(X_test_sparse)


def score(y_test, y_pred, case):
    """Print r^2 for the given prediction variant."""
    r2 = r2_score(y_test, y_pred)
    print("r^2 on test data (%s) : %f" % (case, r2))

score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
aparna29/Implementation-of-Random-Exponential-Marking-REM-in-ns-3 | src/flow-monitor/examples/wifi-olsr-flowmon.py | 59 | 7427 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
# Grid spacing between nodes and default grid side length (total nodes = side^2).
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
    """Build an ad-hoc wifi + OLSR grid, run UDP traffic, report FlowMonitor stats.

    Command-line options: NumNodesSide (grid side), Results (XML output
    file), Plot (matplotlib delay histogram). Python 2 / ns-3 bindings.
    """
    cmd = ns.core.CommandLine()
    cmd.NumNodesSide = None
    cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
    cmd.Results = None
    cmd.AddValue("Results", "Write XML results to file")
    cmd.Plot = None
    cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
    cmd.Parse(argv)

    # Ad-hoc wifi devices with ARF rate control on the default YANS channel.
    wifi = ns.wifi.WifiHelper.Default()
    wifiMac = ns.wifi.WifiMacHelper()
    wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
    wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
    wifiPhy.SetChannel(wifiChannel.Create())
    ssid = ns.wifi.Ssid("wifi-default")
    wifi.SetRemoteStationManager("ns3::ArfWifiManager")
    wifiMac.SetType ("ns3::AdhocWifiMac",
                     "Ssid", ns.wifi.SsidValue(ssid))

    # OLSR (priority 100) layered over static routing (priority 0).
    internet = ns.internet.InternetStackHelper()
    list_routing = ns.internet.Ipv4ListRoutingHelper()
    olsr_routing = ns.olsr.OlsrHelper()
    static_routing = ns.internet.Ipv4StaticRoutingHelper()
    list_routing.Add(static_routing, 0)
    list_routing.Add(olsr_routing, 100)
    internet.SetRoutingHelper(list_routing)

    ipv4Addresses = ns.internet.Ipv4AddressHelper()
    ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))

    port = 9   # Discard port(RFC 863)
    # Constant-bit-rate UDP source (always on, 100 kbps).
    onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
                                              ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
    onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
    onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
    onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))

    addresses = []
    nodes = []

    if cmd.NumNodesSide is None:
        num_nodes_side = NUM_NODES_SIDE
    else:
        num_nodes_side = int(cmd.NumNodesSide)

    # Lay the nodes out on a num_nodes_side x num_nodes_side grid, DISTANCE apart.
    for xi in range(num_nodes_side):
        for yi in range(num_nodes_side):
            node = ns.network.Node()
            nodes.append(node)
            internet.Install(ns.network.NodeContainer(node))
            mobility = ns.mobility.ConstantPositionMobilityModel()
            mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
            node.AggregateObject(mobility)
            devices = wifi.Install(wifiPhy, wifiMac, node)
            ipv4_interfaces = ipv4Addresses.Assign(devices)
            addresses.append(ipv4_interfaces.GetAddress(0))

    # Each node streams to its "mirror" node on the other end of the list;
    # starts are staggered randomly in [20, 30) s to avoid synchronization.
    for i, node in enumerate(nodes):
        destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
        #print i, destaddr
        onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
        app = onOffHelper.Install(ns.network.NodeContainer(node))
        urv = ns.core.UniformRandomVariable()
        app.Start(ns.core.Seconds(urv.GetValue(20, 30)))

    #internet.EnablePcapAll("wifi-olsr")
    flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
    #flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
    monitor = flowmon_helper.InstallAll()
    monitor = flowmon_helper.GetMonitor()
    monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))

    ns.core.Simulator.Stop(ns.core.Seconds(44.0))
    ns.core.Simulator.Run()

    def print_stats(os, st):
        # Dump one flow's FlowMonitor statistics to the stream `os`.
        print >> os, "  Tx Bytes: ", st.txBytes
        print >> os, "  Rx Bytes: ", st.rxBytes
        print >> os, "  Tx Packets: ", st.txPackets
        print >> os, "  Rx Packets: ", st.rxPackets
        print >> os, "  Lost Packets: ", st.lostPackets
        if st.rxPackets > 0:
            print >> os, "  Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
            print >> os, "  Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
            print >> os, "  Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1

        # Histograms are disabled by default; flip `if 0` to inspect them.
        if 0:
            print >> os, "Delay Histogram"
            for i in range(st.delayHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
                  st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
            print >> os, "Jitter Histogram"
            for i in range(st.jitterHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
                  st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
            print >> os, "PacketSize Histogram"
            for i in range(st.packetSizeHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
                  st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)

        for reason, drops in enumerate(st.packetsDropped):
            print "  Packets dropped by reason %i: %i" % (reason, drops)
        #for reason, drops in enumerate(st.bytesDropped):
        #    print "Bytes dropped by reason %i: %i" % (reason, drops)

    monitor.CheckForLostPackets()
    classifier = flowmon_helper.GetClassifier()

    if cmd.Results is None:
        # Print per-flow stats to stdout.
        for flow_id, flow_stats in monitor.GetFlowStats():
            t = classifier.FindFlow(flow_id)
            proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
            print "FlowID: %i (%s %s/%s --> %s/%i)" % \
                (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
            print_stats(sys.stdout, flow_stats)
    else:
        print monitor.SerializeToXmlFile(cmd.Results, True, True)

    if cmd.Plot is not None:
        import pylab
        delays = []
        for flow_id, flow_stats in monitor.GetFlowStats():
            tupl = classifier.FindFlow(flow_id)
            # Skip OLSR control traffic (UDP port 698).
            if tupl.protocol == 17 and tupl.sourcePort == 698:
                continue
            delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
        pylab.hist(delays, 20)
        pylab.xlabel("Delay (s)")
        pylab.ylabel("Number of Flows")
        pylab.show()

    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
| gpl-2.0 |
pianomania/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
    """Shared checks: proba rows sum to 1 and log_proba == log(proba), per output."""
    proba = clf.predict_proba(X)
    # We know that we can have division by zero
    log_proba = clf.predict_log_proba(X)

    y = np.atleast_1d(y)
    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))

    n_outputs = y.shape[1]
    n_samples = len(X)

    if n_outputs == 1:
        # Normalize the single-output case to the multi-output list shape.
        proba = [proba]
        log_proba = [log_proba]

    for k in range(n_outputs):
        assert_equal(proba[k].shape[0], n_samples)
        assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
        assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
        # We know that we can have division by zero
        assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
    """Predictions must keep the target's shape for both 1d and 2d y."""
    # 1d case
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([1, 2, 1, 1])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)

    # 2d case
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)


def _check_behavior_2d_for_constant(clf):
    """Shape check with a wide 2d target only (constant strategy needs one
    constant per output column)."""
    # 2d case only
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([[1, 0, 5, 4, 3],
                  [2, 0, 1, 2, 5],
                  [1, 0, 4, 5, 2],
                  [1, 3, 3, 2, 0]])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
                              y_test, y_pred_test):
    """Every prediction (train and test) must equal the broadcast `statistic`."""
    assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
                       y_pred_learn)
    assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
                       y_pred_test)
def test_most_frequent_and_prior_strategy():
    """Both strategies predict the majority class; they differ only in proba."""
    X = [[0], [0], [0], [0]]  # ignored
    y = [1, 2, 1, 1]

    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), np.ones(len(X)))
        _check_predict_proba(clf, X, y)

        if strategy == "prior":
            # 'prior' exposes the class distribution itself...
            assert_array_equal(clf.predict_proba([X[0]]),
                               clf.class_prior_.reshape((1, -1)))
        else:
            # ...while 'most_frequent' thresholds it into a one-hot vector.
            assert_array_equal(clf.predict_proba([X[0]]),
                               clf.class_prior_.reshape((1, -1)) > 0.5)


def test_most_frequent_and_prior_strategy_multioutput():
    """Majority class is computed independently per output column."""
    X = [[0], [0], [0], [0]]  # ignored
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]])

    n_samples = len(X)

    for strategy in ("prior", "most_frequent"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X),
                           np.hstack([np.ones((n_samples, 1)),
                                      np.zeros((n_samples, 1))]))
        _check_predict_proba(clf, X, y)
        _check_behavior_2d(clf)
def test_stratified_strategy():
    """'stratified' samples predictions matching the empirical class frequencies."""
    X = [[0]] * 5  # ignored
    y = [1, 2, 1, 1, 2]
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    y_pred = clf.predict(X)
    # With 500 samples the empirical frequencies match to one decimal.
    p = np.bincount(y_pred) / float(len(X))
    assert_almost_equal(p[1], 3. / 5, decimal=1)
    assert_almost_equal(p[2], 2. / 5, decimal=1)
    _check_predict_proba(clf, X, y)


def test_stratified_strategy_multioutput():
    """Per-column class frequencies are preserved for 2d targets."""
    X = [[0]] * 5  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 1],
                  [1, 2],
                  [1, 1]])

    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    y_pred = clf.predict(X)

    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 3. / 5, decimal=1)
        assert_almost_equal(p[2], 2. / 5, decimal=1)
        _check_predict_proba(clf, X, y)

    _check_behavior_2d(clf)
def test_uniform_strategy():
    """'uniform' predicts each class with equal probability."""
    X = [[0]] * 4  # ignored
    y = [1, 2, 1, 1]
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    y_pred = clf.predict(X)
    p = np.bincount(y_pred) / float(len(X))
    assert_almost_equal(p[1], 0.5, decimal=1)
    assert_almost_equal(p[2], 0.5, decimal=1)
    _check_predict_proba(clf, X, y)


def test_uniform_strategy_multioutput():
    """Uniform sampling applies independently to each output column."""
    X = [[0]] * 4  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 2],
                  [1, 1]])
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    y_pred = clf.predict(X)

    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 0.5, decimal=1)
        assert_almost_equal(p[2], 0.5, decimal=1)
        _check_predict_proba(clf, X, y)

    _check_behavior_2d(clf)
def test_string_labels():
    """String class labels work unchanged with the classifier strategies."""
    X = [[0]] * 5
    y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
    clf = DummyClassifier(strategy="most_frequent")
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), ["paris"] * 5)


def test_classifier_exceptions():
    """Unknown strategy fails at fit; predicting before fit also fails."""
    clf = DummyClassifier(strategy="unknown")
    assert_raises(ValueError, clf.fit, [], [])

    assert_raises(ValueError, clf.predict, [])
    assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
    """The default regressor strategy predicts the training mean everywhere."""
    random_state = np.random.RandomState(seed=1)

    X = [[0]] * 4  # ignored
    y = random_state.randn(4)

    reg = DummyRegressor()
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))


def test_mean_strategy_multioutput_regressor():
    """The mean is computed per output column for 2d targets."""
    random_state = np.random.RandomState(seed=1)

    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)

    mean = np.mean(y_learn, axis=0).reshape((1, -1))

    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor()
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)


def test_regressor_exceptions():
    """Predicting before fit must raise."""
    reg = DummyRegressor()
    assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
    """'median' predicts the training median everywhere."""
    random_state = np.random.RandomState(seed=1)

    X = [[0]] * 5  # ignored
    y = random_state.randn(5)

    reg = DummyRegressor(strategy="median")
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.median(y)] * len(X))


def test_median_strategy_multioutput_regressor():
    """The median is computed per output column for 2d targets."""
    random_state = np.random.RandomState(seed=1)

    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)

    median = np.median(y_learn, axis=0).reshape((1, -1))

    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor(strategy="median")
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(
        median, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
def test_quantile_strategy_regressor():
    """quantile=0/0.5/1 reduce to min/median/max; others match np.percentile."""
    random_state = np.random.RandomState(seed=1)

    X = [[0]] * 5  # ignored
    y = random_state.randn(5)

    reg = DummyRegressor(strategy="quantile", quantile=0.5)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.median(y)] * len(X))

    reg = DummyRegressor(strategy="quantile", quantile=0)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.min(y)] * len(X))

    reg = DummyRegressor(strategy="quantile", quantile=1)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.max(y)] * len(X))

    reg = DummyRegressor(strategy="quantile", quantile=0.3)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))


def test_quantile_strategy_multioutput_regressor():
    """Quantiles are computed per output column for 2d targets."""
    random_state = np.random.RandomState(seed=1)

    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)

    median = np.median(y_learn, axis=0).reshape((1, -1))
    quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))

    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.5)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(
        median, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)

    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.8)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(
        quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)


def test_quantile_invalid():
    """Missing, non-scalar, out-of-range, or non-numeric quantiles fail at fit."""
    X = [[0]] * 5  # ignored
    y = [0] * 5  # ignored

    est = DummyRegressor(strategy="quantile")
    assert_raises(ValueError, est.fit, X, y)

    est = DummyRegressor(strategy="quantile", quantile=None)
    assert_raises(ValueError, est.fit, X, y)

    est = DummyRegressor(strategy="quantile", quantile=[0])
    assert_raises(ValueError, est.fit, X, y)

    est = DummyRegressor(strategy="quantile", quantile=-0.1)
    assert_raises(ValueError, est.fit, X, y)

    est = DummyRegressor(strategy="quantile", quantile=1.1)
    assert_raises(ValueError, est.fit, X, y)

    est = DummyRegressor(strategy="quantile", quantile='abc')
    assert_raises(TypeError, est.fit, X, y)


def test_quantile_strategy_empty_train():
    """Quantile of an empty training set is undefined and must raise."""
    est = DummyRegressor(strategy="quantile", quantile=0.4)
    assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
    """'constant' ignores the data and predicts the given value (scalar or 1-list)."""
    random_state = np.random.RandomState(seed=1)

    X = [[0]] * 5  # ignored
    y = random_state.randn(5)

    reg = DummyRegressor(strategy="constant", constant=[43])
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [43] * len(X))

    reg = DummyRegressor(strategy="constant", constant=43)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [43] * len(X))


def test_constant_strategy_multioutput_regressor():
    """One constant per output column is broadcast over all samples."""
    random_state = np.random.RandomState(seed=1)

    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)

    # test with 2d array
    constants = random_state.randn(5)

    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor(strategy="constant", constant=constants)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(
        constants, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d_for_constant(est)


def test_y_mean_attribute_regressor():
    """After fit with strategy='mean', constant_ holds the training mean."""
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    # when strategy = 'mean'
    est = DummyRegressor(strategy='mean')
    est.fit(X, y)

    assert_equal(est.constant_, np.mean(y))


def test_unknown_strategey_regressor():
    # NOTE(review): "strategey" is a typo, but the name is kept so test IDs
    # stay stable for existing CI configurations.
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]

    est = DummyRegressor(strategy='gona')
    assert_raises(ValueError, est.fit, X, y)


def test_constants_not_specified_regressor():
    """strategy='constant' without a constant must raise."""
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]

    est = DummyRegressor(strategy='constant')
    assert_raises(TypeError, est.fit, X, y)


def test_constant_size_multioutput_regressor():
    """A constant whose length mismatches the output count must raise."""
    random_state = np.random.RandomState(seed=1)
    X = random_state.randn(10, 10)
    y = random_state.randn(10, 5)

    est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
    assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
    """Constant strategy broadcasts a per-output constant vector."""
    features = [[0], [0], [0], [0]]  # ignored
    labels = np.array([[2, 3],
                       [1, 3],
                       [2, 3],
                       [2, 0]])
    n_samples = len(features)
    expected = np.hstack([np.ones((n_samples, 1)),
                          np.zeros((n_samples, 1))])
    clf = DummyClassifier(strategy="constant", random_state=0,
                          constant=[1, 0])
    clf.fit(features, labels)
    assert_array_equal(clf.predict(features), expected)
    _check_predict_proba(clf, features, labels)
def test_constant_strategy_exceptions():
    """Missing or wrongly-shaped constants must make fit raise ValueError."""
    features = [[0], [0], [0], [0]]  # ignored
    labels = [2, 1, 2, 2]
    no_constant = DummyClassifier(strategy="constant", random_state=0)
    assert_raises(ValueError, no_constant.fit, features, labels)
    wrong_shape = DummyClassifier(strategy="constant", random_state=0,
                                  constant=[2, 0])
    assert_raises(ValueError, wrong_shape.fit, features, labels)
def test_classification_sample_weight():
    """class_prior_ must reflect the weighted class frequencies."""
    features = [[0], [0], [1]]
    labels = [0, 1, 0]
    weights = [0.1, 1., 0.1]
    clf = DummyClassifier().fit(features, labels, weights)
    # class 0 gets 0.1 + 0.1 of total 1.2; class 1 gets 1.0 of 1.2
    assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
    """Sparse multilabel target: prediction stays sparse and constant."""
    features = [[0]] * 5  # ignored
    labels = sp.csc_matrix(np.array([[0, 1],
                                     [4, 0],
                                     [1, 1],
                                     [1, 4],
                                     [1, 1]]))
    n_samples = len(features)
    clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
    clf.fit(features, labels)
    prediction = clf.predict(features)
    assert_true(sp.issparse(prediction))
    expected = np.hstack([np.ones((n_samples, 1)),
                          np.zeros((n_samples, 1))])
    assert_array_equal(prediction.toarray(), expected)
def test_uniform_strategy_sparse_target_warning():
    """Uniform strategy warns on sparse y and predicts ~uniform labels."""
    features = [[0]] * 5  # ignored
    labels = sp.csc_matrix(np.array([[2, 1],
                                     [2, 2],
                                     [1, 4],
                                     [4, 2],
                                     [1, 1]]))
    clf = DummyClassifier(strategy="uniform", random_state=0)
    assert_warns_message(UserWarning,
                         "the uniform strategy would not save memory",
                         clf.fit, features, labels)
    many = [[0]] * 500
    prediction = clf.predict(many)
    for col in range(labels.shape[1]):
        freq = np.bincount(prediction[:, col]) / float(len(many))
        # the three observed labels should each occur about a third of the time
        for label in (1, 2, 4):
            assert_almost_equal(freq[label], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
    """Stratified strategy reproduces per-column label frequencies."""
    features = [[0]] * 5  # ignored
    labels = sp.csc_matrix(np.array([[4, 1],
                                     [0, 0],
                                     [1, 1],
                                     [1, 4],
                                     [1, 1]]))
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(features, labels)
    many = [[0]] * 500
    prediction = clf.predict(many)
    assert_true(sp.issparse(prediction))
    dense = prediction.toarray()
    for col in range(labels.shape[1]):
        freq = np.bincount(dense[:, col]) / float(len(many))
        # training frequencies are 3/5 for label 1 and 1/5 for labels 0 and 4
        assert_almost_equal(freq[1], 3. / 5, decimal=1)
        assert_almost_equal(freq[0], 1. / 5, decimal=1)
        assert_almost_equal(freq[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
    """most_frequent/prior on sparse y predict the dominant label per column."""
    features = [[0]] * 5  # ignored
    labels = sp.csc_matrix(np.array([[1, 0],
                                     [1, 3],
                                     [4, 0],
                                     [0, 1],
                                     [1, 0]]))
    n_samples = len(features)
    expected = np.hstack([np.ones((n_samples, 1)),
                          np.zeros((n_samples, 1))])
    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(features, labels)
        prediction = clf.predict(features)
        assert_true(sp.issparse(prediction))
        assert_array_equal(prediction.toarray(), expected)
def test_dummy_regressor_sample_weight(n_samples=10):
    """mean/median/quantile strategies must honour sample weights."""
    rng = np.random.RandomState(seed=1)
    features = [[0]] * n_samples
    targets = rng.rand(n_samples)
    weights = rng.rand(n_samples)
    est = DummyRegressor(strategy="mean").fit(features, targets, weights)
    assert_equal(est.constant_, np.average(targets, weights=weights))
    est = DummyRegressor(strategy="median").fit(features, targets, weights)
    assert_equal(est.constant_, _weighted_percentile(targets, weights, 50.))
    est = DummyRegressor(strategy="quantile", quantile=.95).fit(features, targets,
                                                               weights)
    assert_equal(est.constant_, _weighted_percentile(targets, weights, 95.))
| bsd-3-clause |
trachelr/mne-python | mne/decoding/ems.py | 16 | 4347 | # Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
from ..utils import logger, verbose
from ..fixes import Counter
from ..parallel import parallel_func
from .. import pick_types, pick_info
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None):
    """Compute event-matched spatial filter on epochs

    This version operates on the entire time course. No time window needs to
    be specified. The result is a spatial filter at each time point and a
    corresponding time course. Intuitively, the result gives the similarity
    between the filter at each time point and the data vector (sensors) at
    that time point.

    References
    ----------
    [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
        multi-sensor data to a single time course that reveals experimental
        effects", BMC Neuroscience 2013, 14:122

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs.
    conditions : list of str | None
        If a list of strings, strings must match the
        epochs.event_id's key as well as the number of conditions supported
        by the objective_function. If None keys in epochs.event_id are used.
    picks : array-like of int | None
        Channels to be included. If None only good data channels are used.
        Defaults to None
    n_jobs : int
        Number of jobs to run in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to self.verbose.

    Returns
    -------
    surrogate_trials : ndarray, shape (n_trials, n_time_points)
        The trial surrogates (one leave-one-out surrogate per epoch).
    mean_spatial_filter : ndarray, shape (n_channels, n_times)
        The set of spatial filters, averaged over the leave-one-out folds.
    conditions : ndarray, shape (n_epochs,)
        The conditions used. Values correspond to original event ids.
    """
    logger.info('...computing surrogate time series. This can take some time')
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True)
    # The leave-one-out scheme below assumes balanced classes.
    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
        raise ValueError('The same number of epochs is required by '
                         'this function. Please consider '
                         '`epochs.equalize_event_counts`')
    if conditions is None:
        conditions = epochs.event_id.keys()
        epochs = epochs.copy()
    else:
        epochs = epochs[conditions]
    epochs.drop_bad_epochs()
    if len(conditions) != 2:
        raise ValueError('Currently this function expects exactly 2 '
                         'conditions but you gave me %i' %
                         len(conditions))
    ev = epochs.events[:, 2]
    # special care to avoid path dependant mappings and orders
    conditions = list(sorted(conditions))
    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
    info = pick_info(epochs.info, picks)
    data = epochs.get_data()[:, picks]
    # Scale (z-score) the data by channel type: each sensor family gets
    # divided by its own pooled standard deviation.
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type in epochs:
            if ch_type == 'eeg':
                this_picks = pick_types(info, meg=False, eeg=True)
            else:
                this_picks = pick_types(info, meg=ch_type, eeg=False)
            data[:, this_picks] /= np.std(data[:, this_picks])
    # Leave-one-out: fit the filter on all-but-one epochs, apply to the
    # held-out epoch (deprecated sklearn.cross_validation API kept as-is).
    from sklearn.cross_validation import LeaveOneOut
    parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
                   for train, test in LeaveOneOut(len(data)))
    surrogate_trials, spatial_filter = zip(*out)
    surrogate_trials = np.array(surrogate_trials)
    spatial_filter = np.mean(spatial_filter, axis=0)
    return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""default diff objective function"""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
# Shared fixture: the iris dataset shuffled with a fixed seed so the tests
# below are deterministic across runs.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
# Sparse copy of the same design matrix; indices sorted for efficient ops.
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
    """Minimal reference implementation of the perceptron learning rule.

    Serves as a correctness oracle for sklearn's Perceptron in the tests
    below: on every misclassified sample the hyperplane is moved toward it.
    """

    def __init__(self, n_iter=1):
        self.n_iter = n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0
        for _ in range(self.n_iter):
            for sample, target in zip(X, y):
                # Classic update: only react to mistakes.
                if self.predict(sample)[0] != target:
                    self.w += target * sample
                    self.b += target

    def project(self, X):
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        return np.sign(self.project(np.atleast_2d(X)))
def test_perceptron_accuracy():
    """Perceptron should reach >= 0.7 training accuracy on iris (dense & sparse)."""
    for design in (X, X_csr):
        clf = Perceptron(n_iter=30, shuffle=False)
        clf.fit(design, y)
        assert_true(clf.score(design, y) >= 0.7)
def test_perceptron_correctness():
    """sklearn's Perceptron must match the reference implementation."""
    y_bin = y.copy()
    y_bin[y != 1] = -1  # one-vs-rest binarization of class 1
    reference = MyPerceptron(n_iter=2)
    reference.fit(X, y_bin)
    clf = Perceptron(n_iter=2, shuffle=False)
    clf.fit(X, y_bin)
    assert_array_almost_equal(reference.w, clf.coef_.ravel())
def test_undefined_methods():
    """predict_proba/predict_log_proba must not be exposed by Perceptron."""
    clf = Perceptron()
    for attr in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda name: getattr(clf, name), attr)
| bsd-3-clause |
huongttlan/statsmodels | statsmodels/sandbox/pca.py | 33 | 7098 | #Copyright (c) 2008 Erik Tollerud (etolleru@uci.edu)
from statsmodels.compat.python import zip
import numpy as np
from math import pi
class Pca(object):
    """
    A basic class for Principal Component Analysis (PCA).

    p is the number of dimensions, while N is the number of data points.
    Input data is given as a p x N matrix; internally it is stored
    transposed (N x p) in ``self.A``, centered in ``self.M`` and
    z-scored in ``self.N``.
    """
    _colors = ('r', 'g', 'b', 'c', 'y', 'm', 'k')  # defaults

    def __calc(self):
        # Recompute centered (M) and z-scored (N) data and invalidate the
        # cached eigensystem.
        A = self.A
        M = A - np.mean(A, axis=0)
        N = M / np.std(M, axis=0)
        self.M = M
        self.N = N
        self._eig = None

    def __init__(self, data, names=None):
        """
        p X N matrix input

        Parameters
        ----------
        data : array-like, shape (p, N)
            One row per dimension, one column per data point.
        names : sequence of str or None
            Optional per-dimension labels (used as axis labels in plots);
            must have length p.
        """
        A = np.array(data).T
        n, p = A.shape
        self.n, self.p = n, p
        if p > n:
            from warnings import warn
            warn('p > n - intentional?', RuntimeWarning)
        self.A = A
        self._origA = A.copy()
        self.__calc()
        self._colors = np.tile(self._colors, int((p - 1) / len(self._colors)) + 1)[:p]
        if names is not None and len(names) != p:
            raise ValueError('names must match data dimension')
        self.names = None if names is None else tuple(str(nm) for nm in names)

    def getCovarianceMatrix(self):
        """
        returns the covariance matrix for the dataset
        """
        return np.cov(self.N.T)

    def getEigensystem(self):
        """
        returns a tuple of (eigenvalues, eigenvectors) for the data set,
        sorted by decreasing eigenvalue. The result is cached until the
        data changes.
        """
        if self._eig is None:
            res = np.linalg.eig(self.getCovarianceMatrix())
            sorti = np.argsort(res[0])[::-1]
            res = (res[0][sorti], res[1][:, sorti])
            self._eig = res
        return self._eig

    def getEigenvalues(self):
        return self.getEigensystem()[0]

    def getEigenvectors(self):
        return self.getEigensystem()[1]

    def getEnergies(self):
        """
        "energies" are just normalized eigenvalues (they sum to 1)
        """
        v = self.getEigenvalues()
        return v / np.sum(v)

    def plot2d(self, ix=0, iy=1, clf=True):
        """
        Generates a 2-dimensional plot of the data set and principle components
        using matplotlib.

        ix specifies which p-dimension to put on the x-axis of the plot
        and iy specifies which to put on the y-axis (0-indexed)
        """
        import matplotlib.pyplot as plt
        x, y = self.N[:, ix], self.N[:, iy]
        if clf:
            plt.clf()
        plt.scatter(x, y)
        vals, evs = self.getEigensystem()
        # evx,evy=evs[:,ix],evs[:,iy]
        xl, xu = plt.xlim()
        yl, yu = plt.ylim()
        dx, dy = (xu - xl), (yu - yl)
        # Draw each principal axis scaled by its eigenvalue.
        for val, vec, c in zip(vals, evs.T, self._colors):
            plt.arrow(0, 0, val * vec[ix], val * vec[iy], head_width=0.05 * (dx * dy / 4) ** 0.5, fc=c, ec=c)
        # plt.arrow(0,0,vals[ix]*evs[ix,ix],vals[ix]*evs[iy,ix],head_width=0.05*(dx*dy/4)**0.5,fc='g',ec='g')
        # plt.arrow(0,0,vals[iy]*evs[ix,iy],vals[iy]*evs[iy,iy],head_width=0.05*(dx*dy/4)**0.5,fc='r',ec='r')
        if self.names is not None:
            plt.xlabel('$' + self.names[ix] + '/\\sigma$')
            plt.ylabel('$' + self.names[iy] + '/\\sigma$')

    def plot3d(self, ix=0, iy=1, iz=2, clf=True):
        """
        Generates a 3-dimensional plot of the data set and principle components
        using mayavi.

        ix, iy, and iz specify which of the input p-dimensions to place on each of
        the x,y,z axes, respectively (0-indexed).
        """
        import enthought.mayavi.mlab as M
        if clf:
            M.clf()
        z3 = np.zeros(3)
        v = (self.getEigenvectors() * self.getEigenvalues())
        M.quiver3d(z3, z3, z3, v[ix], v[iy], v[iz], scale_factor=5)
        M.points3d(self.N[:, ix], self.N[:, iy], self.N[:, iz], scale_factor=0.3)
        if self.names:
            M.axes(xlabel=self.names[ix] + '/sigma', ylabel=self.names[iy] + '/sigma', zlabel=self.names[iz] + '/sigma')
        else:
            M.axes()

    def sigclip(self, sigs):
        """
        clips out all data points that are more than a certain number
        of standard deviations from the mean.

        sigs can be either a single value or a length-p sequence that
        specifies the number of standard deviations along each of the
        p dimensions.

        Returns the number of clipped points.
        """
        if np.isscalar(sigs):
            sigs = sigs * np.ones(self.N.shape[1])
        # BUGFIX: scale by the per-dimension std (axis=0). The previous
        # axis=1 produced a length-n vector that only broadcast against the
        # (n, p) data when n == p.
        sigs = sigs * np.std(self.N, axis=0)
        n = self.N.shape[0]
        m = np.all(np.abs(self.N) < sigs, axis=1)
        self.A = self.A[m]
        self.__calc()
        return n - sum(m)

    def reset(self):
        """Restore the original (unclipped) data."""
        self.A = self._origA.copy()
        self.__calc()

    def project(self, vals=None, enthresh=None, nPCs=None, cumen=None):
        """
        projects the normalized values onto the components

        enthresh, nPCs, and cumen determine how many PCs to use
        (at most one of them may be given).

        if vals is None, the normalized data vectors are the values to project.
        Otherwise, it should be convertable to a p x N array

        returns n,p(>threshold) dimension array
        """
        # BUGFIX: use `is not None` (identity) instead of `!= None`, and call
        # the real accessor getEnergies() -- `self.energies` does not exist
        # and raised AttributeError whenever enthresh or cumen was given.
        nonnones = sum([e is not None for e in (enthresh, nPCs, cumen)])
        if nonnones == 0:
            m = slice(None)
        elif nonnones > 1:
            raise ValueError("can't specify more than one threshold")
        else:
            if enthresh is not None:
                m = self.getEnergies() > enthresh
            elif nPCs is not None:
                m = slice(None, nPCs)
            elif cumen is not None:
                m = np.cumsum(self.getEnergies()) < cumen
            else:
                raise RuntimeError('Should be unreachable')
        if vals is None:
            vals = self.N.T
        else:
            # np.asarray avoids a copy where possible (np.array(..., copy=False)
            # raises on NumPy >= 2 when a copy is unavoidable).
            vals = np.asarray(vals)
            if self.N.T.shape[0] != vals.shape[0]:
                raise ValueError("shape for vals doesn't match")
        proj = np.matrix(self.getEigenvectors()).T * vals
        return proj[m].T

    def deproject(self, A, normed=True):
        """
        input is an n X q array, where q <= p

        output is p X n
        """
        A = np.atleast_2d(A)
        n, q = A.shape
        p = self.A.shape[1]
        if q > p:
            raise ValueError("q > p")
        evinv = np.linalg.inv(np.matrix(self.getEigenvectors()).T)
        # Zero-pad the missing components before inverting the projection.
        zs = np.zeros((n, p))
        zs[:, :q] = A
        proj = evinv * zs.T
        if normed:
            return np.array(proj.T).T
        else:
            # Undo the z-scoring: rescale by the original std and re-add means.
            mns = np.mean(self.A, axis=0)
            sds = np.std(self.M, axis=0)
            return (np.array(proj.T) * sds + mns).T

    def subtractPC(self, pc, vals=None):
        """
        pc can be a scalar or any sequence of pc indecies

        if vals is None, the source data is self.A, else whatever is in vals
        (which must be p x m)
        """
        if vals is None:
            vals = self.A
        else:
            vals = vals.T
            if vals.shape[1] != self.A.shape[1]:
                raise ValueError("vals don't have the correct number of components")
        pcs = self.project()
        # Keep only the selected component(s), deproject, and subtract.
        zpcs = np.zeros_like(pcs)
        zpcs[:, pc] = pcs[:, pc]
        upc = self.deproject(zpcs, False)
        A = vals.T - upc
        B = A.T * np.std(self.M, axis=0)
        return B + np.mean(self.A, axis=0)
| bsd-3-clause |
mjirik/teigen | teigen/tgmain.py | 1 | 44348 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © %YEAR% <>
#
# Distributed under terms of the %LICENSE% license.
import logging
logger = logging.getLogger(__name__)
import logging.handlers
import argparse
# import begin
import sys
import os
import os.path as op
import inspect
import numpy as np
import scipy
import re
import datetime
import copy
import collections
import pandas as pd
# from . import generators
from .generators import cylinders
from .generators import gensei_wrapper
# import .generators.cylinders
# from .generators.gensei_wrapper
from .generators import unconnected_cylinders
from imtools.dili import get_default_args
from imma import dili
import io3d.datawriter
import io3d.misc
import ndnoise
import ndnoise.generator
# from . import dictwidgetqt
# from . import geometry3d as g3
CKEY_APPEARANCE = "appearance"
CKEY_OUTPUT = "output"
CKEY_MEASUREMENT = "measurement"
class Teigen():
def __init__(self, logfile='~/tegen.log', loglevel=logging.DEBUG):
    """Set up logging, the generator registry and the default configuration.

    Parameters
    ----------
    logfile : str
        Path of the rotating log file (``~`` is expanded).
    loglevel : int
        Logging level applied to the logger and all handlers.
    """
    self.config_file_manager = ConfigFileManager("teigen")
    self.config_file_manager.init_config_dir()
    # self.loglevel = loglevel
    self.logger = logging.getLogger()
    logging.basicConfig()
    # Rotating file log: up to 10 files of ~1 MB each.
    self.filehandler = logging.handlers.RotatingFileHandler(
        op.expanduser(logfile),
        maxBytes=1000000,
        backupCount=9
    )
    # self.filehandler.setLevel(self.loglevel)
    # formatter = logging.Formatter('%(asctime)s %(name)-18s %(levelname)-8s %(message)s')
    self.formatter = logging.Formatter(
        '%(asctime)s %(levelname)-8s %(name)-18s %(lineno)-5d %(funcName)-12s %(message)s')
    self.filehandler.setFormatter(self.formatter)
    logger.addHandler(self.filehandler)
    # self.memoryhandler = logging.handlers.MemoryHandler(1024*10, logging.DEBUG, streamhandler)
    # Memory handler buffers records until save_log() attaches a per-run file.
    self.memoryhandler = logging.handlers.MemoryHandler(1024 * 100)  # , logging.DEBUG, streamhandler)
    # self.memoryhandler.setLevel(self.loglevel)
    self.streamhandler = logging.StreamHandler()
    self.streamhandler.setFormatter(self.formatter)
    self.set_loglevel(loglevel)
    logger.info("Starting Teigen")
    self.logfile = logfile
    self.version = "0.3.0"
    self.data3d = None
    self.voxelsize_mm = None
    self.need_run = True
    self.gen = None
    # Generator registry: classes, UI names, per-generator config-export
    # functions and "negative volume" flags are four parallel lists and
    # must stay index-aligned.
    self.generators_classes = [
        # generators.cylinders.CylinderGenerator,
        # generators.gensei_wrapper.GenseiGenerator,
        # generators.cylinders.CylinderGenerator,
        unconnected_cylinders.UnconnectedCylinderGenerator,
        unconnected_cylinders.UnconnectedCylinderGenerator,
        unconnected_cylinders.UnconnectedCylinderGenerator,
        unconnected_cylinders.UnconnectedCylinderGenerator,
    ]
    self.generators_names = [
        # "Voronoi tubes",
        # "Gensei",
        # "Continuous tubes",
        "Unconnected tubes",
        "Connected tubes",
        "Unconnected porosity",
        "Connected porosity",
    ]
    self._cfg_export_fcn = [
        # self._config2generator_general_export,
        # self._config2generator_gensei_export,
        # self._config2generator_general_export,
        self._config2generator_tubes_export,
        self._config2generator_tubes_export,
        self._config2generator_porosity_export,
        self._config2generator_porosity_export,
    ]
    self._cfg_negative = [
        False, False, True, True
    ]
    self.use_default_config()
    self.progress_callback = None
    self.temp_vtk_file = op.expanduser("~/tree.vtk")
    # 3D visualization data, works for some generators
    self.polydata_volume = None
    self.dataframes = {}
    self.stats_times = {
        "datetime": str(datetime.datetime.now())
    }
    self.parameters_changed_before_save = True
    self.fig_3d_render_snapshot = None
    self.tube_skeleton = {}
def __del__(self):
self.filehandler.close()
def set_loglevel(self, loglevel):
self.loglevel = loglevel
self.logger.setLevel(self.loglevel)
self.filehandler.setLevel(self.loglevel)
self.memoryhandler.setLevel(self.loglevel)
self.streamhandler.setLevel(self.loglevel)
def use_default_config(self):
self.config = self.get_default_config()
def get_default_config(self):
    """
    Create the default configuration.

    The configuration is composed from the default keyword arguments of
    every registered generator class (minus internal keys), the
    postprocessing defaults, and fixed sampling/output/measurement
    sections.

    :return: ``collections.OrderedDict`` with the full default config.
    """
    config = collections.OrderedDict()
    # self.config["generators"] = [dictwidgetqt.get_default_args(conf) for conf in self.generators_classes]
    # Keys never exposed to the user-facing generator configuration.
    hide_keys = ["build", "gtree", "voxelsize_mm", "areasize_px", "resolution",
                 "n_slice", "dims", "intensity_profile_intensity", "intensity_profile_radius"]
    config["generators"] = collections.OrderedDict()
    for generator_cl, generator_name in zip(
            self.generators_classes,
            self.generators_names
    ):
        generator_params = get_default_args(generator_cl)
        generator_params = dili.kick_from_dict(generator_params, hide_keys)
        config["generators"][generator_name] = generator_params
    # The four tube/porosity variants differ only in the overlap flag.
    config["generators"]["Unconnected tubes"]["allow_overlap"] = False
    config["generators"]["Connected tubes"]["allow_overlap"] = True
    config["generators"]["Unconnected porosity"]["allow_overlap"] = False
    config["generators"]["Connected porosity"]["allow_overlap"] = True
    # self.config["generator_id"] = self.generators_names[0]
    config["generator_id"] = 0
    # self.config = self.configs[0]
    config["postprocessing"] = get_default_args(self.postprocessing)
    config["postprocessing"]["intensity_profile_radius"] = [0.4, 0.7, 1.0, 1.3]
    config["postprocessing"]["intensity_profile_intensity"] = [195, 190, 200, 30]
    # config["postprocessing"][""] = dictwidgetqt.get_default_args(self.postprocessing)
    config["areasampling"] = {
        "voxelsize_mm": [0.01, 0.01, 0.01],
        "areasize_mm": [2.0, 2.0, 2.0],
        "areasize_px": [200, 200, 200]
    }
    config["filepattern"] = "~/teigen_data/{seriesn:03d}/data{:06d}.jpg"
    config["filepattern_abspath"] = None
    # config['filepattern_series_number'] = series_number
    # self.config["voxelsize_mm"] = [1.0, 1.0, 1.0]
    # self.config["areasize_mm"] = [100.0, 100.0, 100.0]
    # self.config["areasize_px"] = [100, 100, 100]
    config[CKEY_APPEARANCE] = {
        "show_aposteriori_surface": True,
        "skip_volume_generation": False,
        "noise_preview": False,
        "surface_3d_preview": False,
        "force_rewrite": False,
    }
    # "force_rewrite" if series number is used on output dir
    config["output"] = {
        "one_row_filename": "~/teigen_data/output_rows.csv",
        "aposteriori_measurement": False,
        "aposteriori_measurement_multiplier": 1.0,
        "note": ""
    }
    config["measurement"] = {
        "polygon_radius_selection_method": "best",
        # "polygon_radius_selection_method": "inscribed"
        "tube_shape": True,
    }
    return config
def update_config(self, **config):
    """Recursively merge keyword arguments into the current configuration.

    The update is refused (with an error log) when the stored
    ``required_teigen_version`` does not match this Teigen version.
    Cached voxel/area sizes are refreshed and the
    ``parameters_changed_before_save`` flag is raised so step1 reruns.
    """
    if "required_teigen_version" in config.keys():
        reqired_version = config["required_teigen_version"]
        if reqired_version != self.version:
            logger.error(
                "Wrong teigen version. Required: " + reqired_version + " , actual " + self.version)
            return
    # Deep copy so later mutation of the caller's dict cannot leak in.
    config = copy.deepcopy(config)
    # there can be stored more than our config. F.e. some GUI dict reconstruction information
    self.config = dili.recursive_update(self.config, config)
    self.voxelsize_mm = np.asarray(self.config["areasampling"]["voxelsize_mm"])
    self.areasize_px = np.asarray(self.config["areasampling"]["areasize_px"])
    self.parameters_changed_before_save = True
def get_generator_id_by_name_or_number(self, id):
# if id is not nuber but name of generator
if type(id) == str:
for i in range(len(self.generators_names)):
if id == self.generators_names[i]:
id = i
break
if type(id) == str:
logger.error("Unknown generator name: " + id)
return id
def _step1_init_generator(self, tube_skeleton=None):
    """Instantiate and configure the selected generator.

    Parameters
    ----------
    tube_skeleton : dict or None
        Pre-existing 1D skeleton; when given it is assigned to the
        generator instead of letting it build its own.

    Returns
    -------
    datetime.datetime
        Start timestamp, consumed by ``_step1_deinit_save_stats``.
    """
    t0 = datetime.datetime.now()
    logger.info("step1_init_datetime" + str(t0))
    st0 = str(t0)
    self.stats_times["step1_init_datetime"] = st0
    config = copy.deepcopy(self.config)
    self.config_file_manager.save_init(self.config)
    id = config.pop('generator_id')
    id = self.get_generator_id_by_name_or_number(id)
    self.stop_flag = False
    # area_dct = config["areasampling"]
    # area_cfg = self._cfg_export_fcn[id](area_dct)
    # Translate the generic config into generator-specific area parameters.
    area_cfg = self._cfg_export_fcn[id](config)
    # TODO probably unused
    config.update(area_cfg)
    generator_class = self.generators_classes[id]
    # self.config = get_default_args(generator_class)
    # select only parameters for generator
    # generator_default_config = dictwidgetqt.get_default_args(generator_class)
    # generator_config = dictwidgetqt.subdict(config["generators"][id], generator_default_config.keys())
    generator_config = list(config["generators"].items())[id][1]
    generator_config.update(area_cfg)
    # import ipdb;ipdb.set_trace()
    self.gen = generator_class(**generator_config)
    # NOTE(review): id == 2 is "Unconnected porosity" — special-cased tuning.
    if id == 2:
        self.gen.MAKE_IT_SHORTER_CONSTANT = 0.0
        self.gen.OVERLAPS_ALOWED = True
    self.gen.progress_callback = self.progress_callback
    if tube_skeleton is not None:
        self.gen.tree_data = tube_skeleton
    logger.debug("step1 init generator finished")
    logger.debug("step1 init generator finished")
    return t0
def _step1_deinit_save_stats(self, t0):
    """Finish step1: build VTK polydata, record timings, store stats.

    Parameters
    ----------
    t0 : datetime.datetime
        Start timestamp returned by ``_step1_init_generator``.
    """
    self.tube_skeleton = self.gen.tree_data
    # import ipdb;ipdb.set_trace()
    t1 = datetime.datetime.now()
    logger.debug("1D structure is generated")
    logger.debug("before vtk generation")
    # Build both volume- and surface-compensated polydata for preview/export.
    pdatas = self.__generate_vtk(self.temp_vtk_file)
    logger.debug("generate vtk finished")
    self.polydata_volume = pdatas[0]
    self.polydata_surface = pdatas[1]
    t2 = datetime.datetime.now()
    self.stats_times["step1_total_time_s"] = (t2 - t0).total_seconds()
    self.stats_times["step1_generate_time_s"] = (t1 - t0).total_seconds()
    self.stats_times["step1_generate_vtk_time_s"] = (t2 - t1).total_seconds()
    self.stats_times["step1_finished"] = True
    self.stats_times["step2_finished"] = False
    self.time_run = t2 - t0
    # self.prepare_stats()
    self.config["filepattern_abspath"] = self.filepattern_fill_series()
    one_row_filename = self.config["output"]["one_row_filename"]
    if one_row_filename != "":
        # self.prepare_stats()
        self.save_stats_to_row(one_row_filename)
    else:
        self.prepare_stats()
    logger.info("time: " + str(self.time_run))
    self.need_run = False
    self.parameters_changed_before_save = False
def step1_by_load_tube_skeleton(self, filename):
    """Run step1 using a 1D skeleton loaded from ``filename`` instead of
    generating one."""
    logger.debug("step1_by_loda_tube_skeleton")
    self.load_tube_skeleton(filename=filename)
    logger.debug("tube skeleton loaded")
    t0 = self._step1_init_generator(self.tube_skeleton)
    logger.debug("generator initiated")
    logger.debug("generator initiated")
    # t0 = datetime.datetime.now()
    # st0 = str(t0)
    # logger.info("step1_init_datetime " + st0)
    # self.stats_times["step1_init_datetime"] = st0
    self._step1_deinit_save_stats(t0)
def step1(self):
    """Generate the 1D tube skeleton with the configured generator and
    finish with VTK generation and statistics bookkeeping."""
    t0 = self._step1_init_generator()
    # self.gen = generators.gensei_wrapper.GenseiGenerator(**self.config2)
    # self.gen = generators.gensei_wrapper.GenseiGenerator()
    logger.debug("1D structure generator started")
    # logger.debug("1D structure generator started")
    # import ipdb; ipdb.set_trace()
    self.gen.run()
    # logger.debug("vtk generated")
    # import ipdb; ipdb.set_trace()
    self._step1_deinit_save_stats(t0)
    logger.debug("step1 finished")
def get_aposteriori_faces_and_vertices(self):
"""
:return: (faces, vertices)
"""
return self._aposteriori_surface_faces, self._aposteriori_surface_vertices
def get_config_file_pattern(self):
    """Derive the parameters-YAML file pattern from the configured
    output file pattern (slice number/position placeholders removed)."""
    pattern = io3d.datawriter.filepattern_fill_slice_number_or_position(
        self.config["filepattern"], "")
    root = os.path.splitext(pattern)[0]
    return root + "_parameters.yaml"
def __generate_vtk(self, vtk_file="~/tree.vtk"):
    """Build VTK polydata for the current tube skeleton.

    Two trees are built: a volume-compensated one (always, saved to
    ``vtk_file``) and a surface-compensated one (only for the "best"
    radius selection method).

    Returns
    -------
    tuple
        ``(polydata_volume, polydata_surface)``; the second element is
        ``None`` unless the "best" method was used.
        NOTE(review): if the generator has no ``tree_data`` attribute the
        return values are never assigned — confirm callers guarantee it.
    """
    logger.info("generating vtk for surface and volume compensated objects")
    vtk_file = op.expanduser(vtk_file)
    # from tree import TreeBuilder
    from .tb_vtk import TBVTK
    if "tree_data" in dir(self.gen):
        resolution = self.config["postprocessing"]["measurement_resolution"]
        method = self.config["measurement"]["polygon_radius_selection_method"]
        tube_shape = self.config["measurement"]["tube_shape"]
        logger.debug("surface_tube_shape " + str(tube_shape))
        if method == "best":
            method_vol = "cylinder volume + sphere error"
            method_surf = "cylinder surface + sphere error + join error"
        else:
            method_vol = method
            method_surf = None
        # build volume tree
        logger.debug("vtk generation - volume compensated")
        tvg = TBVTK(
            cylinder_resolution=resolution,
            sphere_resolution=resolution,
            polygon_radius_selection_method=method_vol,
            tube_shape=tube_shape
        )
        # yaml_path = os.path.join(path_to_script, "./hist_stats_test.yaml")
        # tvg.importFromYaml(yaml_path)
        tvg.voxelsize_mm = self.voxelsize_mm
        tvg.shape = self.gen.areasize_px
        tvg.tube_skeleton = self.tube_skeleton
        output = tvg.buildTree()  # noqa
        # tvg.show()
        # TODO control output
        tvg.saveToFile(vtk_file)
        polydata_vol = tvg.polyData
        # build surface tree
        if method_surf is not None:
            logger.debug("vtk generation - surface compensated")
            from .tb_vtk import TBVTK
            tvg2 = TBVTK(
                cylinder_resolution=resolution,
                sphere_resolution=resolution,
                polygon_radius_selection_method=method_surf,
                tube_shape=tube_shape
            )
            tvg2.voxelsize_mm = self.voxelsize_mm
            tvg2.shape = self.gen.areasize_px
            tvg2.tube_skeleton = self.tube_skeleton
            output = tvg2.buildTree()  # noqa
            polydata_surf = tvg2.polyData
            # tvg.show()
            # TODO control output
            # tvg.saveToFile(vtk_file)
        else:
            polydata_surf = None
    return polydata_vol, polydata_surf
def stop(self):
self.stop_flag = True
def filepattern_fill_potential_series(self):
    """Return the file pattern with the next *unoccupied* series number
    filled in, without storing that number in the configuration."""
    import io3d.datawriter
    # filepattern = self.config["filepattern"]
    filepattern = self.get_config_file_pattern()
    sn = io3d.datawriter.get_unoccupied_series_number(filepattern)
    # Drop empty "{ }" placeholders before filling the series number.
    filepattern = re.sub(r"({\s*})", r"", filepattern)
    filepattern = io3d.datawriter.filepattern_fill_series_number(filepattern, sn)
    return filepattern
def filepattern_fill_series(self):
    """
    Return the file pattern with the series number filled in.
    The slice_number and slice_position placeholders are ignored.

    When ``force_rewrite`` is on and a series number is already stored,
    it is reused; otherwise the next unoccupied number is looked up and
    cached in the configuration.

    :return: str
    """
    import io3d.datawriter
    filepattern = self.config["filepattern"]
    force_rewrite = self.config[CKEY_APPEARANCE]["force_rewrite"]
    # self.refresh_unoccupied_series_number()
    if force_rewrite and "filepattern_series_number" in self.config.keys():
        pass
    else:
        sn = io3d.datawriter.get_unoccupied_series_number(filepattern)
        self.config["filepattern_series_number"] = sn
    filepattern_series_number = self.config["filepattern_series_number"]
    # Remove empty "{ }" placeholders, then substitute the series number.
    filepattern = re.sub(r"({\s*})", r"", filepattern)
    filepattern = io3d.datawriter.filepattern_fill_series_number(filepattern, filepattern_series_number)
    return filepattern
def filepattern_split(self):
filepattern = self.filepattern_fill_series()
filepattern = re.sub(r"({.*?})", r"", filepattern)
root, ext = op.splitext(filepattern)
return root, ext
def get_fn_base(self):
fn_base, fn_ext = self.filepattern_split()
fn_base = op.expanduser(fn_base)
return fn_base
def refresh_unoccupied_series_number(self):
    """Look up the next free series number (based on the parameters-file
    pattern) and store it in the configuration."""
    config_filepattern = self.get_config_file_pattern()
    series_number = io3d.datawriter.get_unoccupied_series_number(filepattern=config_filepattern)
    # series_number = io3d.datawriter.get_unoccupied_series_number(filepattern=self.config["filepattern"])
    self.config['filepattern_series_number'] = series_number
def save_parameters(self, filename=None):
    """Serialize the current configuration to YAML.

    Parameters
    ----------
    filename : str or None
        Target path; when None it is derived from the output file
        pattern (``<fn_base>_config.yaml``) and the directory is created.

    Returns
    -------
    str
        The filename actually written.
    """
    if filename is None:
        fn_base = self.get_fn_base()
        dirname = op.dirname(fn_base)
        if not op.exists(dirname):
            os.makedirs(dirname)
        filename = fn_base + "_config.yaml"
    io3d.misc.obj_to_file(self.config, filename=filename)
    return filename
def save_config_and_measurement(self, filename=None):
    """Write the merged configuration + measurement data to YAML.

    Parameters
    ----------
    filename : str or None
        Target path; when None it is derived from the output file
        pattern (``<fn_base>_config_and_measurement.yaml``) and the
        directory is created if needed.
    """
    if filename is None:
        fn_base = self.get_fn_base()
        dirname = op.dirname(fn_base)
        if not op.exists(dirname):
            os.makedirs(dirname)
        filename = fn_base + "_config_and_measurement.yaml"
    # BUGFIX: the debug message was an f-string with no placeholder and
    # never showed the target path; log the actual filename (lazily).
    logger.debug("save config and measurement to path: %s", filename)
    config_and_measurement = self.get_config_and_measurement()
    io3d.misc.obj_to_file(config_and_measurement, filename=filename)
def save_log(self):
    """Flush the buffered in-memory log records into a per-run log file
    (``<fn_base>.log``) next to the generated data."""
    fn_base = self.get_fn_base()
    handler = logging.FileHandler(fn_base + ".log")
    handler.setFormatter(self.formatter)
    handler.setLevel(self.loglevel)
    # Redirect the memory buffer to the new file and flush what was
    # collected so far.
    self.memoryhandler.setTarget(handler)
    self.memoryhandler.flush()
    self.memoryhandler.flushLevel = self.loglevel
def generate_volume(self):
    """Rasterize the tube skeleton into a 3D uint8 volume and run
    postprocessing on it.

    The raw volume is stored in ``self.data3d``; the postprocessed volume
    is assigned back to ``self.gen.data3d``.
    """
    background_intensity = self.config["postprocessing"]["background_intensity"]
    self.data3d = self.gen.generate_volume(
        voxelsize_mm=self.config["areasampling"]["voxelsize_mm"],
        shape=self.config["areasampling"]["areasize_px"],
        tube_skeleton=self.tube_skeleton, dtype="uint8", background_intensity=background_intensity)
    # self.voxelsize_mm = self.gen.voxelsize_mm
    # this will change negative and positive
    id = self.config['generator_id']
    id = self.get_generator_id_by_name_or_number(id)
    # from pprint import pprint
    # plogger.debug(self.config)
    # import ipdb; ipdb.set_trace()
    # config = self._cfg_export_fcn[id](self.config)
    # Porosity generators invert the volume via the per-generator flag.
    postprocessing_params = self.config["postprocessing"]
    postprocessing_params["negative"] = self._cfg_negative[id]
    data3d = self.postprocessing(**postprocessing_params)
    self.gen.data3d = data3d
def step2(self):
    """Second pipeline stage: save parameters, logs, measurements, the 1D
    model, the surface mesh and (optionally) the rasterized volume.

    Reruns step1 first when parameters changed since the last run; bails
    out when no skeleton was generated.
    """
    if self.parameters_changed_before_save:
        self.step1()
    # TODO split save_volume and save_parameters
    if len(self.tube_skeleton) == 0:
        logger.error("No data generated. 1D skeleton is empty.")
        return
    logger.debug(f"filepattern abspath: {self.config['filepattern_abspath']}")
    self.refresh_unoccupied_series_number()
    self.save_parameters()
    self.save_log()
    t0 = datetime.datetime.now()
    fn_base = self.get_fn_base()
    # config["filepattern"] = filepattern
    self._aposteriori_numeric_measurement(fn_base)
    logger.debug(f"step2 save stats: {fn_base}")
    self.save_stats(fn_base)
    t1 = datetime.datetime.now()
    self.save_1d_model_to_file(fn_base + "_vt.yaml")
    logger.debug("before volume generate " + str(t1 - t0))
    self.save_surface_to_file(fn_base + "_surface.vtk")
    # self.save_surface_to_file(fn_base + "_surface.vtk")
    # postprocessing
    skip_vg = self.config[CKEY_APPEARANCE]["skip_volume_generation"]
    t2 = t1
    # Volume generation can be skipped in the config and requires the
    # generator to support it.
    if (not skip_vg) and ("generate_volume" in dir(self.gen)):
        # self.data3d = self.gen.generate_volume()
        self.generate_volume()
        # self.gen.saveVolumeToFile(self.config["filepattern"])
        t2 = datetime.datetime.now()
        logger.debug("volume generated in: " + str(t2 - t0))
        self.gen.saveVolumeToFile(self.config["filepattern_abspath"])
    t3 = datetime.datetime.now()
    logger.info("time before volume generate: " + str(t1 - t0))
    logger.info("time before volume save: " + str(t2 - t0))
    logger.info("time after volume save: " + str(t3 - t0))
    self.stats_times["step2_init_datetime"] = str(t0)
    self.stats_times["step2_numeric_measurement_time_s"] = (t1 - t0).total_seconds()
    self.stats_times["step2_generate_volume_time_s"] = (t2 - t1).total_seconds()
    self.stats_times["step2_save_volume_time_s"] = (t3 - t2).total_seconds()
    self.stats_times["step2_total_time_s"] = (t3 - t0).total_seconds()
    self.stats_times["step2_finish_datetime"] = str(t3)
    self.stats_times["step2_finished"] = True
    # fnp_abs = self.config["filepattern_abspath"]
    self.save_config_and_measurement(filename=fn_base + "_config_and_measurement.yaml")
    one_row_filename = self.config["output"]["one_row_filename"]
    logger.debug("write stats to common spreadsheet")
    if one_row_filename != "":
        # self.prepare_stats()
        self.save_stats_to_row(one_row_filename)
    else:
        self.prepare_stats()
    logger.debug("step2 finished")
    # self.memoryhandler.flush()
def save_1d_model_to_file(self, outputfile):
    """Serialize the generated 1D tube model (skeleton graph) to *outputfile*.

    :param outputfile: path of the file written via ``io3d.misc.obj_to_file``
    """
    tree_data = dili.ndarray_to_list_in_structure(self.gen.tree_data)
    tree = {
        "voxelsize_mm": np.asarray(self.config["areasampling"]["voxelsize_mm"]).tolist(),
        # NOTE(review): this key is named "voxelsize_px" but is filled from
        # "areasize_px" -- looks like the area size in pixels; confirm with readers
        "voxelsize_px": np.asarray(self.config["areasampling"]["areasize_px"]).tolist(),
        "Graph": {"0": tree_data}
    }
    io3d.misc.obj_to_file(tree, outputfile)
def save_surface_to_file(self, outputfile, lc_all="C"):
    """Write the generated surface polydata to a legacy VTK file.

    :param outputfile: path of the output ``.vtk`` file
    :param lc_all: locale set before writing (VTK text output is
        sensitive to the decimal separator); ``None`` keeps the current locale
    """
    import vtk
    logger.debug("vtk version " + str(vtk.VTK_BUILD_VERSION))
    if lc_all is not None:
        import locale
        locale.setlocale(locale.LC_ALL, lc_all)
    # import ipdb; ipdb.set_trace()
    writer = vtk.vtkPolyDataWriter()
    writer.SetFileName(outputfile)
    try:
        writer.SetInputData(self.polydata_volume)
    except AttributeError:
        # VTK < 6 has SetInput() instead of SetInputData(); a bare except
        # here would also have hidden unrelated errors (e.g. bad polydata)
        logger.warning("old vtk is used")
        writer.SetInput(self.polydata_volume)
    writer.Write()
def postprocessing(
        self,
        gaussian_blur=True,
        gaussian_filter_sigma_mm=0.01,
        add_noise=False,
        # gaussian_noise_stddev=10.0,
        # gaussian_noise_center=0.0,
        limit_negative_intensities=True,
        noise_rng_seed=0,
        noise_exponent=0.0,
        noise_lambda0=0.02,
        noise_lambda1=1.0,
        noise_std=40.0,
        noise_mean=30.0,
        # surface_measurement=False,
        # measurement_multiplier=-1,
        measurement_resolution=20,
        output_dtype="uint8",
        # NOTE(review): mutable list defaults are shared between calls;
        # harmless here because they are never mutated (or even read) in
        # this body, but worth confirming against callers
        intensity_profile_radius=[0.7, 1.0, 1.3],
        intensity_profile_intensity=[190, 200, 30],
        negative=False,
        background_intensity=20
):
    """Apply the image postprocessing chain to ``self.data3d`` in place.

    Steps (each optional): Gaussian blur, additive correlated noise,
    intensity negation (255 - x), and clipping to [0, 255]. The array is
    cast back to its original dtype before being returned.

    :param gaussian_blur: apply Gaussian smoothing
    :param gaussian_filter_sigma_mm: blur sigma in millimetres
    :param add_noise: add noise produced by :meth:`generate_noise`
    :param limit_negative_intensities: clip values to [0, 255]
    :param negative: invert intensities (assumes 8-bit value range)
    :return: the postprocessed ``self.data3d`` array
    """
    # negative is removed because we want it hide. The tab widget is used to control this
    # property
    dt = self.data3d.dtype  # remember dtype; blur/noise may promote to float
    logger.debug(f"len(unique(data3d)): {len(np.unique(self.data3d))}")
    logger.debug(f"describe(data3d) before gaussian: {scipy.stats.describe(self.data3d.flatten())}")
    if gaussian_blur:
        # sigma is given in mm; convert to (possibly per-axis) pixels
        sigma_px = gaussian_filter_sigma_mm / self.voxelsize_mm
        self.data3d = scipy.ndimage.filters.gaussian_filter(
            self.data3d,
            sigma=sigma_px)
    logger.debug(f"describe(data3d) before noise: {scipy.stats.describe(self.data3d.flatten())}")
    if add_noise:
        noise = self.generate_noise()
        # noise = noise.astype(self.data3d.dtype)
        # noise = np.random.normal(loc=gaussian_noise_center, scale=gaussian_noise_stddev, size=self.data3d.shape)
        self.data3d = self.data3d + noise
    logger.debug(f"negative: {negative}")
    if negative:
        self.data3d = 255 - self.data3d
        logger.debug(f"after negative describe(data3d): {scipy.stats.describe(self.data3d.flatten())}")
    if limit_negative_intensities:
        #self.data3d[self.data3d < 0] = 0
        limit_ndarray(self.data3d, minimum=0, maximum=255)
    self.data3d = self.data3d.astype(dt)
    # self.config["postprocessing"]["measurement_multiplier"] = measurement_multiplier
    # negative = self.config["postprocessing"]["negative"] = measurement_multiplier
    return self.data3d
def generate_noise(self):
    """Build a correlated-noise volume scaled to the configured std and mean.

    Noise parameters come from the "postprocessing" section of the config;
    the shape and spacing match the sampled area.
    """
    post_cfg = self.config["postprocessing"]
    area_cfg = self.config["areasampling"]
    raw_noise = ndnoise.noises(
        shape=area_cfg["areasize_px"],
        sample_spacing=area_cfg["voxelsize_mm"],
        exponent=post_cfg["noise_exponent"],
        random_generator_seed=post_cfg["noise_rng_seed"],
        lambda0=post_cfg["noise_lambda0"],
        lambda1=post_cfg["noise_lambda1"],
    )  # .astype(np.float16)
    # normalize to the requested standard deviation, then shift to the mean
    scaled = post_cfg["noise_std"] * raw_noise / np.std(raw_noise)
    return scaled + post_cfg["noise_mean"]
def _aposteriori_numeric_measurement(self, fn_base):
    """Measure surface and volume numerically on an up-sampled voxel rendering.

    When enabled in the config, the skeleton is re-voxelized at
    ``measurement_multiplier`` times the output resolution, a surface mesh
    is extracted and its area/volume are stored into the "overall"
    dataframe. Raw slice previews are written as JPEG files.

    :param fn_base: path prefix for the preview images
    :return: measured surface area, or 0 when measurement is disabled
    """
    # import numpy as np
    from .tb_volume import TBVolume
    measurement_multiplier = self.config[CKEY_OUTPUT]["aposteriori_measurement_multiplier"]
    surface_measurement = self.config[CKEY_OUTPUT]["aposteriori_measurement"]
    vxsz = self.config["areasampling"]["voxelsize_mm"]
    # np.float / np.int aliases were removed in NumPy >= 1.20; the
    # builtin types are the documented replacements
    vxsz = np.asarray(vxsz).astype(float) / measurement_multiplier
    shape = self.config["areasampling"]["areasize_px"]
    if measurement_multiplier > 0 and surface_measurement:
        shape = np.asarray(shape) * measurement_multiplier
        self._numeric_surface_measurement_shape = shape
        shape = shape.astype(int)
        tvgvol = TBVolume()
        tvgvol.voxelsize_mm = vxsz
        tvgvol.shape = shape
        tvgvol.tube_skeleton = self.gen.tree_data
        data3d = tvgvol.buildTree()
        import measurement
        surface, vertices, faces = measurement.surface_measurement(
            data3d, tvgvol.voxelsize_mm, return_vertices_and_faces=True)
        self._aposteriori_surface_vertices = vertices
        self._aposteriori_surface_faces = faces
        volume = np.sum(data3d > 0) * np.prod(vxsz)
        self.dataframes["overall"]["aposteriori numeric surface [mm^2]"] = [surface]
        self.dataframes["overall"]["aposteriori numeric volume [mm^3]"] = [volume]
        filename = fn_base + "_raw_{:06d}.jpg"
        import io3d.misc
        data = {
            'data3d': data3d.astype(np.uint8),  # * 70, # * self.output_intensity,
            'voxelsize_mm': vxsz,
            # 'segmentation': np.zeros_like(self.data3d, dtype=np.int8)
        }
        io3d.write(data, filename)
    else:
        surface = 0
    return surface
def get_stats(self):
    """ Return volume, surface, length and radius information.

    Uses the unconnected-cylinders generator purely as a measurement
    helper: each skeleton element that has two endpoints and a radius is
    added to its statistics. Works also when the skeleton was loaded
    from file rather than generated.

    :return: statistics produced by ``UnconnectedCylinderGenerator.get_stats``
    """
    from .generators import unconnected_cylinders as uncy
    gen = uncy.UnconnectedCylinderGenerator(
        areasize_px=self.config["areasampling"]["areasize_px"],
        voxelsize_mm=self.config["areasampling"]["voxelsize_mm"],
    )
    gen.init_stats()
    for id, element in self.tube_skeleton.items():
        if (
                "nodeA_ZYX_mm" in element.keys() and
                "nodeB_ZYX_mm" in element.keys() and
                "radius_mm" in element.keys()
        ):
            nodeA = element["nodeA_ZYX_mm"]
            nodeB = element["nodeB_ZYX_mm"]
            radius = element["radius_mm"]
            gen.add_cylinder_to_stats(nodeA, nodeB, radius)
        else:
            # elements without full geometry cannot be measured
            logger.warning("Missing key in element id (" +
                           str(id) + "). Two point and radius are required")
    return gen.get_stats()
def prepare_stats(self):
    """Fill ``self.dataframes`` with element, describe, density, overall and timing tables.

    Combines analytic per-element statistics with numeric surface/volume
    measurements (VTK mass properties) of the generated polydata.
    """
    to_rename = {
        "length": "length [mm]",
        "volume": "volume [mm^3]",
        "surface": "surface [mm^2]",
        "radius": "radius [mm]"
    }
    to_rename_density = {
        "length": "length d. [mm^-2]",
        "volume": "volume d. []",
        "surface": "surface d. [mm^-1]"
        # "radius": "radius [mm^-2]"
    }
    # this compute correct even in case we are using cylinders
    # df = self.gen.get_stats()
    # this works fine even if data are loaded from file
    df = self.get_stats()
    self.dataframes["elements"] = df
    dfdescribe = df.describe()
    dfdescribe.insert(0, "", dfdescribe.index)
    count = dfdescribe["length"][0]
    # drop the "count" row; DataFrame.ix was removed in pandas 1.0 --
    # use positional indexing instead
    dfdescribe = dfdescribe.iloc[1:]
    dfdescribe = dfdescribe.rename(columns=to_rename)
    self.dataframes["describe"] = dfdescribe
    # density: totals normalized by the sampled area volume
    dfmerne = df[["length", "volume", "surface"]].sum() / self.gen.area_volume
    dfmernef = dfmerne.to_frame().transpose().rename(columns=to_rename_density)
    self.dataframes["density"] = dfmernef
    # whole summary data
    dfoverall = df[["length", "volume", "surface"]].sum()
    dfoverallf = dfoverall.to_frame().transpose().rename(columns=to_rename)
    dfoverallf["area volume [mm^3]"] = [self.gen.area_volume]
    dfoverallf["count []"] = [count]
    dfoverallf["mean radius [mm]"] = df["radius"].mean()
    # numeric surface and volume measurement from the generated polydata
    import vtk
    mass = vtk.vtkMassProperties()
    # mass.SetInputData(object1Tri.GetOutput())
    mass.SetInputData(self.polydata_volume)
    vol = mass.GetVolume()
    if self.polydata_surface is None:
        surf = mass.GetSurfaceArea()
    else:
        # a dedicated surface polydata exists -- measure that one instead
        mass = vtk.vtkMassProperties()
        mass.SetInputData(self.polydata_surface)
        surf = mass.GetSurfaceArea()
    dfoverallf["numeric volume [mm^3]"] = [vol]
    dfoverallf["numeric surface [mm^2]"] = [surf]
    dfoverallf["numeric volume fraction []"] = [vol / self.gen.area_volume]
    dfoverallf["negative numeric volume fraction []"] = [1. - vol / self.gen.area_volume]
    dfoverallf["negative numeric volume [mm^3]"] = [self.gen.area_volume - vol]
    self.dataframes["overall"] = dfoverallf
    st = self.stats_times
    # logger.debug("st ", st)
    note_df = pd.DataFrame([st], columns=st.keys())
    self.dataframes["processing_info"] = note_df
def load_tube_skeleton(self, filename):
    """ Load tube skeleton and remember it.

    Also updates the "areasampling" part of the configuration from the
    area properties stored alongside the skeleton.

    :param filename: path to a YAML tube-skeleton file
    :return: None
    """
    # params = io3d.misc.obj_from_file(filename=filename)
    from . import tree
    tube_skeleton, rawdata = tree.read_tube_skeleton_from_yaml(filename, return_rawdata=True)
    area = tree.parse_area_properties(rawdata)
    self.config["areasampling"].update(area)
    self.set_tube_skeleton(tube_skeleton)
def set_tube_skeleton(self, tube_skeleton):
    """Store *tube_skeleton* as the current 1D tube model."""
    self.tube_skeleton = tube_skeleton
def get_tube_skeleton(self):
    """Return the currently stored 1D tube model."""
    return self.tube_skeleton
def load_config(self, filename):
    """ Load config from file.

    Resets to the default configuration first, then overlays the values
    read from *filename*.

    :param filename: path of a configuration file readable by io3d
    :return: None
    """
    params = io3d.misc.obj_from_file(filename=filename)
    self.use_default_config()
    self.update_config(**params)
def save_stats(self, fn_base):
    """Write every collected dataframe to CSV and a combined XLSX workbook.

    CSV files are always written; the Excel export is best-effort (a
    missing ``xlsxwriter`` engine must not abort the run).

    :param fn_base: path prefix for the output files
    """
    import pandas as pd
    # one CSV file per collected dataframe
    for dfname in self.dataframes:
        df = self.dataframes[dfname]
        df.to_csv(fn_base + "_" + dfname + ".csv")
    try:
        # the context manager closes (and thus saves) the workbook;
        # ExcelWriter.save() was deprecated and removed in pandas 2.0
        with pd.ExcelWriter(fn_base + "_output.xlsx", engine="xlsxwriter") as writer:
            for dfname in ["overall", "density"]:
                logger.debug("adding xls list " + dfname)
                df = self.dataframes[dfname]
                df.to_excel(writer, dfname)
    except Exception:
        # best-effort: log the failure but keep the already-written CSVs
        import traceback
        traceback.print_exc()
        s = traceback.format_exc()
        logger.warning(s)
def get_flatten_config(self):
    """ Put input configuration into one row.

    Nested keys are joined with a single space, e.g. ``{"a": {"b": 1}}``
    becomes ``{"a b": 1}``.

    :return: flat dict of the whole configuration
    """
    config = self.config
    config_fl = dili.flatten_dict(config, join=lambda a, b: a + ' ' + b)
    config_fl = dict(config_fl)
    return config_fl
def config_to_row_dataframe(self):
    """Return the flattened configuration as a single-row DataFrame."""
    flat_config = self.get_flatten_config()
    return pd.DataFrame([flat_config], columns=flat_config.keys())
def get_config_and_measurement(self):
    """Return a dict with measurement summary, processing info and the config.

    Runs :meth:`prepare_stats` first so the dataframes are up to date.
    Density values are merged into the "measurement" dict.
    """
    self.prepare_stats()
    dfo = self.dataframes["overall"].to_dict(orient="records")[0]
    dfd = self.dataframes["density"].to_dict(orient="records")[0]
    dfi = self.dataframes["processing_info"].to_dict(orient="records")[0]
    dfo.update(dfd)
    data_structured = {
        "measurement": dfo,
        "processing_info": dfi,
        "config": self.config
    }
    return data_structured
def get_flatten_config_and_measurement(self):
    """Return config + measurement as one flat dict with space-joined keys."""
    import imtools.dili
    return imtools.dili.flatten_dict_join_keys(self.get_config_and_measurement(), " ")
def save_stats_to_row(self, filename, note=""):
    """ Save stats to row

    Appends the flattened config+measurement as one row of a CSV
    spreadsheet, creating the file (and its directory) on first use.

    :param filename: path of the one-row-per-run CSV file
    :param note: unused, kept for backward compatibility
    :return: None
    """
    self.prepare_stats()
    filename = op.expanduser(filename)
    import pandas as pd
    config_fl = self.get_flatten_config_and_measurement()
    dfout = pd.DataFrame([config_fl], columns=config_fl.keys())
    if op.exists(filename):
        # append to the existing table; sort=False keeps column order
        dfin = pd.read_csv(filename)
        dfout = pd.concat([dfin, dfout], axis=0, sort=False)
    else:
        dirname = op.dirname(filename)
        # dirname is "" for a bare filename in the cwd; os.makedirs("")
        # would raise, so only create a directory when there is one
        if dirname and not op.exists(dirname):
            os.makedirs(dirname)
    dfout.to_csv(filename, index=False)
def _config2generator_general_export(self, config):
    """Extract the generator keyword arguments shared by all generator types."""
    sampling = config["areasampling"]
    post = config["postprocessing"]
    return {
        'voxelsize_mm': sampling["voxelsize_mm"],
        'areasize_px': sampling["areasize_px"],
        "intensity_profile_radius": post["intensity_profile_radius"],
        "intensity_profile_intensity": post["intensity_profile_intensity"],
        "tube_shape": config["measurement"]["tube_shape"]
    }
def _config2generator_tubes_export(self, config):
    """Generator kwargs for tube mode; forces non-negative rendering."""
    config["postprocessing"]["negative"] = False
    return self._config2generator_general_export(config)
def _config2generator_porosity_export(self, config):
    """Generator kwargs for porosity mode; forces negative rendering."""
    config["postprocessing"]["negative"] = True
    return self._config2generator_general_export(config)
def _config2generator_gensei_export(self, config):
    """Translate the area-sampling settings into gensei-style parameters."""
    sampling = config["areasampling"]
    # resolution is the reciprocal of the voxel size (per axis)
    resolution = 1.0 / np.asarray(sampling["voxelsize_mm"])
    return {
        'dims': sampling["areasize_mm"],
        'n_slice': sampling["areasize_px"][0],
        'resolution': [resolution[1], resolution[2]]
    }
# def save_volume_to_file(self, filename):
#
# import io3
# import io3d.misc
# import numpy as np
# data = {
# 'data3d': self.data3d.astype(np.uint8), #* self.output_intensity,
# 'voxelsize_mm': self.voxelsize_mm,
# # 'segmentation': np.zeros_like(self.data3d, dtype=np.int8)
# }
# io3d.write(data, filename)
def run_batch(self, config_list):
    """Run step1 + step2 for every config file in *config_list*.

    ``None`` entries are skipped. Each run starts from the default
    configuration overlaid with the parameters read from the file.

    :param config_list: iterable of config file paths (or None entries)
    """
    for filename in config_list:
        if filename is None:
            continue
        params = io3d.misc.obj_from_file(filename=filename)
        default_config = self.get_default_config()
        self.update_config(**default_config)
        self.update_config(**params)
        self.step1()
        self.step2()
class ConfigFileManager():
    """Locate and (de)serialize per-application config files under ``~/.config/<app>``."""

    def __init__(
            self,
            appname=None,
            config_dir_pattern="~/.config/",
            default_config_file="default_config.yaml",
            favorite_config_file="favorite_config.yaml",
            init_config_file="init_config.yaml",
            log_file="favorite.yaml"
    ):
        self.appname = appname
        self.config_dir = op.expanduser(op.join(config_dir_pattern, appname))

        def in_config_dir(basename):
            # helper: resolve a file name inside the app's config directory
            return op.join(self.config_dir, basename)

        self.default_config_file = in_config_dir(default_config_file)
        self.default_config = None
        self.favorite_config_file = in_config_dir(favorite_config_file)
        self.favorite_config = None
        self.init_config_file = in_config_dir(init_config_file)
        self.init_config = None
        self.logfile = in_config_dir(log_file)

    def init_config_dir(self):
        """Create the config directory when it does not exist yet."""
        if not op.exists(self.config_dir):
            import os
            os.makedirs(self.config_dir)

    def save_default(self, config):
        """Persist *config* as the default configuration."""
        io3d.misc.obj_to_file(config, self.default_config_file)

    def load_default(self):
        """Read and return the default configuration."""
        return io3d.misc.obj_from_file(self.default_config_file)

    def save_favorite(self, config):
        """Persist *config* as the favorite configuration."""
        io3d.misc.obj_to_file(config, self.favorite_config_file)

    def load_favorite(self):
        """Read and return the favorite configuration."""
        return io3d.misc.obj_from_file(self.favorite_config_file)

    def save_init(self, config):
        """Persist *config* as the init (startup) configuration."""
        io3d.misc.obj_to_file(config, self.init_config_file)

    def load_init(self):
        """Read and return the init (startup) configuration."""
        return io3d.misc.obj_from_file(self.init_config_file)
# @click.command()
# @begin.start
def new_main(
        parameterfile=None,
        debug=True,
        d=True,
        nointeractivity=False,
        logfile="~/teigen.log",
):
    """ Run test image generator.

    Entry point without argparse: either runs the whole pipeline headless
    (``nointeractivity``) or starts the Qt GUI.

    :param parameterfile: optional config file; defaults to the per-user init config
    :param debug: enable debug-level console logging
    :param d: short alias for ``debug``
    :param nointeractivity: run step1/step2 without showing the GUI
    :param logfile: path of the log file
    :return: None
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    logger.addHandler(ch)
    config_file_manager = ConfigFileManager("teigen")
    config_file_manager.init_config_dir()
    if parameterfile is None:
        parameterfile = config_file_manager.init_config_file
    if debug or d:
        ch.setLevel(logging.DEBUG)
    # default param file: fall back to built-in defaults when missing
    if not op.exists(op.expanduser(parameterfile)):
        parameterfile = None
    if nointeractivity:
        tg = Teigen(logfile=logfile)
        if parameterfile is not None:
            params = io3d.misc.obj_from_file(parameterfile)
            tg.update_config(**params)
        tg.step1()
        # tg.run(**params)
        tg.step2()
    else:
        from PyQt5.QtWidgets import QApplication
        from .gui import TeigenWidget
        app = QApplication(sys.argv)
        params = None
        if parameterfile is not None:
            params = io3d.misc.obj_from_file(parameterfile)
        cw = TeigenWidget(logfile=logfile, config=params)
        cw.show()
        app.exec_()
def limit_ndarray(data, minimum, maximum):
    """Clip *data* in place to the interval [minimum, maximum] and return it."""
    np.clip(data, minimum, maximum, out=data)
    return data
def main():
    """Command-line entry point: parse arguments, then run headless or start the GUI."""
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    logger.addHandler(ch)
    config_file_manager = ConfigFileManager("teigen")
    config_file_manager.init_config_dir()
    # create file handler which logs even debug messages
    # fh = logging.FileHandler('log.txt')
    # fh.setLevel(logging.DEBUG)
    # formatter = logging.Formatter(
    #     '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # fh.setFormatter(formatter)
    # logger.addHandler(fh)
    # logger.debug('start')
    # input parser
    parser = argparse.ArgumentParser(
        description=__doc__
    )
    parser.add_argument(
        '-p', '--parameterfile',
        default=config_file_manager.init_config_file,
        # required=True,
        help='input parameter file'
    )
    parser.add_argument(
        '-d', '--debug', action='store_true',
        help='Debug mode')
    parser.add_argument(
        '-ni', '--nointeractivity', action='store_true',
        help='No interactivity mode')
    parser.add_argument(
        '-l', '--logfile',
        default="~/teigen.log",
        help='Debug mode')
    args = parser.parse_args()
    if args.debug:
        ch.setLevel(logging.DEBUG)
    # default param file: fall back to built-in defaults when missing
    if not op.exists(op.expanduser(args.parameterfile)):
        args.parameterfile = None
    if args.nointeractivity:
        # headless run: load params, then execute both pipeline stages
        tg = Teigen(logfile=args.logfile)
        if args.parameterfile is not None:
            params = io3d.misc.obj_from_file(args.parameterfile)
            tg.update_config(**params)
        tg.step1()
        # tg.run(**params)
        tg.step2()
    else:
        from PyQt5.QtWidgets import QApplication
        from .gui import TeigenWidget
        app = QApplication(sys.argv)
        params = None
        if args.parameterfile is not None:
            try:
                params = io3d.misc.obj_from_file(args.parameterfile)
            except Exception as e:
                # a broken parameter file should not prevent GUI startup
                import traceback
                logger.warning(f"Problem with reading: {args.parameterfile}")
                logger.warning(traceback.format_exc())
        cw = TeigenWidget(logfile=args.logfile, config=params)
        cw.show()
        app.exec_()
| apache-2.0 |
madmouser1/aubio | python/demos/demo_waveform_plot.py | 10 | 2099 | #! /usr/bin/env python
import sys
from aubio import pvoc, source
from numpy import zeros, hstack
def get_waveform_plot(filename, samplerate = 0, block_size = 4096, ax = None, downsample = 2**4):
    """Plot the waveform envelope of an audio file onto *ax* and return the axis.

    The file is read hop by hop; each hop of ``block_size`` samples is
    reduced to ``downsample`` max-amplitude points, so long files stay
    cheap to draw. A new figure/axis is created when *ax* is not given.
    """
    import matplotlib.pyplot as plt
    if not ax:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    hop_s = block_size
    allsamples_max = zeros(0,)
    a = source(filename, samplerate, hop_s) # source file
    if samplerate == 0: samplerate = a.samplerate
    total_frames = 0
    while True:
        samples, read = a()
        # keep some data to plot it later; floor division keeps the
        # reshape argument an integer (plain '/' is a float on Python 3)
        new_maxes = (abs(samples.reshape(hop_s // downsample, downsample))).max(axis=0)
        allsamples_max = hstack([allsamples_max, new_maxes])
        total_frames += read
        if read < hop_s: break
    allsamples_max = (allsamples_max > 0) * allsamples_max
    # each envelope point covers hop_s/downsample samples
    allsamples_max_times = [ ( float (t) / downsample ) * hop_s for t in range(len(allsamples_max)) ]
    ax.plot(allsamples_max_times, allsamples_max, '-b')
    ax.plot(allsamples_max_times, -allsamples_max, '-b')
    ax.axis(xmin = allsamples_max_times[0], xmax = allsamples_max_times[-1])
    set_xlabels_sample2time(ax, allsamples_max_times[-1], samplerate)
    return ax
def set_xlabels_sample2time(ax, latest_sample, samplerate):
    """Relabel the x axis of *ax* from sample index to time.

    Signals longer than one minute get mm:ss labels, shorter ones get
    seconds with a fractional part.
    """
    ax.axis(xmin = 0, xmax = latest_sample)
    if latest_sample / float(samplerate) > 60:
        ax.set_xlabel('time (mm:ss)')
        ax.set_xticklabels([ "%02d:%02d" % (t/float(samplerate)/60, (t/float(samplerate))%60) for t in ax.get_xticks()[:-1]], rotation = 50)
    else:
        # NOTE(review): the label says 'ss.mm' but the fractional part is
        # hundredths of a second -- presumably intended; confirm
        ax.set_xlabel('time (ss.mm)')
        ax.set_xticklabels([ "%02d.%02d" % (t/float(samplerate), 100*((t/float(samplerate))%1) ) for t in ax.get_xticks()[:-1]], rotation = 50)
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    if len(sys.argv) < 2:
        # parenthesized call works under both Python 2 and Python 3;
        # the bare print statement is a syntax error on Python 3
        print("Usage: %s <filename>" % sys.argv[0])
    else:
        # plot every file given on the command line onto the same figure
        for soundfile in sys.argv[1:]:
            get_waveform_plot(soundfile)
        # display graph
        plt.show()
| gpl-3.0 |
elingg/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 18 | 3978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
    """Create a dataframe populated with some test columns.

    Columns "a" and "c" are bound to output "out1" of a mock two-output
    transform, column "b" to output "out2".
    """
    df = learn.DataFrame()
    df["a"] = learn.TransformedSeries(
        [mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
        mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
    df["b"] = learn.TransformedSeries(
        [mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
        mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
    df["c"] = learn.TransformedSeries(
        [mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
        mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
    return df
class DataFrameTest(test.TestCase):
  """Test of `DataFrame`."""

  def test_create(self):
    """A freshly built test df exposes exactly columns a, b, c."""
    df = setup_test_df()
    self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))

  def test_select_columns(self):
    """select_columns keeps only the requested columns."""
    df = setup_test_df()
    df2 = df.select_columns(["a", "c"])
    self.assertEqual(df2.columns(), frozenset(["a", "c"]))

  def test_exclude_columns(self):
    """exclude_columns drops the listed columns."""
    df = setup_test_df()
    df2 = df.exclude_columns(["a", "c"])
    self.assertEqual(df2.columns(), frozenset(["b"]))

  def test_get_item(self):
    """Indexing by name returns the column, buildable to its mock tensor."""
    df = setup_test_df()
    c1 = df["b"]
    self.assertEqual(
        mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())

  def test_del_item_column(self):
    """del removes a column and shrinks the frame."""
    df = setup_test_df()
    self.assertEqual(3, len(df))
    del df["b"]
    self.assertEqual(2, len(df))
    self.assertEqual(df.columns(), frozenset(["a", "c"]))

  def test_set_item_column(self):
    """Assigning a series adds a retrievable column."""
    df = setup_test_df()
    self.assertEqual(3, len(df))
    col1 = mocks.MockSeries("QuackColumn",
                            mocks.MockTensor("Tensor ", dtypes.int32))
    df["quack"] = col1
    self.assertEqual(4, len(df))
    col2 = df["quack"]
    self.assertEqual(col1, col2)

  def test_set_item_column_multi(self):
    """Multiple columns can be assigned in one statement."""
    df = setup_test_df()
    self.assertEqual(3, len(df))
    col1 = mocks.MockSeries("QuackColumn", [])
    col2 = mocks.MockSeries("MooColumn", [])
    df["quack", "moo"] = [col1, col2]
    self.assertEqual(5, len(df))
    col3 = df["quack"]
    self.assertEqual(col1, col3)
    col4 = df["moo"]
    self.assertEqual(col2, col4)

  def test_set_item_pandas(self):
    # TODO(jamieas)
    pass

  def test_set_item_numpy(self):
    # TODO(jamieas)
    pass

  def test_build(self):
    """build() returns a dict mapping column names to their mock tensors."""
    df = setup_test_df()
    result = df.build()
    expected = {
        "a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
        "b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
        "c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
    }
    self.assertEqual(expected, result)
# Run the tests with TensorFlow's test runner when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
nansencenter/nansat | nansat/tests/test_nansat.py | 1 | 34081 | # ------------------------------------------------------------------------------
# Name: test_nansat.py
# Purpose: Test the Nansat class
#
# Author: Morten Wergeland Hansen, Asuka Yamakawa, Anton Korosov
#
# Created: 18.06.2014
# Last modified:24.08.2017 14:00
# Copyright: (c) NERSC
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------------------------
from __future__ import unicode_literals, absolute_import
import os
import logging
import unittest
import warnings
import datetime
from mock import patch, PropertyMock, Mock, MagicMock, DEFAULT
import numpy as np
try:
if 'DISPLAY' not in os.environ:
import matplotlib; matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
MATPLOTLIB_IS_INSTALLED = False
else:
MATPLOTLIB_IS_INSTALLED = True
from nansat import Nansat, Domain, NSR
from nansat.utils import gdal
import nansat.nansat
from nansat.exceptions import NansatGDALError, WrongMapperError, NansatReadError
from nansat.tests.nansat_test_base import NansatTestBase
warnings.simplefilter("always", UserWarning)
class NansatTest(NansatTestBase):
def test_open_gcps(self):
    """Opening a GCP-georeferenced file exposes GCP projection, 3 bands and path info."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    self.assertEqual(type(n), Nansat)
    self.assertEqual(n.vrt.dataset.GetProjection(), '')
    self.assertTrue((n.vrt.dataset.GetGCPProjection().startswith('GEOGCS["WGS 84",')))
    self.assertEqual(n.vrt.dataset.RasterCount, 3)
    self.assertEqual(n.filename, self.test_file_gcps)
    self.assertIsInstance(n.logger, logging.Logger)
    self.assertEqual(n.name, os.path.split(self.test_file_gcps)[1])
    self.assertEqual(n.path, os.path.split(self.test_file_gcps)[0])
def test_that_only_mappers_with_mapper_in_the_module_name_are_imported(self):
    """Every discovered mapper module name must contain the substring 'mapper'."""
    mappers = nansat.nansat._import_mappers()
    for mapper in mappers:
        self.assertTrue('mapper' in mapper)
def test_get_time_coverage_start_end(self):
    """time_coverage_start/end metadata are parsed into datetime objects."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    n.set_metadata('time_coverage_start', '2016-01-20')
    n.set_metadata('time_coverage_end', '2016-01-21')
    self.assertEqual(type(n.time_coverage_start), datetime.datetime)
    self.assertEqual(type(n.time_coverage_end), datetime.datetime)
def test_from_domain_array(self):
    """from_domain with an array creates a one-band Nansat with empty path info."""
    d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
    n = Nansat.from_domain(d, np.random.randn(500, 500), {'name': 'band1'})
    self.assertEqual(type(n), Nansat)
    self.assertEqual(type(n[1]), np.ndarray)
    self.assertEqual(n.get_metadata('name', 1), 'band1')
    self.assertEqual(n[1].shape, (500, 500))
    self.assertEqual(n.filename, '')
    self.assertIsInstance(n.logger, logging.Logger)
    self.assertEqual(n.name, '')
    self.assertEqual(n.path, '')
def test_from_domain_nansat(self):
    """An existing Nansat object can serve as the domain for from_domain."""
    n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    n2 = Nansat.from_domain(n1, n1[1])
    self.assertEqual(type(n2), Nansat)
    self.assertEqual(len(n2.bands()), 1)
    self.assertEqual(type(n2[1]), np.ndarray)
def test_add_band(self):
    """add_band attaches an array as a named, retrievable band."""
    d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
    arr = np.random.randn(500, 500)
    n = Nansat.from_domain(d, log_level=40)
    n.add_band(arr, {'name': 'band1'})
    self.assertEqual(type(n), Nansat)
    self.assertEqual(type(n[1]), np.ndarray)
    self.assertEqual(n.get_metadata('name', 1), 'band1')
    self.assertEqual(n[1].shape, (500, 500))
def test_add_band_twice(self):
    """Calling add_band twice yields two independent, correctly named bands."""
    d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
    arr = np.random.randn(500, 500)
    n = Nansat.from_domain(d, log_level=40)
    n.add_band(arr, {'name': 'band1'})
    n.add_band(arr, {'name': 'band2'})
    self.assertEqual(type(n), Nansat)
    self.assertEqual(type(n[1]), np.ndarray)
    self.assertEqual(type(n[2]), np.ndarray)
    self.assertEqual(n.get_metadata('name', 1), 'band1')
    self.assertEqual(n.get_metadata('name', 2), 'band2')
    self.assertEqual(n[1].shape, (500, 500))
    self.assertEqual(n[2].shape, (500, 500))
def test_add_bands(self):
    """add_bands adds several arrays at once with the given metadata."""
    d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
    arr = np.random.randn(500, 500)
    n = Nansat.from_domain(d, log_level=40)
    n.add_bands([arr, arr],
                [{'name': 'band1'}, {'name': 'band2'}])
    self.assertIsInstance(n, Nansat)
    self.assertEqual(n.vrt.vrt.vrt, None)
    self.assertIsInstance(n[1], np.ndarray)
    self.assertIsInstance(n[2], np.ndarray)
    self.assertEqual(n.get_metadata('name', 1), 'band1')
    self.assertEqual(n.get_metadata('name', 2), 'band2')
def test_add_bands_no_parameter(self):
    """add_bands works without explicit band metadata."""
    d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
    arr = np.random.randn(500, 500)
    n = Nansat.from_domain(d, log_level=40)
    n.add_bands([arr, arr])
    self.assertEqual(type(n), Nansat)
    self.assertEqual(type(n[1]), np.ndarray)
    self.assertEqual(type(n[2]), np.ndarray)
def test_add_subvrts_only_to_one_nansat(self):
    """Band VRTs added to one Nansat object must not leak into another."""
    d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
    arr = np.random.randn(500, 500)
    n1 = Nansat.from_domain(d, log_level=40)
    n2 = Nansat.from_domain(d, log_level=40)
    n1.add_band(arr, {'name': 'band1'})
    self.assertEqual(type(n1.vrt.band_vrts), dict)
    self.assertTrue(len(n1.vrt.band_vrts) > 0)
    self.assertEqual(n2.vrt.band_vrts, {})
def test_bands(self):
    """bands() returns a dict keyed by band number with 'name' metadata."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    bands = n.bands()
    self.assertEqual(type(bands), dict)
    self.assertTrue(1 in bands)
    self.assertTrue('name' in bands[1])
    self.assertEqual(bands[1]['name'], 'L_645')
def test_has_band_if_name_matches(self):
    """has_band matches on the band 'name' metadata."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    hb = n.has_band('L_645')
    self.assertTrue(hb)
def test_has_band_if_standard_name_matches(self):
    """has_band also matches on the CF standard_name metadata."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    hb = n.has_band('surface_upwelling_spectral_radiance_in_air_emerging_from_sea_water')
    self.assertTrue(hb)
def test_write_fig_tif(self):
    """write_figure to .tif keeps the basic georeference when reopened."""
    n = Nansat(self.test_file_arctic, mapper=self.default_mapper)
    tmpfilename = os.path.join(self.tmp_data_path,
                               'nansat_write_fig_tif.tif')
    n.write_figure(tmpfilename)
    nn = Nansat(tmpfilename, mapper=self.default_mapper)
    # Asserts that the basic georeference (corners in this case) is still
    # present after opening the image
    self.assertTrue(np.allclose(n.get_corners(), nn.get_corners()))
def test_resize_by_pixelsize(self):
    """resize accepts a target pixel size."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    n.resize(pixelsize=500, resample_alg=1)
    self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_factor(self):
    """resize accepts a scaling factor."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    n.resize(0.5, resample_alg=1)
    self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_width(self):
    """resize accepts a target width."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    n.resize(width=100, resample_alg=1)
    self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_height(self):
    """resize accepts a target height."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    n.resize(height=500, resample_alg=1)
    self.assertEqual(type(n[1]), np.ndarray)
def test_resize_resize(self):
    """Consecutive downsize/upsize operations still yield a readable band."""
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    n.resize(0.1)
    n.resize(10)
    tmpfilename = os.path.join(self.tmp_data_path,
                               'nansat_resize_resize.png')
    n.write_figure(tmpfilename, 2, clim='hist')
    self.assertEqual(type(n[1]), np.ndarray)
def test_resize_complex_alg_average(self):
    """Averaging resample of complex data emits a UserWarning about lost imaginary parts."""
    n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
    with warnings.catch_warnings(record=True) as w:
        n.resize(0.5, resample_alg=-1)
        self.assertEqual(len(w), 1)
        self.assertTrue(issubclass(w[-1].category, UserWarning))
        self.assertIn('The imaginary parts of complex numbers '
                      'are lost when resampling by averaging ', str(w[-1].message))
def test_resize_complex_alg0(self):
    """Resample algorithm 0 preserves imaginary parts of complex data."""
    n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
    n.resize(0.5, resample_alg=0)
    self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg1(self):
    """Resample algorithm 1 preserves imaginary parts of complex data."""
    n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
    n.resize(0.5, resample_alg=1)
    self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg2(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=2)
self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg3(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=3)
self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg4(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=4)
self.assertTrue(np.any(n[1].imag != 0))
def test_get_GDALRasterBand(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
b = n.get_GDALRasterBand(1)
arr = b.ReadAsArray()
self.assertEqual(type(b), gdal.Band)
self.assertEqual(type(arr), np.ndarray)
def test_get_GDALRasterBand_if_band_id_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
b = n.get_GDALRasterBand(band_id=1)
arr = b.ReadAsArray()
self.assertEqual(type(b), gdal.Band)
self.assertEqual(type(arr), np.ndarray)
def test_list_bands_true(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
lb = n.list_bands(True)
self.assertEqual(lb, None)
def test_list_bands_false(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
lb = n.list_bands(False)
self.assertEqual(type(lb), str)
def test_reproject_domain(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_domain_if_dst_domain_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(dst_domain=d)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_domain_if_resample_alg_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d, resample_alg=0)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
@patch.object(Nansat, 'get_corners',
return_value=(np.array([0, 0, 360, 360]), np.array([90,-90, 90, -90])))
def test_reproject_domain_if_source_and_destination_domain_span_entire_lons(self, mock_Nansat):
n = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te -180 180 60 90 -ts 500 500")
n.reproject(d)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain_span_entire_lons.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_domain_if_tps_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d, tps=False)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d, tps=True)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_of_complex(self):
""" Should return np.nan in areas out of swath """
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
d = Domain(4326, '-te -92.08 26.85 -92.00 26.91 -ts 200 200')
n.reproject(d)
b = n[1]
self.assertTrue(n.has_band('swathmask'))
self.assertTrue(np.isnan(b[0, 0]))
self.assertTrue(np.isfinite(b[100, 100]))
def test_add_band_and_reproject(self):
""" Should add band and swath mask and return np.nan in areas out of swath """
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.add_band(np.ones(n.shape(), np.uint8))
n.reproject(d)
b4 = n[4] # added, reprojected band
b5 = n[5] # swathmask
self.assertTrue(n.has_band('swathmask')) # the added band
self.assertTrue(n.has_band('swathmask_0000')) # the actual swathmask
self.assertTrue(b4[0, 0]==0)
self.assertTrue(b4[300, 300] == 1)
self.assertTrue(b5[0, 0]==0)
self.assertTrue(b5[300, 300] == 1)
def test_reproject_no_addmask(self):
""" Should not add swath mask and return 0 in areas out of swath """
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, '-te -92.08 26.85 -92.00 26.91 -ts 200 200')
n.reproject(d, addmask=False)
b = n[1]
self.assertTrue(not n.has_band('swathmask'))
self.assertTrue(np.isfinite(b[0, 0]))
self.assertTrue(np.isfinite(b[100, 100]))
def test_reproject_stere(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_stere.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps_on_repro_gcps(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2.reproject_gcps()
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps_on_repro_gcps.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps_resize(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
n1.resize(2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps_resize.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape()[0], n2.shape()[0] * 2)
self.assertEqual(n1.shape()[1], n2.shape()[1] * 2)
self.assertEqual(type(n1[1]), np.ndarray)
def test_undo(self):
    """Resizing and then undoing must restore the original raster shape."""
    nansat_obj = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
    original_shape = nansat_obj.shape()
    nansat_obj.resize(10)
    nansat_obj.undo()
    self.assertEqual(original_shape, nansat_obj.shape())
def test_write_figure(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure.png')
n1.write_figure(tmpfilename)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_band(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_band.png')
n1.write_figure(tmpfilename, 2)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_clim(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_clim.png')
n1.write_figure(tmpfilename, 3, clim='hist')
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_legend(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_legend.png')
n1.write_figure(tmpfilename, 3, clim='hist', legend=True, titleString="Title String")
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_logo(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_logo.png')
n1.write_figure(tmpfilename, 3, clim='hist',
logoFileName=self.test_file_gcps)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_geotiffimage(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_geotiffimage.tif')
n1.write_geotiffimage(tmpfilename)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_geotiffimage_if_band_id_is_given(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_geotiffimage.tif')
n1.write_geotiffimage(tmpfilename, band_id=1)
self.assertTrue(os.path.exists(tmpfilename))
def test_get_metadata(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata()
self.assertEqual(type(m), dict)
self.assertTrue('filename' in m)
def test_get_metadata_key(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata('filename')
self.assertEqual(type(m), str)
def test_get_metadata_wrong_key(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
with self.assertRaises(ValueError):
n1.get_metadata('some_crap')
def test_get_metadata_band_id(self):
    """get_metadata(band_id=1) should return the metadata dict of band 1.

    NOTE(review): this method was defined twice with byte-identical bodies;
    the second definition silently shadowed the first, so only one copy is
    kept here.
    """
    n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
    m = n1.get_metadata(band_id=1)
    self.assertEqual(type(m), dict)
    self.assertTrue('name' in m)
def test_set_metadata(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.set_metadata('newKey', 'newVal')
m = n1.get_metadata('newKey')
self.assertEqual(m, 'newVal')
def test_set_metadata_band_id(self):
    """set_metadata(..., band_id=1) should store metadata on band 1.

    NOTE(review): this method was defined twice with byte-identical bodies;
    the second definition silently shadowed the first, so only one copy is
    kept here.
    """
    n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
    n1.set_metadata('newKey', 'newVal', band_id=1)
    m = n1.get_metadata('newKey', 1)
    self.assertEqual(m, 'newVal')
def test_get_band_number(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
self.assertEqual(n1.get_band_number(1), 1)
@unittest.skipUnless(MATPLOTLIB_IS_INSTALLED, 'Matplotlib is required')
def test_get_transect(self):
plt.switch_backend('agg')
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[28.31299128, 28.93691525],
[70.93709219, 70.69646524]],
[str('L_645')])
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_get_transect.png')
plt.plot(t['lat'], t['L_645'], '.-')
plt.savefig(tmpfilename)
plt.close('all')
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
def test_get_transect_outside(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[0, 28.31299128], [0, 70.93709219]], [1])
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
def test_get_transect_wrong_points(self):
    """A malformed points argument must raise ValueError."""
    nansat_obj = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    with self.assertRaises(ValueError):
        nansat_obj.get_transect([1, 1], [1])
def test_get_transect_wrong_band(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[0, 28.31299128], [0, 70.93709219]], [10])
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
def test_get_transect_pixlin(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[10, 20],
[10, 10]],
[str('L_645')],
lonlat=False)
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
self.assertEqual(len(t['lon']), 11)
def test_get_transect_data(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
b1 = n1[1]
t = n1.get_transect([[28.3], [70.9]], [], data=b1)
self.assertTrue('input' in t.dtype.fields)
self.assertTrue('L_645' not in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
@patch('nansat.nansat.PointBrowser')
def test_digitize_points(self, mock_PointBrowser):
    """ shall create PointBrowser and call PointBrowser.get_points() """
    value = 'points'
    mock_PointBrowser().get_points.return_value = value
    n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    points = n.digitize_points(1)
    # Bug fix: the original asserted ``mock_PointBrowser.called_once()``.
    # ``called_once`` is not a Mock API -- accessing it auto-creates a child
    # Mock, and calling that returns a truthy Mock, so the assertion could
    # never fail.  ``Mock.called`` is the real "was it called" attribute.
    self.assertTrue(mock_PointBrowser.called)
    self.assertEqual(points, value)
def test_crop(self):
    """crop() should clip the raster in place and return the applied extent."""
    # Each case: extent passed to crop() and the raster shape it must produce.
    for extent, expected_shape in (((10, 20, 50, 60), (60, 50)),
                                   ((0, 0, 200, 200), (200, 200))):
        nansat_obj = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
        returned_extent = nansat_obj.crop(*extent)
        self.assertEqual(nansat_obj.shape(), expected_shape)
        self.assertEqual(returned_extent, extent)
        self.assertEqual(type(nansat_obj[1]), np.ndarray)
def test_crop_gcpproj(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject_gcps()
ext = n1.crop(10, 20, 50, 60)
xmed = abs(np.median(np.array([gcp.GCPX
for gcp in n1.vrt.dataset.GetGCPs()])))
gcpproj = NSR(n1.vrt.dataset.GetGCPProjection()
).ExportToProj4().split(' ')[0]
self.assertTrue(xmed > 360)
self.assertTrue(gcpproj=='+proj=stere')
def test_crop_complex(self):
n1 = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
ext = n1.crop(10, 20, 50, 60)
self.assertEqual(n1.shape(), (60, 50))
self.assertEqual(ext, (10, 20, 50, 60))
self.assertEqual(type(n1[1]), np.ndarray)
def test_crop_no_gcps_arctic(self):
n1 = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
ext = n1.crop(10, 20, 50, 60)
self.assertEqual(n1.shape(), (60, 50))
self.assertEqual(ext, (10, 20, 50, 60))
self.assertEqual(type(n1[1]), np.ndarray)
def test_crop_lonlat(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
ext = n1.crop_lonlat([28, 29], [70.5, 71])
self.assertEqual(n1.shape(), (111, 110))
self.assertEqual(ext, (31, 89, 110, 111))
self.assertEqual(type(n1[1]), np.ndarray)
def test_crop_outside(self):
    """crop_lonlat() with coordinates outside the raster must raise ValueError."""
    nansat_obj = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    with self.assertRaises(ValueError):
        nansat_obj.crop_lonlat([-10, 10], [-10, 10])
def test_watermask(self):
    """ if watermask data exists: should fetch array with watermask
    else: should raise an error """
    n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
    # The MOD44W water-mask product is an optional external dataset located
    # through the MOD44WPATH environment variable; when it is absent the
    # assertions below are skipped silently (the error path is exercised by
    # the two test methods that follow).
    mod44path = os.getenv('MOD44WPATH')
    if mod44path is not None and os.path.exists(mod44path + '/MOD44W.vrt'):
        # Band 1 of the Nansat object returned by watermask().
        wm = n1.watermask()[1]
        self.assertEqual(type(wm), np.ndarray)
        # The water mask must be co-registered with the source raster.
        self.assertEqual(wm.shape[0], n1.shape()[0])
        self.assertEqual(wm.shape[1], n1.shape()[1])
def test_watermask_fail_if_mod44path_is_wrong(self):
""" Nansat.watermask should raise an IOError"""
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
os.environ['MOD44WPATH'] = '/fakepath'
self.assertRaises(IOError, n1.watermask)
def test_watermask_fail_if_mod44path_not_exist(self):
""" Nansat.watermask should raise an IOError"""
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
del os.environ['MOD44WPATH']
self.assertRaises(IOError, n1.watermask)
def test_init_no_arguments(self):
    """ No arguments should raise ValueError """
    with self.assertRaises(ValueError):
        Nansat()
def test_get_item_basic_expressions(self):
""" Testing get_item with some basic expressions """
self.mock_pti['get_wkv_variable'].return_value=dict(short_name='newband')
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, np.zeros((500, 500)), {'expression': 'np.ones((500, 500))'})
self.assertIsInstance(n[1], np.ndarray)
self.assertEqual(n[1].shape, (500, 500))
band1 = n[1]
self.assertTrue(np.allclose(band1, np.ones((500, 500))))
def test_get_item_inf_expressions(self):
""" inf should be replaced with nan """
self.mock_pti['get_wkv_variable'].return_value=dict(short_name='newband')
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, log_level=40)
arr = np.empty((500, 500))
n.add_band(arr, {'expression': 'np.array([0,1,2,3,np.inf,5,6,7])'})
self.assertIsInstance(n[1], np.ndarray)
self.assertTrue(np.isnan(n[1][4]))
def test_repr_basic(self):
""" repr should include some basic elements """
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, log_level=40)
arr = np.empty((500, 500))
exp = 'np.array([0,1,2,3,np.inf,5,6,7])'
n.add_band(arr, {'expression': exp})
n_repr = repr(n)
self.assertIn(exp, n_repr, 'The expressions should be in repr')
self.assertIn('SourceFilename', n_repr)
self.assertIn('/vsimem/', n_repr)
self.assertIn('500 x 500', n_repr)
self.assertIn('Projection(dataset):', n_repr)
self.assertIn('25', n_repr)
self.assertIn('72', n_repr)
self.assertIn('35', n_repr)
self.assertIn('70', n_repr)
@patch.object(Nansat, 'get_GDALRasterBand')
def test_getitem(self, mock_Nansat):
type(mock_Nansat()).GetMetadata = MagicMock(return_value={'a':1})
type(mock_Nansat()).ReadAsArray = MagicMock(return_value=None)
with self.assertRaises(NansatGDALError):
Nansat(self.test_file_stere, mapper=self.default_mapper).__getitem__(1)
@patch.object(Nansat, 'digitize_points')
def test_crop_interactive(self, mock_digitize_points):
mock_digitize_points.return_value=[np.array([[10, 20], [10, 30]])]
n = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
n.crop_interactive()
self.assertEqual(n.shape(), (20, 10))
def test_extend(self):
n = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
nshape1 = n.shape()
n.extend(left=10, right=20, top=30, bottom=40)
be = n[1]
self.assertEqual(n.shape(), (nshape1[0]+70, nshape1[1]+30))
self.assertIsInstance(be, np.ndarray)
def test_open_no_mapper(self):
n = Nansat(self.test_file_arctic)
self.assertEqual(type(n), Nansat)
self.assertEqual(n.mapper, 'netcdf_cf')
@patch.multiple(Nansat, vrt=DEFAULT, __init__ = Mock(return_value=None))
def test_get_metadata_unescape(self, vrt):
    """get_metadata() should unescape HTML entities unless unescape=False.

    NOTE(review): the escaped literals in ``meta0`` were mangled by an HTML
    round-trip in the source (the line was not valid Python); they are
    restored here so that unescaping yields exactly the values asserted on
    ``meta1`` below.
    """
    meta0 = {'key1': '&quot; AAA &quot; &amp; &gt; &lt;', 'key2': '&#39;BBB&#39;'}
    n = Nansat()
    vrt.dataset.GetMetadata.return_value = meta0
    meta1 = n.get_metadata()
    meta2 = n.get_metadata(unescape=False)
    self.assertEqual(meta1, {'key1': '" AAA " & > <', 'key2': "'BBB'"})
    self.assertEqual(meta2, meta0)
def test_reproject_pure_geolocation(self):
n0 = Nansat(self.test_file_gcps)
b0 = n0[1]
lon0, lat0 = n0.get_geolocation_grids()
d1 = Domain.from_lonlat(lon=lon0, lat=lat0)
d2 = Domain.from_lonlat(lon=lon0, lat=lat0, add_gcps=False)
d3 = Domain(NSR().wkt, '-te 27 70 31 72 -ts 500 500')
n1 = Nansat.from_domain(d1, b0)
n2 = Nansat.from_domain(d2, b0)
n1.reproject(d3)
n2.reproject(d3)
b1 = n1[1]
b2 = n2[1]
self.assertTrue(np.allclose(b1,b2))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
dhyeon/ingredient2vec | src/Ingredient2Vec.py | 1 | 6814 | # import libraries
import gensim
import random
import pandas as pd
import numpy as np
from itertools import combinations
# import implemented python files
import Config
from utils import DataLoader, GensimModels, DataPlotter
class Ingredient2Vec:
def __init__(self):
print "\n\n...Ingredient2Vec initialized"
def num_combinations(self, n, k):
return math.factorial(n) / (math.factorial(k) * math.factorial(n-k))
def build_taggedDocument_cc(self, ingredients, compounds, relations, filtering=5, random_sampling=False, num_sampling=0):
compound_length_list = []
print '\n\n...Building Training Set'
# relations
for ingr in relations:
# ingredients
ingredient_name, ingredient_category = ingredients[ingr][0], ingredients[ingr][1]
# compounds in a ingredient
compound_list = []
for comp in relations[ingr]:
compound_name, compound_cas = compounds[comp][0], compounds[comp][1]
compound_list.append(compound_name)
# filter by number of compounds
if len(compound_list) > filtering:
compound_length_list.append(len(compound_list))
# Random Sampling
if random_sampling:
#print ingredient_name, len(compound_list), len(compound_list)/num_sampling*3
#all the combinations
#sample_list = ["".join(x) for x in combinations(compound_list, 5)]
#print len(sample_list)
#sample randomly
for i in xrange(num_sampling):
sampled_compounds = random.sample(compound_list, filtering)
yield gensim.models.doc2vec.TaggedDocument(compound_list, [ingredient_name])
# Just Sampling
else:
yield gensim.models.doc2vec.TaggedDocument(compound_list, [ingredient_name])
print 'Sampling %d' % (num_sampling)
print 'Filter %d' % (filtering)
print 'Number of ingredients : %d' % (len(compound_length_list))
print 'Average Length of Compounds: %f' % (reduce(lambda x, y: x + y, compound_length_list) / float(len(compound_length_list)))
def build_sentences_ic(self, recipe, filtering=5, random_sampling=False, num_sampling=0):
print '\n\n...Building Training Set'
sentences = []
with open(recipe, 'r') as f:
for line in f:
sentence = line.rstrip().split(",")[1:]
sentences.append(sentence)
return sentences
def build_taggedDocument_df(self, df, filtering=5, random_sampling=False, num_sampling=0):
print '\n\n...Building Training Set'
for index, row in df.iterrows():
compound_list = row['text'].split(" ")
#print compound_list
ingredient_name = row['label'].decode("utf8").strip()
# Random Sampling
if random_sampling:
#print ingredient_name, len(compound_list), len(compound_list)/num_sampling*3
#all the combinations
#sample_list = ["".join(x) for x in combinations(compound_list, 5)]
#print len(sample_list)
#sample randomly
for i in xrange(num_sampling):
sampled_compounds = random.sample(compound_list, filtering)
yield gensim.models.doc2vec.TaggedDocument(compound_list, [ingredient_name])
# Just Sampling
else:
yield gensim.models.doc2vec.TaggedDocument(compound_list, [ingredient_name])
if __name__ == '__main__':
    # Script entry point: pick one pipeline via the hard-coded ``mode`` flag.
    dataLoader = DataLoader.DataLoader()
    gensimLoader = GensimModels.GensimModels()
    ingr2vec = Ingredient2Vec()

    """
    Mode Description
    # mode 1 : Embed Ingredients with Chemical Compounds
    # mode 2 : Embed Ingredients with Compounds read from a CSV DataFrame
    # mode 3 : Embed Ingredients with other Ingredient Context
    # mode 999 : Plot Loaded Word2Vec or Doc2vec
    """
    mode = 999

    # NOTE(review): the chain below is "if / if / elif / elif / else" -- the
    # elif branches attach to ``mode == 2`` only.  Behavior is unaffected
    # because ``mode`` is a constant, but mode 1 and 2 are not mutually
    # exclusive syntactically.
    if mode == 1:
        """
        Load Data
        """
        # load list of compounds to dict
        ingredients = dataLoader.load_ingredients(Config.path_ingr_info)
        compounds = dataLoader.load_compounds(Config.path_comp_info)
        relations = dataLoader.load_relations(Config.path_ingr_comp)

        """
        Preprocess Data
        """
        # build taggedDocument form of corpus
        corpus_ingr2vec_cc = list(ingr2vec.build_taggedDocument_cc(ingredients, compounds, relations, filtering=Config.FILTERING, random_sampling=Config.RANDOM_SAMPLING, num_sampling=Config.NUM_SAMPLING))

        """
        Build & Save Doc2Vec
        """
        # build ingredient embeddings with doc2vec
        #model_ingr2vec_cc = gensimLoader.build_doc2vec(corpus_ingr2vec_cc, load_pretrained=Config.CHAR_EMB, path_pretrained=Config.path_embeddings_compounds_inchi)
        model_ingr2vec_cc = gensimLoader.build_doc2vec(corpus_ingr2vec_cc, load_pretrained=False, path_pretrained=False)

        # save character-level compounds embeddings with doc2vec
        gensimLoader.save_doc2vec_only_doc(model=model_ingr2vec_cc, path=Config.path_embeddings_compounds_rnd)
        # Reload to sanity-check that the saved embeddings are readable.
        model_loaded = gensimLoader.load_word2vec(path=Config.path_embeddings_compounds_rnd)

        #for x in model_loaded.vocab:
        #    print x, model_loaded.word_vec(x)

    if mode == 2:
        """
        Load Data
        """
        #ingredient_sentence = "../data/scientific_report/D7_flavornet-vocab-compounds.csv"
        ingredient_sentence = "../data/scientific_report/flavordb_ver2.0.csv"
        df = pd.read_csv(ingredient_sentence)

        """
        Preprocess Data
        """
        # build taggedDocument form of corpus
        corpus_ingr2vec = list(ingr2vec.build_taggedDocument_df(df, filtering=Config.FILTERING, random_sampling=Config.RANDOM_SAMPLING, num_sampling=Config.NUM_SAMPLING))

        """
        Build & Save Doc2Vec
        """
        # build ingredient embeddings with doc2vec
        model_ingr2vec = gensimLoader.build_doc2vec(corpus_ingr2vec, load_pretrained=False, path_pretrained=False)

        # save character-level compounds embeddings with doc2vec
        gensimLoader.save_doc2vec_only_doc(model=model_ingr2vec, path=Config.path_embeddings_compounds_rnd)
        model_loaded = gensimLoader.load_word2vec(path=Config.path_embeddings_compounds_rnd)

        #for x in model_loaded.vocab:
        #    print x, model_loaded.word_vec(x)

    elif mode == 3:
        """
        Load Data & Preprocess Data
        """
        corpus_ingr2vec_ic = ingr2vec.build_sentences_ic(Config.path_culture, filtering=Config.FILTERING, random_sampling=Config.RANDOM_SAMPLING, num_sampling=Config.NUM_SAMPLING)

        """
        Build & Save Doc2Vec
        """
        # build ingredient embeddings with word2vec
        model_ingr2vec_ic = gensimLoader.build_word2vec(corpus_ingr2vec_ic, load_pretrained=False, path_pretrained="")

        # save embeddings
        gensimLoader.save_word2vec(model=model_ingr2vec_ic, path=Config.path_embeddings_ingredients_ic)
        model_loaded = gensimLoader.load_word2vec(path=Config.path_embeddings_ingredients_ic)

        #for x in model_loaded.vocab:
        #    print x, model_loaded.word_vec(x)

    elif mode == 999:
        """
        Plot Ingredient2Vec
        """
        # Project loaded embeddings to 2-D with t-SNE and plot by category.
        model_loaded = gensimLoader.load_word2vec(path=Config.path_embeddings_compounds_rnd)
        model_tsne = DataPlotter.load_TSNE(model_loaded, dim=2)
        DataPlotter.plot_category(model_loaded, model_tsne, Config.path_embeddings_compounds_rnd, withLegends=False)
        #DataPlotter.plot_clustering(model_loaded, model_tsne, Config.path_plottings_ingredients_clustering)

    else:
        print "Please specify the mode you want."
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| mit |
jakirkham/bokeh | sphinx/source/conf.py | 3 | 9540 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import abspath, dirname, join
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_color',
'bokeh.sphinxext.bokeh_enum',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_options',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_palette_group',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.bokeh_releases',
'bokeh.sphinxext.bokeh_sitemap',
'bokeh.sphinxext.collapsible_code_block',
]
napoleon_include_init_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015-2018, Anaconda and Bokeh Contributors.'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# Get all the versions that will appear in the version dropdown.
# Use a context manager so the file handle is closed promptly -- the original
# left ``f`` open for the lifetime of the Sphinx build.
with open(join(dirname(abspath(__file__)), "all_versions.txt")) as f:
    all_versions = [x.strip() for x in reversed(f.readlines())]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#
# NOTE: in these docs all .py script are assumed to be bokeh plot scripts!
# with bokeh_plot_pyfile_include_dirs set desired folder to look for .py files
bokeh_plot_pyfile_include_dirs = ['docs']
# Whether to allow builds to succeed if a Google API key is not defined and plots
# containing "GOOGLE_API_KEY" are processed
bokeh_missing_google_api_key_ok = False
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# patterns to exclude
exclude_patterns = ['docs/releases/*']
# This would more properly be done with rst_epilog but something about
# the combination of this with the bokeh-gallery directive breaks the build
# Substitutions prepended to every document: shorthand links for core Bokeh
# types used throughout the reference docs.
rst_prolog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']
# Values made available to the custom bokeh_theme templates (navigation bar,
# about/social links, and the version dropdown).
html_context = {
    'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed
    'DESCRIPTION': 'Bokeh visualization library, documentation site.',
    'AUTHOR': 'Bokeh contributors',
    'VERSION': version,
    'NAV': (
        ('Github', '//github.com/bokeh/bokeh'),
    ),
    'ABOUT': (
        ('Vision and Work', 'vision'),
        ('Team', 'team'),
        ('Citation', 'citation'),
        ('Contact', 'contact'),
    ),
    'SOCIAL': (
        ('Contribute', 'contribute'),
        ('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),
        ('Github', '//github.com/bokeh/bokeh'),
        ('Twitter', '//twitter.com/BokehPlots'),
    ),
    'NAV_DOCS': (
        ('Installation', 'installation'),
        ('User Guide', 'user_guide'),
        ('Gallery', 'gallery'),
        ('Tutorial', 'https://mybinder.org/v2/gh/bokeh/bokeh-notebooks/master?filepath=tutorial%2F00%20-%20Introduction%20and%20Setup.ipynb'),
        ('Reference', 'reference'),
        ('Releases', 'releases'),
        ('Developer Guide', 'dev_guide'),
    ),
    'ALL_VERSIONS': all_versions,
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'bokeh', u'Bokeh Documentation',
     [u'Anaconda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
# Map external project names to their published docs so cross-references
# (e.g. :class:`numpy.ndarray`) resolve to the upstream sites.
intersphinx_mapping = {
    'python': ('https://docs.python.org/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
| bsd-3-clause |
calum-chamberlain/EQcorrscan | eqcorrscan/doc/conf.py | 2 | 7509 | # -*- coding: utf-8 -*-
#
# EQcorrscan documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 23 21:20:41 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.insert(0, os.path.abspath('../..'))
import matplotlib
import eqcorrscan
import sphinx_bootstrap_theme
# Use mock to allow for autodoc compilation without needing C based modules
import mock
import glob
READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'
# Modules with compiled or optional dependencies are replaced by Mock objects
# so autodoc can import the package without the C extensions being built.
MOCK_MODULES = ['cv2', 'h5py', 'eqcorrscan.utils.libnames']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../core'))
sys.path.insert(0, os.path.abspath('../utils'))
sys.path = [os.path.dirname(__file__) + os.sep + '_ext'] + sys.path
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.intersphinx',
    'sphinx.ext.doctest',
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    # 'matplotlib.sphinxext.mathmpl',
    # 'matplotlib.sphinxext.only_directives',
    'matplotlib.sphinxext.plot_directive',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    # local extensions
    'sphinx.ext.autosummary',
    'obspydoc',
    'nbsphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'EQcorrscan'
copyright = u'2015-2021: EQcorrscan developers'
author = u'EQcorrscan developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = eqcorrscan.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# File formats to generate.
plot_formats = [('png', 110), ('hires.png', 200)]
# PDF copies of the plots are only generated on Read the Docs builds.
if READ_THE_DOCS:
    plot_formats += [('pdf', 200)]
plot_html_show_formats = True
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
    'bootswatch_theme': "sandstone",
}
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'EQcorrscan'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = './EQcorrscan_logo.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = './EQcorrscan_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'EQcorrscandoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'EQcorrscan.tex', u'EQcorrscan Documentation',
     u'Calum John Chamberlain', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'EQcorrscan_logo.pdf'
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'eqcorrscan', u'EQcorrscan Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'EQcorrscan', u'EQcorrscan Documentation',
     author, 'EQcorrscan', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/2.7/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
    'matplotlib': ('http://matplotlib.org/', None),
    'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None),
    'obspy': ('https://docs.obspy.org/', None),
}
# generate automatically stubs
autosummary_generate = glob.glob("submodules" + os.sep + "*.rst")
# Don't merge __init__ method in autoclass content
autoclass_content = 'class'
# This value is a list of autodoc directive flags that should be automatically
# applied to all autodoc directives. The supported flags are 'members',
# 'undoc-members', 'private-members', 'special-members', 'inherited-members' and
# 'show-inheritance'. Don't set it to anything !
autodoc_default_flags = ['show-inheritance']
# warn about *all* references where the target cannot be found
nitpicky = False
trim_doctest_flags = True
| gpl-3.0 |
chatcannon/scipy | scipy/special/add_newdocs.py | 4 | 133813 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
# Registry mapping "place.name" -> docstring text, consumed later by
# generate_ufuncs.py when the ufunc docstrings are emitted at compile time.
docdict = {}


def get(name):
    """Return the docstring registered under *name*, or None if absent."""
    return docdict.get(name)


def add_newdoc(place, name, doc):
    """Register *doc* in the docstring registry under the key "place.name"."""
    key = ".".join((place, name))
    docdict[key] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/.org/amos/
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a seach for a value that produces the desired
value of `y`. The search relies on the monotinicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a seach for a value that produces the desired
value of `y`. The search relies on the monotinicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "btdtrib",
r"""
btdtria(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtri(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \\sqrt(1-m)
is used.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipk(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipk(phi,m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt(p)
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer `n` and non-negative `x` and
`n`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative density function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a, x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a, x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Performs a logarithmic transformation of the
values of the gamma function in one of two
ways, depending on the input `z`:
1) `z` is not complex (i.e. `z` is a purely
real number *or* it is array_like and
contains purely real elements)
The natural logarithm of the absolute value of
gamma(z) is computed. Thus, it is defined as:
ln(abs(gamma(z)))
2) `z` is complex (i.e. `z` is a complex
number *or* it is array_like and contains
at least one complex element)
The natural logarithm of gamma(z) is computed.
Thus, it is defined as:
ln((gamma(z))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative density function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z exp(\frac{\imath\pi}{2}))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, x)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`f''(z) + vf'(z) = f(z)`.
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``integral(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x: array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
See also
--------
jv : Bessel function of real order and complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
See also
--------
jv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
See also
--------
jv
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
z : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : float
Degree.
x : float
Argument. Must be ``|x| <= 1``.
Returns
-------
res : float
The value of the function.
See Also
--------
lpmn : Similar, but computes values for all orders 0..m and degrees 0..n.
clpmn : Similar to `lpmn` but allows a complex argument.
Notes
-----
It is possible to extend the domain of this function to all
complex m, v, x, but this is not yet implemented.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x)
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
`z` (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(x)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1), t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative density function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.wofz(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$wofz(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
z : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
| bsd-3-clause |
tridao/cvxpy | examples/relax_and_round.py | 12 | 6062 | # Relax and round example for talk.
from __future__ import division
from cvxpy import *
import numpy
# def bool_vars(prob):
# return [var for var in prob.variables() if var.boolean]
def cvx_relax(prob):
    """Return the convex relaxation of ``prob``.

    Every variable carrying a truthy ``boolean`` attribute is relaxed to
    the box ``0 <= var <= 1``; the objective and all original constraints
    are kept unchanged.
    """
    box_constraints = []
    for v in prob.variables():
        if getattr(v, 'boolean', False):
            box_constraints.append(0 <= v)
            box_constraints.append(v <= 1)
    return Problem(prob.objective, prob.constraints + box_constraints)
def round_and_fix(prob):
    """Solve ``prob``, then pin every boolean-tagged variable to the
    rounded value it took in that solution.

    Returns a new ``Problem`` extended with the fixing equalities.
    """
    prob.solve()
    fixing = [v == numpy.round(v.value)
              for v in prob.variables()
              if getattr(v, 'boolean', False)]
    return Problem(prob.objective, prob.constraints + fixing)
def branch_and_bound(n, A, B, c):
    """Globally minimize ``||A*x + B*z - c||^2`` with ``z`` binary.

    Best-first branch and bound on the entries of ``z``: each node fixes
    a prefix of ``z`` via the box parameters ``L``/``U`` and uses the
    convex relaxation as its lower bound.

    Parameters
    ----------
    n : int
        Dimension of ``x`` and ``z``.
    A, B : array-like, shape (m, n)
        Problem data.
    c : array-like, shape (m, 1)
        Target vector.

    Returns
    -------
    (float, numpy array)
        Optimal objective value and the optimal binary ``z``.
    """
    try:
        from queue import PriorityQueue  # Python 3
    except ImportError:
        from Queue import PriorityQueue  # Python 2 fallback
    x = Variable(n)
    z = Variable(n)
    L = Parameter(n)
    U = Parameter(n)
    prob = Problem(Minimize(sum_squares(A*x + B*z - c)),
                   [L <= z, z <= U])
    visited = 0
    best_z = None
    f_best = numpy.inf
    # Queue entries are (lower bound, tie-break counter, L, U, depth).
    # The strictly increasing counter guarantees that ties on the bound
    # never fall through to comparing the numpy arrays, which raises a
    # ValueError on Python 3.
    counter = 0
    nodes = PriorityQueue()
    nodes.put((numpy.inf, counter, numpy.zeros(n), numpy.ones(n), 0))
    while not nodes.empty():
        visited += 1
        # Evaluate the node with the lowest lower bound.
        _, _, L_val, U_val, idx = nodes.get()
        L.value = L_val
        U.value = U_val
        lower_bound = prob.solve()
        # Rounding the relaxation gives a feasible point -> upper bound.
        z_star = numpy.round(z.value)
        upper_bound = Problem(prob.objective, [z == z_star]).solve()
        f_best = min(f_best, upper_bound)
        if upper_bound == f_best:
            best_z = z_star
        # Add new nodes if not at a leaf and the branch cannot be pruned.
        if idx < n and lower_bound < f_best:
            for i in [0, 1]:
                L_val[idx] = U_val[idx] = i
                counter += 1
                nodes.put((lower_bound, counter,
                           L_val.copy(), U_val.copy(), idx + 1))
    #print("Nodes visited: %s out of %s" % (visited, 2**(n+1)-1))
    return f_best, best_z
# def round_and_fix2(prob, thresh):
# prob.solve()
# new_constr = []
# for var in bool_vars(prob):
# new_constr += [var == (var.value > thresh)]
# return Problem(prob.objective, prob.constraints + new_constr)
# def round_and_fix3(prob, thresh):
# prob.solve()
# new_constr = []
# for var in bool_vars(prob):
# print var.value
# new_constr += [(var.value > 1 - thresh ) <= var,
# var <= ~(var.value <= thresh)]
# return Problem(prob.objective, prob.constraints + new_constr)
numpy.random.seed(1)
# Min sum_squares(A*x + B*z - c)
# z boolean.
def example(n, get_vals=False):
    """Build a random size-n instance with a planted (continuous, boolean)
    solution and solve it three ways: convex relaxation (lower bound),
    relax-and-round (heuristic upper bound), and branch and bound (exact).

    When ``get_vals`` is True, returns the three z vectors
    (relaxed, rounded, true) instead of the three objective values.
    """
    print "n = %d #################" % n
    m = 2*n
    # Random data; c is generated from a known solution plus noise.
    A = numpy.matrix(numpy.random.randn(m, n))
    B = numpy.matrix(numpy.random.randn(m, n))
    sltn = (numpy.random.randn(n, 1),
            numpy.random.randint(2, size=(n, 1)))
    noise = numpy.random.normal(size=(m, 1))
    c = A.dot(sltn[0]) + B.dot(sltn[1]) + noise
    x = Variable(n)
    #x.boolean = False
    z = Variable(n)
    z.boolean = True
    obj = sum_squares(A*x + B*z - c)
    prob = Problem(Minimize(obj))
    # Lower bound from the convex relaxation.
    relaxation = cvx_relax(prob)
    print "relaxation", relaxation.solve()
    rel_z = z.value
    # Heuristic: round the relaxed booleans, fix them, re-solve.
    rounded = round_and_fix(relaxation)
    rounded.solve()
    print "relax and round", rounded.value
    # Exact optimum for comparison.
    truth, true_z = branch_and_bound(n, A, B, c)
    print "true optimum", truth
    if get_vals:
        return (rel_z, z.value, true_z)
    return (relaxation.value, rounded.value, truth)
# Plot relaxation z_star.
import matplotlib.pyplot as plt
n = 20
vals = range(1, n+1)
# Per-component values of the relaxed / rounded / true z for one instance.
relaxed, rounded, truth = map(numpy.asarray, example(n, True))
plt.figure(figsize=(6,4))
plt.plot(vals, relaxed, 'ro')
# The 0.5 line marks the rounding threshold for the relaxed booleans.
plt.axhline(y=0.5,color='k',ls='dashed')
plt.xlabel(r'$i$')
plt.ylabel(r'$z^\mathrm{rel}_i$')
plt.show()
# Plot optimal values.
import matplotlib.pyplot as plt
relaxed = []
rounded = []
truth = []
vals = range(1, 36)
for n in vals:
    results = example(n)
    # Round the objective values for cleaner plotting.
    results = map(lambda x: numpy.around(x, 3), results)
    relaxed.append(results[0])
    rounded.append(results[1])
    truth.append(results[2])
plt.figure(figsize=(6,4))
plt.plot(vals, rounded, vals, truth, vals, relaxed)
plt.xlabel("n")
plt.ylabel("Objective value")
plt.legend(["Relax and round value", "Global optimum", "Lower bound"], loc=2)
plt.show()
# m = 10
# n = 8
# nnz = 5
# A = numpy.random.randn(m, n)
# solution = numpy.random.randint(2, size=(n, 1))
# b = A.dot(solution)
# x = Variable(n)
# y = Variable(n)
# x.boolean = False
# y.boolean = True
# U = 100
# L = -100
# obj = sum_squares(A*x - b)
# constraints = [L*y <= x, x <= U*y,
# sum_entries(y) <= nnz]
# prob = Problem(Minimize(obj), constraints)
# relaxation = cvx_relax(prob)
# print relaxation.solve()
# rounded = relaxation
# K = 4
# for i in range(K+1):
# rounded = round_and_fix3(rounded, i/(2*K))
# print rounded.solve()
# print numpy.around(x.value, 2)
# print numpy.around(y.value, 2)
# # Warehouse operation.
# # http://web.mit.edu/15.053/www/AMP-Chapter-09.pdf
# # cost per unit from warehouse i to customer j
# # cost for warehouse being used
# # fixed customer demand
# m = 100 # number of customers.
# n = 50 # number of warehouses.
# numpy.random.seed(1)
# C = numpy.random.random((n, m))
# f = numpy.random.random((n, 1))
# d = numpy.random.random((m, 1))
# X = Variable(n, m)
# y = Variable(n)
# # Annotate variables.
# X.boolean = False
# y.boolean = True
# demand = [sum_entries(X[:, j]) == d[j] for j in range(m)]
# valid = [sum_entries(X[i, :]) <= y[i]*d.sum() for i in range(n)]
# obj = sum_entries(mul_elemwise(C, X)) + f.T*y
# prob = Problem(Minimize(obj),
# [X >= 0, sum_entries(y) >= 3*n/4] + demand + valid)
# relaxation = cvx_relax(prob)
# print relaxation.solve()
# rounded = round_and_fix(relaxation)
# # rounded = relaxation
# # K = 4
# # for i in range(K):
# # print i
# # rounded = round_and_fix3(rounded, i/(2*K))
# # print y.value.sum()
# print rounded.solve()
# print rounded.status
# print y.value.sum()
# # print numpy.around(X.value, 2)
# # print numpy.around(y.value, 2)
| gpl-3.0 |
dsm054/pandas | pandas/tests/extension/base/getitem.py | 4 | 8062 | import numpy as np
import pytest
import pandas as pd
from .base import BaseExtensionTests
class BaseGetitemTests(BaseExtensionTests):
"""Tests for ExtensionArray.__getitem__."""
def test_iloc_series(self, data):
ser = pd.Series(data)
result = ser.iloc[:4]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.iloc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_iloc_frame(self, data):
df = pd.DataFrame({"A": data, 'B':
np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.iloc[:4, [0]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.iloc[[0, 1, 2, 3], [0]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
# sequence -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
def test_loc_series(self, data):
ser = pd.Series(data)
result = ser.loc[:3]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.loc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_loc_frame(self, data):
df = pd.DataFrame({"A": data,
'B': np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.loc[:3, ['A']]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.loc[[0, 1, 2, 3], ['A']]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.loc[:3, 'A']
self.assert_series_equal(result, expected)
# sequence -> series
result = df.loc[:3, 'A']
self.assert_series_equal(result, expected)
def test_getitem_scalar(self, data):
result = data[0]
assert isinstance(result, data.dtype.type)
result = pd.Series(data)[0]
assert isinstance(result, data.dtype.type)
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
def test_getitem_mask(self, data):
# Empty mask, raw array
mask = np.zeros(len(data), dtype=bool)
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
# Empty mask, in series
mask = np.zeros(len(data), dtype=bool)
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
# non-empty mask, raw array
mask[0] = True
result = data[mask]
assert len(result) == 1
assert isinstance(result, type(data))
# non-empty mask, in series
result = pd.Series(data)[mask]
assert len(result) == 1
assert result.dtype == data.dtype
def test_getitem_slice(self, data):
# getitem[slice] should return an array
result = data[slice(0)] # empty
assert isinstance(result, type(data))
result = data[slice(1)] # scalar
assert isinstance(result, type(data))
def test_get(self, data):
# GH 20882
s = pd.Series(data, index=[2 * i for i in range(len(data))])
assert s.get(4) == s.iloc[2]
result = s.get([4, 6])
expected = s.iloc[[2, 3]]
self.assert_series_equal(result, expected)
result = s.get(slice(2))
expected = s.iloc[[0, 1]]
self.assert_series_equal(result, expected)
assert s.get(-1) is None
assert s.get(s.index.max() + 1) is None
s = pd.Series(data[:6], index=list('abcdef'))
assert s.get('c') == s.iloc[2]
result = s.get(slice('b', 'd'))
expected = s.iloc[[1, 2, 3]]
self.assert_series_equal(result, expected)
result = s.get('Z')
assert result is None
assert s.get(4) == s.iloc[4]
assert s.get(-1) == s.iloc[-1]
assert s.get(len(s)) is None
# GH 21257
s = pd.Series(data)
s2 = s[::2]
assert s2.get(1) is None
def test_take_sequence(self, data):
result = pd.Series(data)[[0, 1, 3]]
assert result.iloc[0] == data[0]
assert result.iloc[1] == data[1]
assert result.iloc[2] == data[3]
def test_take(self, data, na_value, na_cmp):
result = data.take([0, -1])
assert result.dtype == data.dtype
assert result[0] == data[0]
assert result[1] == data[-1]
result = data.take([0, -1], allow_fill=True, fill_value=na_value)
assert result[0] == data[0]
assert na_cmp(result[1], na_value)
with pytest.raises(IndexError, match="out of bounds"):
data.take([len(data) + 1])
def test_take_empty(self, data, na_value, na_cmp):
empty = data[:0]
result = empty.take([-1], allow_fill=True)
assert na_cmp(result[0], na_value)
with pytest.raises(IndexError):
empty.take([-1])
with pytest.raises(IndexError, match="cannot do a non-empty take"):
empty.take([0, 1])
def test_take_negative(self, data):
# https://github.com/pandas-dev/pandas/issues/20640
n = len(data)
result = data.take([0, -n, n - 1, -1])
expected = data.take([0, 0, n - 1, n - 1])
self.assert_extension_array_equal(result, expected)
def test_take_non_na_fill_value(self, data_missing):
fill_value = data_missing[1] # valid
na = data_missing[0]
array = data_missing._from_sequence([na, fill_value, na])
result = array.take([-1, 1], fill_value=fill_value, allow_fill=True)
expected = array.take([1, 1])
self.assert_extension_array_equal(result, expected)
def test_take_pandas_style_negative_raises(self, data, na_value):
with pytest.raises(ValueError):
data.take([0, -2], fill_value=na_value, allow_fill=True)
@pytest.mark.parametrize('allow_fill', [True, False])
def test_take_out_of_bounds_raises(self, data, allow_fill):
arr = data[:3]
with pytest.raises(IndexError):
arr.take(np.asarray([0, 3]), allow_fill=allow_fill)
def test_take_series(self, data):
s = pd.Series(data)
result = s.take([0, -1])
expected = pd.Series(
data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype),
index=[0, len(data) - 1])
self.assert_series_equal(result, expected)
def test_reindex(self, data, na_value):
s = pd.Series(data)
result = s.reindex([0, 1, 3])
expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3])
self.assert_series_equal(result, expected)
n = len(data)
result = s.reindex([-1, 0, n])
expected = pd.Series(
data._from_sequence([na_value, data[0], na_value],
dtype=s.dtype),
index=[-1, 0, n])
self.assert_series_equal(result, expected)
result = s.reindex([n, n + 1])
expected = pd.Series(data._from_sequence([na_value, na_value],
dtype=s.dtype),
index=[n, n + 1])
self.assert_series_equal(result, expected)
def test_reindex_non_na_fill_value(self, data_missing):
valid = data_missing[1]
na = data_missing[0]
array = data_missing._from_sequence([na, valid])
ser = pd.Series(array)
result = ser.reindex([0, 1, 2], fill_value=valid)
expected = pd.Series(data_missing._from_sequence([na, valid, valid]))
self.assert_series_equal(result, expected)
| bsd-3-clause |
terkkila/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the separating hyperplane of a fitted linear classifier.

    The line ``w . p + b = 0`` is solved for the second coordinate and
    plotted over an x-range padded by 5 on both sides so it spans the
    whole figure.
    """
    weights = clf.coef_[0]
    slope = -weights[0] / weights[1]
    # Pad the range so the line is long enough to cross the plot.
    xs = np.linspace(min_x - 5, max_x + 5)
    ys = slope * xs - clf.intercept_[0] / weights[1]
    plt.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Project X to 2-D (via PCA or CCA), fit one-vs-rest linear SVCs,
    and draw the samples plus both class boundaries in a 2x2 subplot.

    ``transform`` must be "pca" (unsupervised) or "cca" (supervised).
    """
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        # CCA uses the labels Y, i.e. a supervised projection.
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    # Indices of samples carrying label 0 / label 1 (may overlap).
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    # Labeled samples get hollow colored rings; dual-labeled points show both.
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        # Only annotate one panel to keep the figure uncluttered.
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
# Four panels: CCA vs PCA projections, with and without unlabeled
# samples in the synthetic multilabel dataset.
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      return_indicator=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      return_indicator=True,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/computation/pytables.py | 9 | 20208 | """ manage PyTables query interface via Expressions """
import ast
import time
import warnings
from functools import partial
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.compat import u, string_types, PY3, DeepChainMap
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.computation import expr, ops
from pandas.computation.ops import is_term, UndefinedVariableError
from pandas.computation.scope import _ensure_scope
from pandas.computation.expr import BaseExprVisitor
from pandas.computation.common import _ensure_decoded
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
class Scope(expr.Scope):
    """Expression scope that also carries the table's queryable columns
    (a name -> kind mapping used to validate left-hand sides)."""

    __slots__ = 'queryables',

    def __init__(self, level, global_dict=None, local_dict=None,
                 queryables=None):
        # Bump the frame level by one to account for this wrapper.
        super(Scope, self).__init__(level + 1, global_dict=global_dict,
                                    local_dict=local_dict)
        # Default to an empty mapping when none (or an empty one) is given.
        self.queryables = dict() if not queryables else queryables
class Term(ops.Term):
    """A named term in a pytables query expression.

    ``__new__`` dispatches on the "name": non-string names are literal
    values and become :class:`Constant` instances instead of Terms.
    """
    def __new__(cls, name, env, side=None, encoding=None):
        # Literal (non-string) values are represented by Constant.
        klass = Constant if not isinstance(name, string_types) else cls
        supr_new = StringMixin.__new__
        return supr_new(klass)
    def __init__(self, name, env, side=None, encoding=None):
        super(Term, self).__init__(name, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # must be a queryables
        if self.side == 'left':
            # Left-hand sides must name an actual queryable column.
            if self.name not in self.env.queryables:
                raise NameError('name {0!r} is not defined'.format(self.name))
            return self.name
        # resolve the rhs (and allow it to be None)
        try:
            return self.env.resolve(self.name, is_local=False)
        except UndefinedVariableError:
            # Unresolvable right-hand sides fall back to the raw name.
            return self.name
    @property
    def value(self):
        # Resolved value, populated by the ops.Term machinery.
        return self._value
class Constant(Term):
    """A literal value appearing in a query expression.

    Unlike :class:`Term`, the "name" is the value itself, so name
    resolution is the identity.
    """

    def __init__(self, value, env, side=None, encoding=None):
        super(Constant, self).__init__(value, env,
                                       side=side, encoding=encoding)

    def _resolve_name(self):
        # A constant resolves to itself; no environment lookup is needed.
        return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not com.is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs),'kind',None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs),'meta',None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs),'metadata',None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "(%s %s %s)" % (self.lhs, self.op, val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(com.pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = com.pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif (isinstance(v, datetime) or hasattr(v, 'timetuple') or
kind == u('date')):
v = time.mktime(v.timetuple())
return TermValue(v, pd.Timestamp(v), kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com._values_from_object(self.metadata)
result = metadata.searchsorted(v,side='left')
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif not isinstance(v, string_types):
v = stringify(v)
return TermValue(v, stringify(v), u('string'))
# string quoting
return TermValue(v, stringify(v), u('string'))
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return com.pprint_thing("[Filter : [{0}] -> "
"[{1}]".format(self.filter[0], self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind) for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
else:
raise TypeError(
"passing a filterable condition to a non-table indexer [%s]" %
self)
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return com.pprint_thing("[Condition : [{0}]]".format(self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "(%s)" % ' | '.join(vs)
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
    """Boolean combination of two already-evaluated conditions."""

    def evaluate(self):
        # Join the two sub-conditions into one parenthesized numexpr string.
        parts = (self.lhs.condition, self.op, self.rhs.condition)
        self.condition = "({0} {1} {2})".format(*parts)
        return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple suscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {0!r} with "
"{1!r}".format(value, slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overriden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, op=None, value=None, queryables=None,
encoding=None, scope_level=0):
# try to be back compat
where = self.parse_back_compat(where, op, value)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = self.parse_back_compat(w)
where[idx] = w
where = ' & ' .join(["(%s)" % w for w in where])
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def parse_back_compat(self, w, op=None, value=None):
""" allow backward compatibility for passed arguments """
if isinstance(w, dict):
w, op, value = w.get('field'), w.get('op'), w.get('value')
if not isinstance(w, string_types):
raise TypeError(
"where must be passed as a string if op/value are passed")
warnings.warn("passing a dict to Expr is deprecated, "
"pass the where as a single string",
DeprecationWarning)
if isinstance(w, tuple):
if len(w) == 2:
w, value = w
op = '=='
elif len(w) == 3:
w, op, value = w
warnings.warn("passing a tuple into Expr is deprecated, "
"pass the where as a single string",
DeprecationWarning, stacklevel=10)
if op is not None:
if not isinstance(w, string_types):
raise TypeError(
"where must be passed as a string if op/value are passed")
if isinstance(op, Expr):
raise TypeError("invalid op passed, must be a string")
w = "{0}{1}".format(w, op)
if value is not None:
if isinstance(value, Expr):
raise TypeError("invalid value passed, must be a string")
# stringify with quotes these values
def convert(v):
if isinstance(v, (datetime,np.datetime64,timedelta,np.timedelta64)) or hasattr(v, 'timetuple'):
return "'{0}'".format(v)
return v
if isinstance(value, (list,tuple)):
value = [ convert(v) for v in value ]
else:
value = convert(value)
w = "{0}{1}".format(w, value)
warnings.warn("passing multiple values to Expr is deprecated, "
"pass the where as a single string",
DeprecationWarning)
return w
def __unicode__(self):
if self.terms is not None:
return com.pprint_thing(self.terms)
return com.pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid condition".format(self.expr, self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid filter".format(self.expr, self))
return self.condition, self.filter
class TermValue(object):
    """Container for a term value used to build a condition/filter.

    Holds the raw ``value``, its pytables-ready ``converted`` form, and
    the ``kind`` (type name) of the underlying column.
    """

    def __init__(self, value, converted, kind):
        self.value = value
        self.converted = converted
        self.kind = kind

    def tostring(self, encoding):
        """Return the converted value; double-quote unencoded strings."""
        if self.kind != u('string'):
            return self.converted
        # Already-encoded strings pass through untouched; otherwise quote.
        if encoding is not None:
            return self.converted
        return '"%s"' % self.converted
def maybe_expression(s):
    """Loosely check whether ``s`` could be a pytables query expression.

    Non-strings are rejected outright; otherwise the string must contain
    at least one recognized operator token.
    """
    if not isinstance(s, string_types):
        return False
    # An expression needs at least one operator somewhere in it.
    candidate_ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
    for op in candidate_ops:
        if op in s:
            return True
    return False
| mit |
openego/eDisGo | edisgo/data/import_data.py | 1 | 89664 | from ..grid.components import Load, Generator, BranchTee, MVStation, Line, \
Transformer, LVStation, GeneratorFluctuating
from ..grid.grids import MVGrid, LVGrid
from ..grid.connect import connect_mv_generators, connect_lv_generators
from ..grid.tools import select_cable, position_switch_disconnectors
from ..tools.geo import proj2equidistant
from edisgo.tools import pypsa_io
from edisgo.tools import session_scope
from egoio.db_tables import model_draft, supply
from sqlalchemy import func
from workalendar.europe import Germany
from demandlib import bdew as bdew, particular_profiles as profiles
import datetime
import pandas as pd
import numpy as np
import networkx as nx
from math import isnan
import random
import os
if not 'READTHEDOCS' in os.environ:
from ding0.tools.results import load_nd_from_pickle
from ding0.core.network.stations import LVStationDing0
from ding0.core.structure.regions import LVLoadAreaCentreDing0
from ding0.core import GeneratorFluctuatingDing0
from shapely.ops import transform
from shapely.wkt import loads as wkt_loads
import logging
logger = logging.getLogger('edisgo')
def import_from_ding0(file, network):
    """
    Import an eDisGo grid topology from
    `Ding0 data <https://github.com/openego/ding0>`_.

    This import method is specifically designed to load grid topology data in
    the format as `Ding0 <https://github.com/openego/ding0>`_ provides it via
    pickles.

    The import covers the topology itself, equipment parameters, generators
    (location, type, subtype, capacity) and loads (location, sectoral
    consumption).

    Parameters
    ----------
    file: :obj:`str` or :class:`ding0.core.NetworkDing0`
        If a str is provided it is assumed it points to a pickle with Ding0
        grid data. This file will be read.
        If an object of the type :class:`ding0.core.NetworkDing0` data will be
        used directly from this object.
    network: :class:`~.grid.network.Network`
        The eDisGo data container object

    Notes
    -----
    Assumes :class:`ding0.core.NetworkDing0` provided by `file` contains
    only data of one mv_grid_district.
    """
    # `file` may be a pickle path or an already instantiated NetworkDing0
    if isinstance(file, str):
        ding0_network = load_nd_from_pickle(filename=file)
    else:
        ding0_network = file

    ding0_mv_grid = ding0_network._mv_grid_districts[0].mv_grid

    # Rings have to be closed before the topology is translated
    ding0_mv_grid.close_circuit_breakers()

    # Translate the MV level first, then the LV level
    network.mv_grid = _build_mv_grid(ding0_mv_grid, network)
    lv_grids, lv_station_mapping, lv_grid_mapping = _build_lv_grid(
        ding0_mv_grid, network)
    network.mv_grid.lv_grids = lv_grids

    # Place switch disconnectors according to the configured mode
    position_switch_disconnectors(
        network.mv_grid,
        mode=network.config['disconnecting_point']['position'])

    # Cross-check the imported topology against the Ding0 source
    _validate_ding0_grid_import(network.mv_grid, ding0_mv_grid,
                                lv_grid_mapping)

    network.set_data_source('grid', 'dingo')
    network._id = network.mv_grid.id

    # Propagate weather cell ids that occur only in LV grids up to the
    # MV grid.
    # ToDo: find a cleaner way to push LV weather cell ids into the MV grid
    # from within the Grid()/MVGrid()/LVGrid() objects themselves
    known_cell_ids = network.mv_grid.weather_cells
    for lv_grid in lv_grids:
        if not lv_grid.weather_cells:
            continue
        for cell_id in lv_grid._weather_cells:
            if cell_id not in known_cell_ids:
                network.mv_grid._weather_cells.append(cell_id)
def _build_lv_grid(ding0_grid, network):
    """
    Build eDisGo LV grid from Ding0 data

    Translates each non-aggregated Ding0 LV grid district into an eDisGo
    :class:`LVGrid` and populates its graph with the station, loads,
    generators, branch tees and lines.

    Parameters
    ----------
    ding0_grid: ding0.MVGridDing0
        Ding0 MV grid object
    network: :class:`~.grid.network.Network`
        The eDisGo data container object. Its ``mv_grid`` must already hold
        the LV stations created during the MV import (see `_build_mv_grid`).

    Returns
    -------
    list of LVGrid
        LV grids
    dict
        Dictionary containing a mapping of LV stations in Ding0 to newly
        created eDisGo LV stations. This mapping is used to use the same
        instances of LV stations in the MV grid graph.
    dict
        Mapping of newly created eDisGo LV grids to the originating Ding0
        LV grids (used later for data validation).
    """
    lv_station_mapping = {}
    lv_grids = []
    lv_grid_mapping = {}
    for la in ding0_grid.grid_district._lv_load_areas:
        for lvgd in la._lv_grid_districts:
            ding0_lv_grid = lvgd.lv_grid
            # Grids inside aggregated load areas are not built individually;
            # they are represented at the MV station instead
            # (see _determine_aggregated_nodes / _attach_aggregated).
            if not ding0_lv_grid.grid_district.lv_load_area.is_aggregated:
                # Create LV grid instance
                lv_grid = LVGrid(
                    id=ding0_lv_grid.id_db,
                    geom=ding0_lv_grid.grid_district.geo_data,
                    grid_district={
                        'geom': ding0_lv_grid.grid_district.geo_data,
                        'population': ding0_lv_grid.grid_district.population},
                    voltage_nom=ding0_lv_grid.v_level / 1e3,  # V -> kV
                    network=network)
                # Reuse the LVStation instance that was created during MV
                # import (looked up by its repr name) so MV and LV graphs
                # share the same station object.
                station = {repr(_): _ for _ in
                           network.mv_grid.graph.nodes_by_attribute(
                               'lv_station')}['LVStation_' + str(
                    ding0_lv_grid._station.id_db)]
                station.grid = lv_grid
                for t in station.transformers:
                    t.grid = lv_grid
                lv_grid.graph.add_node(station, type='lv_station')
                lv_station_mapping.update({ding0_lv_grid._station: station})
                # Create list of load instances and add these to grid's graph
                loads = {_: Load(
                    id=_.id_db,
                    geom=_.geo_data,
                    grid=lv_grid,
                    consumption=_.consumption) for _ in ding0_lv_grid.loads()}
                lv_grid.graph.add_nodes_from(loads.values(), type='load')
                # Create list of generator instances and add these to grid's
                # graph. Wind and solar units become GeneratorFluctuating
                # (which carries a weather cell id), all others become a
                # plain Generator.
                generators = {_: (GeneratorFluctuating(
                    id=_.id_db,
                    geom=_.geo_data,
                    nominal_capacity=_.capacity,
                    type=_.type,
                    subtype=_.subtype,
                    grid=lv_grid,
                    weather_cell_id=_.weather_cell_id,
                    v_level=_.v_level) if _.type in ['wind', 'solar'] else
                    Generator(
                        id=_.id_db,
                        geom=_.geo_data,
                        nominal_capacity=_.capacity,
                        type=_.type,
                        subtype=_.subtype,
                        grid=lv_grid,
                        v_level=_.v_level))
                    for _ in ding0_lv_grid.generators()}
                lv_grid.graph.add_nodes_from(generators.values(),
                                             type='generator')
                # Create list of branch tee instances and add these to grid's
                # graph
                branch_tees = {
                    _: BranchTee(id=_.id_db,
                                 geom=_.geo_data,
                                 grid=lv_grid,
                                 in_building=_.in_building)
                    for _ in ding0_lv_grid._cable_distributors}
                lv_grid.graph.add_nodes_from(branch_tees.values(),
                                             type='branch_tee')
                # Merge the nodes defined above into a single dict so edges
                # can be translated by looking up their Ding0 endpoints.
                nodes = {**loads,
                         **generators,
                         **branch_tees,
                         **{ding0_lv_grid._station: station}}
                edges = []
                edges_raw = list(nx.get_edge_attributes(
                    ding0_lv_grid._graph, name='branch').items())
                for edge in edges_raw:
                    edges.append({'adj_nodes': edge[0], 'branch': edge[1]})
                # Create list of line instances and add these to grid's graph
                # (length divided by 1e3 — presumably m -> km, mirroring the
                # V -> kV conversions; TODO confirm against ding0 units)
                lines = [(nodes[_['adj_nodes'][0]], nodes[_['adj_nodes'][1]],
                          {'line': Line(
                              id=_['branch'].id_db,
                              type=_['branch'].type,
                              length=_['branch'].length / 1e3,
                              kind=_['branch'].kind,
                              grid=lv_grid)
                          })
                         for _ in edges]
                # convert voltage from V to kV
                for line in lines:
                    # ToDo: remove work around once it's fixed in ding0
                    if line[2]['line'].type['U_n'] >= 400:
                        line[2]['line'].type['U_n'] = \
                            line[2]['line'].type['U_n'] / 1e3
                lv_grid.graph.add_edges_from(lines, type='line')
                # Add LV station as association to LV grid
                lv_grid._station = station
                # Add to lv grid mapping
                lv_grid_mapping.update({lv_grid: ding0_lv_grid})
                # Put all LV grid to a list of LV grids
                lv_grids.append(lv_grid)
    # ToDo: don't forget to adapt lv stations creation in MV grid
    return lv_grids, lv_station_mapping, lv_grid_mapping
def _build_mv_grid(ding0_grid, network):
    """
    Build eDisGo MV grid from Ding0 data

    Creates the :class:`MVGrid` instance, copies loads, generators, branch
    tees, LV stations and the HV-MV station into its graph, adds all lines
    that do not touch a load area centre, and finally attaches aggregated
    load/generation (from aggregated load areas) to the MV station.

    Parameters
    ----------
    ding0_grid: ding0.MVGridDing0
        Ding0 MV grid object
    network: Network
        The eDisGo container object

    Returns
    -------
    MVGrid
        A MV grid of class edisgo.grids.MVGrid is return. Data from the Ding0
        MV Grid object is translated to the new grid object.
    """
    # Instantiate a MV grid; population is summed over load areas with a
    # known (non-NaN) zensus value.
    grid = MVGrid(
        id=ding0_grid.id_db,
        network=network,
        grid_district={'geom': ding0_grid.grid_district.geo_data,
                       'population':
                           sum([_.zensus_sum
                                for _ in
                                ding0_grid.grid_district._lv_load_areas
                                if not np.isnan(_.zensus_sum)])},
        voltage_nom=ding0_grid.v_level)
    # Special treatment of LVLoadAreaCenters see ...
    # ToDo: add a reference above for explanation of how these are treated
    la_centers = [_ for _ in ding0_grid._graph.nodes()
                  if isinstance(_, LVLoadAreaCentreDing0)]
    if la_centers:
        aggregated, aggr_stations, dingo_import_data = \
            _determine_aggregated_nodes(la_centers)
        network.dingo_import_data = dingo_import_data
    else:
        aggregated = {}
        aggr_stations = []
        # create empty DF for imported agg. generators
        network.dingo_import_data = pd.DataFrame(columns=('id',
                                                          'capacity',
                                                          'agg_geno')
                                                 )
    # Create list of load instances and add these to grid's graph
    loads = {_: Load(
        id=_.id_db,
        geom=_.geo_data,
        grid=grid,
        consumption=_.consumption) for _ in ding0_grid.loads()}
    grid.graph.add_nodes_from(loads.values(), type='load')
    # Create list of generator instances and add these to grid's graph.
    # Wind/solar become GeneratorFluctuating (carries a weather cell id),
    # everything else a plain Generator.
    generators = {_: (GeneratorFluctuating(
        id=_.id_db,
        geom=_.geo_data,
        nominal_capacity=_.capacity,
        type=_.type,
        subtype=_.subtype,
        grid=grid,
        weather_cell_id=_.weather_cell_id,
        v_level=_.v_level) if _.type in ['wind', 'solar'] else
        Generator(
            id=_.id_db,
            geom=_.geo_data,
            nominal_capacity=_.capacity,
            type=_.type,
            subtype=_.subtype,
            grid=grid,
            v_level=_.v_level))
        for _ in ding0_grid.generators()}
    grid.graph.add_nodes_from(generators.values(), type='generator')
    # Create list of branch tee instances and add these to grid's graph
    branch_tees = {_: BranchTee(id=_.id_db,
                                geom=_.geo_data,
                                grid=grid,
                                in_building=False)
                   for _ in ding0_grid._cable_distributors}
    grid.graph.add_nodes_from(branch_tees.values(), type='branch_tee')
    # Create list of LV station instances and add these to grid's graph;
    # stations that belong to aggregated load areas are skipped here.
    stations = {_: LVStation(id=_.id_db,
                             geom=_.geo_data,
                             mv_grid=grid,
                             grid=None,  # (this will be set during LV import)
                             transformers=[Transformer(
                                 mv_grid=grid,
                                 grid=None,  # (this will be set during LV import)
                                 id='_'.join(['LVStation',
                                              str(_.id_db),
                                              'transformer',
                                              str(count)]),
                                 geom=_.geo_data,
                                 voltage_op=t.v_level,
                                 type=pd.Series(dict(
                                     S_nom=t.s_max_a, x_pu=t.x_pu, r_pu=t.r_pu))
                             ) for (count, t) in enumerate(_.transformers(), 1)])
                for _ in ding0_grid._graph.nodes()
                if isinstance(_, LVStationDing0) and _ not in aggr_stations}
    grid.graph.add_nodes_from(stations.values(), type='lv_station')
    # Create HV-MV station add to graph
    mv_station = MVStation(
        id=ding0_grid.station().id_db,
        geom=ding0_grid.station().geo_data,
        grid=grid,
        transformers=[Transformer(
            mv_grid=grid,
            grid=grid,
            id='_'.join(['MVStation',
                         str(ding0_grid.station().id_db),
                         'transformer',
                         str(count)]),
            geom=ding0_grid.station().geo_data,
            voltage_op=_.v_level,
            type=pd.Series(dict(
                S_nom=_.s_max_a, x_pu=_.x_pu, r_pu=_.r_pu)))
            for (count, _) in enumerate(
                ding0_grid.station().transformers(), 1)])
    grid.graph.add_node(mv_station, type='mv_station')
    # Merge the nodes defined above into a single dict so edges can be
    # translated by looking up their Ding0 endpoints.
    nodes = {**loads,
             **generators,
             **branch_tees,
             **stations,
             **{ding0_grid.station(): mv_station}}
    # Create list of line instances and add these to grid's graph. Edges
    # touching a load area centre are skipped; those areas are represented
    # by the aggregated components attached below.
    lines = [(nodes[_['adj_nodes'][0]], nodes[_['adj_nodes'][1]],
              {'line': Line(
                  id=_['branch'].id_db,
                  type=_['branch'].type,
                  kind=_['branch'].kind,
                  length=_['branch'].length / 1e3,
                  grid=grid)
              })
             for _ in ding0_grid.graph_edges()
             if not any([isinstance(_['adj_nodes'][0], LVLoadAreaCentreDing0),
                         isinstance(_['adj_nodes'][1], LVLoadAreaCentreDing0)])]
    # set line name as series name
    for line in lines:
        line[2]['line'].type.name = line[2]['line'].type['name']
    grid.graph.add_edges_from(lines, type='line')
    # Assign reference to HV-MV station to MV grid
    grid._station = mv_station
    # Attach aggregated to MV station
    _attach_aggregated(network, grid, aggregated, ding0_grid)
    return grid
def _determine_aggregated_nodes(la_centers):
    """Determine generation and load within load areas

    Parameters
    ----------
    la_centers: list of LVLoadAreaCentre
        Load Area Centers are Ding0 implementations for representating areas of
        high population density with high demand compared to DG potential.

    Notes
    -----
    Currently, MV grid loads are not considered in this aggregation function as
    Ding0 data does not come with loads in the MV grid level.

    Returns
    -------
    :obj:`list` of dict
        aggregated
        Dict of the structure

        .. code:

            {'generation': {
                'v_level': {
                    'subtype': {
                        'ids': <ids of aggregated generator>,
                        'capacity'}
                    }
                },
            'load': {
                'consumption':
                    'residential': <value>,
                    'retail': <value>,
                    ...
                }
            'aggregates': {
                'population': int,
                'geom': `shapely.Polygon`
                }
            }
    :obj:`list`
        aggr_stations
        List of LV stations its generation and load is aggregated
    :pandas:`pandas.DataFrame<dataframe>`
        dingo_import_data
        One row per original LV generator (id, capacity, placeholder for the
        aggregated generator it ends up in; filled by `_attach_aggregated`).
    """
    def aggregate_generators(gen, aggr):
        """Aggregate generation capacity per voltage level

        Parameters
        ----------
        gen: ding0.core.GeneratorDing0
            Ding0 Generator object
        aggr: dict
            Aggregated generation capacity. For structure see
            `_determine_aggregated_nodes()`.

        Returns
        -------
        dict
            The updated `aggr` dict (mutated in place as well).
        """
        # create nesting levels (v_level -> type -> subtype) on demand
        if gen.v_level not in aggr['generation']:
            aggr['generation'][gen.v_level] = {}
        if gen.type not in aggr['generation'][gen.v_level]:
            aggr['generation'][gen.v_level][gen.type] = {}
        if gen.subtype not in aggr['generation'][gen.v_level][gen.type]:
            aggr['generation'][gen.v_level][gen.type].update(
                {gen.subtype: {'ids': [gen.id_db],
                               'capacity': gen.capacity}})
        else:
            aggr['generation'][gen.v_level][gen.type][gen.subtype][
                'ids'].append(gen.id_db)
            aggr['generation'][gen.v_level][gen.type][gen.subtype][
                'capacity'] += gen.capacity
        return aggr

    def aggregate_loads(la_center, aggr):
        """Aggregate consumption in load area per sector

        Parameters
        ----------
        la_center: LVLoadAreaCentreDing0
            Load area center object from Ding0
        aggr: dict
            Aggregated load. For structure see
            `_determine_aggregated_nodes()`.

        Returns
        -------
        dict
            The updated `aggr` dict (mutated in place as well).
        """
        for s in ['retail', 'industrial', 'agricultural', 'residential']:
            if s not in aggr['load']:
                aggr['load'][s] = 0
        aggr['load']['retail'] += sum(
            [_.sector_consumption_retail
             for _ in la_center.lv_load_area._lv_grid_districts])
        aggr['load']['industrial'] += sum(
            [_.sector_consumption_industrial
             for _ in la_center.lv_load_area._lv_grid_districts])
        aggr['load']['agricultural'] += sum(
            [_.sector_consumption_agricultural
             for _ in la_center.lv_load_area._lv_grid_districts])
        aggr['load']['residential'] += sum(
            [_.sector_consumption_residential
             for _ in la_center.lv_load_area._lv_grid_districts])
        return aggr

    aggregated = {}
    aggr_stations = []
    # ToDo: The variable generation_aggr is further used -> delete this code
    generation_aggr = {}
    for la in la_centers[0].grid.grid_district._lv_load_areas:
        for lvgd in la._lv_grid_districts:
            for gen in lvgd.lv_grid.generators():
                if la.is_aggregated:
                    generation_aggr.setdefault(gen.type, {})
                    generation_aggr[gen.type].setdefault(gen.subtype, {'ding0': 0})
                    generation_aggr[gen.type][gen.subtype].setdefault('ding0', 0)
                    generation_aggr[gen.type][gen.subtype]['ding0'] += gen.capacity
    dingo_import_data = pd.DataFrame(columns=('id',
                                              'capacity',
                                              'agg_geno')
                                     )
    for la_center in la_centers:
        aggr = {'generation': {}, 'load': {}, 'aggregates': []}
        # Determine aggregated generation in LV grid
        for lvgd in la_center.lv_load_area._lv_grid_districts:
            weather_cell_ids = {}
            for gen in lvgd.lv_grid.generators():
                aggr = aggregate_generators(gen, aggr)
                # Count occurrences of weather cell ids among fluctuating
                # generators; the most frequent one is assigned below.
                if isinstance(gen, GeneratorFluctuatingDing0):
                    if gen.weather_cell_id not in weather_cell_ids.keys():
                        weather_cell_ids[gen.weather_cell_id] = 1
                    else:
                        weather_cell_ids[gen.weather_cell_id] += 1
                # Record each original LV generator so it can later be
                # related to its aggregated generator (agg_geno column is
                # filled in _attach_aggregated).
                dingo_import_data.loc[len(dingo_import_data)] = \
                    [int(gen.id_db),
                     gen.capacity,
                     None]
            # Get the weather cell id that occurs the most if there are any generators
            if not(list(lvgd.lv_grid.generators())):
                weather_cell_id = None
            else:
                if weather_cell_ids:
                    weather_cell_id = list(weather_cell_ids.keys())[
                        list(weather_cell_ids.values()).index(
                            max(weather_cell_ids.values()))]
                else:
                    weather_cell_id = None
            for v_level in aggr['generation']:
                for type in aggr['generation'][v_level]:
                    for subtype in aggr['generation'][v_level][type]:
                        # make sure to check if there are any generators before assigning
                        # a weather cell id
                        if not(list(lvgd.lv_grid.generators())):
                            pass
                        else:
                            aggr['generation'][v_level][type][subtype]['weather_cell_id'] = \
                                weather_cell_id
        # Determine aggregated load in MV grid
        # -> Implement once loads in Ding0 MV grids exist
        # Determine aggregated load in LV grid
        aggr = aggregate_loads(la_center, aggr)
        # Collect metadata of aggregated load areas
        aggr['aggregates'] = {
            'population': la_center.lv_load_area.zensus_sum,
            'geom': la_center.lv_load_area.geo_area}
        # Determine LV grids/ stations that are aggregated
        for _ in la_center.lv_load_area._lv_grid_districts:
            aggr_stations.append(_.lv_grid.station())
        # add elements to lists
        aggregated.update({la_center.id_db: aggr})
    return aggregated, aggr_stations, dingo_import_data
def _attach_aggregated(network, grid, aggregated, ding0_grid):
    """Add Generators and Loads to MV station representing aggregated generation
    capacity and load

    Aggregated generators and loads are created as single nodes and connected
    to the MV station via a short (1e-3 km) artificial cable of the strongest
    available MV cable type.

    Parameters
    ----------
    network: :class:`~.grid.network.Network`
        The eDisGo container object; its ``dingo_import_data`` frame records
        which aggregated generator each original LV generator ended up in.
    grid: MVGrid
        MV grid object
    aggregated: dict
        Information about aggregated load and generation capacity. For
        information about the structure of the dict see
        :func:`_determine_aggregated_nodes`.
    ding0_grid: ding0.Network
        Ding0 network container

    Returns
    -------
    MVGrid
        Altered instance of MV grid including aggregated load and generation
    """
    # use the MV cable type with the highest thermal rating for the
    # artificial connection lines
    aggr_line_type = ding0_grid.network._static_data['MV_cables'].iloc[
        ding0_grid.network._static_data['MV_cables']['I_max_th'].idxmax()]
    for la_id, la in aggregated.items():
        # add aggregated generators
        for v_level, val in la['generation'].items():
            for type, val2 in val.items():
                for subtype, val3 in val2.items():
                    if type in ['solar', 'wind']:
                        gen = GeneratorFluctuating(
                            id='agg-' + str(la_id) + '-' + '_'.join(
                                [str(_) for _ in val3['ids']]),
                            nominal_capacity=val3['capacity'],
                            weather_cell_id=val3['weather_cell_id'],
                            type=type,
                            subtype=subtype,
                            geom=grid.station.geom,
                            grid=grid,
                            v_level=4)
                    else:
                        gen = Generator(
                            id='agg-' + str(la_id) + '-' + '_'.join(
                                [str(_) for _ in val3['ids']]),
                            nominal_capacity=val3['capacity'],
                            type=type,
                            subtype=subtype,
                            geom=grid.station.geom,
                            grid=grid,
                            v_level=4)
                    grid.graph.add_node(gen, type='generator_aggr')
                    # backup reference of geno to LV geno list (save geno
                    # where the former LV genos are aggregated in).
                    # NOTE: DataFrame.set_value() is deprecated (removed in
                    # pandas 1.0) and did not accept a boolean mask; use
                    # label-based assignment via .loc instead.
                    network.dingo_import_data.loc[
                        network.dingo_import_data['id'].isin(val3['ids']),
                        'agg_geno'] = gen
                    # connect generator to MV station
                    line = Line(id='line_aggr_generator_la_' + str(la_id) + '_vlevel_{v_level}_'
                                   '{subtype}'.format(
                                       v_level=v_level,
                                       subtype=subtype),
                                type=aggr_line_type,
                                kind='cable',
                                length=1e-3,
                                grid=grid)
                    grid.graph.add_edge(grid.station,
                                        gen,
                                        line=line,
                                        type='line_aggr')
        # add one aggregated load per sector
        for sector, sectoral_load in la['load'].items():
            load = Load(
                geom=grid.station.geom,
                consumption={sector: sectoral_load},
                grid=grid,
                id='_'.join(['Load_aggregated', sector, repr(grid), str(la_id)]))
            grid.graph.add_node(load, type='load')
            # connect aggregated load to MV station
            line = Line(id='_'.join(['line_aggr_load_la_' + str(la_id), sector, str(la_id)]),
                        type=aggr_line_type,
                        kind='cable',
                        length=1e-3,
                        grid=grid)
            grid.graph.add_edge(grid.station,
                                load,
                                line=line,
                                type='line_aggr')
def _validate_ding0_grid_import(mv_grid, ding0_mv_grid, lv_grid_mapping):
    """Cross-check imported data with original data source

    Runs all import sanity checks: component counts on MV and LV level and
    cumulative load/generation totals. Each check raises ``ValueError`` when
    a mismatch is found.

    Parameters
    ----------
    mv_grid: MVGrid
        eDisGo MV grid instance
    ding0_mv_grid: MVGridDing0
        Ding0 MV grid instance
    lv_grid_mapping: dict
        Translates Ding0 LV grids to associated, newly created eDisGo LV grids
    """
    # component counts on MV level
    _validate_ding0_mv_grid_import(mv_grid, ding0_mv_grid)
    # component counts on LV level
    _validate_ding0_lv_grid_import(mv_grid.lv_grids, ding0_mv_grid,
                                   lv_grid_mapping)
    # cumulative load and generation in the MV grid district
    _validate_load_generation(mv_grid, ding0_mv_grid)
def _validate_ding0_mv_grid_import(grid, ding0_grid):
"""Verify imported data with original data from Ding0
Parameters
----------
grid: MVGrid
MV Grid data (eDisGo)
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
Notes
-----
The data validation excludes grid components located in aggregated load
areas as these are represented differently in eDisGo.
Returns
-------
dict
Dict showing data integrity for each type of grid component
"""
integrity_checks = ['branch_tee',
'disconnection_point', 'mv_transformer',
'lv_station'#,'line',
]
data_integrity = {}
data_integrity.update({_: {'ding0': None, 'edisgo': None, 'msg': None}
for _ in integrity_checks})
# Check number of branch tees
data_integrity['branch_tee']['ding0'] = len(ding0_grid._cable_distributors)
data_integrity['branch_tee']['edisgo'] = len(
grid.graph.nodes_by_attribute('branch_tee'))
# Check number of disconnecting points
data_integrity['disconnection_point']['ding0'] = len(
ding0_grid._circuit_breakers)
data_integrity['disconnection_point']['edisgo'] = len(
grid.graph.nodes_by_attribute('mv_disconnecting_point'))
# Check number of MV transformers
data_integrity['mv_transformer']['ding0'] = len(
list(ding0_grid.station().transformers()))
data_integrity['mv_transformer']['edisgo'] = len(
grid.station.transformers)
# Check number of LV stations in MV grid (graph)
data_integrity['lv_station']['edisgo'] = len(grid.graph.nodes_by_attribute(
'lv_station'))
data_integrity['lv_station']['ding0'] = len(
[_ for _ in ding0_grid._graph.nodes()
if (isinstance(_, LVStationDing0) and
not _.grid.grid_district.lv_load_area.is_aggregated)])
# Check number of lines outside aggregated LA
# edges_w_la = grid.graph.lines()
# data_integrity['line']['edisgo'] = len([_ for _ in edges_w_la
# if not (_['adj_nodes'][0] == grid.station or
# _['adj_nodes'][1] == grid.station) and
# _['line']._length > .5])
# data_integrity['line']['ding0'] = len(
# [_ for _ in ding0_grid.lines()
# if not _['branch'].connects_aggregated])
# raise an error if data does not match
for c in integrity_checks:
if data_integrity[c]['edisgo'] != data_integrity[c]['ding0']:
raise ValueError(
'Unequal number of objects for {c}. '
'\n\tDing0:\t{ding0_no}'
'\n\teDisGo:\t{edisgo_no}'.format(
c=c,
ding0_no=data_integrity[c]['ding0'],
edisgo_no=data_integrity[c]['edisgo']))
return data_integrity
def _validate_ding0_lv_grid_import(grids, ding0_grid, lv_grid_mapping):
"""Verify imported data with original data from Ding0
Parameters
----------
grids: list of LVGrid
LV Grid data (eDisGo)
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
lv_grid_mapping: dict
Defines relationship between Ding0 and eDisGo grid objects
Notes
-----
The data validation excludes grid components located in aggregated load
areas as these are represented differently in eDisGo.
Returns
-------
dict
Dict showing data integrity for each type of grid component
"""
integrity_checks = ['branch_tee', 'lv_transformer',
'generator', 'load','line']
data_integrity = {}
for grid in grids:
data_integrity.update({grid:{_: {'ding0': None, 'edisgo': None, 'msg': None}
for _ in integrity_checks}})
# Check number of branch tees
data_integrity[grid]['branch_tee']['ding0'] = len(
lv_grid_mapping[grid]._cable_distributors)
data_integrity[grid]['branch_tee']['edisgo'] = len(
grid.graph.nodes_by_attribute('branch_tee'))
# Check number of LV transformers
data_integrity[grid]['lv_transformer']['ding0'] = len(
list(lv_grid_mapping[grid].station().transformers()))
data_integrity[grid]['lv_transformer']['edisgo'] = len(
grid.station.transformers)
# Check number of generators
data_integrity[grid]['generator']['edisgo'] = len(
grid.generators)
data_integrity[grid]['generator']['ding0'] = len(
list(lv_grid_mapping[grid].generators()))
# Check number of loads
data_integrity[grid]['load']['edisgo'] = len(
grid.graph.nodes_by_attribute('load'))
data_integrity[grid]['load']['ding0'] = len(
list(lv_grid_mapping[grid].loads()))
# Check number of lines outside aggregated LA
data_integrity[grid]['line']['edisgo'] = len(
list(grid.graph.lines()))
data_integrity[grid]['line']['ding0'] = len(
[_ for _ in lv_grid_mapping[grid].graph_edges()
if not _['branch'].connects_aggregated])
# raise an error if data does not match
for grid in grids:
for c in integrity_checks:
if data_integrity[grid][c]['edisgo'] != data_integrity[grid][c]['ding0']:
raise ValueError(
'Unequal number of objects in grid {grid} for {c}. '
'\n\tDing0:\t{ding0_no}'
'\n\teDisGo:\t{edisgo_no}'.format(
grid=grid,
c=c,
ding0_no=data_integrity[grid][c]['ding0'],
edisgo_no=data_integrity[grid][c]['edisgo']))
def _validate_load_generation(mv_grid, ding0_mv_grid):
"""
Parameters
----------
mv_grid
ding0_mv_grid
Notes
-----
Only loads in LV grids are compared as currently Ding0 does not have MV
connected loads
"""
decimal_places = 6
tol = 10 ** -decimal_places
sectors = ['retail', 'industrial', 'agricultural', 'residential']
consumption = {_: {'edisgo': 0, 'ding0':0} for _ in sectors}
# Collect eDisGo LV loads
for lv_grid in mv_grid.lv_grids:
for load in lv_grid.graph.nodes_by_attribute('load'):
for s in sectors:
consumption[s]['edisgo'] += load.consumption.get(s, 0)
# Collect Ding0 LV loads
for la in ding0_mv_grid.grid_district._lv_load_areas:
for lvgd in la._lv_grid_districts:
for load in lvgd.lv_grid.loads():
for s in sectors:
consumption[s]['ding0'] += load.consumption.get(s, 0)
# Compare cumulative load
for k, v in consumption.items():
if v['edisgo'] != v['ding0']:
raise ValueError(
'Consumption for {sector} does not match! '
'\n\tDing0:\t{ding0}'
'\n\teDisGo:\t{edisgo}'.format(
sector=k,
ding0=v['ding0'],
edisgo=v['edisgo']))
# Compare cumulative generation capacity
mv_gens = mv_grid.graph.nodes_by_attribute('generator')
lv_gens = []
[lv_gens.extend(_.graph.nodes_by_attribute('generator'))
for _ in mv_grid.lv_grids]
gens_aggr = mv_grid.graph.nodes_by_attribute('generator_aggr')
generation = {}
generation_aggr = {}
# collect eDisGo cumulative generation capacity
for gen in mv_gens + lv_gens:
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'edisgo': 0})
generation[gen.type][gen.subtype]['edisgo'] += gen.nominal_capacity
for gen in gens_aggr:
generation_aggr.setdefault(gen.type, {})
generation_aggr[gen.type].setdefault(gen.subtype, {'edisgo': 0})
generation_aggr[gen.type][gen.subtype]['edisgo'] += gen.nominal_capacity
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'edisgo': 0})
generation[gen.type][gen.subtype]['edisgo'] += gen.nominal_capacity
# collect Ding0 MV generation capacity
for gen in ding0_mv_grid.generators():
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'ding0': 0})
generation[gen.type][gen.subtype].setdefault('ding0', 0)
generation[gen.type][gen.subtype]['ding0'] += gen.capacity
# Collect Ding0 LV generation capacity
for la in ding0_mv_grid.grid_district._lv_load_areas:
for lvgd in la._lv_grid_districts:
for gen in lvgd.lv_grid.generators():
if la.is_aggregated:
generation_aggr.setdefault(gen.type, {})
generation_aggr[gen.type].setdefault(gen.subtype, {'ding0': 0})
generation_aggr[gen.type][gen.subtype].setdefault('ding0', 0)
generation_aggr[gen.type][gen.subtype]['ding0'] += gen.capacity
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'ding0': 0})
generation[gen.type][gen.subtype].setdefault('ding0', 0)
generation[gen.type][gen.subtype]['ding0'] += gen.capacity
# Compare cumulative generation capacity
for k1, v1 in generation.items():
for k2, v2 in v1.items():
if abs(v2['edisgo'] - v2['ding0']) > tol:
raise ValueError(
'Generation capacity of {type} {subtype} does not match! '
'\n\tDing0:\t{ding0}'
'\n\teDisGo:\t{edisgo}'.format(
type=k1,
subtype=k2,
ding0=v2['ding0'],
edisgo=v2['edisgo']))
# Compare aggregated generation capacity
for k1, v1 in generation_aggr.items():
for k2, v2 in v1.items():
if abs(v2['edisgo'] - v2['ding0']) > tol:
raise ValueError(
'Aggregated generation capacity of {type} {subtype} does '
'not match! '
'\n\tDing0:\t{ding0}'
'\n\teDisGo:\t{edisgo}'.format(
type=k1,
subtype=k2,
ding0=v2['ding0'],
edisgo=v2['edisgo']))
def import_generators(network, data_source=None, file=None):
    """Import generator data from source.

    The generator data include

        * nom. capacity
        * type ToDo: specify!
        * timeseries

    Additional data which can be processed (e.g. used in OEDB data) are

        * location
        * type
        * subtype
        * capacity

    Parameters
    ----------
    network: :class:`~.grid.network.Network`
        The eDisGo container object
    data_source: :obj:`str`
        Data source. Supported sources:

            * 'oedb'
            * 'pypsa'

    file: :obj:`str`
        File to import data from, required when using file-based sources
        (i.e. 'pypsa').

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        List of generators

    Raises
    ------
    ValueError
        If `data_source` is neither 'oedb' nor 'pypsa'.
    """
    if data_source == 'oedb':
        # use the module logger ('edisgo') instead of the root logger so
        # the message honors the package's logging configuration
        logger.warning('Right now only solar and wind generators can be '
                       'imported from the oedb.')
        _import_genos_from_oedb(network=network)
        # presumably invalidates the cached weather cells so they are
        # re-collected after the import — TODO confirm
        network.mv_grid._weather_cells = None
        if network.pypsa is not None:
            pypsa_io.update_pypsa_generator_import(network)
    elif data_source == 'pypsa':
        _import_genos_from_pypsa(network=network, file=file)
    else:
        logger.error("Invalid option {} for generator import. Must either be "
                     "'oedb' or 'pypsa'.".format(data_source))
        raise ValueError('The option you specified is not supported.')
def _import_genos_from_oedb(network):
"""Import generator data from the Open Energy Database (OEDB).
The importer uses SQLAlchemy ORM objects.
These are defined in ego.io,
see https://github.com/openego/ego.io/tree/dev/egoio/db_tables
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Notes
------
Right now only solar and wind generators can be imported.
"""
def _import_conv_generators(session):
"""Import conventional (conv) generators
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of medium-voltage generators
Notes
-----
You can find a full list of columns in
:func:`edisgo.data.import_data._update_grids`
"""
# build query
generators_sqla = session.query(
orm_conv_generators.columns.id,
orm_conv_generators.columns.subst_id,
orm_conv_generators.columns.la_id,
orm_conv_generators.columns.capacity,
orm_conv_generators.columns.type,
orm_conv_generators.columns.voltage_level,
orm_conv_generators.columns.fuel,
func.ST_AsText(func.ST_Transform(
orm_conv_generators.columns.geom, srid))
). \
filter(orm_conv_generators.columns.subst_id == network.mv_grid.id). \
filter(orm_conv_generators.columns.voltage_level.in_([4, 5, 6, 7])). \
filter(orm_conv_generators_version)
# read data from db
generators_mv = pd.read_sql_query(generators_sqla.statement,
session.bind,
index_col='id')
return generators_mv
def _import_res_generators(session):
"""Import renewable (res) generators
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of medium-voltage generators
:pandas:`pandas.DataFrame<dataframe>`
List of low-voltage generators
Notes
-----
You can find a full list of columns in
:func:`edisgo.data.import_data._update_grids`
If subtype is not specified it's set to 'unknown'.
"""
# Create filter for generation technologies
# ToDo: This needs to be removed when all generators can be imported
types_filter = orm_re_generators.columns.generation_type.in_(
['solar', 'wind'])
# build basic query
generators_sqla = session.query(
orm_re_generators.columns.id,
orm_re_generators.columns.subst_id,
orm_re_generators.columns.la_id,
orm_re_generators.columns.mvlv_subst_id,
orm_re_generators.columns.electrical_capacity,
orm_re_generators.columns.generation_type,
orm_re_generators.columns.generation_subtype,
orm_re_generators.columns.voltage_level,
orm_re_generators.columns.w_id,
func.ST_AsText(func.ST_Transform(
orm_re_generators.columns.rea_geom_new, srid)).label('geom'),
func.ST_AsText(func.ST_Transform(
orm_re_generators.columns.geom, srid)).label('geom_em')). \
filter(orm_re_generators.columns.subst_id == network.mv_grid.id). \
filter(orm_re_generators_version). \
filter(types_filter)
# extend basic query for MV generators and read data from db
generators_mv_sqla = generators_sqla. \
filter(orm_re_generators.columns.voltage_level.in_([4, 5]))
generators_mv = pd.read_sql_query(generators_mv_sqla.statement,
session.bind,
index_col='id')
# define generators with unknown subtype as 'unknown'
generators_mv.loc[generators_mv[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
# extend basic query for LV generators and read data from db
generators_lv_sqla = generators_sqla. \
filter(orm_re_generators.columns.voltage_level.in_([6, 7]))
generators_lv = pd.read_sql_query(generators_lv_sqla.statement,
session.bind,
index_col='id')
# define generators with unknown subtype as 'unknown'
generators_lv.loc[generators_lv[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
return generators_mv, generators_lv
def _update_grids(network, generators_mv, generators_lv, remove_missing=True):
    """Update imported status quo DINGO-grid according to new generator dataset

    It

    * adds new generators to grid if they do not exist
    * updates existing generators if parameters have changed
    * removes existing generators from grid which do not exist in the imported dataset

    Steps:

    * Step 1: MV generators: Update existing, create new, remove decommissioned
    * Step 2: LV generators (single units): Update existing, remove decommissioned
    * Step 3: LV generators (in aggregated MV generators): Update existing,
      remove decommissioned
      (aggregated MV generators = originally LV generators from aggregated Load
      Areas which were aggregated during import from ding0.)
    * Step 4: LV generators (single units + aggregated MV generators): Create new

    Parameters
    ----------
    network: :class:`~.grid.network.Network`
        The eDisGo container object
    generators_mv: :pandas:`pandas.DataFrame<dataframe>`
        List of MV generators
        Columns:
            * id: :obj:`int` (index column)
            * electrical_capacity: :obj:`float` (unit: kW)
            * generation_type: :obj:`str` (e.g. 'solar')
            * generation_subtype: :obj:`str` (e.g. 'solar_roof_mounted')
            * voltage level: :obj:`int` (range: 4..7,)
            * geom: :shapely:`Shapely Point object<points>`
              (CRS see config_grid.cfg)
            * geom_em: :shapely:`Shapely Point object<points>`
              (CRS see config_grid.cfg)
    generators_lv: :pandas:`pandas.DataFrame<dataframe>`
        List of LV generators
        Columns:
            * id: :obj:`int` (index column)
            * mvlv_subst_id: :obj:`int` (id of MV-LV substation in grid
              = grid which the generator will be connected to)
            * electrical_capacity: :obj:`float` (unit: kW)
            * generation_type: :obj:`str` (e.g. 'solar')
            * generation_subtype: :obj:`str` (e.g. 'solar_roof_mounted')
            * voltage level: :obj:`int` (range: 4..7,)
            * geom: :shapely:`Shapely Point object<points>`
              (CRS see config_grid.cfg)
            * geom_em: :shapely:`Shapely Point object<points>`
              (CRS see config_grid.cfg)
    remove_missing: :obj:`bool`
        If true, remove generators from grid which are not included in the imported dataset.
    """
    # NOTE(review): indentation was reconstructed from a whitespace-stripped
    # dump; nesting follows the comments and control flow -- verify against
    # the upstream source before relying on exact branch placement.

    # set capacity difference threshold
    cap_diff_threshold = 10 ** -4

    # get existing generators in MV and LV grids
    g_mv, g_lv, g_mv_agg = _build_generator_list(network=network)

    # print current capacity
    capacity_grid = 0
    capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_mv.iterrows()])
    capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_lv.iterrows()])
    capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_mv_agg.iterrows()])

    logger.debug('Cumulative generator capacity (existing): {} kW'
                 .format(str(round(capacity_grid, 1)))
                 )

    # ======================================
    # Step 1: MV generators (existing + new)
    # ======================================
    logger.debug('==> MV generators')
    logger.debug('{} generators imported.'
                 .format(str(len(generators_mv))))
    # get existing genos (status quo DF format)
    g_mv_existing = g_mv[g_mv['id'].isin(list(generators_mv.index.values))]
    # get existing genos (new genos DF format)
    generators_mv_existing = generators_mv[generators_mv.index.isin(list(g_mv_existing['id']))]
    # remove existing ones from grid's geno list
    g_mv = g_mv[~g_mv.isin(g_mv_existing)].dropna()

    # TEMP: BACKUP 1 GENO FOR TESTING
    #temp_geno = generators_mv_existing.iloc[0]
    #temp_geno['geom_em'] = temp_geno['geom_em'].replace('10.667', '10.64')

    # iterate over existing generators and check whether capacity has changed
    log_geno_count = 0
    log_geno_cap = 0
    for id, row in generators_mv_existing.iterrows():

        geno_existing = g_mv_existing[g_mv_existing['id'] == id]['obj'].iloc[0]

        # check if capacity equals; if not: update capacity
        if abs(row['electrical_capacity'] - \
                       geno_existing.nominal_capacity) < cap_diff_threshold:
            continue
        else:
            log_geno_cap += row['electrical_capacity'] - geno_existing.nominal_capacity
            log_geno_count += 1
            geno_existing.nominal_capacity = row['electrical_capacity']

            # check if cap=0 (this may happen if dp is buggy)
            if row['electrical_capacity'] <= 0:
                geno_existing.grid.graph.remove_node(geno_existing)
                logger.warning('Capacity of generator {} is <=0, generator removed. '
                               'Check your data source.'
                               .format(repr(geno_existing))
                               )

    # NOTE(review): second format arg prints len - count (i.e. the NOT-updated
    # rest), which reads oddly in "{} of {}" -- confirm intended.
    logger.debug('Capacities of {} of {} existing generators updated ({} kW).'
                 .format(str(log_geno_count),
                         str(len(generators_mv_existing) - log_geno_count),
                         str(round(log_geno_cap, 1))
                         )
                 )

    # new genos
    log_geno_count = 0
    log_geno_cap = 0
    generators_mv_new = generators_mv[~generators_mv.index.isin(
        list(g_mv_existing['id']))]

    # remove them from grid's geno list
    g_mv = g_mv[~g_mv.isin(list(generators_mv_new.index.values))].dropna()

    # TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
    #generators_mv_new = generators_mv_new.append(temp_geno)

    # iterate over new generators and create them
    for id, row in generators_mv_new.iterrows():
        # check if geom is available, skip otherwise
        geom = _check_geom(id, row)
        if not geom:
            logger.warning('Generator {} has no geom entry at all and will'
                           'not be imported!'.format(id))
            continue

        # create generator object and add it to MV grid's graph
        if row['generation_type'] in ['solar', 'wind']:
            network.mv_grid.graph.add_node(
                GeneratorFluctuating(
                    id=id,
                    grid=network.mv_grid,
                    nominal_capacity=row['electrical_capacity'],
                    type=row['generation_type'],
                    subtype=row['generation_subtype'],
                    v_level=int(row['voltage_level']),
                    weather_cell_id=row['w_id'],
                    geom=wkt_loads(geom)),
                type='generator')
        else:
            network.mv_grid.graph.add_node(
                Generator(id=id,
                          grid=network.mv_grid,
                          nominal_capacity=row['electrical_capacity'],
                          type=row['generation_type'],
                          subtype=row['generation_subtype'],
                          v_level=int(row['voltage_level']),
                          geom=wkt_loads(geom)
                          ),
                type='generator')
        log_geno_cap += row['electrical_capacity']
        log_geno_count += 1

    logger.debug('{} of {} new generators added ({} kW).'
                 .format(str(log_geno_count),
                         str(len(generators_mv_new)),
                         str(round(log_geno_cap, 1))
                         )
                 )

    # remove decommissioned genos
    # (genos which exist in grid but not in the new dataset)
    log_geno_cap = 0
    if not g_mv.empty and remove_missing:
        log_geno_count = 0
        for _, row in g_mv.iterrows():
            log_geno_cap += row['obj'].nominal_capacity
            row['obj'].grid.graph.remove_node(row['obj'])
            log_geno_count += 1
        logger.debug('{} of {} decommissioned generators removed ({} kW).'
                     .format(str(log_geno_count),
                             str(len(g_mv)),
                             str(round(log_geno_cap, 1))
                             )
                     )

    # =============================================
    # Step 2: LV generators (single existing units)
    # =============================================
    logger.debug('==> LV generators')
    logger.debug('{} generators imported.'.format(str(len(generators_lv))))
    # get existing genos (status quo DF format)
    g_lv_existing = g_lv[g_lv['id'].isin(list(generators_lv.index.values))]
    # get existing genos (new genos DF format)
    generators_lv_existing = generators_lv[generators_lv.index.isin(list(g_lv_existing['id']))]

    # TEMP: BACKUP 1 GENO FOR TESTING
    # temp_geno = g_lv.iloc[0]

    # remove existing ones from grid's geno list
    g_lv = g_lv[~g_lv.isin(g_lv_existing)].dropna()

    # iterate over existing generators and check whether capacity has changed
    log_geno_count = 0
    log_geno_cap = 0
    for id, row in generators_lv_existing.iterrows():

        geno_existing = g_lv_existing[g_lv_existing['id'] == id]['obj'].iloc[0]

        # check if capacity equals; if not: update capacity
        if abs(row['electrical_capacity'] - \
                       geno_existing.nominal_capacity) < cap_diff_threshold:
            continue
        else:
            log_geno_cap += row['electrical_capacity'] - geno_existing.nominal_capacity
            log_geno_count += 1
            geno_existing.nominal_capacity = row['electrical_capacity']

    logger.debug('Capacities of {} of {} existing generators (single units) updated ({} kW).'
                 .format(str(log_geno_count),
                         str(len(generators_lv_existing) - log_geno_count),
                         str(round(log_geno_cap, 1))
                         )
                 )

    # TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
    # g_lv.loc[len(g_lv)] = temp_geno

    # remove decommissioned genos
    # (genos which exist in grid but not in the new dataset)
    log_geno_cap = 0
    if not g_lv.empty and remove_missing:
        log_geno_count = 0
        for _, row in g_lv.iterrows():
            log_geno_cap += row['obj'].nominal_capacity
            row['obj'].grid.graph.remove_node(row['obj'])
            log_geno_count += 1
        logger.debug('{} of {} decommissioned generators (single units) removed ({} kW).'
                     .format(str(log_geno_count),
                             str(len(g_lv)),
                             str(round(log_geno_cap, 1))
                             )
                     )

    # ====================================================================================
    # Step 3: LV generators (existing in aggregated units (originally from aggregated LA))
    # ====================================================================================
    g_lv_agg = network.dingo_import_data
    g_lv_agg_existing = g_lv_agg[g_lv_agg['id'].isin(list(generators_lv.index.values))]
    generators_lv_agg_existing = generators_lv[generators_lv.index.isin(list(g_lv_agg_existing['id']))]

    # TEMP: BACKUP 1 GENO FOR TESTING
    # temp_geno = g_lv_agg.iloc[0]

    g_lv_agg = g_lv_agg[~g_lv_agg.isin(g_lv_agg_existing)].dropna()

    log_geno_count = 0
    log_agg_geno_list = []
    log_geno_cap = 0
    for id, row in generators_lv_agg_existing.iterrows():

        # check if capacity equals; if not: update capacity off agg. geno
        cap_diff = row['electrical_capacity'] - \
                   g_lv_agg_existing[g_lv_agg_existing['id'] == id]['capacity'].iloc[0]
        if abs(cap_diff) < cap_diff_threshold:
            continue
        else:
            agg_geno = g_lv_agg_existing[g_lv_agg_existing['id'] == id]['agg_geno'].iloc[0]
            agg_geno.nominal_capacity += cap_diff
            log_geno_cap += cap_diff

            log_geno_count += 1
            log_agg_geno_list.append(agg_geno)

    logger.debug('Capacities of {} of {} existing generators (in {} of {} aggregated units) '
                 'updated ({} kW).'
                 .format(str(log_geno_count),
                         str(len(generators_lv_agg_existing) - log_geno_count),
                         str(len(set(log_agg_geno_list))),
                         str(len(g_lv_agg_existing['agg_geno'].unique())),
                         str(round(log_geno_cap, 1))
                         )
                 )

    # TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
    # g_lv_agg.loc[len(g_lv_agg)] = temp_geno

    # remove decommissioned genos
    # (genos which exist in grid but not in the new dataset)
    log_geno_cap = 0
    if not g_lv_agg.empty and remove_missing:
        log_geno_count = 0
        for _, row in g_lv_agg.iterrows():
            row['agg_geno'].nominal_capacity -= row['capacity']
            log_geno_cap += row['capacity']

            # remove LV geno id from id string of agg. geno
            id = row['agg_geno'].id.split('-')
            ids = id[2].split('_')
            ids.remove(str(int(row['id'])))
            row['agg_geno'].id = '-'.join([id[0], id[1], '_'.join(ids)])

            # after removing the LV geno from agg geno, is the agg. geno empty?
            # if yes, remove it from grid
            if not ids:
                row['agg_geno'].grid.graph.remove_node(row['agg_geno'])

            log_geno_count += 1
        logger.debug('{} of {} decommissioned generators in aggregated generators removed ({} kW).'
                     .format(str(log_geno_count),
                             str(len(g_lv_agg)),
                             str(round(log_geno_cap, 1))
                             )
                     )

    # ====================================================================
    # Step 4: LV generators (new single units + genos in aggregated units)
    # ====================================================================
    # new genos
    log_geno_count =\
        log_agg_geno_new_count =\
        log_agg_geno_upd_count = 0

    # TEMP: BACKUP 1 GENO FOR TESTING
    #temp_geno = generators_lv[generators_lv.index == g_lv_existing.iloc[0]['id']]

    generators_lv_new = generators_lv[~generators_lv.index.isin(list(g_lv_existing['id'])) &
                                      ~generators_lv.index.isin(list(g_lv_agg_existing['id']))]

    # TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
    #generators_lv_new = generators_lv_new.append(temp_geno)

    # dict for new agg. generators
    agg_geno_new = {}

    # get LV grid districts
    lv_grid_dict = _build_lv_grid_dict(network)

    # get predefined random seed and initialize random generator
    seed = int(network.config['grid_connection']['random_seed'])
    random.seed(a=seed)

    # check if none of new generators can be allocated to an existing LV grid
    if not any([_ in lv_grid_dict.keys()
                for _ in list(generators_lv_new['mvlv_subst_id'])]):
        logger.warning('None of the imported generators can be allocated '
                       'to an existing LV grid. Check compatibility of grid '
                       'and generator datasets.')

    # iterate over new (single unit or part of agg. unit) generators and create them
    log_geno_cap = 0
    for id, row in generators_lv_new.iterrows():
        lv_geno_added_to_agg_geno = False

        # new unit is part of agg. LA (mvlv_subst_id is different from existing
        # ones in LV grids of non-agg. load areas)
        if (row['mvlv_subst_id'] not in lv_grid_dict.keys() and
                row['la_id'] and not isnan(row['la_id']) and
                row['mvlv_subst_id'] and not isnan(row['mvlv_subst_id'])):

            # check if new unit can be added to existing agg. generator
            # (LA id, type and subtype match) -> update existing agg. generator.
            # Normally, this case should not occur since `subtype` of new genos
            # is set to a new value (e.g. 'solar')
            for _, agg_row in g_mv_agg.iterrows():
                if (agg_row['la_id'] == int(row['la_id']) and
                        agg_row['obj'].type == row['generation_type'] and
                        agg_row['obj'].subtype == row['generation_subtype']):

                    agg_row['obj'].nominal_capacity += row['electrical_capacity']
                    agg_row['obj'].id += '_{}'.format(str(id))
                    log_agg_geno_upd_count += 1
                    lv_geno_added_to_agg_geno = True

            if not lv_geno_added_to_agg_geno:
                la_id = int(row['la_id'])
                if la_id not in agg_geno_new:
                    agg_geno_new[la_id] = {}
                if row['voltage_level'] not in agg_geno_new[la_id]:
                    agg_geno_new[la_id][row['voltage_level']] = {}
                if row['generation_type'] not in agg_geno_new[la_id][row['voltage_level']]:
                    agg_geno_new[la_id][row['voltage_level']][row['generation_type']] = {}
                if row['generation_subtype'] not in \
                        agg_geno_new[la_id][row['voltage_level']][row['generation_type']]:
                    agg_geno_new[la_id][row['voltage_level']][row['generation_type']]\
                        .update({row['generation_subtype']: {'ids': [int(id)],
                                                             'capacity': row['electrical_capacity']
                                                             }
                                 }
                                )
                else:
                    agg_geno_new[la_id][row['voltage_level']][row['generation_type']] \
                        [row['generation_subtype']]['ids'].append(int(id))
                    agg_geno_new[la_id][row['voltage_level']][row['generation_type']] \
                        [row['generation_subtype']]['capacity'] += row['electrical_capacity']

        # new generator is a single (non-aggregated) unit
        else:
            # check if geom is available
            geom = _check_geom(id, row)

            if row['generation_type'] in ['solar', 'wind']:
                gen = GeneratorFluctuating(
                    id=id,
                    grid=None,
                    nominal_capacity=row['electrical_capacity'],
                    type=row['generation_type'],
                    subtype=row['generation_subtype'],
                    v_level=int(row['voltage_level']),
                    weather_cell_id=row['w_id'],
                    geom=wkt_loads(geom) if geom else geom)
            else:
                gen = Generator(id=id,
                                grid=None,
                                nominal_capacity=row[
                                    'electrical_capacity'],
                                type=row['generation_type'],
                                subtype=row['generation_subtype'],
                                v_level=int(row['voltage_level']),
                                geom=wkt_loads(geom) if geom else geom)

            # TEMP: REMOVE MVLV SUBST ID FOR TESTING
            #row['mvlv_subst_id'] = None

            # check if MV-LV substation id exists. if not, allocate to
            # random one
            lv_grid = _check_mvlv_subst_id(
                generator=gen,
                mvlv_subst_id=row['mvlv_subst_id'],
                lv_grid_dict=lv_grid_dict)

            gen.grid = lv_grid

            lv_grid.graph.add_node(gen, type='generator')

            log_geno_count += 1
            log_geno_cap += row['electrical_capacity']

    # there are new agg. generators to be created
    if agg_geno_new:

        pfac_mv_gen = network.config['reactive_power_factor']['mv_gen']

        # add aggregated generators
        for la_id, val in agg_geno_new.items():
            for v_level, val2 in val.items():
                for type, val3 in val2.items():
                    for subtype, val4 in val3.items():
                        if type in ['solar', 'wind']:
                            gen = GeneratorFluctuating(
                                id='agg-' + str(la_id) + '-' + '_'.join([
                                    str(_) for _ in val4['ids']]),
                                grid=network.mv_grid,
                                nominal_capacity=val4['capacity'],
                                type=type,
                                subtype=subtype,
                                v_level=4,
                                # ToDo: get correct w_id
                                weather_cell_id=row['w_id'],
                                geom=network.mv_grid.station.geom)
                        else:
                            gen = Generator(
                                id='agg-' + str(la_id) + '-' + '_'.join([
                                    str(_) for _ in val4['ids']]),
                                nominal_capacity=val4['capacity'],
                                type=type,
                                subtype=subtype,
                                geom=network.mv_grid.station.geom,
                                grid=network.mv_grid,
                                v_level=4)
                        network.mv_grid.graph.add_node(
                            gen, type='generator_aggr')

                        # select cable type
                        line_type, line_count = select_cable(
                            network=network,
                            level='mv',
                            apparent_power=gen.nominal_capacity /
                                           pfac_mv_gen)

                        # connect generator to MV station
                        line = Line(id='line_aggr_generator_la_' + str(la_id) + '_vlevel_{v_level}_'
                                       '{subtype}'.format(
                                           v_level=v_level,
                                           subtype=subtype),
                                    type=line_type,
                                    kind='cable',
                                    quantity=line_count,
                                    length=1e-3,
                                    grid=network.mv_grid)

                        network.mv_grid.graph.add_edge(network.mv_grid.station,
                                                       gen,
                                                       line=line,
                                                       type='line_aggr')

                        log_agg_geno_new_count += len(val4['ids'])
                        log_geno_cap += val4['capacity']

    logger.debug('{} of {} new generators added ({} single units, {} to existing '
                 'agg. generators and {} units as new aggregated generators) '
                 '(total: {} kW).'
                 .format(str(log_geno_count +
                             log_agg_geno_new_count +
                             log_agg_geno_upd_count),
                         str(len(generators_lv_new)),
                         str(log_geno_count),
                         str(log_agg_geno_upd_count),
                         str(log_agg_geno_new_count),
                         str(round(log_geno_cap, 1))
                         )
                 )
def _check_geom(id, row):
"""Checks if a valid geom is available in dataset
If yes, this geom will be used.
If not:
* MV generators: use geom from EnergyMap.
* LV generators: set geom to None. It is re-set in
:func:`edisgo.data.import_data._check_mvlv_subst_id`
to MV-LV station's geom. EnergyMap's geom is not used
since it is more inaccurate than the station's geom.
Parameters
----------
id : :obj:`int`
Id of generator
row : :pandas:`pandas.Series<series>`
Generator dataset
Returns
-------
:shapely:`Shapely Point object<points>` or None
Geom of generator. None, if no geom is available.
"""
geom = None
# check if geom is available
if row['geom']:
geom = row['geom']
else:
# MV generators: set geom to EnergyMap's geom, if available
if int(row['voltage_level']) in [4,5]:
# check if original geom from Energy Map is available
if row['geom_em']:
geom = row['geom_em']
logger.debug('Generator {} has no geom entry, EnergyMap\'s geom entry will be used.'
.format(id)
)
return geom
def _check_mvlv_subst_id(generator, mvlv_subst_id, lv_grid_dict):
"""Checks if MV-LV substation id of single LV generator is missing or invalid.
If so, a random one from existing stations in LV grids will be assigned.
Parameters
----------
generator : :class:`~.grid.components.Generator`
LV generator
mvlv_subst_id : :obj:`int`
MV-LV substation id
lv_grid_dict : :obj:`dict`
Dict of existing LV grids
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
Returns
-------
:class:`~.grid.grids.LVGrid`
LV grid of generator
"""
if mvlv_subst_id and not isnan(mvlv_subst_id):
# assume that given LA exists
try:
# get LV grid
lv_grid = lv_grid_dict[mvlv_subst_id]
# if no geom, use geom of station
if not generator.geom:
generator.geom = lv_grid.station.geom
logger.debug('Generator {} has no geom entry, stations\' geom will be used.'
.format(generator.id)
)
return lv_grid
# if LA/LVGD does not exist, choose random LVGD and move generator to station of LVGD
# this occurs due to exclusion of LA with peak load < 1kW
except:
lv_grid = random.choice(list(lv_grid_dict.values()))
generator.geom = lv_grid.station.geom
logger.warning('Generator {} cannot be assigned to '
'non-existent LV Grid and was '
'allocated to a random LV Grid ({}); '
'geom was set to stations\' geom.'
.format(repr(generator),
repr(lv_grid)))
pass
return lv_grid
else:
lv_grid = random.choice(list(lv_grid_dict.values()))
generator.geom = lv_grid.station.geom
logger.warning('Generator {} has no mvlv_subst_id and was '
'allocated to a random LV Grid ({}); '
'geom was set to stations\' geom.'
.format(repr(generator),
repr(lv_grid)))
pass
return lv_grid
def _validate_generation():
    """Validate generators in updated grids

    The validation uses the cumulative capacity of all generators.

    NOTE(review): closure helper -- reads ``generators_res_mv``,
    ``generators_res_lv`` and ``network`` from the enclosing scope.
    Raises :class:`ValueError` if imported and in-grid capacities differ.
    """
    # ToDo: Validate conv. genos too!

    # set capacity difference threshold
    cap_diff_threshold = 10 ** -4

    capacity_imported = generators_res_mv['electrical_capacity'].sum() + \
                        generators_res_lv['electrical_capacity'].sum() #+ \
                        #generators_conv_mv['capacity'].sum()

    capacity_grid = 0
    # MV genos
    for geno in network.mv_grid.generators:
        capacity_grid += geno.nominal_capacity

    # LV genos
    for lv_grid in network.mv_grid.lv_grids:
        for geno in lv_grid.generators:
            capacity_grid += geno.nominal_capacity

    # NOTE(review): message says "(updated)" but prints the *imported*
    # capacity -- confirm whether `capacity_grid` was meant here.
    logger.debug('Cumulative generator capacity (updated): {} kW'
                 .format(str(round(capacity_imported, 1)))
                 )

    if abs(capacity_imported - capacity_grid) > cap_diff_threshold:
        raise ValueError('Cumulative capacity of imported generators ({} kW) '
                         'differ from cumulative capacity of generators '
                         'in updated grid ({} kW) by {} kW.'
                         .format(str(round(capacity_imported, 1)),
                                 str(round(capacity_grid, 1)),
                                 str(round(capacity_imported - capacity_grid, 1))
                                 )
                         )
    else:
        logger.debug('Cumulative capacity of imported generators validated.')
def _validate_sample_geno_location():
    """Plausibility check of generator locations.

    Draws one random MV and one random LV generator geom and verifies both
    lie inside the MV grid district polygon.

    NOTE(review): closure helper -- reads ``generators_res_mv``,
    ``generators_res_lv`` and ``network`` from the enclosing scope.
    Raises :class:`ValueError` if a sampled generator is outside the MVGD.
    """
    # only check when both datasets have geoms for every row
    if all(generators_res_lv['geom'].notnull()) \
            and all(generators_res_mv['geom'].notnull()) \
            and not generators_res_lv['geom'].empty \
            and not generators_res_mv['geom'].empty:
        # get geom of 1 random MV and 1 random LV generator and transform
        sample_mv_geno_geom_shp = transform(proj2equidistant(network),
                                            wkt_loads(generators_res_mv['geom']
                                                      .dropna()
                                                      .sample(n=1)
                                                      .item())
                                            )
        sample_lv_geno_geom_shp = transform(proj2equidistant(network),
                                            wkt_loads(generators_res_lv['geom']
                                                      .dropna()
                                                      .sample(n=1)
                                                      .item())
                                            )

        # get geom of MV grid district
        mvgd_geom_shp = transform(proj2equidistant(network),
                                  network.mv_grid.grid_district['geom']
                                  )

        # check if MVGD contains geno
        if not (mvgd_geom_shp.contains(sample_mv_geno_geom_shp) and
                mvgd_geom_shp.contains(sample_lv_geno_geom_shp)):
            raise ValueError('At least one imported generator is not located '
                             'in the MV grid area. Check compatibility of '
                             'grid and generator datasets.')
# ---- main flow of the enclosing import routine -------------------------
# NOTE(review): these statements belong to the enclosing function (its def
# is outside this view); they read `network` from that scope.
srid = int(network.config['geo']['srid'])

oedb_data_source = network.config['data_source']['oedb_data_source']
scenario = network.generator_scenario

if oedb_data_source == 'model_draft':

    # load ORM names
    orm_conv_generators_name = network.config['model_draft']['conv_generators_prefix'] + \
                               scenario + \
                               network.config['model_draft']['conv_generators_suffix']
    orm_re_generators_name = network.config['model_draft']['re_generators_prefix'] + \
                             scenario + \
                             network.config['model_draft']['re_generators_suffix']

    # import ORMs
    orm_conv_generators = model_draft.__getattribute__(orm_conv_generators_name)
    orm_re_generators = model_draft.__getattribute__(orm_re_generators_name)

    # set dummy version condition (select all generators)
    orm_conv_generators_version = 1 == 1
    orm_re_generators_version = 1 == 1

elif oedb_data_source == 'versioned':

    # load ORM names
    orm_conv_generators_name = network.config['versioned']['conv_generators_prefix'] + \
                               scenario + \
                               network.config['versioned']['conv_generators_suffix']
    orm_re_generators_name = network.config['versioned']['re_generators_prefix'] + \
                             scenario + \
                             network.config['versioned']['re_generators_suffix']

    data_version = network.config['versioned']['version']

    # import ORMs
    orm_conv_generators = supply.__getattribute__(orm_conv_generators_name)
    orm_re_generators = supply.__getattribute__(orm_re_generators_name)

    # set version condition
    orm_conv_generators_version = orm_conv_generators.columns.version == data_version
    orm_re_generators_version = orm_re_generators.columns.version == data_version

# get conventional and renewable generators
with session_scope() as session:
    #generators_conv_mv = _import_conv_generators(session)
    generators_res_mv, generators_res_lv = _import_res_generators(
        session)

#generators_mv = generators_conv_mv.append(generators_res_mv)

_validate_sample_geno_location()

_update_grids(network=network,
              #generators_mv=generators_mv,
              generators_mv=generators_res_mv,
              generators_lv=generators_res_lv)

_validate_generation()

connect_mv_generators(network=network)
connect_lv_generators(network=network)
def _import_genos_from_pypsa(network, file):
"""Import generator data from a pyPSA file.
TBD
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
file: :obj:`str`
File including path
"""
raise NotImplementedError
# generators = pd.read_csv(file,
# comment='#',
# index_col='name',
# delimiter=',',
# decimal='.'
# )
def _build_generator_list(network):
"""Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators
"""
genos_mv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv_agg = pd.DataFrame(columns=
('la_id', 'id', 'obj'))
# MV genos
for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
la_id = int(geno.id.split('-')[1].split('_')[-1])
genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]
return genos_mv, genos_lv, genos_lv_agg
def _build_lv_grid_dict(network):
"""Creates dict of LV grids
LV grid ids are used as keys, LV grid references as values.
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Returns
-------
:obj:`dict`
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
"""
lv_grid_dict = {}
for lv_grid in network.mv_grid.lv_grids:
lv_grid_dict[lv_grid.id] = lv_grid
return lv_grid_dict
def import_feedin_timeseries(config_data, weather_cell_ids):
    """
    Import RES feed-in time series data and process

    Parameters
    ----------
    config_data : dict
        Dictionary containing config data from config files.
    weather_cell_ids : :obj:`list`
        List of weather cell id's (integers) to obtain feed-in data for.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Feedin time series
    """

    def _retrieve_timeseries_from_oedb(session):
        """Retrieve time series from oedb

        Uses ``orm_feedin``/``orm_feedin_version`` and ``weather_cell_ids``
        from the enclosing scope.
        """
        # ToDo: add option to retrieve subset of time series
        # ToDo: find the reference power class for mvgrid/w_id and insert instead of 4
        feedin_sqla = session.query(
            orm_feedin.w_id,
            orm_feedin.source,
            orm_feedin.feedin). \
            filter(orm_feedin.w_id.in_(weather_cell_ids)). \
            filter(orm_feedin.power_class.in_([0, 4])). \
            filter(orm_feedin_version)

        feedin = pd.read_sql_query(feedin_sqla.statement,
                                   session.bind,
                                   index_col=['source', 'w_id'])
        return feedin

    # choose ORM (and version filter) according to configured data source
    if config_data['data_source']['oedb_data_source'] == 'model_draft':
        orm_feedin_name = config_data['model_draft']['res_feedin_data']
        orm_feedin = model_draft.__getattribute__(orm_feedin_name)
        orm_feedin_version = 1 == 1
    else:
        orm_feedin_name = config_data['versioned']['res_feedin_data']
        orm_feedin = supply.__getattribute__(orm_feedin_name)
        orm_feedin_version = orm_feedin.version == config_data['versioned'][
            'version']

    with session_scope() as session:
        feedin = _retrieve_timeseries_from_oedb(session)

    feedin.sort_index(axis=0, inplace=True)

    # one full year of hourly values, hard-coded to 2011
    timeindex = pd.date_range('1/1/2011', periods=8760, freq='H')

    # pivot: one column per (source, w_id) index entry
    recasted_feedin_dict = {}
    for type_w_id in feedin.index:
        recasted_feedin_dict[type_w_id] = feedin.loc[
            type_w_id, :].values[0]

    feedin = pd.DataFrame(recasted_feedin_dict, index=timeindex)

    # rename 'wind_onshore' and 'wind_offshore' to 'wind'
    # NOTE(review): only 'wind_onshore' is actually in the rename list,
    # despite what the comment above says -- confirm intended.
    new_level = [_ if _ not in ['wind_onshore']
                 else 'wind' for _ in feedin.columns.levels[0]]
    # NOTE(review): `inplace=` on set_levels is deprecated in newer pandas.
    feedin.columns.set_levels(new_level, level=0, inplace=True)

    feedin.columns.rename('type', level=0, inplace=True)
    feedin.columns.rename('weather_cell_id', level=1, inplace=True)

    return feedin
def import_load_timeseries(config_data, data_source, mv_grid_id=None,
                           year=None):
    """
    Import load time series

    Parameters
    ----------
    config_data : dict
        Dictionary containing config data from config files.
    data_source : str
        Specify type of data source. Available data sources are

         * 'demandlib'
            Determine a load time series with the use of the demandlib.
            This calculates standard load profiles for 4 different sectors.

    mv_grid_id : :obj:`str`
        MV grid ID as used in oedb. Provide this if `data_source` is 'oedb'.
        Default: None.
    year : int
        Year for which to generate load time series. Provide this if
        `data_source` is 'demandlib'. Default: None.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Load time series

    NOTE(review): an unrecognised `data_source` leaves `load` unbound and
    the final `return` raises NameError -- consider validating up front.
    """

    def _import_load_timeseries_from_oedb(config_data, mv_grid_id):
        """
        Retrieve load time series from oedb

        Parameters
        ----------
        config_data : dict
            Dictionary containing config data from config files.

        Returns
        -------
        :pandas:`pandas.DataFrame<dataframe>`
            Load time series

        Notes
        ------
        This is currently not a valid option to retrieve load time series
        since time series in the oedb are not differentiated by sector. An
        issue concerning this has been created.
        """
        if config_data['versioned']['version'] == 'model_draft':
            orm_load_name = config_data['model_draft']['load_data']
            orm_load = model_draft.__getattribute__(orm_load_name)
            orm_load_areas_name = config_data['model_draft']['load_areas']
            orm_load_areas = model_draft.__getattribute__(orm_load_areas_name)
            orm_load_version = 1 == 1
        else:
            orm_load_name = config_data['versioned']['load_data']
            # orm_load = supply.__getattribute__(orm_load_name)
            # ToDo: remove workaround
            orm_load = model_draft.__getattribute__(orm_load_name)
            # orm_load_version = orm_load.version == config.data['versioned']['version']

            orm_load_areas_name = config_data['versioned']['load_areas']
            # orm_load_areas = supply.__getattribute__(orm_load_areas_name)
            # ToDo: remove workaround
            orm_load_areas = model_draft.__getattribute__(orm_load_areas_name)
            # orm_load_areas_version = orm_load.version == config.data['versioned']['version']

            orm_load_version = 1 == 1

        with session_scope() as session:
            load_sqla = session.query(  # orm_load.id,
                orm_load.p_set,
                orm_load.q_set,
                orm_load_areas.subst_id). \
                join(orm_load_areas, orm_load.id == orm_load_areas.otg_id). \
                filter(orm_load_areas.subst_id == mv_grid_id). \
                filter(orm_load_version). \
                distinct()

            load = pd.read_sql_query(load_sqla.statement,
                                     session.bind,
                                     index_col='subst_id')
        return load

    def _load_timeseries_demandlib(config_data, year):
        """
        Get normalized sectoral load time series

        Time series are normalized to 1 kWh consumption per year

        Parameters
        ----------
        config_data : dict
            Dictionary containing config data from config files.
        year : int
            Year for which to generate load time series.

        Returns
        -------
        :pandas:`pandas.DataFrame<dataframe>`
            Load time series
        """
        # h0: households, g0: retail, i0: industrial, l0: agricultural
        sectoral_consumption = {'h0': 1, 'g0': 1, 'i0': 1, 'l0': 1}

        cal = Germany()
        holidays = dict(cal.holidays(year))

        e_slp = bdew.ElecSlp(year, holidays=holidays)

        # multiply given annual demand with timeseries
        elec_demand = e_slp.get_profile(sectoral_consumption)

        # Add the slp for the industrial group
        ilp = profiles.IndustrialLoadProfile(e_slp.date_time_index,
                                             holidays=holidays)

        # Beginning and end of workday, weekdays and weekend days, and scaling
        # factors by default
        elec_demand['i0'] = ilp.simple_profile(
            sectoral_consumption['i0'],
            am=datetime.time(config_data['demandlib']['day_start'].hour,
                             config_data['demandlib']['day_start'].minute, 0),
            pm=datetime.time(config_data['demandlib']['day_end'].hour,
                             config_data['demandlib']['day_end'].minute, 0),
            profile_factors=
            {'week': {'day': config_data['demandlib']['week_day'],
                      'night': config_data['demandlib']['week_night']},
             'weekend': {'day': config_data['demandlib']['weekend_day'],
                         'night': config_data['demandlib']['weekend_night']}})

        # Resample 15-minute values to hourly values and sum across sectors
        elec_demand = elec_demand.resample('H').mean()

        return elec_demand

    if data_source == 'oedb':
        load = _import_load_timeseries_from_oedb(config_data, mv_grid_id)
    elif data_source == 'demandlib':
        load = _load_timeseries_demandlib(config_data, year)
        # map demandlib profile codes to eDisGo sector names
        load.rename(columns={'g0': 'retail', 'h0': 'residential',
                             'l0': 'agricultural', 'i0': 'industrial'},
                    inplace=True)
    return load
| agpl-3.0 |
rs2/pandas | pandas/tests/io/parser/conftest.py | 1 | 2798 | import os
from typing import List, Optional
import pytest
from pandas import read_csv, read_table
class BaseParser:
    """Base wrapper around pandas read_csv/read_table with fixed engine settings."""

    # engine/low_memory select how pandas parses; subclasses override them.
    engine: Optional[str] = None
    low_memory = True
    float_precision_choices: List[Optional[str]] = []

    def update_kwargs(self, kwargs):
        # Return a copy of *kwargs* with this parser's engine settings merged in;
        # the caller's dict is left untouched.
        merged = dict(kwargs)
        merged["engine"] = self.engine
        merged["low_memory"] = self.low_memory
        return merged

    def read_csv(self, *args, **kwargs):
        return read_csv(*args, **self.update_kwargs(kwargs))

    def read_table(self, *args, **kwargs):
        return read_table(*args, **self.update_kwargs(kwargs))
class CParser(BaseParser):
    # C-engine parser; the C engine supports selectable float precision.
    engine = "c"
    float_precision_choices = [None, "high", "round_trip"]
class CParserHighMemory(CParser):
    # C engine with the low-memory chunked-read path disabled.
    low_memory = False
class CParserLowMemory(CParser):
    # C engine with the low-memory chunked-read path enabled (the default).
    low_memory = True
class PythonParser(BaseParser):
    # Pure-Python engine; it has no configurable float precision.
    engine = "python"
    float_precision_choices = [None]
@pytest.fixture
def csv_dir_path(datapath):
    """
    The directory path to the data files needed for parser tests.
    """
    # ``datapath`` is a fixture provided by the test suite that resolves
    # paths relative to the tests tree — TODO confirm against conftest root.
    return datapath("io", "parser", "data")
@pytest.fixture
def csv1(datapath):
    """
    The path to the data file "test1.csv" needed for parser tests.
    """
    # Note: lives under io/data/csv, not under the parser data directory.
    return os.path.join(datapath("io", "data", "csv"), "test1.csv")
# Singleton parser instances, created once and shared by every test.
_cParserHighMemory = CParserHighMemory()
_cParserLowMemory = CParserLowMemory()
_pythonParser = PythonParser()

# Parser groupings and their pytest ids (order must stay in sync pairwise).
_py_parsers_only = [_pythonParser]
_c_parsers_only = [_cParserHighMemory, _cParserLowMemory]
_all_parsers = [*_c_parsers_only, *_py_parsers_only]
_py_parser_ids = ["python"]
_c_parser_ids = ["c_high", "c_low"]
_all_parser_ids = [*_c_parser_ids, *_py_parser_ids]
@pytest.fixture(params=_all_parsers, ids=_all_parser_ids)
def all_parsers(request):
    """
    Fixture for all of the CSV parsers.
    """
    return request.param
@pytest.fixture(params=_c_parsers_only, ids=_c_parser_ids)
def c_parser_only(request):
    """
    Fixture for all of the CSV parsers using the C engine.
    """
    return request.param
@pytest.fixture(params=_py_parsers_only, ids=_py_parser_ids)
def python_parser_only(request):
    """
    Fixture for all of the CSV parsers using the Python engine.
    """
    return request.param
# Building blocks for every spelling of a UTF encoding name
# (e.g. "utf-8", "UTF_16", "utf32"): prefix x separator, formatted with
# the bit-width later via str.format.
_utf_values = [8, 16, 32]
_encoding_seps = ["", "-", "_"]
_encoding_prefixes = ["utf", "UTF"]
_encoding_fmts = [
    f"{prefix}{sep}" + "{0}" for sep in _encoding_seps for prefix in _encoding_prefixes
]
@pytest.fixture(params=_utf_values)
def utf_value(request):
    """
    Fixture for all possible integer values for a UTF encoding.
    """
    return request.param
@pytest.fixture(params=_encoding_fmts)
def encoding_fmt(request):
    """
    Fixture for all possible string formats of a UTF encoding.
    """
    return request.param
| bsd-3-clause |
fsxfreak/nlp-work | src/train_map.py | 1 | 6406 | import tensorflow as tf
import numpy as np
import pandas as pd
from gensim.models.keyedvectors import KeyedVectors
import time, math, random, string
print(random.__file__)
def timeit(method):
    """Decorator: print the wall-clock duration of each call to *method*.

    The wrapped function's return value is passed through unchanged.
    """
    # Local import keeps the module's dependency block untouched.
    from functools import wraps

    @wraps(method)  # preserve __name__/__doc__ of the decorated function
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r (%r, %r) %2.2f sec' % \
            (method.__name__, args, kw, te-ts))
        return result
    return timed
class Trainer(object):
    """Learn a linear map W from a source to a target word-embedding space.

    W (300x300) is trained with a max-margin ranking loss against sampled
    negative target vectors (k hardest of 4k random candidates), following
    Lazaridou et al. (2015).
    """

    _SPACE_DIR = '/usr/share/lang-corpus/word2vec/'
    _SRC_SPACE_FILENAME = _SPACE_DIR + 'GoogleNews-vectors-negative300-SLIM.bin'
    _TRG_SPACE_FILENAME = _SPACE_DIR + 'de-vectors.bin'
    _VEC_SHAPE = (1, 300)
    _MAP_SHAPE = (300, 300)
    _NUM_NEG = 10  # k, Lazaridou et. al, 2015

    @timeit
    def __init__(self, train_filename, test_filename):
        """Load both embedding spaces and dictionaries, then build the graph."""
        self.src_mdl = KeyedVectors.load_word2vec_format(self._SRC_SPACE_FILENAME, binary=True)
        self.trg_mdl = KeyedVectors.load_word2vec_format(self._TRG_SPACE_FILENAME, binary=True)
        self.set_labels(train_filename, test_filename)
        self.build_graph()

    @timeit
    def _load_labels(self, filename):
        """Read a space-separated bilingual dictionary file and pair each
        word with its embedding vector, reshaped to ``_VEC_SHAPE``.

        Returns a list of ``((src_word, src_vec), (trg_word, trg_vec))``.
        """
        # TODO don't ignore first row
        data_raw = pd.read_csv(filename, sep=' ')
        data_raw.columns = ['src', 'trg']
        data = []
        for src_raw, trg_raw in list(zip(data_raw.src, data_raw.trg)):
            src_vec = self.src_mdl[src_raw]
            trg_vec = self.trg_mdl[trg_raw]
            src_vec.shape = self._VEC_SHAPE
            trg_vec.shape = self._VEC_SHAPE
            data.append(((src_raw, src_vec), (trg_raw, trg_vec)))
        return data

    def set_labels(self, train_filename, test_filename):
        """Load (or reload) the train and test dictionaries."""
        self.train_labels = self._load_labels(train_filename)
        self.test_labels = self._load_labels(test_filename)

    def build_graph(self):
        '''
        Builds the Tensorflow computation graph. Should call this every time
        Trainer.set_labels() is called (but not entirely necessary).
        '''
        x = tf.placeholder(tf.float32, shape=self._VEC_SHAPE, name='x')
        y = tf.placeholder(tf.float32, shape=self._VEC_SHAPE, name='y')
        W = tf.get_variable('W', shape=self._MAP_SHAPE, dtype=tf.float32,
                            regularizer=tf.contrib.layers.l2_regularizer(0.1))
        y_hat = tf.matmul(x, W, name='y_hat')
        '''
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        loss = (tf.reduce_sum(tf.squared_difference(y_hat, y),name='loss')
                + 0.01 * sum(reg_losses))
        '''
        # One placeholder per negative sample.
        negs = [tf.placeholder(tf.float32,
                               shape=self._VEC_SHAPE,
                               name='y_%d' % i) for i in range(self._NUM_NEG)]

        def dist(t1, t2):
            # Angular (arccos of cosine similarity) distance, scaled by pi.
            return tf.divide(tf.acos(
                tf.divide(tf.reduce_sum(tf.multiply(t1, t2)),
                          tf.multiply(tf.norm(t1), tf.norm(t2)))
                ),
                tf.constant(math.pi))

        gamma = tf.constant(0.75)  # ranking margin
        # Hinge loss: penalise negatives closer to y_hat than the margin allows.
        loss = tf.reduce_sum(tf.stack(
            [tf.maximum(tf.zeros(shape=()), tf.add(gamma,
                tf.subtract(dist(y_hat, y),
                            dist(y_hat, neg)))) for neg in negs]))
        minimizer = (tf.train.GradientDescentOptimizer(0.005)
                     .minimize(loss))
        sess = tf.Session()
        self.tf_g = {
            'x': x, 'y': y, 'W': W, 'y_hat': y_hat, 'negs': negs,
            # 'reg_losses': reg_losses,
            'loss': loss,
            'minimizer': minimizer,
            'sess': sess
        }
        self.tf_g['sess'].run(tf.global_variables_initializer())
        for t in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
            print(t.name)

    def train_step(self):
        """Run one pass over the training dictionary; return the last loss."""
        for src, trg in self.train_labels:
            src_vec = src[1]
            trg_vec = trg[1]
            feed_dict = {
                self.tf_g['x']: src_vec,
                self.tf_g['y']: trg_vec
            }
            W = self.tf_g['sess'].run(self.tf_g['W'])
            # cannot obtain from Session.run() because have not fed the new src_vec yet
            y_hat = np.matmul(src_vec, W).T
            y_hat.shape = (300,)

            def cos_vec(v1, v2):
                return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))

            scores = []
            # randomly sample 4x num vectors for the negative examples
            for e in range(self._NUM_NEG * 4):
                seed = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
                neg_poss = self.trg_mdl.seeded_vector(seed)
                score = cos_vec(y_hat, neg_poss) - cos_vec(trg_vec, neg_poss)
                scores.append((score, neg_poss))
            # Hardest negatives (largest margin violations) first.
            scores = sorted(scores, key=lambda tup: tup[0], reverse=True)
            for i, neg in enumerate(self.tf_g['negs']):
                # BUGFIX: feed the i-th hardest negative *vector*. The
                # original indexed the model with the float score
                # (``self.trg_mdl[scores[i][0]]``), which is not a valid key.
                feed_dict[neg] = scores[i][1].reshape(self._VEC_SHAPE)
            _, loss = self.tf_g['sess'].run(
                [self.tf_g['minimizer'], self.tf_g['loss']],
                feed_dict=feed_dict)
        return loss

    def evaluate(self):
        """Map every test source word and report the mean cosine similarity
        between the predicted and gold target vectors."""
        W = self.tf_g['sess'].run(self.tf_g['W'])
        total_similarity = 0.0
        for src, trg in self.test_labels:
            src_vec = src[1]
            src_word = src[0]
            trg_word = trg[0]
            trg_vec_pred = np.matmul(src_vec, W).T
            trg_vec_pred.shape = (300,)
            print('Golden translation: %s -> %s' % (src_word, trg_word))
            translations = self.trg_mdl.similar_by_vector(trg_vec_pred, topn=5)
            for i, x in enumerate(translations):
                print('\t %d: %s' % (i, x))
            gold_trg_vec = self.trg_mdl[trg_word]
            similarity = (np.dot(trg_vec_pred, gold_trg_vec) /
                          (np.linalg.norm(trg_vec_pred) * np.linalg.norm(gold_trg_vec)))
            print('\tSimilarity to golden: %.5f' % similarity)
            total_similarity = total_similarity + similarity
        print('Translation quality: %.5f' % (total_similarity / len(self.test_labels)))
def main():
    """Train the EN->DE number-dictionary mapping until the per-epoch loss
    change falls below the convergence threshold, then evaluate."""
    convergence_eps = 0.001
    print('Testing number on number train')
    trainer = Trainer('../data/en-de-available-number-train.dict',
                      '../data/en-de-available-number-test.dict')
    prev_loss = trainer.train_step()
    print('loss', prev_loss)
    converged = False
    while not converged:
        curr_loss = trainer.train_step()
        print('loss', curr_loss)
        converged = abs(prev_loss - curr_loss) < convergence_eps
        prev_loss = curr_loss
    trainer.evaluate()
# Train and evaluate only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| mit |
chreman/visualizations | trending/trending.py | 2 | 7867 | # main.py
import numpy as np
import pandas as pd
from bokeh.layouts import column, row
from bokeh.plotting import Figure, show
from bokeh.embed import standalone_html_page_for_models
from bokeh.models import ColumnDataSource, HoverTool, HBox, VBox
from bokeh.models.widgets import Slider, Select, TextInput, RadioGroup, Paragraph, Div
from bokeh.io import curdoc, save
from bokeh.charts import HeatMap, bins, output_file, vplot, TimeSeries, Line
from bokeh.models import FixedTicker, SingleIntervalTicker, ColumnDataSource, DataRange1d
from bokeh.layouts import widgetbox
from bokeh.layouts import gridplot
import bokeh.palettes as palettes
from bokeh.resources import INLINE, CDN
import config
import pickle
import gzip
with gzip.open("../data/timeseries_features.pklz", "rb") as infile:
ts = pickle.load(infile)
dictionaries = sorted(ts.columns.levels[0])
resources = INLINE
colors=palettes.Paired10
def get_dataset(df, dictvalue, relative):
    """Return the ``dictvalue`` columns of *df*, ordered by overall rank.

    Parameters
    ----------
    df : DataFrame with a two-level column index; level 0 selects a dictionary.
    dictvalue : label in the first column level to extract.
    relative : bool
        If True, rank columns by the cumulative period-to-period percentage
        change ("trending"); otherwise by absolute column totals.
    """
    if relative:
        # Cumulative percentage change per column — only needed here, so
        # don't pay for it on the absolute path (original computed it always).
        rel = (df.diff() / df * 100).cumsum()
        selection = rel.sum()[dictvalue].sort_values(ascending=False).index
        # check with tail(5) of df; or second half/third/quarter of df only
    else:
        selection = df.sum()[dictvalue].sort_values(ascending=False).index
    selected = df[dictvalue][selection].fillna(0)
    return selected
def prepare_facts(dictionaries):
    """Build ``{dictionary: (absolute_series, trending_series)}`` for every
    dictionary name, from the module-level timeseries ``ts``."""
    return {
        dictionary: (get_dataset(ts, dictionary, False),
                     get_dataset(ts, dictionary, True))
        for dictionary in dictionaries
    }
factsets = prepare_facts(dictionaries)
def get_subset(dictionary):
    """Look up the precomputed (absolute, trending) pair for *dictionary*.

    Returns None for an unknown dictionary name, like ``dict.get``.
    """
    return factsets.get(dictionary)
def update(attrname, old, new):
    """Bokeh widget callback: refresh every line/point source in place.

    Reads the current widget state (dictionary choice, top-n slider, time
    grouping) and copies the re-aggregated data into the live
    ColumnDataSources, so the existing plots update without being rebuilt.
    The ``attrname``/``old``/``new`` arguments follow bokeh's on_change
    callback signature and are ignored here.
    """
    subset = get_subset(dictchooser.value)
    # Re-aggregate the absolute [0] and relative/trending [1] series for the
    # selected top-n columns at the chosen time resolution.
    new_absolute_source = subset[0] \
        .ix[:, :top_n.value] \
        .groupby(pd.TimeGrouper(freq=timegroupoptionsmapper[timegroup.active])) \
        .sum().fillna(0)
    new_relative_source = subset[1] \
        .ix[:, :top_n.value] \
        .groupby(pd.TimeGrouper(freq=timegroupoptionsmapper[timegroup.active])) \
        .sum().fillna(0)
    # Retitle each small-multiple plot with its new column name.
    # NOTE(review): the loop variables shadow the callback's ``old``/``new``
    # parameters — harmless here, but worth renaming.
    for old, new in zip(abs_arrangement, new_absolute_source.columns.tolist()):
        old.title.text = new
    for old, new in zip(rel_arrangement, new_relative_source.columns.tolist()):
        old.title.text = new
    # Build fresh line sources, then copy their .data into the live ones.
    new_abs_sources = [ColumnDataSource(dict(date=new_absolute_source.index,
                                             y=new_absolute_source[l]))
                       for l in new_absolute_source.columns.tolist()]
    new_rel_sources = [ColumnDataSource(dict(date=new_relative_source.index,
                                             y=new_relative_source[l]))
                       for l in new_relative_source.columns.tolist()]
    for old, new in zip(abs_sources, new_abs_sources):
        old.data.update(new.data)
    for old, new in zip(rel_sources, new_rel_sources):
        old.data.update(new.data)
    # Point sources mark each series' maximum with a numeric label.
    new_abs_point_sources = [ColumnDataSource(dict(date=[new_absolute_source[l].idxmax()],
                                                   y=[new_absolute_source[l].max()],
                                                   text=[str(int(new_absolute_source[l].max()))]
                                                   )
                                              )
                             for l in new_absolute_source.columns.tolist()]
    new_rel_point_sources = [ColumnDataSource(dict(date=[new_relative_source[l].idxmax()],
                                                   y=[new_relative_source[l].max()],
                                                   text=[str(int(new_relative_source[l].max()))]
                                                   )
                                              )
                             for l in new_relative_source.columns.tolist()]
    for old, new in zip(abs_point_sources, new_abs_point_sources):
        old.data.update(new.data)
    for old, new in zip(rel_point_sources, new_rel_point_sources):
        old.data.update(new.data)
# Create Input controls: RadioGroup indices are mapped to pandas resample
# frequencies / boolean flags through these dicts.
timegroupoptionsmapper = {0: "A", 1: "M", 2: "D"}
trendingoptionsmapper = {0: False, 1: True}
timegroupoptions = ["Year", "Month", "Day"]
top_n = Slider(title="Number of top-n items to display", value=10, start=1, end=10, step=1)
dictchooser = Select(title="dictionaries", options=dictionaries, value=dictionaries[-2])
timegroup = RadioGroup(labels=timegroupoptions, active=0)
trending_chooser = RadioGroup(labels=["absolute counts", "period-to-period change"], active=0)
initial_subset = get_subset(dictchooser.value)
# Line sources for the top-n absolute [0] and relative/trending [1] series.
abs_sources = [ColumnDataSource(dict(date=initial_subset[0].index, y=initial_subset[0][l]))
               for l in initial_subset[0].columns.tolist()[:top_n.value]]
rel_sources = [ColumnDataSource(dict(date=initial_subset[1].index, y=initial_subset[1][l]))
               for l in initial_subset[1].columns.tolist()[:top_n.value]]
# Point sources labelling each series' maximum value.
abs_point_sources = [ColumnDataSource(dict(date=[initial_subset[0][l].idxmax()],
                                           y=[initial_subset[0][l].max()],
                                           text=[str(int(initial_subset[0][l].max()))]))
                     for l in initial_subset[0].columns.tolist()[:top_n.value]]
# BUGFIX: the label text previously read initial_subset[0] (the absolute
# series) while date/y came from initial_subset[1]; use [1] consistently so
# the relative plots are annotated with their own maxima.
rel_point_sources = [ColumnDataSource(dict(date=[initial_subset[1][l].idxmax()],
                                           y=[initial_subset[1][l].max()],
                                           text=[str(int(initial_subset[1][l].max()))]))
                     for l in initial_subset[1].columns.tolist()[:top_n.value]]
def make_plots(linesources, pointsources):
    """Build one compact sparkline Figure per (line, point) source pair.

    Only the first and last plots (indices in ``axis_visible``) keep a
    visible, slightly taller datetime x-axis; the rest hide their labels so
    the column reads as a stacked set of sparklines.
    """
    axis_visible = (0, 9)  # first and last of the top-10 stack
    plots = []
    # enumerate() instead of the original manual i = 0 / i += 1 counter.
    for i, (linesource, pointsource) in enumerate(zip(linesources, pointsources)):
        fig = Figure(title=None, toolbar_location=None, tools=[],
                     x_axis_type="datetime",
                     width=300, height=70)
        fig.xaxis.visible = False
        if i in axis_visible:
            fig.xaxis.visible = True
            fig.height = 90  # extra room for the axis labels
        fig.yaxis.visible = False
        fig.xgrid.visible = True
        fig.ygrid.visible = False
        fig.min_border_left = 10
        fig.min_border_right = 10
        fig.min_border_top = 5
        fig.min_border_bottom = 5
        if i not in axis_visible:  # idiom: was "if not i in [0, 9]"
            fig.xaxis.major_label_text_font_size = "0pt"
        # fig.yaxis.major_label_text_font_size = "0pt"
        fig.xaxis.major_tick_line_color = None
        fig.yaxis.major_tick_line_color = None
        fig.xaxis.minor_tick_line_color = None
        fig.yaxis.minor_tick_line_color = None
        fig.background_fill_color = "whitesmoke"
        fig.line(x='date', y="y", source=linesource)
        fig.circle(x='date', y='y', size=5, source=pointsource)
        fig.text(x='date', y='y', text='text', x_offset=5, y_offset=10,
                 text_font_size='7pt', source=pointsource)
        fig.title.align = 'left'
        fig.title.text_font_style = 'normal'
        plots.append(fig)
    return plots
abs_arrangement = make_plots(abs_sources, abs_point_sources)
rel_arrangement = make_plots(rel_sources, rel_point_sources)

# Wire widget callbacks and populate the sources once at startup.
dictchooser.on_change("value", update)
timegroup.on_change('active', update)
inputs = row(dictchooser, timegroup)
update(None, None, None)  # initial load of the data
### LAYOUT
layout = column(inputs, row(column(abs_arrangement), column(rel_arrangement)))
curdoc().add_root(layout)
# BUGFIX: ``curdoc`` is a function; ``curdoc.title = ...`` only set an
# attribute on the function object. Title the current Document instead.
curdoc().title = "Exploring most frequent and uptrending facts"
| mit |
LFPy/LFPy | examples/bioRxiv281717/figure_6.py | 1 | 7293 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''plotting script for figure 6 in manuscript preprint on output of
example_parallel_network.py
Copyright (C) 2018 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import os
import numpy as np
import h5py
import example_parallel_network_plotting as plotting
from mpi4py import MPI
# set up MPI environment
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
fontsize = 14
titlesize = 16
legendsize = 12
plt.rcParams.update({
'axes.xmargin': 0.0,
'axes.ymargin': 0.0,
'axes.labelsize': fontsize,
'axes.titlesize': titlesize,
'figure.titlesize': fontsize,
'font.size': fontsize,
'legend.fontsize': legendsize,
})
if __name__ == '__main__':
# get simulation parameters
from example_parallel_network_parameters import PSET
# cell type colors
colors = [
plt.get_cmap(
'Set1',
PSET.populationParameters.size)(i) for i in range(
PSET.populationParameters.size)]
# time shown
T = (PSET.TRANSIENT, PSET.TRANSIENT + 1000.)
# Set up figure and subplots
fig = plt.figure(figsize=(16, 12))
gs = GridSpec(15, 5, left=0.075, right=0.975,
top=0.95, bottom=0.05, wspace=0.3, hspace=0.2)
alphabet = 'ABCDEFGHIJKLMNOPQ'
for j, (m_type, me_type) in enumerate(
zip(PSET.populationParameters['m_type'],
PSET.populationParameters['me_type'])):
ax = fig.add_subplot(gs[:8, j])
f = h5py.File(
os.path.join(
PSET.OUTPUTPATH,
'example_parallel_network_output.h5'),
'r')
for data, title, color in zip(
[f['SUMMED_OUTPUT'][()][me_type]],
[m_type],
['k']):
ax.set_title(title)
vlimround = plotting.draw_lineplot(
ax=ax,
data=plotting.decimate(data, q=PSET.decimate_q),
dt=PSET.dt * PSET.decimate_q,
T=T, color=color,
scalebarbasis='log10')
if j > 0:
ax.set_yticklabels([])
ax.set_ylabel('')
ax.text(-0.1, 1.05, alphabet[j],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
p = f['CURRENT_DIPOLE_MOMENT'][me_type] * \
1E-3 # nA um -> 1E-3 nA m unit conversion
for i, (u, ls, lw, ylbl) in enumerate(zip(
['x', 'y', 'z'], ['-', '-', '-'], [1, 1, 1],
[r'$\mathbf{p \cdot \hat{x}}$' + '\n' + r'($10^{-3}$ nA m)',
r'$\mathbf{p \cdot \hat{y}}$' +
'\n' + r'($10^{-3}$ nA m)',
r'$\mathbf{p \cdot \hat{z}}$' + '\n' + r'($10^{-3}$ nA m)'])):
ax = fig.add_subplot(gs[9 + i * 2:11 + i * 2, j])
if i == 0:
ax.text(-0.1, 1.2, alphabet[j + 5],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
plotting.remove_axis_junk(ax)
x = plotting.decimate(p[i, ], q=PSET.decimate_q)
t = np.arange(x.size) * PSET.dt * PSET.decimate_q
inds = (t >= T[0]) & (t <= T[1])
ax.plot(t[inds], x[inds], ls=ls, lw=lw,
color='k')
if i != 2:
ax.set_xticklabels([])
if j == 0:
ax.set_ylabel(ylbl, labelpad=0)
f.close()
ax.set_xlabel('time (ms)', labelpad=0)
ax = fig.add_subplot(gs[:8, 4])
ax.set_title('signal variance')
y = PSET.electrodeParams['z']
tind = int(PSET.TRANSIENT / PSET.dt)
f = h5py.File(
os.path.join(
PSET.OUTPUTPATH,
'example_parallel_network_output.h5'),
'r')
for m_type, me_type, color in zip(
list(
PSET.populationParameters['m_type']) + ['summed'], list(
PSET.populationParameters['me_type']) + ['imem'], colors + ['k']):
data = f['SUMMED_OUTPUT'][()][me_type]
ax.semilogx(data[:, tind:].var(axis=1), y,
lw=2, label=m_type, color=color)
f.close()
ax.set_yticks(y)
ax.set_yticklabels([])
ax.set_ylabel('')
plotting.remove_axis_junk(ax)
ax.axis(ax.axis('tight'))
ax.legend(loc='best')
ax.set_xlabel(r'variance (mV$^2$)', labelpad=0)
ax.text(-0.1, 1.05, alphabet[4],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
barhaxes = []
barhmax = []
barhmin = []
for i, (u, ls, lw, ylbl) in enumerate(zip(['x', 'y', 'z'], [
'-', '-', '-'], [1, 1, 1], [r'$p_x$', r'$p_y$', r'$p_z$'])):
ax = fig.add_subplot(gs[9 + i * 2:11 + i * 2, 4])
if i == 0:
ax.text(-0.1, 1.2, alphabet[9],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
plotting.remove_axis_junk(ax)
barhaxes.append(ax)
f = h5py.File(os.path.join(PSET.OUTPUTPATH,
'example_parallel_network_output.h5'), 'r')
bars = []
p_temp = np.zeros(f['CURRENT_DIPOLE_MOMENT'].shape)
for me_type in PSET.populationParameters['me_type']:
# nA um -> 1E-3 nA m unit conversion
bars.append((f['CURRENT_DIPOLE_MOMENT']
[me_type][i, tind:] * 1E-3).var())
p_temp += f['CURRENT_DIPOLE_MOMENT'][me_type]
f.close()
p_temp *= 1E-6 # nA um -> nA m unit conversion
bars.append(p_temp[i, tind:].var())
barhmax.append(np.array(bars).max())
barhmin.append(np.array(bars).min())
del p_temp
rects = ax.barh(range(len(bars)), bars, log=True, color=colors + ['k'])
if i != 2:
ax.set_xticklabels([])
ax.set_yticks([])
if i == 0:
for xpos, ypos, text in zip(bars, range(len(bars)), list(
PSET.populationParameters['m_type']) + ['summed']):
ax.text(xpos, ypos, text, ha='left', va='center')
ax.set_xlabel(r'variance (($10^{-3}$ nA m)$^2$)', labelpad=0)
for axh in barhaxes:
ax.axis(ax.axis('tight'))
axh.set_xlim(left=np.min(barhmin), right=np.max(barhmax))
fig.savefig(
os.path.join(
PSET.OUTPUTPATH,
'figure_6.pdf'),
bbox_inches='tight')
plt.show()
| gpl-3.0 |
scikit-learn-contrib/forest-confidence-interval | forestci/version.py | 2 | 2065 | # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 5
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "forestci: confidence intervals for scikit-learn "
description += "forest algorithms"
# Long description will go up on the pypi page
long_description = """
sklearn forest ci
=================
`forest-confidence-interval` is a Python module for calculating variance and
adding confidence intervals to scikit-learn random forest regression or
classification objects. The core functions calculate an in-bag and error bars
for random forest objects
Please read the repository README_ on Github or our documentation_
.. _README: https://github.com/scikit-learn-contrib/forest-confidence-interval/blob/master/README.md
.. _documentation: http://contrib.scikit-learn.org/forest-confidence-interval/
"""
NAME = "forestci"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@uw.edu"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/scikit-learn-contrib/forest-confidence-interval"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem, Bryna Hazelton, Kivan Polimis"
AUTHOR_EMAIL = "arokem@uw.edu"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
| mit |
YinongLong/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 53 | 13398 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """Round-trip the svmlight data file through gzip and bz2 compression."""
    X, y = load_svmlight_file(datafile)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        # Close the compressed handle via a context manager so the stream is
        # fully flushed before we read it back; the original left the
        # gzip.open() handle dangling and relied on GC to flush it.
        with open(datafile, "rb") as f, gzip.open(tmp.name, "wb") as fout:
            shutil.copyfileobj(f, fout)
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f, BZ2File(tmp.name, "wb") as fout:
            shutil.copyfileobj(f, fout)
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
    # load svmfile with longint qid attribute
    data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
    X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
    true_X = [[1, 2, 3],
              [1440446648, 72048431380967004, 236784985],
              [1440446648, 72048431380967004, 236784985],
              [1440446648, 72048431380967004, 236784985]]
    true_y = [1, 0, 0, 3]
    # Extremes of the signed 64-bit range must be preserved exactly.
    trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
    assert_array_equal(qid, trueQID)
    # Dump and reload: qid values must survive the round trip intact.
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
    f.seek(0)
    X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
    assert_array_equal(qid, trueQID)
    # Reloading with query_id=False simply drops the qid column.
    f.seek(0)
    X, y = load_svmlight_file(f, query_id=False, zero_based=True)
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
andrey-alekov/backtesting_eventbased | model/portfolio.py | 1 | 1158 | import datetime
import numpy as np
import pandas as od
import Queue
from abc import ABCMeta, abstractmethod
from math import floor
from event import FillEvent, OrderEvent
class Limit(object):
    """Mutable holder for a positive position-size limit."""

    def __init__(self, size):
        self.size = size

    def set_limit(self, newlimit):
        """Replace the limit; non-positive requests are silently ignored."""
        if newlimit <= 0:
            return
        self.size = newlimit

    def get_limit(self):
        """Return the current limit."""
        return self.size
class Portfolio(object):
    """
    Basic portfolio.

    Abstract base class: subclasses must implement update_signal() and
    update_fill() to react to strategy signals and broker fill events.
    """
    # Python 2 style ABC declaration (this module imports Queue, so it
    # targets Python 2); it has no effect under Python 3.
    __metaclass__ = ABCMeta

    def __init__(self, name, limit):
        self.name = name
        self.risk_engine = None  # wired up later by the owner
        self.agent_list = None   # wired up later by the owner
        # BUG FIX: this flag used to be stored as ``self.toggle``, which
        # shadowed the toggle() method below and made it uncallable
        # (TypeError: 'bool' object is not callable).
        self.toggled = False
        self.limit = limit

    def toggle(self):
        """Flip the portfolio's on/off state."""
        self.toggled = not self.toggled

    @abstractmethod
    def update_signal(self, event):
        raise NotImplementedError("Should implement update_signal()")

    @abstractmethod
    def update_fill(self, event):
        raise NotImplementedError("Should implement update_fill()")
class PortfolioAgent(object):
    """Basic agent; a placeholder with no behaviour yet."""

    def __init__(self):
        pass
josenavas/QiiTa | qiita_db/test/test_processing_job.py | 1 | 45880 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from datetime import datetime
from os.path import join
from os import close
from tempfile import mkstemp
from json import dumps, loads
from time import sleep
import networkx as nx
import pandas as pd
import qiita_db as qdb
from qiita_core.util import qiita_test_checker
from qiita_core.qiita_settings import qiita_config
def _create_job(force=True):
    """Create a 'Split libraries' (command 2) job owned by test@foo.bar."""
    split_libs_values = {
        "min_seq_len": 100, "max_seq_len": 1000,
        "trim_seq_length": False, "min_qual_score": 25,
        "max_ambig": 6, "max_homopolymer": 6,
        "max_primer_mismatch": 0,
        "barcode_type": "golay_12",
        "max_barcode_errors": 1.5,
        "disable_bc_correction": False,
        "qual_score_window": 0, "disable_primers": False,
        "reverse_primers": "disable",
        "reverse_primer_mismatches": 0,
        "truncate_ambi_bases": False, "input_data": 1}
    params = qdb.software.Parameters.load(
        qdb.software.Command(2), values_dict=split_libs_values)
    return qdb.processing_job.ProcessingJob.create(
        qdb.user.User('test@foo.bar'), params, force)
@qiita_test_checker()
class ProcessingJobUtilTest(TestCase):
    """Tests for the module-level helpers in qdb.processing_job."""

    def test_system_call(self):
        # A successful command returns its stdout, empty stderr and status 0.
        out, err, status = qdb.processing_job._system_call(
            'echo "Test system call stdout"')
        self.assertEqual(out, "Test system call stdout\n")
        self.assertEqual(err, "")
        self.assertEqual(status, 0)

    def test_system_call_error(self):
        # A failing command reports its stderr and a non-zero exit status.
        out, err, status = qdb.processing_job._system_call(
            '>&2 echo "Test system call stderr"; exit 1')
        self.assertEqual(out, "")
        self.assertEqual(err, "Test system call stderr\n")
        self.assertEqual(status, 1)

    def test_job_submitter(self):
        # The cmd parameter of the function should be the command that
        # actually executes the function. However, in order to avoid executing
        # a expensive command, we are just going to pass some other command.
        # In case of success, nothing happens, so we just run it and see that
        # it doesn't raise an error
        job = _create_job()
        qdb.processing_job._job_submitter(
            job.id, 'echo "Test system call stdout"')

    def test_job_submitter_error(self):
        # Same comment as above, but here we are going to force failure, and
        # check that the job is updated correctly
        job = _create_job()
        qdb.processing_job._job_submitter(
            job.id, '>&2 echo "Test system call stderr"; exit 1')
        self.assertEqual(job.status, 'error')
        expected_msg = ("Error submitting job:\nStd output:\nStd error:"
                        "Test system call stderr\n")
        self.assertEqual(job.log.msg, expected_msg)
@qiita_test_checker()
class ProcessingJobTest(TestCase):
    """Tests for qdb.processing_job.ProcessingJob.

    The four "tester" jobs come from the test-database fixture and are,
    respectively, in the queued, running, success and error states.
    """

    def setUp(self):
        # Known job UUIDs from the test database fixture.
        self.tester1 = qdb.processing_job.ProcessingJob(
            "063e553b-327c-4818-ab4a-adfe58e49860")
        self.tester2 = qdb.processing_job.ProcessingJob(
            "bcc7ebcd-39c1-43e4-af2d-822e3589f14d")
        self.tester3 = qdb.processing_job.ProcessingJob(
            "b72369f9-a886-4193-8d3d-f7b504168e75")
        self.tester4 = qdb.processing_job.ProcessingJob(
            "d19f76ee-274e-4c1b-b3a2-a12d73507c55")
        self._clean_up_files = []

    def _get_all_job_ids(self):
        # Return every processing job id currently in the database.
        sql = "SELECT processing_job_id FROM qiita.processing_job"
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add(sql)
            return qdb.sql_connection.TRN.execute_fetchflatten()

    def _wait_for_job(self, job):
        # Busy-wait until the job reaches a terminal state.
        while job.status not in ('error', 'success'):
            sleep(0.5)
def test_exists(self):
self.assertTrue(qdb.processing_job.ProcessingJob.exists(
"063e553b-327c-4818-ab4a-adfe58e49860"))
self.assertTrue(qdb.processing_job.ProcessingJob.exists(
"bcc7ebcd-39c1-43e4-af2d-822e3589f14d"))
self.assertTrue(qdb.processing_job.ProcessingJob.exists(
"b72369f9-a886-4193-8d3d-f7b504168e75"))
self.assertTrue(qdb.processing_job.ProcessingJob.exists(
"d19f76ee-274e-4c1b-b3a2-a12d73507c55"))
self.assertFalse(qdb.processing_job.ProcessingJob.exists(
"d19f76ee-274e-4c1b-b3a2-b12d73507c55"))
self.assertFalse(qdb.processing_job.ProcessingJob.exists(
"some-other-string"))
def test_user(self):
exp_user = qdb.user.User('test@foo.bar')
self.assertEqual(self.tester1.user, exp_user)
self.assertEqual(self.tester2.user, exp_user)
exp_user = qdb.user.User('shared@foo.bar')
self.assertEqual(self.tester3.user, exp_user)
self.assertEqual(self.tester4.user, exp_user)
def test_command(self):
cmd1 = qdb.software.Command(1)
cmd2 = qdb.software.Command(2)
cmd3 = qdb.software.Command(3)
self.assertEqual(self.tester1.command, cmd1)
self.assertEqual(self.tester2.command, cmd2)
self.assertEqual(self.tester3.command, cmd1)
self.assertEqual(self.tester4.command, cmd3)
    def test_parameters(self):
        # Each job's parameters must match the exact JSON stored in the test
        # database for its command.
        json_str = (
            '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,'
            '"sequence_max_n":0,"rev_comp_barcode":false,'
            '"rev_comp_mapping_barcodes":false,"rev_comp":false,'
            '"phred_quality_threshold":3,"barcode_type":"golay_12",'
            '"max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}')
        exp_params = qdb.software.Parameters.load(qdb.software.Command(1),
                                                  json_str=json_str)
        self.assertEqual(self.tester1.parameters, exp_params)
        json_str = (
            '{"min_seq_len":100,"max_seq_len":1000,"trim_seq_length":false,'
            '"min_qual_score":25,"max_ambig":6,"max_homopolymer":6,'
            '"max_primer_mismatch":0,"barcode_type":"golay_12",'
            '"max_barcode_errors":1.5,"disable_bc_correction":false,'
            '"qual_score_window":0,"disable_primers":false,'
            '"reverse_primers":"disable","reverse_primer_mismatches":0,'
            '"truncate_ambi_bases":false,"input_data":1}')
        exp_params = qdb.software.Parameters.load(qdb.software.Command(2),
                                                  json_str=json_str)
        self.assertEqual(self.tester2.parameters, exp_params)
        json_str = (
            '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,'
            '"sequence_max_n":0,"rev_comp_barcode":false,'
            '"rev_comp_mapping_barcodes":true,"rev_comp":false,'
            '"phred_quality_threshold":3,"barcode_type":"golay_12",'
            '"max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}')
        exp_params = qdb.software.Parameters.load(qdb.software.Command(1),
                                                  json_str=json_str)
        self.assertEqual(self.tester3.parameters, exp_params)
        json_str = (
            '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,'
            '"similarity":0.97,"sortmerna_coverage":0.97,"threads":1,'
            '"input_data":2}')
        exp_params = qdb.software.Parameters.load(qdb.software.Command(3),
                                                  json_str=json_str)
        self.assertEqual(self.tester4.parameters, exp_params)
def test_input_artifacts(self):
exp = [qdb.artifact.Artifact(1)]
self.assertEqual(self.tester1.input_artifacts, exp)
self.assertEqual(self.tester2.input_artifacts, exp)
self.assertEqual(self.tester3.input_artifacts, exp)
exp = [qdb.artifact.Artifact(2)]
self.assertEqual(self.tester4.input_artifacts, exp)
def test_status(self):
self.assertEqual(self.tester1.status, 'queued')
self.assertEqual(self.tester2.status, 'running')
self.assertEqual(self.tester3.status, 'success')
self.assertEqual(self.tester4.status, 'error')
    def test_generate_cmd(self):
        # The launcher command embeds the plugin environment, the base URL,
        # the job id and a per-job working directory.
        obs = self.tester1._generate_cmd()
        exp = ('qiita-plugin-launcher "source activate qiita" '
               '"start_target_gene" "%s" '
               '"063e553b-327c-4818-ab4a-adfe58e49860" "%s"'
               % (qiita_config.base_url,
                  join(qdb.util.get_work_base_dir(),
                       "063e553b-327c-4818-ab4a-adfe58e49860")))
        self.assertEqual(obs, exp)

    def test_submit(self):
        # In order to test a success, we need to actually run the job, which
        # will mean to run split libraries, for example.
        pass

    def test_log(self):
        # Only the errored job (tester4) has a log entry attached.
        self.assertIsNone(self.tester1.log)
        self.assertIsNone(self.tester2.log)
        self.assertIsNone(self.tester3.log)
        self.assertEqual(self.tester4.log, qdb.logger.LogEntry(1))

    def test_heartbeat(self):
        # Queued jobs have no heartbeat; the others carry fixture timestamps.
        self.assertIsNone(self.tester1.heartbeat)
        self.assertEqual(self.tester2.heartbeat,
                         datetime(2015, 11, 22, 21, 00, 00))
        self.assertEqual(self.tester3.heartbeat,
                         datetime(2015, 11, 22, 21, 15, 00))
        self.assertEqual(self.tester4.heartbeat,
                         datetime(2015, 11, 22, 21, 30, 00))

    def test_step(self):
        # step is None unless the job reported a current processing step.
        self.assertIsNone(self.tester1.step)
        self.assertEqual(self.tester2.step, 'demultiplexing')
        self.assertIsNone(self.tester3.step)
        self.assertEqual(self.tester4.step, 'generating demux file')

    def test_children(self):
        # In the fixture workflow, tester4 is the only child of tester3.
        self.assertEqual(list(self.tester1.children), [])
        self.assertEqual(list(self.tester3.children), [self.tester4])

    def test_update_and_launch_children(self):
        # In order to test a success, we need to actually run the children
        # jobs, which will mean to run split libraries, for example.
        pass
    def test_create(self):
        # Creating a job stores user, command and parameters, starts the job
        # in the in_construction state and links it to its input artifact.
        exp_command = qdb.software.Command(1)
        json_str = (
            '{"input_data": 1, "max_barcode_errors": 1.5, '
            '"barcode_type": "golay_12", "max_bad_run_length": 3, '
            '"rev_comp": false, "phred_quality_threshold": 3, '
            '"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
            '"min_per_read_length_fraction": 0.75, "sequence_max_n": 0, '
            '"phred_offset": "auto"}')
        exp_params = qdb.software.Parameters.load(exp_command,
                                                  json_str=json_str)
        exp_user = qdb.user.User('test@foo.bar')
        obs = qdb.processing_job.ProcessingJob.create(
            exp_user, exp_params, True)
        self.assertEqual(obs.user, exp_user)
        self.assertEqual(obs.command, exp_command)
        self.assertEqual(obs.parameters, exp_params)
        self.assertEqual(obs.status, 'in_construction')
        self.assertEqual(obs.log, None)
        self.assertEqual(obs.heartbeat, None)
        self.assertEqual(obs.step, None)
        self.assertTrue(obs in qdb.artifact.Artifact(1).jobs())
        # test with paramters with '
        # (quote characters must survive the SQL round trip)
        exp_command = qdb.software.Command(1)
        exp_params.values["a tests with '"] = 'this is a tests with "'
        exp_params.values['a tests with "'] = "this is a tests with '"
        obs = qdb.processing_job.ProcessingJob.create(
            exp_user, exp_params)
        self.assertEqual(obs.user, exp_user)
        self.assertEqual(obs.command, exp_command)
        self.assertEqual(obs.status, 'in_construction')
        self.assertEqual(obs.log, None)
        self.assertEqual(obs.heartbeat, None)
        self.assertEqual(obs.step, None)
        self.assertTrue(obs in qdb.artifact.Artifact(1).jobs())
    def test_set_status(self):
        # Walk through the legal status transitions; the illegal ones
        # (running -> queued, success -> running) must raise.
        job = _create_job()
        self.assertEqual(job.status, 'in_construction')
        job._set_status('queued')
        self.assertEqual(job.status, 'queued')
        job._set_status('running')
        self.assertEqual(job.status, 'running')
        with self.assertRaises(qdb.exceptions.QiitaDBStatusError):
            job._set_status('queued')
        job._set_status('error')
        self.assertEqual(job.status, 'error')
        job._set_status('running')
        self.assertEqual(job.status, 'running')
        job._set_status('success')
        self.assertEqual(job.status, 'success')
        with self.assertRaises(qdb.exceptions.QiitaDBStatusError):
            job._set_status('running')

    def test_submit_error(self):
        # A job that is already queued cannot be submitted again.
        job = _create_job()
        job._set_status('queued')
        with self.assertRaises(
                qdb.exceptions.QiitaDBOperationNotPermittedError):
            job.submit()
    def test_complete_multiple_outputs(self):
        # This test performs the test of multiple functions at the same
        # time. "release", "release_validators" and
        # "_set_validator_jobs" are tested here for correct execution.
        # Those functions are designed to work together, so it becomes
        # really hard to test each of the functions individually for
        # successfull execution.
        # We need to create a new command with multiple outputs, since
        # in the test DB there is no command with such characteristics
        cmd = qdb.software.Command.create(
            qdb.software.Software(1),
            "TestCommand", "Test command",
            {'input': ['artifact:["Demultiplexed"]', None]},
            {'out1': 'BIOM', 'out2': 'BIOM'})
        job = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('test@foo.bar'),
            qdb.software.Parameters.load(
                cmd,
                values_dict={"input": 1}))
        job._set_status("running")
        # Two scratch BIOM files, one per output artifact.
        fd, fp1 = mkstemp(suffix="_table.biom")
        self._clean_up_files.append(fp1)
        close(fd)
        with open(fp1, 'w') as f:
            f.write('\n')
        fd, fp2 = mkstemp(suffix="_table.biom")
        self._clean_up_files.append(fp2)
        close(fd)
        with open(fp2, 'w') as f:
            f.write('\n')
        # `job` has 2 output artifacts. Each of these artifacts needs to be
        # validated by 2 different validation jobs. We are creating those jobs
        # here, and add in the 'procenance' parameter that links the original
        # jobs with the validator jobs.
        params = qdb.software.Parameters.load(
            qdb.software.Command(4),
            values_dict={'template': 1, 'files': fp1,
                         'artifact_type': 'BIOM',
                         'provenance': dumps(
                             {'job': job.id,
                              'cmd_out_id': qdb.util.convert_to_id(
                                  'out1', "command_output", "name"),
                              'name': 'out1'})})
        user = qdb.user.User('test@foo.bar')
        obs1 = qdb.processing_job.ProcessingJob.create(user, params, True)
        obs1._set_status('running')
        params = qdb.software.Parameters.load(
            qdb.software.Command(4),
            values_dict={'template': 1, 'files': fp2,
                         'artifact_type': 'BIOM',
                         'provenance': dumps(
                             {'job': job.id,
                              'cmd_out_id': qdb.util.convert_to_id(
                                  'out1', "command_output", "name"),
                              'name': 'out1'})})
        obs2 = qdb.processing_job.ProcessingJob.create(user, params, True)
        obs2._set_status('running')
        # Make sure that we link the original job with its validator jobs
        job._set_validator_jobs([obs1, obs2])
        artifact_data_1 = {'filepaths': [(fp1, 'biom')],
                           'artifact_type': 'BIOM'}
        # Complete one of the validator jobs. This jobs should store all the
        # information about the new artifact, but it does not create it. The
        # job then goes to a "waiting" state, where it waits until all the
        # validator jobs are completed.
        obs1._complete_artifact_definition(artifact_data_1)
        self.assertEqual(obs1.status, 'waiting')
        self.assertEqual(job.status, 'running')
        # When we complete the second validation job, the previous validation
        # job is realeaed from its waiting state. All jobs then create the
        # artifacts in a single transaction, so either all of them successfully
        # complete, or all of them fail.
        artifact_data_2 = {'filepaths': [(fp2, 'biom')],
                           'artifact_type': 'BIOM'}
        obs2._complete_artifact_definition(artifact_data_2)
        self.assertEqual(obs1.status, 'waiting')
        self.assertEqual(obs2.status, 'waiting')
        self.assertEqual(job.status, 'running')
        job.release_validators()
        self.assertEqual(obs1.status, 'success')
        self.assertEqual(obs2.status, 'success')
        self.assertEqual(job.status, 'success')
    def test_complete_artifact_definition(self):
        # Completing a validator job stores the artifact info and leaves the
        # validator waiting while its parent job keeps running.
        job = _create_job()
        job._set_status('running')
        fd, fp = mkstemp(suffix="_table.biom")
        self._clean_up_files.append(fp)
        close(fd)
        with open(fp, 'w') as f:
            f.write('\n')
        artifact_data = {'filepaths': [(fp, 'biom')],
                         'artifact_type': 'BIOM'}
        params = qdb.software.Parameters.load(
            qdb.software.Command(4),
            values_dict={'template': 1, 'files': fp,
                         'artifact_type': 'BIOM',
                         'provenance': dumps(
                             {'job': job.id,
                              'cmd_out_id': 3})}
        )
        obs = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('test@foo.bar'), params)
        job._set_validator_jobs([obs])
        obs._complete_artifact_definition(artifact_data)
        self.assertEqual(obs.status, 'waiting')
        self.assertEqual(job.status, 'running')
        # Upload case implicitly tested by "test_complete_type"

    def test_complete_artifact_transformation(self):
        # Implicitly tested by "test_complete"
        pass
    def test_complete_no_artifact_data(self):
        # Commands that produce no artifacts (command 5) can complete
        # directly with either a success or a failure flag.
        job = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('test@foo.bar'),
            qdb.software.Parameters.load(
                qdb.software.Command(5),
                values_dict={"input_data": 1}))
        job._set_status('running')
        job.complete(True)
        self.assertEqual(job.status, 'success')
        job = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('test@foo.bar'),
            qdb.software.Parameters.load(
                qdb.software.Command(5),
                values_dict={"input_data": 1}),
            True)
        job._set_status('running')
        job.complete(False, error='Some Error')
        self.assertEqual(job.status, 'error')
    def test_complete_type(self):
        # Completing a "Validate" (command 4) job creates the new artifact.
        fd, fp = mkstemp(suffix="_table.biom")
        self._clean_up_files.append(fp)
        close(fd)
        with open(fp, 'w') as f:
            f.write('\n')
        exp_artifact_count = qdb.util.get_count('qiita.artifact') + 1
        artifacts_data = {'ignored': {'filepaths': [(fp, 'biom')],
                                      'artifact_type': 'BIOM'}}
        metadata_dict = {
            'SKB8.640193': {'center_name': 'ANL',
                            'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'GTCCGCAAGTTA',
                            'run_prefix': "s_G1_L001_sequences",
                            'platform': 'ILLUMINA',
                            'instrument_model': 'Illumina MiSeq',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'}}
        metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',
                                          dtype=str)
        pt = qdb.metadata_template.prep_template.PrepTemplate.create(
            metadata, qdb.study.Study(1), "16S")
        self._clean_up_files.extend([ptfp for _, ptfp in pt.get_filepaths()])
        params = qdb.software.Parameters.load(
            qdb.software.Command(4),
            values_dict={'template': pt.id, 'files': fp,
                         'artifact_type': 'BIOM'})
        obs = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('test@foo.bar'), params, True)
        obs._set_status('running')
        obs.complete(True, artifacts_data=artifacts_data)
        self.assertEqual(obs.status, 'success')
        # Exactly one new artifact must have been created.
        self.assertEqual(qdb.util.get_count('qiita.artifact'),
                         exp_artifact_count)
        self._clean_up_files.extend(
            [afp for _, afp, _ in
             qdb.artifact.Artifact(exp_artifact_count).filepaths])
    def test_complete_success(self):
        # This first part of the test is just to test that by default the
        # naming of the output artifact will be the name of the output
        fd, fp = mkstemp(suffix='_table.biom')
        self._clean_up_files.append(fp)
        close(fd)
        with open(fp, 'w') as f:
            f.write('\n')
        artifacts_data = {'demultiplexed': {'filepaths': [(fp, 'biom')],
                                            'artifact_type': 'BIOM'}}
        job = _create_job()
        job._set_status('running')
        job.complete(True, artifacts_data=artifacts_data)
        self._wait_for_job(job)
        # Retrieve the job that is performing the validation:
        val_job = qdb.processing_job.ProcessingJob(job.step.rsplit(" ", 1)[-1])
        # Test the the output artifact is going to be named based on the
        # input parameters
        self.assertEqual(
            loads(val_job.parameters.values['provenance'])['name'],
            "demultiplexed")
        # To test that the naming of the output artifact is based on the
        # parameters that the command is indicating, we need to update the
        # parameter information of the command - since the ones existing
        # in the database currently do not require using any input parameter
        # to name the output artifact
        with qdb.sql_connection.TRN:
            sql = """UPDATE qiita.command_parameter
                     SET name_order = %s
                     WHERE command_parameter_id = %s"""
            # Hard-coded values; 19 -> barcode_type, 20 -> max_barcode_errors
            qdb.sql_connection.TRN.add(sql, [[1, 19], [2, 20]], many=True)
            qdb.sql_connection.TRN.execute()
        fd, fp = mkstemp(suffix='_table.biom')
        self._clean_up_files.append(fp)
        close(fd)
        with open(fp, 'w') as f:
            f.write('\n')
        artifacts_data = {'demultiplexed': {'filepaths': [(fp, 'biom')],
                                            'artifact_type': 'BIOM'}}
        job = _create_job()
        job._set_status('running')
        alljobs = set(self._get_all_job_ids())
        job.complete(True, artifacts_data=artifacts_data)
        # When completing the previous job, it creates a new job that needs
        # to validate the BIOM table that is being added as new artifact.
        # Hence, this job is still in running state until the validation job
        # is completed. Note that this is tested by making sure that the status
        # of this job is running, and that we have one more job than before
        # (see assertEqual with len of all jobs)
        self.assertEqual(job.status, 'running')
        self.assertTrue(job.step.startswith(
            'Validating outputs (1 remaining) via job(s)'))
        obsjobs = set(self._get_all_job_ids())
        # The complete call above submits 2 new jobs: the validator job and
        # the release validators job. Hence the +2
        self.assertEqual(len(obsjobs), len(alljobs) + 2)
        self._wait_for_job(job)
        # Retrieve the job that is performing the validation:
        val_job = qdb.processing_job.ProcessingJob(job.step.rsplit(" ", 1)[-1])
        # Test the the output artifact is going to be named based on the
        # input parameters
        self.assertEqual(
            loads(val_job.parameters.values['provenance'])['name'],
            "demultiplexed golay_12 1.5")
    def test_complete_failure(self):
        # A failed completion moves the job to error and records the message.
        job = _create_job()
        job.complete(False, error="Job failure")
        self.assertEqual(job.status, 'error')
        self.assertEqual(job.log,
                         qdb.logger.LogEntry.newest_records(numrecords=1)[0])
        self.assertEqual(job.log.msg, 'Job failure')
        # Test the artifact definition case
        job = _create_job()
        job._set_status('running')
        params = qdb.software.Parameters.load(
            qdb.software.Command(4),
            values_dict={'template': 1, 'files': 'ignored',
                         'artifact_type': 'BIOM',
                         'provenance': dumps(
                             {'job': job.id,
                              'cmd_out_id': 3})}
        )
        obs = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('test@foo.bar'), params, True)
        job._set_validator_jobs([obs])
        obs.complete(False, error="Validation failure")
        self.assertEqual(obs.status, 'error')
        self.assertEqual(obs.log.msg, 'Validation failure')
        self.assertEqual(job.status, 'running')
        # A failed validator propagates its error to the parent job.
        job.release_validators()
        self.assertEqual(job.status, 'error')
        self.assertEqual(
            job.log.msg, '1 validator jobs failed: Validator %s '
            'error message: Validation failure' % obs.id)
    def test_complete_error(self):
        # Only running jobs may be completed; tester1 is queued.
        with self.assertRaises(
                qdb.exceptions.QiitaDBOperationNotPermittedError):
            self.tester1.complete(True, artifacts_data={})

    def test_set_error(self):
        # Queued and running jobs can be flipped to error; finished ones not.
        job1 = _create_job()
        job1._set_status('queued')
        job2 = _create_job()
        job2._set_status('running')
        for t in [job1, job2]:
            t._set_error('Job failure')
            self.assertEqual(t.status, 'error')
            self.assertEqual(
                t.log, qdb.logger.LogEntry.newest_records(numrecords=1)[0])
        with self.assertRaises(
                qdb.exceptions.QiitaDBOperationNotPermittedError):
            self.tester3._set_error("Job failure")
    def test_update_heartbeat_state(self):
        # A heartbeat on a running job refreshes its timestamp; on a queued
        # job it also moves the job to running; on a finished job it raises.
        job = _create_job()
        job._set_status('running')
        before = datetime.now()
        job.update_heartbeat_state()
        self.assertTrue(before < job.heartbeat < datetime.now())
        job = _create_job()
        job._set_status('queued')
        before = datetime.now()
        job.update_heartbeat_state()
        self.assertTrue(before < job.heartbeat < datetime.now())
        self.assertEqual(job.status, 'running')
        with self.assertRaises(
                qdb.exceptions.QiitaDBOperationNotPermittedError):
            self.tester3.update_heartbeat_state()
def test_step_setter(self):
job = _create_job()
job._set_status('running')
job.step = 'demultiplexing'
self.assertEqual(job.step, 'demultiplexing')
job.step = 'generating demux file'
self.assertEqual(job.step, 'generating demux file')
with self.assertRaises(
qdb.exceptions.QiitaDBOperationNotPermittedError):
self.tester1.step = 'demultiplexing'
with self.assertRaises(
qdb.exceptions.QiitaDBOperationNotPermittedError):
self.tester3.step = 'demultiplexing'
with self.assertRaises(
qdb.exceptions.QiitaDBOperationNotPermittedError):
self.tester4.step = 'demultiplexing'
    def test_update_children(self):
        # Create a workflow so we can test this functionality
        exp_command = qdb.software.Command(1)
        json_str = (
            '{"input_data": 1, "max_barcode_errors": 1.5, '
            '"barcode_type": "golay_12", "max_bad_run_length": 3, '
            '"rev_comp": false, "phred_quality_threshold": 3, '
            '"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
            '"min_per_read_length_fraction": 0.75, "sequence_max_n": 0, '
            '"phred_offset": "auto"}')
        exp_params = qdb.software.Parameters.load(exp_command,
                                                  json_str=json_str)
        exp_user = qdb.user.User('test@foo.bar')
        name = "Test processing workflow"
        tester = qdb.processing_job.ProcessingWorkflow.from_scratch(
            exp_user, exp_params, name=name, force=True)
        parent = tester.graph.nodes()[0]
        connections = {parent: {'demultiplexed': 'input_data'}}
        dflt_params = qdb.software.DefaultParameters(10)
        tester.add(dflt_params, connections=connections)
        # we could get the child using tester.graph.nodes()[1] but networkx
        # doesn't assure order so using the actual graph to get the child
        child = nx.topological_sort(tester.graph)[1]
        # Remap input artifact 1 -> 3 and check it propagates to the child.
        mapping = {1: 3}
        obs = parent._update_children(mapping)
        exp = [child]
        self.assertTrue(obs, exp)
        self.assertEqual(child.input_artifacts,
                         [qdb.artifact.Artifact(3)])
    def test_outputs(self):
        # outputs raises while the job is running and returns the created
        # artifacts (keyed by output name) once the job succeeds.
        job = _create_job()
        job._set_status('running')
        QE = qdb.exceptions
        with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
            job.outputs
        fd, fp = mkstemp(suffix="_table.biom")
        self._clean_up_files.append(fp)
        close(fd)
        with open(fp, 'w') as f:
            f.write('\n')
        artifact_data = {'filepaths': [(fp, 'biom')], 'artifact_type': 'BIOM'}
        params = qdb.software.Parameters.load(
            qdb.software.Command(4),
            values_dict={'template': 1, 'files': fp,
                         'artifact_type': 'BIOM',
                         'provenance': dumps(
                             {'job': job.id,
                              'cmd_out_id': 3,
                              'name': 'outArtifact'})}
        )
        obs = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('test@foo.bar'), params, True)
        job._set_validator_jobs([obs])
        exp_artifact_count = qdb.util.get_count('qiita.artifact') + 1
        obs._complete_artifact_definition(artifact_data)
        job.release_validators()
        self.assertEqual(job.status, 'success')
        artifact = qdb.artifact.Artifact(exp_artifact_count)
        obs = job.outputs
        self.assertEqual(obs, {'OTU table': artifact})
        self._clean_up_files.extend([afp for _, afp, _ in artifact.filepaths])
        # The artifact takes its name from the provenance 'name' entry.
        self.assertEqual(artifact.name, 'outArtifact')
def test_processing_job_workflow(self):
# testing None
job = qdb.processing_job.ProcessingJob(
"063e553b-327c-4818-ab4a-adfe58e49860")
self.assertIsNone(job.processing_job_workflow)
# testing actual workflow
job = qdb.processing_job.ProcessingJob(
"b72369f9-a886-4193-8d3d-f7b504168e75")
self.assertEqual(job.processing_job_workflow,
qdb.processing_job.ProcessingWorkflow(1))
# testing child job from workflow
job = qdb.processing_job.ProcessingJob(
'd19f76ee-274e-4c1b-b3a2-a12d73507c55')
self.assertEqual(job.processing_job_workflow,
qdb.processing_job.ProcessingWorkflow(1))
def test_hidden(self):
self.assertTrue(self.tester1.hidden)
self.assertTrue(self.tester2.hidden)
self.assertFalse(self.tester3.hidden)
self.assertTrue(self.tester4.hidden)
    def test_hide(self):
        # Only errored jobs can be hidden.
        QE = qdb.exceptions
        # It's in a queued state
        with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
            self.tester1.hide()
        # It's in a running state
        with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
            self.tester2.hide()
        # It's in a success state
        with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
            self.tester3.hide()
        job = _create_job()
        job._set_error('Setting to error for testing')
        self.assertFalse(job.hidden)
        job.hide()
        self.assertTrue(job.hidden)
@qiita_test_checker()
class ProcessingWorkflowTests(TestCase):
    """Tests for qdb.processing_job.ProcessingWorkflow."""

    def test_name(self):
        self.assertEqual(qdb.processing_job.ProcessingWorkflow(1).name,
                         'Testing processing workflow')

    def test_user(self):
        self.assertEqual(qdb.processing_job.ProcessingWorkflow(1).user,
                         qdb.user.User('shared@foo.bar'))

    def test_graph(self):
        # Workflow 1 holds two fixture jobs connected by a single edge.
        obs = qdb.processing_job.ProcessingWorkflow(1).graph
        self.assertTrue(isinstance(obs, nx.DiGraph))
        exp_nodes = [
            qdb.processing_job.ProcessingJob(
                'b72369f9-a886-4193-8d3d-f7b504168e75'),
            qdb.processing_job.ProcessingJob(
                'd19f76ee-274e-4c1b-b3a2-a12d73507c55')]
        self.assertItemsEqual(obs.nodes(), exp_nodes)
        self.assertEqual(obs.edges(), [(exp_nodes[0], exp_nodes[1])])
    def test_graph_only_root(self):
        # Workflow 2 consists of a single root job and no edges.
        obs = qdb.processing_job.ProcessingWorkflow(2).graph
        self.assertTrue(isinstance(obs, nx.DiGraph))
        exp_nodes = [
            qdb.processing_job.ProcessingJob(
                'ac653cb5-76a6-4a45-929e-eb9b2dee6b63')]
        self.assertItemsEqual(obs.nodes(), exp_nodes)
        self.assertEqual(obs.edges(), [])

    def test_raise_if_not_in_construction(self):
        # We just need to test that the execution continues (i.e. no raise)
        tester = qdb.processing_job.ProcessingWorkflow(2)
        tester._raise_if_not_in_construction()

    def test_raise_if_not_in_construction_error(self):
        # Workflow 1 is past construction, so the check must raise.
        tester = qdb.processing_job.ProcessingWorkflow(1)
        with self.assertRaises(
                qdb.exceptions.QiitaDBOperationNotPermittedError):
            tester._raise_if_not_in_construction()
    def test_submit(self):
        # In order to test a success, we need to actually run the jobs, which
        # will mean to run split libraries, for example.
        pass
    def test_from_default_workflow(self):
        # Instantiating a workflow from the default template should create
        # the expected two-job graph with the right parameters wired in.
        exp_user = qdb.user.User('test@foo.bar')
        dflt_wf = qdb.software.DefaultWorkflow(1)
        req_params = {qdb.software.Command(1): {'input_data': 1}}
        name = "Test processing workflow"
        obs = qdb.processing_job.ProcessingWorkflow.from_default_workflow(
            exp_user, dflt_wf, req_params, name=name, force=True)
        self.assertEqual(obs.name, name)
        self.assertEqual(obs.user, exp_user)
        obs_graph = obs.graph
        self.assertTrue(isinstance(obs_graph, nx.DiGraph))
        self.assertEqual(len(obs_graph.nodes()), 2)
        obs_edges = obs_graph.edges()
        self.assertEqual(len(obs_edges), 1)
        obs_src = obs_edges[0][0]
        obs_dst = obs_edges[0][1]
        self.assertTrue(isinstance(obs_src, qdb.processing_job.ProcessingJob))
        self.assertTrue(isinstance(obs_dst, qdb.processing_job.ProcessingJob))
        # NOTE(review): assertTrue here ignores its second argument (it is
        # treated as the failure message); assertEqual was probably intended.
        self.assertTrue(obs_src.command, qdb.software.Command(1))
        self.assertTrue(obs_dst.command, qdb.software.Command(1))
        obs_params = obs_dst.parameters.values
        # The child's input_data is wired to the parent's output.
        exp_params = {
            'input_data': [obs_src.id, u'demultiplexed'],
            'reference': 1,
            'similarity': 0.97,
            'sortmerna_coverage': 0.97,
            'sortmerna_e_value': 1,
            'sortmerna_max_pos': 10000,
            'threads': 1}
        self.assertEqual(obs_params, exp_params)
        exp_pending = {obs_src.id: {'input_data': 'demultiplexed'}}
        self.assertEqual(obs_dst.pending, exp_pending)
    def test_from_default_workflow_error(self):
        # Missing or mismatched required parameters must raise with an
        # explanatory message.
        with self.assertRaises(qdb.exceptions.QiitaDBError) as err:
            qdb.processing_job.ProcessingWorkflow.from_default_workflow(
                qdb.user.User('test@foo.bar'), qdb.software.DefaultWorkflow(1),
                {}, name="Test name")
        exp = ('Provided required parameters do not match the initial set of '
               'commands for the workflow. Command(s) "Split libraries FASTQ"'
               ' are missing the required parameter set.')
        self.assertEqual(str(err.exception), exp)
        req_params = {qdb.software.Command(1): {'input_data': 1},
                      qdb.software.Command(2): {'input_data': 2}}
        with self.assertRaises(qdb.exceptions.QiitaDBError) as err:
            qdb.processing_job.ProcessingWorkflow.from_default_workflow(
                qdb.user.User('test@foo.bar'), qdb.software.DefaultWorkflow(1),
                req_params, name="Test name")
        exp = ('Provided required parameters do not match the initial set of '
               'commands for the workflow. Paramters for command(s) '
               '"Split libraries" have been provided, but they are not the '
               'initial commands for the workflow.')
        self.assertEqual(str(err.exception), exp)
def test_from_scratch(self):
    """from_scratch builds a single-job workflow from explicit parameters."""
    exp_command = qdb.software.Command(1)
    json_str = (
        '{"input_data": 1, "max_barcode_errors": 1.5, '
        '"barcode_type": "golay_12", "max_bad_run_length": 3, '
        '"rev_comp": false, "phred_quality_threshold": 3, '
        '"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
        '"min_per_read_length_fraction": 0.75, "sequence_max_n": 0, '
        '"phred_offset": "auto"}')
    exp_params = qdb.software.Parameters.load(exp_command,
                                              json_str=json_str)
    exp_user = qdb.user.User('test@foo.bar')
    name = "Test processing workflow"

    obs = qdb.processing_job.ProcessingWorkflow.from_scratch(
        exp_user, exp_params, name=name, force=True)
    self.assertEqual(obs.name, name)
    self.assertEqual(obs.user, exp_user)
    obs_graph = obs.graph
    self.assertTrue(isinstance(obs_graph, nx.DiGraph))
    # The graph should contain exactly one root job carrying the supplied
    # parameters, and no edges.
    nodes = obs_graph.nodes()
    self.assertEqual(len(nodes), 1)
    self.assertEqual(nodes[0].parameters, exp_params)
    self.assertEqual(obs_graph.edges(), [])
def test_add(self):
    """add() connects a job to an existing one, or adds a new root job."""
    exp_command = qdb.software.Command(1)
    json_str = (
        '{"input_data": 1, "max_barcode_errors": 1.5, '
        '"barcode_type": "golay_12", "max_bad_run_length": 3, '
        '"rev_comp": false, "phred_quality_threshold": 3, '
        '"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
        '"min_per_read_length_fraction": 0.75, "sequence_max_n": 0, '
        '"phred_offset": "auto"}')
    exp_params = qdb.software.Parameters.load(exp_command,
                                              json_str=json_str)
    exp_user = qdb.user.User('test@foo.bar')
    name = "Test processing workflow"
    obs = qdb.processing_job.ProcessingWorkflow.from_scratch(
        exp_user, exp_params, name=name, force=True)

    # Connect a second job whose 'input_data' comes from the root job's
    # 'demultiplexed' output.
    parent = obs.graph.nodes()[0]
    connections = {parent: {'demultiplexed': 'input_data'}}
    dflt_params = qdb.software.DefaultParameters(10)
    obs.add(dflt_params, connections=connections, force=True)

    obs_graph = obs.graph
    self.assertTrue(isinstance(obs_graph, nx.DiGraph))
    obs_nodes = obs_graph.nodes()
    self.assertEqual(len(obs_nodes), 2)
    obs_edges = obs_graph.edges()
    self.assertEqual(len(obs_edges), 1)
    obs_src = obs_edges[0][0]
    obs_dst = obs_edges[0][1]
    self.assertEqual(obs_src, parent)
    self.assertTrue(isinstance(obs_dst, qdb.processing_job.ProcessingJob))
    obs_params = obs_dst.parameters.values
    # The child's parameters are the default set plus the connection,
    # encoded as [parent job id, output name].
    exp_params = {
        'input_data': [obs_src.id, u'demultiplexed'],
        'reference': 1,
        'similarity': 0.97,
        'sortmerna_coverage': 0.97,
        'sortmerna_e_value': 1,
        'sortmerna_max_pos': 10000,
        'threads': 1}
    self.assertEqual(obs_params, exp_params)

    # Adding a new root job
    # This also tests that the `graph` property returns the graph correctly
    # when there are root nodes that don't have any children
    dflt_params = qdb.software.DefaultParameters(1)
    obs.add(dflt_params, req_params={'input_data': 1}, force=True)

    obs_graph = obs.graph
    self.assertTrue(isinstance(obs_graph, nx.DiGraph))
    root_obs_nodes = obs_graph.nodes()
    self.assertEqual(len(root_obs_nodes), 3)
    obs_edges = obs_graph.edges()
    self.assertEqual(len(obs_edges), 1)
    # The only node not previously present must be the new root job.
    obs_new_jobs = set(root_obs_nodes) - set(obs_nodes)
    self.assertEqual(len(obs_new_jobs), 1)
    obs_job = obs_new_jobs.pop()
    exp_params = {'barcode_type': u'golay_12',
                  'input_data': 1,
                  'max_bad_run_length': 3,
                  'max_barcode_errors': 1.5,
                  'min_per_read_length_fraction': 0.75,
                  'phred_quality_threshold': 3,
                  'rev_comp': False,
                  'rev_comp_barcode': False,
                  'rev_comp_mapping_barcodes': False,
                  'sequence_max_n': 0,
                  'phred_offset': 'auto'}
    self.assertEqual(obs_job.parameters.values, exp_params)
def test_add_error(self):
    """add() is rejected on a workflow that is not editable."""
    # Presumably workflow 1 in the test DB is in a non-editable state,
    # which is why the operation is not permitted -- TODO confirm.
    with self.assertRaises(
            qdb.exceptions.QiitaDBOperationNotPermittedError):
        qdb.processing_job.ProcessingWorkflow(1).add({}, None)
def test_remove(self):
    """remove() deletes a leaf job; cascade=True removes descendants too."""
    exp_command = qdb.software.Command(1)
    json_str = (
        '{"input_data": 1, "max_barcode_errors": 1.5, '
        '"barcode_type": "golay_12", "max_bad_run_length": 3, '
        '"rev_comp": false, "phred_quality_threshold": 3, '
        '"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
        '"min_per_read_length_fraction": 0.75, "sequence_max_n": 0,'
        '"phred_offset": "auto"}')
    exp_params = qdb.software.Parameters.load(exp_command,
                                              json_str=json_str)
    exp_user = qdb.user.User('test@foo.bar')
    name = "Test processing workflow"
    tester = qdb.processing_job.ProcessingWorkflow.from_scratch(
        exp_user, exp_params, name=name, force=True)

    # Build a two-job workflow, then remove the child (edge destination);
    # only the parent should remain.
    parent = tester.graph.nodes()[0]
    connections = {parent: {'demultiplexed': 'input_data'}}
    dflt_params = qdb.software.DefaultParameters(10)
    tester.add(dflt_params, connections=connections)

    self.assertEqual(len(tester.graph.nodes()), 2)
    tester.remove(tester.graph.edges()[0][1])

    g = tester.graph
    obs_nodes = g.nodes()
    self.assertEqual(len(obs_nodes), 1)
    self.assertEqual(obs_nodes[0], parent)
    self.assertEqual(g.edges(), [])

    # Test with cascade = true
    exp_user = qdb.user.User('test@foo.bar')
    dflt_wf = qdb.software.DefaultWorkflow(1)
    req_params = {qdb.software.Command(1): {'input_data': 1}}
    name = "Test processing workflow"
    tester = qdb.processing_job.ProcessingWorkflow.from_default_workflow(
        exp_user, dflt_wf, req_params, name=name, force=True)

    # Removing the root (edge source) with cascade=True empties the graph.
    tester.remove(tester.graph.edges()[0][0], cascade=True)
    self.assertEqual(tester.graph.nodes(), [])
def test_remove_error(self):
    """remove() is rejected on non-editable workflows and on parent jobs."""
    # Presumably workflow 1 is not in an editable state -- TODO confirm.
    with self.assertRaises(
            qdb.exceptions.QiitaDBOperationNotPermittedError):
        qdb.processing_job.ProcessingWorkflow(1).remove(
            qdb.processing_job.ProcessingJob(
                'b72369f9-a886-4193-8d3d-f7b504168e75'))

    exp_user = qdb.user.User('test@foo.bar')
    dflt_wf = qdb.software.DefaultWorkflow(1)
    req_params = {qdb.software.Command(1): {'input_data': 1}}
    name = "Test processing workflow"
    tester = qdb.processing_job.ProcessingWorkflow.from_default_workflow(
        exp_user, dflt_wf, req_params, name=name, force=True)

    # Removing a job that still has children (the edge source) without
    # cascade is not permitted.
    with self.assertRaises(
            qdb.exceptions.QiitaDBOperationNotPermittedError):
        tester.remove(tester.graph.edges()[0][0])
@qiita_test_checker()
class ProcessingJobDuplicated(TestCase):
    """Tests creation of jobs whose parameters duplicate existing jobs."""

    def test_create_duplicated(self):
        """Duplicated jobs are rejected unless prior jobs errored out."""
        # A job identical to one that is queued, running or succeeded
        # cannot be created (force defaults to True in _create_job's first
        # call, presumably -- the second call passes False to disable it).
        job = _create_job()
        job._set_status('success')
        with self.assertRaisesRegexp(ValueError, 'Cannot create job because '
                                     'the parameters are the same as jobs '
                                     'that are queued, running or already '
                                     'have succeeded:') as context:
            _create_job(False)

        # If it failed it's because we have jobs in non finished status so
        # setting them as error. This is basically testing that the duplicated
        # job creation allows to create if all jobs are error and if success
        # that the job doesn't have children
        # NOTE(review): ``exception.message`` is Python 2 only; the listed
        # jobs appear one per line as "<job id>: <status>".
        for jobs in context.exception.message.split('\n')[1:]:
            jid, status = jobs.split(': ')
            if status != 'success':
                qdb.processing_job.ProcessingJob(jid)._set_status('error')
        _create_job(False)
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    main()
| bsd-3-clause |
gajduk/greedy-tsp | optimal_greedy_error.py | 1 | 1789 | import random
import matplotlib.pyplot as plt
from core import greedy,optimal
def reproduce_greedy_with_error():
    """Monte-Carlo comparison of noisy greedy TSP tours vs the optimum.

    For each noise level (sigma in 0.24..0.40), repeatedly draws ``n``
    random cities, runs the greedy heuristic with noisy distances, solves
    the instance exactly, and records the relative tour-length difference
    in percent.  One line per sigma is appended to a results file in the
    form ``sigma:diff1,diff2,...``.

    NOTE: this module is Python 2 (print statements).
    """
    size = [1000.0, 1000.0]
    start = [e / 2 for e in size]  # tours start from the centre of the area
    n_diffs = []
    repeats = 1000
    n = 10
    filename = "correct_res_batch_2_" + str(n) + "_" + str(repeats) + ".txt"
    # Truncate/create the output file before appending per-sigma lines.
    with open(filename, "w") as f:
        pass
    error_variances = [e * .04 for e in range(6, 11)]
    for error_variance in error_variances:
        diffs = []
        for k in range(repeats):
            # Progress indicator: current sigma and iteration count.
            print error_variance, str(k + 1) + "/" + str(repeats)
            coords = [[random.random() * e for e in size] for i in range(n)]
            greedy_dist, greedy_order = greedy(start, coords, error_variance)
            # Passing greedy_dist presumably lets the exact solver use the
            # greedy tour as an upper bound -- TODO confirm in core.py.
            optimal_dist, optimal_order = optimal(start, coords, greedy_dist)
            diff = (greedy_dist - optimal_dist) / optimal_dist * 100.0
            diffs.append(diff)
        with open(filename, "a") as f:
            f.write(str(error_variance) + ":" +
                    ",".join([str(e) for e in diffs]) + "\n")
        n_diffs.append(diffs)
    # Mean difference per sigma, for a quick console summary.
    print [sum(diffs) / repeats for diffs in n_diffs]
def load_data_and_plot():
    """Load per-sigma tour-length differences and save a box plot.

    Reads "correct_res_10_1000.txt", where each line has the form
    ``sigma:diff1,diff2,...`` (relative differences, in percent, between
    the noisy-greedy and optimal tours), and writes a box plot of the
    per-sigma distributions to ``sigma_diff.png``.
    """
    filename = "correct_res_10_1000.txt"
    n_diffs = []
    sigmas = [e * 0.04 for e in range(11)]  # noise levels 0.0 .. 0.40
    with open(filename, "r") as pin:
        for line in pin:
            sigma, diff_s = line.split(":")
            diffs = [float(e) for e in diff_s.split(",")]
            diffs.sort()
            n_diffs.append(diffs)
    plt.figure(figsize=(5, 4))
    plt.boxplot(n_diffs)
    # Raw string: '\s' is an invalid escape sequence (DeprecationWarning,
    # SyntaxWarning in newer Pythons); the backslash must reach
    # matplotlib's mathtext parser intact.
    plt.xlabel(r'$\sigma$')
    # Label every other tick; boxplot positions start at 1, hence the
    # leading empty label.
    labels = [str(e) if i % 2 == 0 else '' for i, e in enumerate(sigmas)]
    labels.insert(0, '')
    plt.xticks(range(len(sigmas) + 1), labels)
    plt.ylabel('Difference - %')
    plt.grid()
    plt.xlim([.5, len(sigmas) + .5])
    plt.ylim([-0.2, 50.2])
    plt.yticks([e * 5 for e in range(0, 11)],
               ['' if e % 2 == 1 else str(e * 5) for e in range(0, 11)])
    plt.tight_layout()
    plt.savefig('sigma_diff.png')
load_data_and_plot() | mit |
alshedivat/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 38 | 4036 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_rnn_model(features, labels, mode):
    """Character level recurrent neural network model to predict classes.

    Args:
      features: dict whose CHARS_FEATURE entry holds integer byte ids,
        presumably of shape [batch, MAX_DOCUMENT_LENGTH] -- see main().
      labels: integer class ids in [0, MAX_LABEL), or None in PREDICT mode.
      mode: a tf.estimator.ModeKeys value.

    Returns:
      A tf.estimator.EstimatorSpec appropriate for the requested mode.
    """
    # One-hot encode each byte (vocabulary of 256) and unstack the time
    # axis into a Python list, as required by static_rnn.
    byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
    byte_list = tf.unstack(byte_vectors, axis=1)

    cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
    # `encoding` is the final GRU state, summarising the whole document.
    _, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)

    logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)

    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions={
                'class': predicted_classes,
                'prob': tf.nn.softmax(logits)
            })

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    # EVAL mode: report accuracy alongside the loss.
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predicted_classes)
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    """Train and evaluate the character-level RNN on the DBpedia dataset."""
    # Prepare training and testing data
    dbpedia = tf.contrib.learn.datasets.load_dataset(
        'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
    # Column 1 holds the document text; `target` holds the class ids.
    x_train = pandas.DataFrame(dbpedia.train.data)[1]
    y_train = pandas.Series(dbpedia.train.target)
    x_test = pandas.DataFrame(dbpedia.test.data)[1]
    y_test = pandas.Series(dbpedia.test.target)

    # Process vocabulary: map each document to a fixed-length sequence of
    # byte ids (MAX_DOCUMENT_LENGTH entries per document).
    char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
        MAX_DOCUMENT_LENGTH)
    x_train = np.array(list(char_processor.fit_transform(x_train)))
    x_test = np.array(list(char_processor.transform(x_test)))

    # Build model
    classifier = tf.estimator.Estimator(model_fn=char_rnn_model)

    # Train.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={CHARS_FEATURE: x_train},
        y=y_train,
        batch_size=128,
        num_epochs=None,
        shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=100)

    # Eval.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={CHARS_FEATURE: x_test},
        y=y_test,
        num_epochs=1,
        shuffle=False)
    scores = classifier.evaluate(input_fn=test_input_fn)
    print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
    # Command-line entry point: parse the known flags here and pass any
    # remaining arguments through to tf.app.run, which invokes main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--test_with_fake_data',
        default=False,
        help='Test the example code with fake data.',
        action='store_true')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
kjung/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels so as to minimize overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet

# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)

# Ticker symbol -> display name for every company in the analysis.
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
    'TOT': 'Total',
    'XOM': 'Exxon',
    'CVX': 'Chevron',
    'COP': 'ConocoPhillips',
    'VLO': 'Valero Energy',
    'MSFT': 'Microsoft',
    'IBM': 'IBM',
    'TWX': 'Time Warner',
    'CMCSA': 'Comcast',
    'CVC': 'Cablevision',
    'YHOO': 'Yahoo',
    'DELL': 'Dell',
    'HPQ': 'HP',
    'AMZN': 'Amazon',
    'TM': 'Toyota',
    'CAJ': 'Canon',
    'MTU': 'Mitsubishi',
    'SNE': 'Sony',
    'F': 'Ford',
    'HMC': 'Honda',
    'NAV': 'Navistar',
    'NOC': 'Northrop Grumman',
    'BA': 'Boeing',
    'KO': 'Coca Cola',
    'MMM': '3M',
    'MCD': 'Mc Donalds',
    'PEP': 'Pepsi',
    'MDLZ': 'Kraft Foods',
    'K': 'Kellogg',
    'UN': 'Unilever',
    'MAR': 'Marriott',
    'PG': 'Procter Gamble',
    'CL': 'Colgate-Palmolive',
    'GE': 'General Electrics',
    'WFC': 'Wells Fargo',
    'JPM': 'JPMorgan Chase',
    'AIG': 'AIG',
    'AXP': 'American express',
    'BAC': 'Bank of America',
    'GS': 'Goldman Sachs',
    'AAPL': 'Apple',
    'SAP': 'SAP',
    'CSCO': 'Cisco',
    'TXN': 'Texas instruments',
    'XRX': 'Xerox',
    'LMT': 'Lookheed Martin',
    'WMT': 'Wal-Mart',
    'WBA': 'Walgreen',
    'HD': 'Home Depot',
    'GSK': 'GlaxoSmithKline',
    'PFE': 'Pfizer',
    'SNY': 'Sanofi-Aventis',
    'NVS': 'Novartis',
    'KMB': 'Kimberly-Clark',
    'R': 'Ryder',
    'GD': 'General Dynamics',
    'RTN': 'Raytheon',
    'CVS': 'CVS',
    'CAT': 'Caterpillar',
    'DD': 'DuPont de Nemours'}

symbols, names = np.array(list(symbol_dict.items())).T

# Download the daily OHLC quotes for every symbol over the chosen period.
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
          for symbol in symbols]
# Daily opening and closing prices for every symbol.  Renamed from
# ``open``/``close`` so the ``open`` builtin is not shadowed for the rest
# of the script.
open_prices = np.array([q.open for q in quotes]).astype(np.float)
close_prices = np.array([q.close for q in quotes]).astype(np.float)

# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()

# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)

###############################################################################
# Cluster using affinity propagation
# (clustering on the estimated covariance; the number of clusters is
# chosen automatically by the algorithm)
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()

for i in range(n_labels + 1):
    print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))

###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane

# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
    n_components=2, eigen_solver='dense', n_neighbors=6)

embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')

# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
# Keep only the upper-triangle edges with partial correlation above a
# small threshold.
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)

# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
            cmap=plt.cm.spectral)

# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
#            linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
            for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
# Edge strength is encoded both in colour and in line width.
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)

# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
        zip(names, labels, embedding.T)):

    # Direction of the nearest neighbour along each axis decides which
    # side of the node the label is placed on.
    dx = x - embedding[0]
    dx[index] = 1
    dy = y - embedding[1]
    dy[index] = 1
    this_dx = dx[np.argmin(np.abs(dy))]
    this_dy = dy[np.argmin(np.abs(dx))]
    if this_dx > 0:
        horizontalalignment = 'left'
        x = x + .002
    else:
        horizontalalignment = 'right'
        x = x - .002
    if this_dy > 0:
        verticalalignment = 'bottom'
        y = y + .002
    else:
        verticalalignment = 'top'
        y = y - .002
    plt.text(x, y, name, size=10,
             horizontalalignment=horizontalalignment,
             verticalalignment=verticalalignment,
             bbox=dict(facecolor='w',
                       edgecolor=plt.cm.spectral(label / float(n_labels)),
                       alpha=.6))

# Pad the axes so that labels near the border are not clipped.
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
         embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
         embedding[1].max() + .03 * embedding[1].ptp())

plt.show()
MartinSavc/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
                               left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X

    Most significant bits are considered as the prefix.

    Implemented as a vectorised binary search over the prefix length:
    for each query, ``lo``/``hi`` bracket the candidate depths and
    ``res`` records the longest depth at which at least one entry of
    ``tree`` matched.
    """
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)

    # First try the full hash length: queries matching completely are
    # done immediately (setting lo == hi excludes them below).
    left_idx, right_idx = _find_matching_indices(tree, bin_X,
                                                 left_masks[hi],
                                                 right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size

    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    while kept.shape[0]:
        mid = (lo.take(kept) + hi.take(kept)) // 2

        left_idx, right_idx = _find_matching_indices(tree,
                                                     bin_X.take(kept),
                                                     left_masks[mid],
                                                     right_masks[mid])
        found = right_idx > left_idx
        # Match at depth mid: record it and search for a longer prefix.
        mid_found = mid[found]
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        # No match at depth mid: the longest prefix must be shorter.
        hi[kept[~found]] = mid[~found]

        kept = r[lo < hi]

    return res
class ProjectionToHashMixin(object):
    """Turn a transformed real-valued array into packed bit hashes."""

    @staticmethod
    def _to_hash(projected):
        """Pack the signs of each row of ``projected`` into integer words."""
        n_samples, n_bits = projected.shape
        if n_bits % 8:
            raise ValueError('Require reduced dimensionality to be a multiple '
                             'of 8 for hashing')
        # XXX: perhaps non-copying operation better
        sign_bits = (projected > 0).astype(int)
        hashes = np.packbits(sign_bits).view(dtype=HASH_DTYPE)
        return hashes.reshape(n_samples, -1)

    def fit_transform(self, X, y=None):
        """Fit the underlying projection, then hash ``X``."""
        self.fit(X)
        return self.transform(X)

    def transform(self, X, y=None):
        """Project ``X`` via the parent transformer and hash the result."""
        projected = super(ProjectionToHashMixin, self).transform(X)
        return self._to_hash(projected)
class GaussianRandomProjectionHash(ProjectionToHashMixin,
                                   GaussianRandomProjection):
    """Use GaussianRandomProjection to produce a cosine LSH fingerprint"""

    # n_components is the hash length in bits (must be a multiple of 8
    # for _to_hash; LSHForest.fit passes MAX_HASH_SIZE).
    def __init__(self,
                 n_components=8,
                 random_state=None):
        super(GaussianRandomProjectionHash, self).__init__(
            n_components=n_components,
            random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
             n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
             random_state=None):
    # Per scikit-learn convention, __init__ only stores the given
    # hyper-parameters; all validation and derived state belong in fit().
    self.n_estimators = n_estimators
    self.radius = radius
    self.random_state = random_state
    self.n_candidates = n_candidates
    self.n_neighbors = n_neighbors
    self.min_hash_match = min_hash_match
    self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
    """Computes the cosine distance.

    Distance is from the query to points in the candidates array.
    Returns argsort of distances in the candidates
    array and sorted distances.
    """
    if candidates.shape == (0,):
        # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
        return np.empty(0, dtype=np.int), np.empty(0, dtype=float)

    if sparse.issparse(self._fit_X):
        candidate_X = self._fit_X[candidates]
    else:
        # mode='clip' guards against out-of-range candidate indices.
        candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
    distances = pairwise_distances(query, candidate_X,
                                   metric='cosine')[0]
    distance_positions = np.argsort(distances)
    distances = distances.take(distance_positions, mode='clip', axis=0)
    return distance_positions, distances
def _generate_masks(self):
    """Creates left and right masks for all hash lengths."""
    tri_size = MAX_HASH_SIZE + 1
    # Called once on fitting, output is independent of hashes
    # For prefix length h: left_mask[h] keeps the h most significant
    # bits, right_mask[h] sets the remaining (least significant) bits.
    left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
    right_mask = left_mask[::-1, ::-1]

    self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
    self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
    """Performs the Synchronous ascending phase.

    Returns an array of candidates, their distance ranks and
    distances.
    """
    index_size = self._fit_X.shape[0]
    # Number of candidates considered including duplicates
    # XXX: not sure whether this is being calculated correctly wrt
    # duplicates from different iterations through a single tree
    n_candidates = 0
    candidate_set = set()
    min_candidates = self.n_candidates * self.n_estimators
    # Shorten the matched prefix one bit at a time until enough unique
    # candidates have been collected (or min_hash_match is reached).
    while (max_depth > self.min_hash_match and
           (n_candidates < min_candidates or
            len(candidate_set) < n_neighbors)):

        left_mask = self._left_mask[max_depth]
        right_mask = self._right_mask[max_depth]
        for i in range(self.n_estimators):
            start, stop = _find_matching_indices(self.trees_[i],
                                                 bin_queries[i],
                                                 left_mask, right_mask)
            n_candidates += stop - start
            candidate_set.update(
                self.original_indices_[i][start:stop].tolist())
        max_depth -= 1

    candidates = np.fromiter(candidate_set, count=len(candidate_set),
                             dtype=np.intp)
    # For insufficient candidates, candidates are filled.
    # Candidates are filled from unselected indices uniformly.
    if candidates.shape[0] < n_neighbors:
        warnings.warn(
            "Number of candidates is not sufficient to retrieve"
            " %i neighbors with"
            " min_hash_match = %i. Candidates are filled up"
            " uniformly from unselected"
            " indices." % (n_neighbors, self.min_hash_match))
        remaining = np.setdiff1d(np.arange(0, index_size), candidates)
        to_fill = n_neighbors - candidates.shape[0]
        candidates = np.concatenate((candidates, remaining[:to_fill]))

    ranks, distances = self._compute_distances(query,
                                               candidates.astype(int))
    return (candidates[ranks[:n_neighbors]],
            distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
    """Finds radius neighbors from the candidates obtained.

    Their distances from query are smaller than radius.
    Returns radius neighbors and distances.
    """
    ratio_within_radius = 1
    threshold = 1 - self.radius_cutoff_ratio
    total_candidates = np.array([], dtype=int)
    total_neighbors = np.array([], dtype=int)
    total_distances = np.array([], dtype=float)

    # Ascend (shorten the matched prefix) until the fraction of collected
    # candidates lying within the radius drops below the cutoff ratio.
    while (max_depth > self.min_hash_match and
           ratio_within_radius > threshold):

        left_mask = self._left_mask[max_depth]
        right_mask = self._right_mask[max_depth]
        candidates = []
        for i in range(self.n_estimators):
            start, stop = _find_matching_indices(self.trees_[i],
                                                 bin_queries[i],
                                                 left_mask, right_mask)
            candidates.extend(
                self.original_indices_[i][start:stop].tolist())
        # Only evaluate candidates not already seen at a deeper level.
        candidates = np.setdiff1d(candidates, total_candidates)
        total_candidates = np.append(total_candidates, candidates)
        ranks, distances = self._compute_distances(query, candidates)
        # distances are sorted, so entries [:m] are within the radius.
        m = np.searchsorted(distances, radius, side='right')
        # Merge the new in-radius neighbors, keeping the running distance
        # array sorted.
        positions = np.searchsorted(total_distances, distances[:m])
        total_neighbors = np.insert(total_neighbors, positions,
                                    candidates[ranks[:m]])
        total_distances = np.insert(total_distances, positions,
                                    distances[:m])
        ratio_within_radius = (total_neighbors.shape[0] /
                               float(total_candidates.shape[0]))
        max_depth = max_depth - 1
    return total_neighbors, total_distances
def fit(self, X, y=None):
    """Fit the LSH forest on the data.

    This creates binary hashes of input data points by getting the
    dot product of input points and hash_function then
    transforming the projection into a binary string array based
    on the sign (positive/negative) of the projection.
    A sorted array of binary hashes is created.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row
        corresponds to a single data point.

    Returns
    -------
    self : object
        Returns self.
    """
    self._fit_X = check_array(X, accept_sparse='csr')

    # Creates a g(p,x) for each tree
    self.hash_functions_ = []
    self.trees_ = []
    self.original_indices_ = []

    rng = check_random_state(self.random_state)
    int_max = np.iinfo(np.int32).max

    for i in range(self.n_estimators):
        # This is g(p,x) for a particular tree.
        # Builds a single tree. Hashing is done on an array of data points.
        # `GaussianRandomProjection` is used for hashing.
        # `n_components=hash size and n_features=n_dim.
        hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
                                              rng.randint(0, int_max))
        hashes = hasher.fit_transform(self._fit_X)[:, 0]
        # Store the hashes sorted, remembering the original sample order
        # so matched ranges can be mapped back to sample indices.
        original_index = np.argsort(hashes)
        bin_hashes = hashes[original_index]
        self.original_indices_.append(original_index)
        self.trees_.append(bin_hashes)
        self.hash_functions_.append(hasher)

    self._generate_masks()

    return self
def _query(self, X):
    """Performs descending phase to find maximum depth."""
    # Hash each query with every tree's hash function; after moving
    # the sample axis to the front the shape is
    # (n_samples, n_estimators, [hash_size]).
    per_tree = [hasher.transform(X)[:, 0]
                for hasher in self.hash_functions_]
    bin_queries = np.rollaxis(np.asarray(per_tree), 1)

    # Descend phase: per tree, the longest hash-prefix match of each
    # query against the sorted hashes; keep the deepest across trees.
    depths = []
    for tree, tree_queries in zip(self.trees_,
                                  np.rollaxis(bin_queries, 1)):
        depths.append(_find_longest_prefix_match(tree, tree_queries,
                                                 MAX_HASH_SIZE,
                                                 self._left_mask,
                                                 self._right_mask))
    return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
    """Returns n_neighbors of approximate nearest neighbors.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row
        corresponds to a single query.
    n_neighbors : int, optional (default = None)
        Number of neighbors required. If not provided, this will
        return the number specified at the initialization.
    return_distance : boolean, optional (default = True)
        Returns the distances of neighbors if set to True.

    Returns
    -------
    dist : array, shape (n_samples, n_neighbors)
        Array representing the cosine distances to each point,
        only present if return_distance=True.
    ind : array, shape (n_samples, n_neighbors)
        Indices of the approximate nearest points in the population
        matrix.
    """
    if not hasattr(self, 'hash_functions_'):
        raise ValueError("estimator should be fitted.")

    if n_neighbors is None:
        n_neighbors = self.n_neighbors

    X = check_array(X, accept_sparse='csr')

    neighbors, distances = [], []
    # Single descend pass shared by all queries; candidates are then
    # gathered per query at that depth.
    bin_queries, max_depth = self._query(X)
    for i in range(X.shape[0]):
        neighs, dists = self._get_candidates(X[i], max_depth[i],
                                             bin_queries[i],
                                             n_neighbors)
        neighbors.append(neighs)
        distances.append(dists)

    if return_distance:
        return np.array(distances), np.array(neighbors)
    else:
        return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
    """Finds the neighbors within a given radius of a point or points.

    Return the indices and distances of some points from the dataset
    lying in a ball with size ``radius`` around the points of the query
    array. Points lying on the boundary are included in the results.

    The result points are *not* necessarily sorted by distance to their
    query point.

    LSH Forest being an approximate method, some true neighbors from the
    indexed dataset might be missing from the results.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row
        corresponds to a single query.
    radius : float
        Limiting distance of neighbors to return.
        (default is the value passed to the constructor).
    return_distance : boolean, optional (default = True)
        Returns the distances of neighbors if set to True.

    Returns
    -------
    dist : array, shape (n_samples,) of arrays
        Each element is an array representing the cosine distances
        to some points found within ``radius`` of the respective query.
        Only present if ``return_distance=True``.
    ind : array, shape (n_samples,) of arrays
        Each element is an array of indices for neighbors within ``radius``
        of the respective query.
    """
    if not hasattr(self, 'hash_functions_'):
        raise ValueError("estimator should be fitted.")

    if radius is None:
        radius = self.radius

    X = check_array(X, accept_sparse='csr')

    neighbors, distances = [], []
    # Single descend pass shared by all queries; radius filtering is
    # done per query at that depth.
    bin_queries, max_depth = self._query(X)
    for i in range(X.shape[0]):
        neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
                                                   bin_queries[i], radius)
        neighbors.append(neighs)
        distances.append(dists)

    # Result rows are ragged, so they are packed as arrays of arrays.
    if return_distance:
        return _array_of_arrays(distances), _array_of_arrays(neighbors)
    else:
        return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
    """
    Inserts new data into the already fitted LSH Forest.

    Cost is proportional to new total size, so additions
    should be batched.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        New data point to be inserted into the LSH Forest.
    """
    X = check_array(X, accept_sparse='csr')

    # Not fitted yet: fall back to a plain fit on the new data.
    if not hasattr(self, 'hash_functions_'):
        return self.fit(X)

    if X.shape[1] != self._fit_X.shape[1]:
        raise ValueError("Number of features in X and"
                         " fitted array does not match.")

    n_new = X.shape[0]
    n_old = self._fit_X.shape[0]
    # Sample indices the new points will occupy in the stacked data.
    new_ids = np.arange(n_old, n_old + n_new)

    for tree_idx in range(self.n_estimators):
        new_hashes = self.hash_functions_[tree_idx].transform(X)[:, 0]
        # Insertion points that keep the per-tree hash array sorted.
        slots = self.trees_[tree_idx].searchsorted(new_hashes)
        self.trees_[tree_idx] = np.insert(self.trees_[tree_idx],
                                          slots, new_hashes)
        # Keep the sorted-position -> sample-index map in sync.
        self.original_indices_[tree_idx] = np.insert(
            self.original_indices_[tree_idx], slots, new_ids)

    # Append the raw points so exact distances can be computed later.
    if sparse.issparse(X) or sparse.issparse(self._fit_X):
        self._fit_X = sparse.vstack((self._fit_X, X))
    else:
        self._fit_X = np.row_stack((self._fit_X, X))
    return self
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.