repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
archman/phantasy | phantasy/tools/impact_model.py | 1 | 8243 | # encoding: UTF-8
"""
Implement phytool command 'impact-model'.
"""
from __future__ import print_function
import os
import sys
import logging
import traceback
import shutil
from argparse import ArgumentParser
import matplotlib.pyplot as plt
from phantasy.library.lattice.impact import OUTPUT_MODE_END
from phantasy.library.lattice.impact import build_lattice
from phantasy.library.lattice.impact import run_lattice
from phantasy.library.model.impact import build_result
from .common import loadMachineConfig
from .common import loadLatticeConfig
from .common import loadLayout
from .common import loadSettings
# Command-line interface for ``phytool impact-model``.
# NOTE(review): sys.argv[2:] is parsed in main(), so 'prog' is rebuilt here
# from argv[0] plus the subcommand name.
parser = ArgumentParser(prog=os.path.basename(sys.argv[0])+" impact-model",
                        description="Run IMPACT model and produce results")
# -v may be repeated conceptually via an integer level (0=warn, 1=info, 2+=debug)
parser.add_argument("-v", dest="verbosity", nargs='?', type=int, const=1, default=0, help="set the amount of output")
# machine / segment selection
parser.add_argument("--mach", dest="machine", help="name of machine or path of machine directory")
parser.add_argument("--subm", dest="submach", help="name of segment")
# explicit input-file overrides (otherwise resolved from the machine config)
parser.add_argument("--layout", dest="layoutpath", help="path of accelerator layout file (.csv)")
parser.add_argument("--settings", dest="settingspath", help="path to accelerator settings file (.json)")
parser.add_argument("--config", dest="configpath", help="path to accelerator configuration file (.ini)")
# optional sub-range of the lattice to process
parser.add_argument("--start", help="name of accelerator element to start processing")
parser.add_argument("--end", help="name of accelerator element to end processing")
# IMPACT data/work directories; when --work is omitted a temporary directory
# is used and removed afterwards (see rm_temp_dir in main)
parser.add_argument("--data", dest="datapath", help="path to directory with IMPACT data")
parser.add_argument("--work", dest="workpath", help="path to directory for executing IMPACT")
parser.add_argument("--plot", action="store_true", help="generate a plot of the model")
parser.add_argument("resultpath", nargs='?', help="path to write resulting model data")

# exported so the parent phytool dispatcher can show this subcommand's help
print_help = parser.print_help
def main():
    """
    Entry point for command 'impact-model'.

    Loads the machine configuration, layout, settings and lattice
    configuration, runs the IMPACT lattice, and then either plots the
    model (``--plot``) or writes a CSV summary of the results.

    Returns
    -------
    int
        Process exit code: 0 on success, 1 on any error.
    """
    args = parser.parse_args(sys.argv[2:])

    def rm_temp_dir(path):
        # Only delete the run directory when we created it ourselves:
        # no --work given means run_lattice used a temporary directory.
        if args.workpath is None:
            shutil.rmtree(path)

    if args.verbosity == 1:
        logging.getLogger().setLevel(logging.INFO)
    elif args.verbosity > 1:
        logging.getLogger().setLevel(logging.DEBUG)

    # Refuse to clobber an existing result file.
    if (args.resultpath is not None) and os.path.exists(args.resultpath):
        print("Error: destination result path already exists:",
              args.resultpath, file=sys.stderr)
        return 1

    try:
        mconfig, submach = loadMachineConfig(args.machine, args.submach)
    except Exception as e:
        if args.verbosity > 0:
            traceback.print_exc()
        print("Error loading machine configuration:", e, file=sys.stderr)
        return 1

    try:
        layout = loadLayout(args.layoutpath, mconfig, submach)
    except Exception as e:
        if args.verbosity > 0:
            traceback.print_exc()
        print("Error loading layout:", e, file=sys.stderr)
        return 1

    try:
        settings = loadSettings(args.settingspath, mconfig, submach)
    except Exception as e:
        if args.verbosity > 0:
            traceback.print_exc()
        print("Error loading settings:", e, file=sys.stderr)
        return 1

    try:
        config = loadLatticeConfig(args.configpath, mconfig, submach)
    except Exception as e:
        if args.verbosity > 0:
            traceback.print_exc()
        print("Error loading configuration:", e, file=sys.stderr)
        return 1

    try:
        lattice = build_lattice(layout, config=config, settings=settings,
                                start=args.start, end=args.end)
    except Exception as e:
        if args.verbosity > 0:
            traceback.print_exc()
        print("Error building lattice:", e, file=sys.stderr)
        return 1

    # Only per-element end output is needed for the model summary.
    lattice.outputMode = OUTPUT_MODE_END

    try:
        result_dir = run_lattice(lattice, config=config,
                                 data_dir=args.datapath,
                                 work_dir=args.workpath)
    except Exception as e:
        if args.verbosity > 0:
            traceback.print_exc()
        print("Error running lattice:", e, file=sys.stderr)
        return 1

    try:
        result = build_result(impact="FRIB", directory=result_dir, keep=True)
    except Exception as e:
        if args.verbosity > 0:
            traceback.print_exc()
        print("Error building result:", e, file=sys.stderr)
        rm_temp_dir(result_dir)
        return 1

    spos = result.getSPosition()
    energy = result.getEnergy()
    xorbit = result.getOrbit("X")
    yorbit = result.getOrbit("Y")
    xrms = result.getBeamRms("X")
    yrms = result.getBeamRms("Y")
    #zrms = result.getBeamRms("Z")
    xalpha = result.getTwissAlpha("X")
    yalpha = result.getTwissAlpha("Y")
    xemit = result.getEmittance("X")
    yemit = result.getEmittance("Y")
    zemit = result.getEmittance("Z")

    if args.plot:
        try:
            plt.figure(figsize=(16, 10), dpi=80)
            plt.subplot(221)
            plt.title("Beam Orbit")
            plt.plot(spos, xorbit, 'r-', label="X")
            plt.plot(spos, yorbit, 'b-', label="Y")
            plt.xlabel("S [m]")
            plt.ylabel("Beam Position [m]")
            plt.legend(loc="upper left")
            plt.grid()
            plt.subplot(222)
            plt.title("Beam RMS")
            plt.plot(spos, xrms, 'r-', label="X")
            plt.plot(spos, yrms, 'b-', label="Y")
            #plt.plot(zrms[:,0], zrms[:,1], 'g-', label="Z")
            plt.xlabel("S [m]")
            plt.ylabel("Beam RMS [m]")
            plt.legend(loc="upper left")
            plt.grid()
            plt.subplot(223)
            plt.title("Beam Energy")
            plt.plot(spos, energy, 'r-')
            plt.xlabel("S [m]")
            plt.ylabel("Beam Energy [MeV]")
            plt.grid()
            plt.subplot(224)
            plt.title("Beam Emittance")
            plt.plot(spos, xemit, 'r-', label="X")
            plt.plot(spos, yemit, 'b-', label="Y")
            #plt.plot(zemit[:,0], zemit[:,1], 'g-', label="Z")
            plt.xlabel("S [m]")
            plt.ylabel("Beam Emittance [m-rad]")
            plt.legend(loc="upper left")
            plt.grid()
            if args.resultpath is None:
                plt.show()
            else:
                plt.savefig(args.resultpath)
        except Exception as e:
            if args.verbosity > 0:
                traceback.print_exc()
            print("Error generating plot: ", e, file=sys.stderr)
    else:
        # BUG FIX: csvfile must be pre-bound, otherwise the finally clause
        # raises NameError when open() itself fails.
        csvfile = None
        try:
            if args.resultpath is None:
                csvfile = sys.stdout
            else:
                csvfile = open(args.resultpath, "w")
            csvfile.write("# i name s energy codx cody rmsx rmsy alphax alphay emittancex emittancey emittancez TM\r\n")
            csvfile.write("# [m] [eV] [m] [m] [m] [m] \r\n")
            # BUG FIX: xrange is Python 2 only; range works on both 2 and 3
            # (this module already targets 2/3 via the print_function import).
            for idx in range(len(lattice.elements)):
                fields = [str(idx),
                          lattice.elements[idx].name,
                          str(spos[idx]),
                          str(energy[idx]),
                          str(xorbit[idx]),
                          str(yorbit[idx]),
                          str(xrms[idx]),
                          str(yrms[idx]),
                          str(xalpha[idx]),
                          str(yalpha[idx]),
                          str(xemit[idx]),
                          str(yemit[idx]),
                          str(zemit[idx]),
                          "0.0"]  # TM column: placeholder, always 0.0
                csvfile.write(" ".join(fields))
                csvfile.write("\r\n")
        except Exception as e:
            print("Error writing CSV result: ", e, file=sys.stderr)
        finally:
            # never close stdout; csvfile is None when open() failed
            if csvfile is not None and csvfile is not sys.stdout:
                csvfile.close()

    rm_temp_dir(result_dir)
    return 0
| bsd-3-clause |
codorkh/infratopo | topo_input_files/alps/alp_profile.py | 1 | 1997 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 8 14:36:32 2016
@author: dgreen
"""
# alp_profile.py
# PLotting the alpine profile, chosen to give a relatively 'up and over' profile
# from the coordinates 44.27N 10.60E
# Load in the data
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def read_profile(filename):
    """Read a 4-column whitespace-delimited topographic profile.

    Parameters
    ----------
    filename : str or file-like
        Path (or buffer) containing columns: lon, lat, dist, alt.

    Returns
    -------
    pandas.DataFrame
        Columns ``['lon', 'lat', 'dist', 'alt']``.
    """
    # BUG FIX: sep=r'\s*' (zero-or-more) can match the empty string and is
    # rejected/deprecated by newer pandas; r'\s+' is the supported
    # whitespace separator.
    data = pd.read_csv(filename, sep=r'\s+',
                       names=['lon', 'lat', 'dist', 'alt'])
    return data
def read_2D_profile(filename):
    """Read a 2-column whitespace-delimited (distance, altitude) profile.

    Parameters
    ----------
    filename : str or file-like
        Path (or buffer) containing columns: dist, alt.

    Returns
    -------
    pandas.DataFrame
        Columns ``['dist', 'alt']``.
    """
    # BUG FIX: sep=r'\s*' (zero-or-more) can match the empty string and is
    # rejected/deprecated by newer pandas; r'\s+' is the supported
    # whitespace separator.
    data = pd.read_csv(filename, sep=r'\s+', names=['dist', 'alt'])
    return data
# Global plot appearance.
# BUG FIX: 'text.fontsize' was an old alias removed from matplotlib's
# rcParams (modern versions raise KeyError on unknown keys); the supported
# key is 'font.size'.
para = {'axes.labelsize': 18, 'font.size': 18, 'legend.fontsize': 13,
        'xtick.labelsize': 16, 'ytick.labelsize': 16,
        'figure.subplot.left': 0.12, 'figure.subplot.right': 0.98,
        'figure.subplot.bottom': 0.11, 'figure.subplot.top': 0.97}
plt.rcParams.update(para)

# NOTE(review): hard-coded user path -- only works on the author's machine.
dirpath = '/Users/dgreen/Documents/Work/4codor/infratopo/topo_input_files/alps/'
profiledata = read_profile(dirpath+'alp.xydz')

# Plot the raw alpine profile (distance in km vs elevation in m).
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_axes([0.15,0.15,0.8,0.75])
ax1.plot(profiledata['dist'],profiledata['alt'],'k-')
ax1.set_xlabel('Distance (km)')
ax1.set_ylabel('Elevation (m)')
fig.savefig(dirpath+'alpine_profile.png',bbox_inches='tight')

# Write a two-column (distance [m], elevation [m]) file for the 2D solver;
# distances are converted from km to m here.
rzfile = 'alp_2d.dat'
with open(dirpath+rzfile, 'w') as fL1:
    for x in range(len(profiledata['dist'])):
        fL1.write('{:8.1f} {:7.3f}\n'.format(
            profiledata.iloc[x]['dist']*1000., profiledata.iloc[x]['alt']))

# Comparison plot: real alpine profile vs the Gaussian synthetic hill.
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_axes([0.15,0.15,0.8,0.75])
ax1.plot(profiledata['dist'],profiledata['alt'],'k-',label='Alps')
ax1.set_xlabel('Distance (km)')
ax1.set_ylabel('Elevation (m)')
profilegaussdata = read_2D_profile('/Users/dgreen/Documents/Work/4codor/infratopo/topo_input_files/synthetics/gauss_3000m_hill_long.dat')
# synthetic file stores distance in metres -> convert to km for the axis
ax1.plot(profilegaussdata['dist']/1000.,profilegaussdata['alt'],'r-',label='Gaussian Synthetic')
ax1.legend(loc=1)
fig.savefig(dirpath+'alpine_profile_compare.png',bbox_inches='tight') | mit |
odyaka341/nmrglue | examples/plotting/2d_boxes/plot_assignments.py | 10 | 1327 | #! /usr/bin/env python
# Create contour plots of a spectrum with each peak in limits.in labeled
import nmrglue as ng
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm

# plot parameters
cmap = matplotlib.cm.Blues_r    # contour map (colors to use for contours)
contour_start = 30000           # contour level start value
contour_num = 20                # number of contour levels
contour_factor = 1.20           # scaling factor between contour levels
textsize = 6                    # text size of labels

# calculate contour levels (geometric progression)
# BUG FIX: xrange is Python 2 only and raises NameError on Python 3.
cl = [contour_start * contour_factor ** x for x in range(contour_num)]

# read in the data from a NMRPipe file
dic, data = ng.pipe.read("../../common_data/2d_pipe/test.ft2")

# read in the integration limits
# NOTE(review): np.recfromtxt was removed in NumPy 2.0;
# np.genfromtxt(..., dtype=None, encoding=None) is the replacement --
# confirm behaviour before upgrading NumPy.
peak_list = np.recfromtxt("limits.in")

# create the figure
fig = plt.figure()
ax = fig.add_subplot(111)

# plot the contours
ax.contour(data, cl, cmap=cmap,
           extent=(0, data.shape[1] - 1, 0, data.shape[0] - 1))

# loop over the peaks
for name, x0, y0, x1, y1 in peak_list:
    # normalise the box corners so (x0, y0) is the lower-left
    if x0 > x1:
        x0, x1 = x1, x0
    if y0 > y1:
        y0, y1 = y1, y0
    # plot a box around each peak and label
    ax.plot([x0, x1, x1, x0, x0], [y0, y0, y1, y1, y0], 'k')
    ax.text(x1 + 1, y0, name, size=textsize, color='r')

# set limits
ax.set_xlim(1900, 2200)
ax.set_ylim(750, 1400)

# save the figure
fig.savefig("assignments.png")
| bsd-3-clause |
cbertinato/pandas | pandas/core/groupby/generic.py | 1 | 59632 | """
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from collections import OrderedDict, abc, namedtuple
import copy
from functools import partial
from textwrap import dedent
import typing
from typing import Any, Callable, FrozenSet, Iterator, List, Type, Union
import warnings
import numpy as np
from pandas._libs import Timestamp, lib
from pandas.compat import PY36
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_bool, is_datetimelike,
is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar)
from pandas.core.dtypes.missing import isna, notna
from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algorithms
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy, _apply_docs, _transform_template)
from pandas.core.index import Index, MultiIndex
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.sparse.frame import SparseDataFrame
from pandas.plotting import boxplot_frame_groupby
# Named-aggregation spec: ``NamedAgg(column, aggfunc)`` pairs used by the
# keyword form of ``.agg`` (e.g. ``df.groupby(...).agg(out=NamedAgg("a", "sum"))``).
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
def whitelist_method_generator(base_class: Type[GroupBy],
                               klass: Type[FrameOrSeries],
                               whitelist: FrozenSet[str],
                               ) -> Iterator[str]:
    """
    Yields all GroupBy member defs for DataFrame/Series names in whitelist.

    Parameters
    ----------
    base_class : Groupby class
        base class
    klass : DataFrame or Series class
        class where members are defined.
    whitelist : frozenset
        Set of names of klass methods to be constructed

    Returns
    -------
    The generator yields a sequence of strings, each suitable for exec'ing,
    that define implementations of the named methods for DataFrameGroupBy
    or SeriesGroupBy.

    Since we don't want to override methods explicitly defined in the
    base class, any such name is skipped.
    """
    # Template for a read-only property that simply defers to
    # GroupBy.__getattr__; the exact text matters because callers exec it
    # inside a class body.
    property_wrapper_template = \
        """@property
def %(name)s(self) :
    \"""%(doc)s\"""
    return self.__getattr__('%(name)s')"""

    for name in whitelist:
        # don't override anything that was explicitly defined
        # in the base class
        if hasattr(base_class, name):
            continue
        # ugly, but we need the name string itself in the method.
        f = getattr(klass, name)
        doc = f.__doc__
        # only interpolate a real string docstring into the template
        doc = doc if type(doc) == str else ''
        wrapper_template = property_wrapper_template
        params = {'name': name, 'doc': doc}
        yield wrapper_template % params
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
    """
    Yield ``(label, values)`` pairs for each 1-D slice of the grouped
    object, skipping any label listed in ``self.exclusions``.
    """
    if self.axis != 0:
        labels = self.obj.index
        fetch = self.obj.xs
    elif self._selection is None:
        # kludge: no explicit selection -> iterate all columns
        labels = self.obj.columns
        fetch = lambda key: self.obj[key]
    else:
        labels = self._selection_list
        fetch = lambda key: self.obj[key]

    for label in labels:
        if label not in self.exclusions:
            yield label, fetch(label)
def _cython_agg_general(self, how, alt=None, numeric_only=True,
                        min_count=-1):
    # Aggregate block-by-block through the cython machinery, then
    # re-assemble the surviving blocks into a result frame.
    new_items, new_blocks = self._cython_agg_blocks(
        how, alt=alt, numeric_only=numeric_only, min_count=min_count)
    return self._wrap_agged_blocks(new_items, new_blocks)

# block values are aggregated along axis 0
_block_agg_axis = 0
def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
                       min_count=-1):
    """
    Aggregate each block through the cython path, falling back to the
    python aggregator ``alt`` (when given) for blocks the cython path
    cannot handle; blocks with no usable path are dropped.

    Returns
    -------
    (new_items, new_blocks) : re-ordered item locations and the
        aggregated blocks.
    """
    # TODO: the actual managing of mgr_locs is a PITA
    # here, it should happen via BlockManager.combine
    data, agg_axis = self._get_data_to_aggregate()

    if numeric_only:
        data = data.get_numeric_data(copy=False)

    new_blocks = []
    new_items = []
    deleted_items = []
    for block in data.blocks:
        locs = block.mgr_locs.as_array
        try:
            result, _ = self.grouper.aggregate(
                block.values, how, axis=agg_axis, min_count=min_count)
        except NotImplementedError:
            # generally if we have numeric_only=False
            # and non-applicable functions
            # try to python agg
            if alt is None:
                # we cannot perform the operation
                # in an alternate way, exclude the block
                deleted_items.append(locs)
                continue

            # call our grouper again with only this block
            from pandas.core.groupby.groupby import groupby
            obj = self.obj[data.items[locs]]
            s = groupby(obj, self.grouper)
            result = s.aggregate(lambda x: alt(x, axis=self.axis))

        # BUG FIX: this used to live in a ``finally`` clause, which also
        # ran on the ``continue`` path above -- referencing ``result``
        # while it was unbound (NameError on the first block) or coercing
        # a stale result left over from a previous iteration.
        # see if we can cast the block back to the original dtype
        result = block._try_coerce_and_cast_result(result)
        newb = block.make_block(result)

        new_items.append(locs)
        new_blocks.append(newb)

    if len(new_blocks) == 0:
        raise DataError('No numeric types to aggregate')

    # reset the locs in the blocks to correspond to our
    # current ordering
    indexer = np.concatenate(new_items)
    new_items = data.items.take(np.sort(indexer))

    if len(deleted_items):
        # we need to adjust the indexer to account for the
        # items we have removed
        # really should be done in internals :<
        deleted = np.concatenate(deleted_items)
        ai = np.arange(len(data))
        mask = np.zeros(len(data))
        mask[deleted] = 1
        indexer = (ai - mask.cumsum())[indexer]

    offset = 0
    for b in new_blocks:
        loc = len(b.mgr_locs)
        b.mgr_locs = indexer[offset:(offset + loc)]
        offset += loc

    return new_items, new_blocks
def aggregate(self, func, *args, **kwargs):
    # Frame-like .agg entry point.  ``func is None`` together with
    # keyword specs triggers "named aggregation" relabeling.
    _level = kwargs.pop('_level', None)

    relabeling = func is None and _is_multi_agg_with_relabel(**kwargs)
    if relabeling:
        func, columns, order = _normalize_keyword_aggregation(kwargs)
        kwargs = {}
    elif func is None:
        # nicer error message
        raise TypeError("Must provide 'func' or tuples of "
                        "'(column, aggfunc).")

    result, how = self._aggregate(func, _level=_level, *args, **kwargs)
    if how is None:
        return result

    if result is None:
        # grouper specific aggregations
        if self.grouper.nkeys > 1:
            return self._python_agg_general(func, *args, **kwargs)
        else:
            # try to treat as if we are passing a list
            try:
                # wrapping ``func`` in a one-element list only works when
                # there are no extra positional/keyword arguments
                assert not args and not kwargs
                result = self._aggregate_multiple_funcs(
                    [func], _level=_level, _axis=self.axis)

                result.columns = Index(
                    result.columns.levels[0],
                    name=self._selected_obj.columns.name)

                if isinstance(self.obj, SparseDataFrame):
                    # Backwards compat for groupby.agg() with sparse
                    # values. concat no longer converts DataFrame[Sparse]
                    # to SparseDataFrame, so we do it here.
                    result = SparseDataFrame(result._data)
            except Exception:
                result = self._aggregate_generic(func, *args, **kwargs)

    if not self.as_index:
        self._insert_inaxis_grouper_inplace(result)
        result.index = np.arange(len(result))

    if relabeling:
        # reorder/rename columns per the named-aggregation spec
        result = result[order]
        result.columns = columns

    return result._convert(datetime=True)

agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
    # Python-speed fallback: apply ``func`` to each whole group,
    # degrading to item-by-item aggregation when that fails.
    if self.grouper.nkeys != 1:
        raise AssertionError('Number of keys must be 1')

    axis = self.axis
    obj = self._obj_with_exclusions

    result = OrderedDict()
    if axis != obj._info_axis_number:
        try:
            for name, data in self:
                result[name] = self._try_cast(func(data, *args, **kwargs),
                                              data)
        except Exception:
            return self._aggregate_item_by_item(func, *args, **kwargs)
    else:
        for name in self.indices:
            try:
                data = self.get_group(name, obj=obj)
                result[name] = self._try_cast(func(data, *args, **kwargs),
                                              data)
            except Exception:
                # last resort: apply ``func`` slice-wise within the group
                wrapper = lambda x: func(x, *args, **kwargs)
                result[name] = data.apply(wrapper, axis=axis)

    return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
    # abstract: concrete groupby subclasses must implement result wrapping
    raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
    # only for axis==0
    # Aggregate one column at a time; columns raising ValueError/TypeError
    # are dropped, and the last TypeError is re-raised only if *every*
    # column failed with one (GH6337).
    obj = self._obj_with_exclusions
    result = OrderedDict()
    cannot_agg = []
    errors = None
    for item in obj:
        try:
            data = obj[item]
            colg = SeriesGroupBy(data, selection=item,
                                 grouper=self.grouper)

            cast = self._transform_should_cast(func)

            result[item] = colg.aggregate(func, *args, **kwargs)
            if cast:
                result[item] = self._try_cast(result[item], data)

        except ValueError:
            cannot_agg.append(item)
            continue
        except TypeError as e:
            cannot_agg.append(item)
            errors = e
            continue

    result_columns = obj.columns
    if cannot_agg:
        result_columns = result_columns.drop(cannot_agg)

        # GH6337
        if not len(result_columns) and errors is not None:
            raise errors

    return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
    """
    Choose the keys (index/columns) for wrapped aggregation output:
    reuse ``labels`` when lengths match, otherwise sort the output keys,
    rebuilding a MultiIndex when ``labels`` is one.
    """
    if len(output) == len(labels):
        output_keys = labels
    else:
        # sorted() already orders the keys; the old code re-sorted the
        # result inside a try/except, which was redundant dead work.
        output_keys = sorted(output)

    if isinstance(labels, MultiIndex):
        output_keys = MultiIndex.from_tuples(output_keys,
                                             names=labels.names)

    return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
    # Assemble the per-group results of ``apply`` into a DataFrame/Series,
    # choosing the wrapping strategy from the type of the first non-None
    # group result.
    from pandas.core.index import _all_indexes_same
    from pandas.core.tools.numeric import to_numeric

    if len(keys) == 0:
        return DataFrame(index=keys)

    key_names = self.grouper.names

    # GH12824.
    def first_not_none(values):
        # first non-None element of ``values``, or None if all are None
        try:
            return next(com._not_none(*values))
        except StopIteration:
            return None

    v = first_not_none(values)

    if v is None:
        # GH9684. If all values are None, then this will throw an error.
        # We'd prefer it return an empty dataframe.
        return DataFrame()
    elif isinstance(v, DataFrame):
        return self._concat_objects(keys, values,
                                    not_indexed_same=not_indexed_same)
    elif self.grouper.groupings is not None:
        if len(self.grouper.groupings) > 1:
            key_index = self.grouper.result_index
        else:
            ping = self.grouper.groupings[0]
            if len(keys) == ping.ngroups:
                key_index = ping.group_index
                key_index.name = key_names[0]

                key_lookup = Index(keys)
                indexer = key_lookup.get_indexer(key_index)

                # reorder the values
                values = [values[i] for i in indexer]
            else:
                key_index = Index(keys, name=key_names[0])

            # don't use the key indexer
            if not self.as_index:
                key_index = None

        # make Nones an empty object
        v = first_not_none(values)
        if v is None:
            return DataFrame()
        elif isinstance(v, NDFrame):
            # replace missing group results with empty frames/series of
            # the same shape signature so they concatenate cleanly
            values = [
                x if x is not None else
                v._constructor(**v._construct_axes_dict())
                for x in values
            ]

        v = values[0]

        if isinstance(v, (np.ndarray, Index, Series)):
            if isinstance(v, Series):
                applied_index = self._selected_obj._get_axis(self.axis)
                all_indexed_same = _all_indexes_same([
                    x.index for x in values
                ])
                singular_series = (len(values) == 1 and
                                   applied_index.nlevels == 1)

                # GH3596
                # provide a reduction (Frame -> Series) if groups are
                # unique
                if self.squeeze:
                    # assign the name to this series
                    if singular_series:
                        values[0].name = keys[0]

                        # GH2893
                        # we have series in the values array, we want to
                        # produce a series:
                        # if any of the sub-series are not indexed the same
                        # OR we don't have a multi-index and we have only a
                        # single values
                        return self._concat_objects(
                            keys, values, not_indexed_same=not_indexed_same
                        )

                    # still a series
                    # path added as of GH 5545
                    elif all_indexed_same:
                        from pandas.core.reshape.concat import concat
                        return concat(values)

                if not all_indexed_same:
                    # GH 8467
                    return self._concat_objects(
                        keys, values, not_indexed_same=True,
                    )

            try:
                if self.axis == 0:
                    # GH6124 if the list of Series have a consistent name,
                    # then propagate that name to the result.
                    index = v.index.copy()
                    if index.name is None:
                        # Only propagate the series name to the result
                        # if all series have a consistent name. If the
                        # series do not have a consistent name, do
                        # nothing.
                        names = {v.name for v in values}
                        if len(names) == 1:
                            index.name = list(names)[0]

                    # normally use vstack as its faster than concat
                    # and if we have mi-columns
                    if (isinstance(v.index, MultiIndex) or
                            key_index is None or
                            isinstance(key_index, MultiIndex)):
                        stacked_values = np.vstack([
                            np.asarray(v) for v in values
                        ])
                        result = DataFrame(stacked_values, index=key_index,
                                           columns=index)
                    else:
                        # GH5788 instead of stacking; concat gets the
                        # dtypes correct
                        from pandas.core.reshape.concat import concat
                        result = concat(values, keys=key_index,
                                        names=key_index.names,
                                        axis=self.axis).unstack()
                        result.columns = index
                else:
                    stacked_values = np.vstack([np.asarray(v)
                                                for v in values])
                    result = DataFrame(stacked_values.T, index=v.index,
                                       columns=key_index)
            except (ValueError, AttributeError):
                # GH1738: values is list of arrays of unequal lengths fall
                # through to the outer else caluse
                return Series(values, index=key_index,
                              name=self._selection_name)

            # if we have date/time like in the original, then coerce dates
            # as we are stacking can easily have object dtypes here
            so = self._selected_obj
            if so.ndim == 2 and so.dtypes.apply(is_datetimelike).any():
                result = result.apply(
                    lambda x: to_numeric(x, errors='ignore'))
                date_cols = self._selected_obj.select_dtypes(
                    include=['datetime', 'timedelta']).columns
                date_cols = date_cols.intersection(result.columns)
                result[date_cols] = (result[date_cols]
                                     ._convert(datetime=True,
                                               coerce=True))
            else:
                result = result._convert(datetime=True)

            return self._reindex_output(result)

        # values are not series or array-like but scalars
        else:
            # only coerce dates if we find at least 1 datetime
            coerce = any(isinstance(x, Timestamp) for x in values)
            # self._selection_name not passed through to Series as the
            # result should not take the name of original selection
            # of columns
            return (Series(values, index=key_index)
                    ._convert(datetime=True,
                              coerce=coerce))

    else:
        # Handle cases like BinGrouper
        return self._concat_objects(keys, values,
                                    not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
    # General (python-speed) transform: apply ``func`` group-by-group,
    # broadcasting Series results back over the group's shape, then
    # concatenate in the original order.
    from pandas.core.reshape.concat import concat

    applied = []
    obj = self._obj_with_exclusions
    gen = self.grouper.get_iterator(obj, axis=self.axis)
    fast_path, slow_path = self._define_paths(func, *args, **kwargs)

    path = None
    for name, group in gen:
        object.__setattr__(group, 'name', name)

        if path is None:
            # Try slow path and fast path.
            try:
                path, res = self._choose_path(fast_path, slow_path, group)
            except TypeError:
                return self._transform_item_by_item(obj, fast_path)
            except ValueError:
                msg = 'transform must return a scalar value for each group'
                raise ValueError(msg)
        else:
            res = path(group)

        if isinstance(res, Series):
            # we need to broadcast across the
            # other dimension; this will preserve dtypes
            # GH14457
            if not np.prod(group.shape):
                # empty group: nothing to broadcast
                continue
            elif res.index.is_(obj.index):
                r = concat([res] * len(group.columns), axis=1)
                r.columns = group.columns
                r.index = group.index
            else:
                r = DataFrame(
                    np.concatenate([res.values] * len(group.index)
                                   ).reshape(group.shape),
                    columns=group.columns, index=group.index)

            applied.append(r)
        else:
            applied.append(res)

    concat_index = obj.columns if self.axis == 0 else obj.index
    concatenated = concat(applied, join_axes=[concat_index],
                          axis=self.axis, verify_integrity=False)
    return self._set_result_index_ordered(concatenated)
@Substitution(klass='DataFrame', selected='')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
    # Try the cythonized fast path for known function names; otherwise
    # fall back to the general python-speed transform.

    # optimized transforms
    func = self._is_cython_func(func) or func
    if isinstance(func, str):
        if func in base.cython_transforms:
            # cythonized transform
            return getattr(self, func)(*args, **kwargs)
        else:
            # cythonized aggregation and merge
            result = getattr(self, func)(*args, **kwargs)
    else:
        return self._transform_general(func, *args, **kwargs)

    # a reduction transform
    if not isinstance(result, DataFrame):
        return self._transform_general(func, *args, **kwargs)

    obj = self._obj_with_exclusions

    # nuiscance columns
    if not result.columns.equals(obj.columns):
        return self._transform_general(func, *args, **kwargs)

    return self._transform_fast(result, obj, func)
def _transform_fast(self, result, obj, func_nm):
    """
    Fast transform path for aggregations.

    Broadcast each aggregated column back to the shape of ``obj`` by
    taking on the group ids, optionally casting back to the source
    dtype (e.g. when groups had no observations).
    """
    should_cast = self._transform_should_cast(func_nm)

    # for each col, reshape to the size of the original frame by a take
    ids, _, ngroup = self.grouper.group_info
    arrays = []
    for pos in range(len(result.columns)):
        col = algorithms.take_1d(result.iloc[:, pos].values, ids)
        if should_cast:
            col = self._try_cast(col, obj.iloc[:, pos])
        arrays.append(col)

    return DataFrame._from_arrays(arrays, columns=result.columns,
                                  index=obj.index)
def _define_paths(self, func, *args, **kwargs):
    """
    Build the (fast_path, slow_path) candidate callables used by
    ``_choose_path``: the fast path invokes ``func`` on the whole group
    at once, the slow path applies it slice-wise via ``.apply``.
    """
    if isinstance(func, str):
        def fast_path(group):
            return getattr(group, func)(*args, **kwargs)

        def slow_path(group):
            return group.apply(
                lambda x: getattr(x, func)(*args, **kwargs),
                axis=self.axis)
    else:
        def fast_path(group):
            return func(group, *args, **kwargs)

        def slow_path(group):
            return group.apply(
                lambda x: func(x, *args, **kwargs), axis=self.axis)

    return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
    # Run the slow path first (its result is always returned for this
    # group); adopt the fast path for subsequent groups only when it
    # yields identical columns and numerically identical values.
    path = slow_path
    res = slow_path(group)

    # if we make it here, test if we can use the fast path
    try:
        res_fast = fast_path(group)

        # verify fast path does not change columns (and names), otherwise
        # its results cannot be joined with those of the slow path
        if res_fast.columns != group.columns:
            return path, res
        # verify numerical equality with the slow path
        if res.shape == res_fast.shape:
            res_r = res.values.ravel()
            res_fast_r = res_fast.values.ravel()
            mask = notna(res_r)
            if (res_r[mask] == res_fast_r[mask]).all():
                path = fast_path
    except Exception:
        # fast path is best-effort: any failure keeps the slow path
        pass
    return path, res
def _transform_item_by_item(self, obj, wrapper):
    # iterate through columns, transforming each independently and
    # silently dropping any column the transform cannot handle
    output = {}
    inds = []
    for i, col in enumerate(obj):
        try:
            output[col] = self[col].transform(wrapper)
            inds.append(i)
        except Exception:
            pass

    if len(output) == 0:  # pragma: no cover
        raise TypeError('Transform function invalid for data types')

    columns = obj.columns
    if len(output) < len(obj.columns):
        # keep only the columns that transformed successfully
        columns = columns.take(inds)

    return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):  # noqa
    """
    Return a copy of a DataFrame excluding elements from groups that
    do not satisfy the boolean criterion specified by func.

    Parameters
    ----------
    func : function
        Function to apply to each subframe. Should return True or False.
    dropna : bool, default True
        Drop groups that do not pass the filter. True by default;
        if False, groups that evaluate False are filled with NaNs.

    Returns
    -------
    filtered : DataFrame

    Notes
    -----
    Each subframe is endowed the attribute 'name' in case you need to know
    which group you are working on.

    Examples
    --------
    >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
    ...                           'foo', 'bar'],
    ...                    'B' : [1, 2, 3, 4, 5, 6],
    ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
    >>> grouped = df.groupby('A')
    >>> grouped.filter(lambda x: x['B'].mean() > 3.)
         A  B    C
    1  bar  2  5.0
    3  bar  4  1.0
    5  bar  6  9.0
    """
    indices = []

    obj = self._selected_obj
    gen = self.grouper.get_iterator(obj, axis=self.axis)

    for name, group in gen:
        object.__setattr__(group, 'name', name)

        res = func(group, *args, **kwargs)

        try:
            # reduce a 1-element result to a scalar when possible
            res = res.squeeze()
        except AttributeError:  # allow e.g., scalars and frames to pass
            pass

        # interpret the result of the filter
        if is_bool(res) or (is_scalar(res) and isna(res)):
            if res and notna(res):
                indices.append(self._get_index(name))
        else:
            # non scalars aren't allowed
            raise TypeError("filter function returned a %s, "
                            "but expected a scalar bool" %
                            type(res).__name__)

    return self._apply_filter(indices, dropna)
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = base.series_apply_whitelist
for _def_str in whitelist_method_generator(
GroupBy, Series, _apply_whitelist):
exec(_def_str)
@property
def _selection_name(self):
    """
    The name of the selected column, falling back to the Series' own
    name when nothing was selected (a SeriesGroupBy is single-column
    by definition).
    """
    return self.obj.name if self._selection is None else self._selection
_agg_see_also_doc = dedent("""
See Also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
""")
_agg_examples_doc = dedent("""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
""")
@Appender(_apply_docs['template']
          .format(input='series',
                  examples=_apply_docs['series_examples']))
def apply(self, func, *args, **kwargs):
    # thin delegation to GroupBy.apply; the docstring is injected by
    # the @Appender decorator above
    return super().apply(func, *args, **kwargs)
@Substitution(see_also=_agg_see_also_doc,
              examples=_agg_examples_doc,
              versionadded='',
              klass='Series',
              axis='')
@Appender(_shared_docs['aggregate'])
def aggregate(self, func_or_funcs=None, *args, **kwargs):
    # Series .agg entry point; ``func_or_funcs is None`` with keyword
    # specs triggers named aggregation (output columns named by keyword).
    _level = kwargs.pop('_level', None)

    relabeling = func_or_funcs is None
    columns = None
    no_arg_message = ("Must provide 'func_or_funcs' or named "
                      "aggregation **kwargs.")
    if relabeling:
        columns = list(kwargs)
        if not PY36:
            # sort for 3.5 and earlier
            columns = list(sorted(columns))

        func_or_funcs = [kwargs[col] for col in columns]
        kwargs = {}
        if not columns:
            raise TypeError(no_arg_message)

    if isinstance(func_or_funcs, str):
        return getattr(self, func_or_funcs)(*args, **kwargs)

    if isinstance(func_or_funcs, abc.Iterable):
        # Catch instances of lists / tuples
        # but not the class list / tuple itself.
        ret = self._aggregate_multiple_funcs(func_or_funcs,
                                             (_level or 0) + 1)
        if relabeling:
            ret.columns = columns
    else:
        cyfunc = self._is_cython_func(func_or_funcs)
        if cyfunc and not args and not kwargs:
            return getattr(self, cyfunc)()

        if self.grouper.nkeys > 1:
            return self._python_agg_general(func_or_funcs, *args, **kwargs)

        try:
            return self._python_agg_general(func_or_funcs, *args, **kwargs)
        except Exception:
            # fall back to name-by-name aggregation
            result = self._aggregate_named(func_or_funcs, *args, **kwargs)

        index = Index(sorted(result), name=self.grouper.names[0])
        ret = Series(result, index=index)

    if not self.as_index:  # pragma: no cover
        print('Warning, ignoring as_index=True')

    # _level handled at higher
    if not _level and isinstance(ret, dict):
        from pandas import concat
        ret = concat(ret, axis=1)
    return ret

agg = aggregate
    def _aggregate_multiple_funcs(self, arg, _level):
        # Apply several aggregation functions to the selected series and
        # assemble the results into a DataFrame (one column per function).
        if isinstance(arg, dict):

            # show the deprecation, but only if we
            # have not shown a higher level one
            # GH 15931
            if isinstance(self._selected_obj, Series) and _level <= 1:
                msg = dedent("""\
using a dict on a Series for aggregation
is deprecated and will be removed in a future version. Use \
named aggregation instead.
>>> grouper.agg(name_1=func_1, name_2=func_2)
""")
                warnings.warn(msg, FutureWarning, stacklevel=3)

            columns = list(arg.keys())
            arg = arg.items()
        elif any(isinstance(x, (tuple, list)) for x in arg):
            # pairs of (name, func); bare entries become (x, x)
            arg = [(x, x) if not isinstance(x, (tuple, list)) else x
                   for x in arg]
            # indicated column order
            columns = next(zip(*arg))
        else:
            # list of functions / function names
            columns = []
            for f in arg:
                columns.append(com.get_callable_name(f) or f)
            arg = zip(columns, arg)

        results = OrderedDict()
        for name, func in arg:
            obj = self
            if name in results:
                # duplicate output names would silently overwrite results
                raise SpecificationError(
                    'Function names must be unique, found multiple named '
                    '{}'.format(name))

            # reset the cache so that we
            # only include the named selection
            if name in self._selected_obj:
                obj = copy.copy(obj)
                obj._reset_cache()
                obj._selection = name
            results[name] = obj.aggregate(func)

        if any(isinstance(x, DataFrame) for x in results.values()):
            # let higher level handle
            if _level:
                return results

        return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
result = self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
result = self._reindex_output(DataFrame(values, index=index))
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
if isinstance(values[0], Series):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = Series(data=values,
index=_get_index(),
name=self._selection_name)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
result = OrderedDict()
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
@Substitution(klass='Series', selected='A.')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
func = self._is_cython_func(func) or func
# if string function
if isinstance(func, str):
if func in base.cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs), func)
# reg transform
klass = self._selected_obj.__class__
results = []
wrapper = lambda x: func(x, *args, **kwargs)
for name, group in self:
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
indexer = self._get_index(name)
s = klass(res, indexer)
results.append(s)
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
result = concat(results).sort_index()
else:
result = Series()
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* udfs
# the cython take a different path (and casting)
dtype = self._selected_obj.dtype
if is_numeric_dtype(dtype):
result = maybe_downcast_to_dtype(result, dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
return result
def _transform_fast(self, func, func_nm):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, str):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = self._transform_should_cast(func_nm)
out = algorithms.take_1d(func()._values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notna(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notna(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
    def nunique(self, dropna=True):
        """
        Return number of unique elements in the group.

        Returns
        -------
        Series
            Number of unique values within each group.
        """
        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()

        # lexsort so that equal values within a group become adjacent
        try:
            sorter = np.lexsort((val, ids))
        except TypeError:  # catches object dtypes
            msg = 'val.dtype must be object, got {}'.format(val.dtype)
            assert val.dtype == object, msg
            # factorize object values so they become sortable integers
            val, _ = algorithms.factorize(val, sort=False)
            sorter = np.lexsort((val, ids))
            _isna = lambda a: a == -1  # factorize encodes missing as -1
        else:
            _isna = isna
        ids, val = ids[sorter], val[sorter]

        # group boundaries are where group ids change
        # unique observations are where sorted values change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        inc = np.r_[1, val[1:] != val[:-1]]

        # 1st item of each group is a new unique observation
        mask = _isna(val)
        if dropna:
            inc[idx] = 1
            inc[mask] = 0
        else:
            # only the first of a run of missing values counts
            inc[mask & np.r_[False, mask[:-1]]] = 0
            inc[idx] = 1

        # per-group unique counts via segmented sum
        out = np.add.reduceat(inc, idx).astype('int64', copy=False)
        if len(ids):
            # NaN/NaT group exists if the head of ids is -1,
            # so remove it from res and exclude its index from idx
            if ids[0] == -1:
                res = out[1:]
                idx = idx[np.flatnonzero(idx)]
            else:
                res = out
        else:
            res = out[1:]
        ri = self.grouper.result_index

        # we might have duplications among the bins
        if len(res) != len(ri):
            res, out = np.zeros(len(ri), dtype=out.dtype), res
            res[ids[idx]] = out

        return Series(res,
                      index=ri,
                      name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
    def value_counts(self, normalize=False, sort=True, ascending=False,
                     bins=None, dropna=True):
        # Per-group value_counts implemented with vectorized index algebra
        # rather than a python-level apply, for speed.
        from pandas.core.reshape.tile import cut
        from pandas.core.reshape.merge import _get_join_indexers

        if bins is not None and not np.iterable(bins):
            # scalar bins cannot be done at top level
            # in a backward compatible way
            return self.apply(Series.value_counts,
                              normalize=normalize,
                              sort=sort,
                              ascending=ascending,
                              bins=bins)

        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()

        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]

        if bins is None:
            lab, lev = algorithms.factorize(val, sort=True)
            llab = lambda lab, inc: lab[inc]
        else:
            # lab is a Categorical with categories an IntervalIndex
            lab = cut(Series(val), bins, include_lowest=True)
            lev = lab.cat.categories
            lab = lev.take(lab.cat.codes)
            llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]

        if is_interval_dtype(lab):
            # TODO: should we do this inside II?
            sorter = np.lexsort((lab.left, lab.right, ids))
        else:
            sorter = np.lexsort((lab, ids))
        ids, lab = ids[sorter], lab[sorter]

        # group boundaries are where group ids change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]

        # new values are where sorted labels change
        lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
        inc = np.r_[True, lchanges]
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts

        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))

        # multi-index components
        labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
        levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
        names = self.grouper.names + [self._selection_name]

        if dropna:
            mask = labels[-1] != -1
            if mask.all():
                dropna = False
            else:
                out, labels = out[mask], [label[mask] for label in labels]

        if normalize:
            out = out.astype('float')
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                # subtract missing observations from the group sizes
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc

        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, labels[-1] = out[sorter], labels[-1][sorter]

        if bins is None:
            mi = MultiIndex(levels=levels, codes=labels, names=names,
                            verify_integrity=False)

            if is_integer_dtype(out):
                out = ensure_int64(out)
            return Series(out, index=mi, name=self._selection_name)

        # for compat. with libgroupby.value_counts need to ensure every
        # bin is present at every index level, null filled with zeros
        diff = np.zeros(len(out), dtype='bool')
        for lab in labels[:-1]:
            diff |= np.r_[True, lab[1:] != lab[:-1]]

        ncat, nbin = diff.sum(), len(levels[-1])

        left = [np.repeat(np.arange(ncat), nbin),
                np.tile(np.arange(nbin), ncat)]
        right = [diff.cumsum() - 1, labels[-1]]

        # left join so every (group, bin) pair appears; missing become 0
        _, idx = _get_join_indexers(left, right, sort=False, how='left')
        out = np.where(idx != -1, out[idx], 0)

        if sort:
            sorter = np.lexsort((out if ascending else -out, left[0]))
            out, left[-1] = out[sorter], left[-1][sorter]

        # build the multi-index w/ full levels
        codes = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
        codes.append(left[-1])

        mi = MultiIndex(levels=levels, codes=codes, names=names,
                        verify_integrity=False)

        if is_integer_dtype(out):
            out = ensure_int64(out)
        return Series(out, index=mi, name=self._selection_name)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64')
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None):
"""Calculate pct_change of each value to previous entry in group"""
# TODO: Remove this conditional when #23918 is fixed
if freq:
return self.apply(lambda x: x.pct_change(periods=periods,
fill_method=fill_method,
limit=limit, freq=freq))
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.labels)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
class DataFrameGroupBy(NDFrameGroupBy):
    # DataFrame-specific groupby: per-column aggregation/transformation.

    # methods whose names are forwarded verbatim to the wrapped DataFrame
    _apply_whitelist = base.dataframe_apply_whitelist

    #
    # Make class defs of attributes on DataFrameGroupBy whitelist.
    for _def_str in whitelist_method_generator(
            GroupBy, DataFrame, _apply_whitelist):
        exec(_def_str)

    # block-wise aggregation runs along axis 1 (columns) internally
    _block_agg_axis = 1
_agg_see_also_doc = dedent("""
See Also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
""")
_agg_examples_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': np.random.randn(4)})
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590716
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 -1.956929
2 3 -0.322183
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more.
""")
@Substitution(see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded='',
klass='DataFrame',
axis='')
@Appender(_shared_docs['aggregate'])
def aggregate(self, arg=None, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(subset, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index,
observed=self.observed)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper,
observed=self.observed)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns,
columns=result_index).T
else:
return DataFrame(result, index=obj.index,
columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return DataFrame(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[-1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
DataFrame
Count of values within each group.
"""
from pandas.core.dtypes.missing import _isna_ndarraylike as _isna
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~_isna(np.atleast_2d(blk.get_values())))
for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(
lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for
each column.
.. versionadded:: 0.20.0
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
obj = self._selected_obj
def groupby_series(obj, col=None):
return SeriesGroupBy(obj,
selection=col,
grouper=self.grouper).nunique(dropna=dropna)
if isinstance(obj, Series):
results = groupby_series(obj)
else:
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
results.columns.names = obj.columns.names
if not self.as_index:
results.index = ibase.default_index(len(results))
return results
boxplot = boxplot_frame_groupby
def _is_multi_agg_with_relabel(**kwargs):
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> _is_multi_agg_with_relabel(a='max')
False
>>> _is_multi_agg_with_relabel(a_max=('a', 'max'),
... a_min=('a', 'min'))
True
>>> _is_multi_agg_with_relabel()
False
"""
return all(
isinstance(v, tuple) and len(v) == 2
for v in kwargs.values()
) and kwargs
def _normalize_keyword_aggregation(kwargs):
    """
    Normalize user-provided "named aggregation" kwargs.

    Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
    to the old OrderedDict[str, List[scalar]]].

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    aggspec : dict
        The transformed kwargs.
    columns : List[str]
        The user-provided keys.
    order : List[Tuple[str, str]]
        Pairs of the input and output column names.

    Examples
    --------
    >>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
    (OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
    """
    if not PY36:
        # dict ordering is not guaranteed before 3.6; sort for determinism
        kwargs = OrderedDict(sorted(kwargs.items()))

    # Normalize the aggregation functions as Dict[column, List[func]],
    # process normally, then fixup the names.
    # TODO(Py35): When we drop python 3.5, change this to
    # defaultdict(list)
    aggspec = OrderedDict()  # type: typing.OrderedDict[str, List[AggScalar]]
    order = []
    columns, pairs = list(zip(*kwargs.items()))

    for name, (column, aggfunc) in zip(columns, pairs):
        if column not in aggspec:
            aggspec[column] = []
        aggspec[column].append(aggfunc)
        order.append((column,
                      com.get_callable_name(aggfunc) or aggfunc))
    return aggspec, columns, order
| bsd-3-clause |
Yanakz/Caption | coco.py | 1 | 15261 | __author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. Version 1.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import copy
class COCO:
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset
        self.dataset = {}
        self.anns = []
        self.imgToAnns = {}
        self.catToImgs = {}
        self.imgs = []
        self.cats = []
        if not annotation_file == None:
            # NOTE(review): Python-2 print statements; this module predates py3.
            print 'loading annotations into memory...'
            time_t = datetime.datetime.utcnow()
            dataset = json.load(open(annotation_file, 'r'))
            print datetime.datetime.utcnow() - time_t
            self.dataset = dataset
            self.createIndex()
    def createIndex(self):
        # Build lookup tables: ann id -> ann, img id -> img,
        # img id -> [anns], and (instances only) cat id -> cat /
        # cat id -> [img ids].
        print 'creating index...'
        imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
        anns = {ann['id']: [] for ann in self.dataset['annotations']}
        for ann in self.dataset['annotations']:
            imgToAnns[ann['image_id']] += [ann]
            anns[ann['id']] = ann

        imgs = {im['id']: {} for im in self.dataset['images']}
        for img in self.dataset['images']:
            imgs[img['id']] = img

        cats = []
        catToImgs = []
        if self.dataset['type'] == 'instances':
            # category indexes only exist for detection/segmentation sets
            cats = {cat['id']: [] for cat in self.dataset['categories']}
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat
            catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']] += [ann['image_id']]

        print 'index created!'

        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.datset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if self.dataset['type'] == 'instances':
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
    def showAnns(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if self.dataset['type'] == 'instances':
            ax = plt.gca()
            polygons = []
            color = []
            for ann in anns:
                # random display color per annotation
                c = np.random.random((1, 3)).tolist()[0]
                if type(ann['segmentation']) == list:
                    # polygon
                    for seg in ann['segmentation']:
                        poly = np.array(seg).reshape((len(seg)/2, 2))
                        polygons.append(Polygon(poly, True,alpha=0.4))
                        color.append(c)
                else:
                    # mask
                    mask = COCO.decodeMask(ann['segmentation'])
                    img = np.ones( (mask.shape[0], mask.shape[1], 3) )
                    if ann['iscrowd'] == 1:
                        # fixed green-ish tint for crowd regions
                        color_mask = np.array([2.0,166.0,101.0])/255
                    if ann['iscrowd'] == 0:
                        color_mask = np.random.random((1, 3)).tolist()[0]
                    for i in range(3):
                        img[:,:,i] = color_mask[i]
                    # overlay the mask at 50% alpha
                    ax.imshow(np.dstack( (img, mask*0.5) ))
            p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
            ax.add_collection(p)
        if self.dataset['type'] == 'captions':
            for ann in anns:
                print ann['caption']
    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param resFile (str) : file name of result file
        :return: res (obj)   : result api object
        """
        res = COCO()
        # results share the images/info/licenses of the ground-truth set
        res.dataset['images'] = [img for img in self.dataset['images']]
        res.dataset['info'] = copy.deepcopy(self.dataset['info'])
        res.dataset['type'] = copy.deepcopy(self.dataset['type'])
        res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])

        print 'Loading and preparing results... '
        time_t = datetime.datetime.utcnow()
        anns = json.load(open(resFile))
        assert type(anns) == list, 'results in not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
            'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            # caption results: keep only images present in both sets
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            # box results: synthesize a rectangular segmentation and area
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
                ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2]*bb[3]
                ann['id'] = id
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                # area = total length of the foreground (odd-indexed) runs
                ann['area']=sum(ann['segmentation']['counts'][2:-1:2])
                ann['bbox'] = []
                ann['id'] = id
                ann['iscrowd'] = 0
        print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds())

        res.dataset['annotations'] = anns
        res.createIndex()
        return res
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
R['counts'][pos]
M[n] = val
n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
@staticmethod
def segToMask( S, h, w ):
    """
    Convert polygon segmentation to binary mask.
    :param S (float array) : polygon segmentation mask
    :param h (int) : target mask height
    :param w (int) : target mask width
    :return: M (bool 2D array) : binary mask
    """
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the supported spelling of the same dtype.
    M = np.zeros((h, w), dtype=bool)
    for s in S:
        N = len(s)
        # Vertices are interleaved [x0, y0, x1, y1, ...]; clip to the mask
        # bounds before rasterizing with skimage.draw.polygon.
        rr, cc = polygon(np.array(s[1:N:2]).clip(max=h-1), \
                np.array(s[0:N:2]).clip(max=w-1)) # (y, x)
        M[rr, cc] = 1
    return M
IFDYS/IO_MPI | view_result.py | 1 | 6681 | #!/usr/bin/env python
from numpy import *
from matplotlib.pyplot import *
import matplotlib.pylab as pylab
import os
import time
import re
import obspy.signal
from scipy import signal
def read_slice(fname):
    """Return (slice_nx, slice_ny, slice_nz) parsed from a slice file.

    Line 1 of the file holds the three slice counts; lines 2-4 (the
    slice coordinate lists) are read past but not used here.
    """
    with open(fname) as handle:
        raw_nx, raw_ny, raw_nz = handle.readline().split()
        handle.readline()  # x slice coordinates (unused)
        handle.readline()  # y slice coordinates (unused)
        handle.readline()  # z slice coordinates (unused)
    return int(raw_nx), int(raw_ny), int(raw_nz)
def read_rec(frec):
    """Read the receiver count from *frec* into the module global ``nrec``.

    The file's first line is expected to hold the receiver count.
    """
    global nrec
    with open(frec) as fp:
        # BUG FIX: the original called ``file.readline()`` (the Python 2
        # builtin *type*, not the opened handle ``fp``), which raises at
        # runtime; read from the handle instead.
        nrec = int(fp.readline().strip('\n'))
def read_par():
# Parse the FDTD parameter file 'par.in' strictly sequentially: the file
# alternates one header/label line with one value line, so every bare
# fpar.readline() skips a header.  Results are published via globals.
# NOTE: dx,dy,dz,dt stay *strings* here; callers convert (e.g. double(dt)).
global nx,ny,nz,slice_nx,slice_ny,slice_nz,nt,dx,dy,dz,dt
with open('par.in') as fpar:
fpar.readline()
dx,dy,dz,dt = fpar.readline().split()
print 'dx dy dz dt: ',dx,dy,dz,dt
fpar.readline()
nx,ny,nz,nt = fpar.readline().split()
# Grid dimensions and time-step count are used as array shapes -> ints.
nx = int(nx);ny = int(ny);nz = int(nz);nt=int(nt)
print 'nx ny nz nt: ',nx,ny,nz,nt
fpar.readline()
nt_src = fpar.readline()
print 'nt of src: ',nt_src
fpar.readline()
step_t_wavefield,step_x_wavefield = fpar.readline().split()
print 'output time step and space step of wavefidld: ',step_t_wavefield,step_x_wavefield
fpar.readline()
step_slice = fpar.readline()
print 'output step of slice: ',step_slice
fpar.readline()
npml_x,npml_y,npml_z= fpar.readline().split()
print 'npml x y z: ',npml_x,npml_y,npml_z
fpar.readline()
fpar.readline() #pml m kapxmax kapymax kapzmax alpha
fpar.readline()
# Remaining entries are auxiliary input-file names referenced by the solver.
fsrc= fpar.readline().strip('\n')
print 'src.in: ',fsrc
fpar.readline()
frec= fpar.readline().strip('\n')
print 'rec.in: ',frec
fpar.readline()
feps = fpar.readline().strip('\n')
fpar.readline()
fmu = fpar.readline().strip('\n')
fpar.readline()
fsig= fpar.readline().strip('\n')
fpar.readline()
fslice= fpar.readline().strip('\n')
# Slice counts come from the separate slice-definition file.
slice_nx,slice_ny,slice_nz = read_slice(fslice)
def view_slice():
# Render every saved x/y/z slice snapshot to a PNG.  The three `ls`
# listings are assumed parallel (one file per snapshot, same ordering)
# so index i pairs the matching x/y/z files — TODO confirm the solver
# always writes all three per step.
xlist = os.popen('ls *xSlice*dat').readlines()
ylist = os.popen('ls *ySlice*dat').readlines()
zlist = os.popen('ls *zSlice*dat').readlines()
i = 0
for xname in xlist:
print xname
yname = ylist[i]
zname = zlist[i]
i += 1
xdata = loadtxt(xname.strip('\n'))
ydata = loadtxt(yname.strip('\n'))
zdata = loadtxt(zname.strip('\n'))
# Each file stores all slices of one axis flattened; reshape using the
# grid sizes read by read_par()/read_slice() (module globals).
xslice = reshape(xdata,(slice_nx,ny,nz))
yslice = reshape(ydata,(slice_ny,nx,nz))
zslice = reshape(zdata,(slice_nz,nx,ny))
# data = reshape(data,(126,101))
# Only the first slice of each axis is plotted; the PNG name is the
# dat-file basename (leading word characters).
clf()
imshow(xslice[0])
colorbar()
savefig(re.findall("^\w+",xname)[0]+".png")
clf()
imshow(yslice[0])
colorbar()
savefig(re.findall("^\w+",yname)[0]+".png")
clf()
imshow(zslice[0])
colorbar()
savefig(re.findall("^\w+",zname)[0]+".png")
# show()
# show(block=False)
# time.sleep(0.5)
# close()
def view_gather():
# Assemble and display the zero-offset section: for each rank's gather
# file, subtract the reference trace (STD_gather) from the trace whose
# index matches the file's ordinal, normalize, and stack into
# ``zero_offset`` (a module global consumed later by prepare_RTM).
# Much of the body is exploratory filtering code left commented out.
global nrec,nt,zero_offset
STD_gather = loadtxt('../STD_gather.dat')
ilist = os.popen('ls Rank*gather*dat').readlines()
ii = 0
num_name = 0
isum = []
zero_offset = []
for name in ilist:
# num_gather = 0
# figure()
ii = 0
isum = []
gather = loadtxt(name.strip('\n'))
# if gather.max() == 0 and gather.min() == 0:
# continue
# Skip empty files; a 1-D gather means a single trace.
if shape(gather)[0] == 0:
continue
if shape(shape(gather))[0] == 1:
isum.append(gather)
plot(gather/max(abs(gather))+ii)
else:
for i in range(len(gather[:,0])):
# num_gather += 1
# NOTE(review): only trace ``num_name`` (the file's ordinal) is kept
# per file — presumably the zero-offset trace; confirm acquisition
# geometry.
if(num_name == i):
data = gather[i,:]-STD_gather
# data = signal.detrend(data)
# n = 2**int(ceil(log2(len(data))))
# freqs = np.linspace(0, 1/double(dt)/2, n/2+1)
# sp = np.fft.rfft(data,n)/n
# W = abs(sp)
# plot(freqs,W)
# show()
# # Ddata = signal.detrend(data[200:])
# # lowpass = obspy.signal.filter.lowpass(data,10000000,1.0/double(dt),corners=4,zerophase=True)
# highpass = obspy.signal.filter.highpass(data,4e7,1.0/double(dt),corners=4,zerophase=True)
# # result = gather[i,:] + lowpass
# result = highpass
# # plot(Ddata)
# # plot(lowpass)
# plot(result)
# plot(data)
zero_offset.append(data/max(abs(data)))
# plot(data)
# show()
# zero_offset.append(result/max(result))
# plot(gather[i,:]/max(abs(gather[i,:]))+ii)
isum.append(gather[i,:]/max(abs(gather[i,:])))
# print i,num_name
# plot(gather[i,:]/max(abs(gather[i,:]))+ii)
# ii =ii+1
num_name += 1
# show()
# figure()
# imshow(isum,cmap='gray',origin='lower',extent=(0,5000,0,3000),vmax=0.1, vmin=-0.1)
# show()
figure()
imshow(zero_offset,cmap='gray',origin='lower',extent=(0,5000,0,3000))
show()
# savefig('gather.png')
# for i in range(len(isum)):
# figure()
# plot(isum[i])
# show()
def prepare_RTM():
idir = './RTM/'
if not os.path.exists(idir):
os.mkdir(idir)
os.system('cp FDTD_MPI par.in rec.in mkmodel.py ./RTM/')
# os.system('cp rec.in src_RTM.in')
# with file('src_RTM.in', 'aw') as fsrc:
# for i in range(len(zero_offset)):
# savetxt(fsrc,zero_offset[i][::-1])
with file('rec.in','r') as frec:
nrec = int(frec.readline().split()[0])
fsrc=open('src_RTM.in','w')
print "nrec nt: ",int(nrec),int(nt)
fsrc.write("%d %d\n" % (nrec, int(nt)))
fsrc.close()
fsrc=open('src_RTM.in','a')
for i in range(nrec):
fsrc.write(frec.readline())
for i in range(nrec):
savetxt(fsrc,zero_offset[i][::-1])
fsrc.close
os.system('cp src_RTM.in ./RTM/src.in')
# Script entry: parse par.in (populates the grid/slice globals), then
# render the slice snapshots found in ./Output/.  The gather/RTM steps
# are currently disabled.
read_par()
os.chdir("./Output/")
# view_gather()
view_slice()
# os.chdir("../")
# prepare_RTM()
#view_wavefield()
| gpl-2.0 |
xwolf12/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Show below is a logistic-regression classifiers decision boundaries on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
# A very large C effectively disables the l2 regularization.
logreg = linear_model.LogisticRegression(C=1e5)
# Fit the logistic-regression classifier to the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the class of every mesh point (ravel to a (n, 2) sample array).
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
zorojean/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Evaluation of the graph-lasso objective function

    the objective function is made of a shifted scaled version of the
    normalized log-likelihood (i.e. its empirical mean over the samples) and a
    penalisation term to promote sparsity
    """
    n_features = precision_.shape[0]
    # Shifted/scaled negative log-likelihood term.
    neg_log_like = -2. * log_likelihood(mle, precision_)
    neg_log_like += n_features * np.log(2 * np.pi)
    # l1 penalty over the off-diagonal entries only (diagonal unpenalized).
    l1_off_diag = np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()
    return neg_log_like + alpha * l1_off_diag
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : 2D array, (n_features, n_features)
        The sample covariance matrix

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    # Work on a copy with the (unpenalized) diagonal zeroed out, then take
    # the largest off-diagonal magnitude.
    masked = np.abs(np.copy(emp_cov))
    masked[np.diag_indices_from(masked)] = 0
    return np.max(masked)
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
# Shortcut: with no penalty the solution is the plain inverse of the
# empirical covariance (MLE), no iterations needed.
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
# ill-conditioned, and the CV loop blows. Beside, this takes
# conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
# Outer loop of the GLasso block coordinate descent: each pass updates
# every row/column of the covariance once via an l1 regression.
for i in range(max_iter):
for idx in range(n_features):
# Sub-problem for column idx: regress that column against the
# remaining (n_features - 1) variables.
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
# Convergence is monitored through the duality gap.
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alpha : positive float, default 0.01
        Regularization strength: the higher alpha, the sparser the
        estimated inverse covariance.
    mode : {'cd', 'lars'}, default 'cd'
        Lasso solver to use: coordinate descent ('cd', more numerically
        stable) or LARS ('lars', better for very sparse graphs, p > n).
    tol : positive float, default 1e-4
        Convergence tolerance: iterations stop once the dual gap drops
        below this value.
    enet_tol : positive float, optional
        Tolerance of the elastic-net solver computing the descent
        direction (accuracy of each column update, not of the overall
        estimate). Only used for mode='cd'.
    max_iter : integer, default 100
        Maximum number of iterations.
    verbose : boolean, default False
        If True, the objective function and dual gap are printed at each
        iteration.
    assume_centered : boolean, default False
        If True, data are not centered before computation (useful for
        data whose mean is almost, but not exactly, zero).

    Attributes
    ----------
    covariance_ : array-like, shape (n_features, n_features)
        Estimated covariance matrix.
    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    graph_lasso, GraphLassoCV
    """
    def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
                 max_iter=100, verbose=False, assume_centered=False):
        self.alpha = alpha
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True
    def fit(self, X, y=None):
        """Fit the GraphLasso model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate.
        """
        X = check_array(X)
        # Location: assumed zero, or estimated as the column means.
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        sample_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            sample_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=self.verbose, return_n_iter=True)
        return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
# Walk the (decreasing) alpha path, warm-starting each solve from the
# previous covariance estimate.
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
# A failed (ill-conditioned) solve contributes NaN estimates and a
# -inf score, so it can never be selected as the best alpha.
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
# An explicit alpha list disables grid refinement; an integer asks for
# a log-spaced grid that is refined n_refinements times.
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
# Little danse to transform the list in what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
# Implausibly huge scores are numerical artefacts: mark missing.
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid around the current best alpha for the next pass.
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
| bsd-3-clause |
erjerison/adaptability | github_submission/detect_qtls_make_table_2_5_2016.py | 1 | 13517 | import matplotlib.pylab as pt
import numpy
import matplotlib.cm as cm
import scipy.stats
from qtl_detection_one_trait import detect_qtls_one_envt
from qtl_detection_one_trait import detect_qtls_above_fitness
from qtl_detection_one_trait import detect_qtls_with_epistasis
from qtl_detection_one_trait import detect_qtls_with_epistasis2 #This is for detecting qtls with epistasis, above fitness
from qtl_detection_one_trait import calculate_qtl_confidence_intervals_lods
#This file was modified on 1/18/2017 to do the following things: first, to detect QTLs using only informative, not redundant loci.
#Second, to detect qtls separately for the two environments, in addition to jointly, to compare.
#Further modified on 2/6/2017 to detect qtls on the mean trait value rather than including all replicates, for comparison
#Import fitness and genotype data
filename1 = 'data/fitness_measurements_with_population_names_12_29_2016.csv'
filename2 = 'data/control_replicate_measurements.csv'
filename3 = 'data/segregant_genotypes_deduplicated_with_header.csv'
segregant_vector = []
init_fits_ypd = []
init_std_errs_ypd = []
init_fits_sc = []
init_std_errs_sc = []
final_fits_ypd_pops_in_ypd = []
segregant_vector_ypd_pops = []
final_fits_sc_pops_in_sc = []
segregant_vector_sc_pops = []
final_fits_sc_pops_in_ypd = []
final_fits_ypd_pops_in_sc = []
file1 = open(filename1,'r')
firstline = 0
for line in file1:
if firstline < .5:
firstline += 1
continue
linestrs = line.strip().split(';')
segregant_vector.append(linestrs[0])
init_fits_ypd.append(float(linestrs[1]))
init_std_errs_ypd.append(float(linestrs[2]))
init_fits_sc.append(float(linestrs[3]))
init_std_errs_sc.append(float(linestrs[4]))
ypd_evolved_pops = linestrs[5].split(',')
for entry in ypd_evolved_pops:
segregant_vector_ypd_pops.append(linestrs[0])
final_fits_ypd_pops_in_ypd.append(float(entry.split()[1]))
final_fits_ypd_pops_in_sc.append(float(entry.split()[2]))
sc_evolved_pops = linestrs[6].split(',')
for entry in sc_evolved_pops:
segregant_vector_sc_pops.append(linestrs[0])
final_fits_sc_pops_in_ypd.append(float(entry.split()[1]))
final_fits_sc_pops_in_sc.append(float(entry.split()[2]))
file1.close()
init_fits_ypd = numpy.array(init_fits_ypd)
init_std_errs_ypd = numpy.array(init_std_errs_ypd)
init_fits_sc = numpy.array(init_fits_sc)
init_std_errs_sc = numpy.array(init_std_errs_sc)
final_fits_ypd_pops_in_ypd = numpy.array(final_fits_ypd_pops_in_ypd)
final_fits_ypd_pops_in_sc = numpy.array(final_fits_ypd_pops_in_sc)
segregant_vector_ypd_pops = numpy.array(segregant_vector_ypd_pops)
segregant_vector_sc_pops = numpy.array(segregant_vector_sc_pops)
final_fits_sc_pops_in_ypd = numpy.array(final_fits_sc_pops_in_ypd)
final_fits_ypd_pops_in_sc = numpy.array(final_fits_ypd_pops_in_sc)
ypd_controls = {}
sc_controls = {}
file2 = open(filename2,'r')
firstline = 0
for line in file2:
if firstline < .5:
firstline += 1
continue
linestrs = line.strip().split(';')
ypd_controls[linestrs[0]] = [float(i) for i in linestrs[1].split(',')]
sc_controls[linestrs[0]] = [float(i) for i in linestrs[2].split(',')]
file2.close()
genotype_mat = []
file3 = open(filename3,'r')
marker_locations = []
firstline=1
for line in file3:
if firstline:
marker_locations = line.strip().split(';')[1].split(',')
firstline = 0
else:
linelist = line.strip().split(';')
genotype = [int(i) for i in linelist[1].split(',')]
genotype_mat.append(genotype)
genotype_mat = numpy.array(genotype_mat)
file3.close()
#Use controls (~8 technical replicates each of 24 final populations) to estimate the error variance on the final fitness measurements in each environment
n_control_pops = 24.
var_sum = 0
n_total_reps = 0
for pop in ypd_controls:
    fit = numpy.mean(ypd_controls[pop])
    var_sum += numpy.sum((ypd_controls[pop] - fit)**2)
    n_total_reps += len(ypd_controls[pop])
# Pooled variance: sum of squared deviations / (N_reps - N_pops).
# NOTE(review): this value is computed from ypd_controls but stored as
# measurement_error_var_sc, while the loop below uses sc_controls and stores
# ..._ypd — the names look swapped; confirm the intended mapping. (Both are
# only referenced in commented-out terms downstream, so behavior is unaffected.)
measurement_error_var_sc = var_sum/float(n_total_reps - n_control_pops)
var_sum = 0
n_total_reps = 0
for pop in sc_controls:
    fit = numpy.mean(sc_controls[pop])
    var_sum += numpy.sum((sc_controls[pop] - fit)**2)
    n_total_reps += len(sc_controls[pop])
measurement_error_var_ypd = var_sum/float(n_total_reps - n_control_pops)
###
#Set up 'helper matrix' utilities to conveniently calculate averages and variances over segregant groups
# helper_matrix_*[p, s] == 1 iff population p descends from segregant s, so
# dot(helper, per-seg vector) expands to per-population, and dot(per-pop, helper)
# sums populations within each segregant.
num_segs = len(segregant_vector)
num_pops_ypd = len(segregant_vector_ypd_pops)
num_pops_sc = len(segregant_vector_sc_pops)
helper_matrix_ypd_pops = numpy.zeros((num_pops_ypd,num_segs))
helper_matrix_sc_pops = numpy.zeros((num_pops_sc,num_segs))
for i in range(num_segs):
    current_seg = segregant_vector[i]
    helper_matrix_ypd_pops[numpy.where(segregant_vector_ypd_pops == current_seg)[0],i] = 1.
    helper_matrix_sc_pops[numpy.where(segregant_vector_sc_pops == current_seg)[0],i] = 1.
# Diagonal of H^T H counts populations per segregant (used as divisor below).
pops_per_seg_ypd = numpy.diag(numpy.dot(helper_matrix_ypd_pops.T,helper_matrix_ypd_pops))
pops_per_seg_sc = numpy.diag(numpy.dot(helper_matrix_sc_pops.T,helper_matrix_sc_pops))
# #Use the helper matrix to average among populations descended from a particular segregant:
# delta = final population fitness minus founding segregant's initial fitness,
# measured in the evolution environment or the opposite one.
delta_fits_ypd = final_fits_ypd_pops_in_ypd - numpy.dot(helper_matrix_ypd_pops,init_fits_ypd)
delta_fits_sc = final_fits_sc_pops_in_sc - numpy.dot(helper_matrix_sc_pops,init_fits_sc)
delta_fits_ypd_in_sc = final_fits_ypd_pops_in_sc - numpy.dot(helper_matrix_ypd_pops,init_fits_sc)
delta_fits_sc_in_ypd = final_fits_sc_pops_in_ypd - numpy.dot(helper_matrix_sc_pops,init_fits_ypd)
delta_fits_ypd_means = numpy.dot(delta_fits_ypd,helper_matrix_ypd_pops)/pops_per_seg_ypd
delta_fits_sc_means = numpy.dot(delta_fits_sc,helper_matrix_sc_pops)/pops_per_seg_sc
delta_fits_sc_in_ypd_means = numpy.dot(delta_fits_sc_in_ypd,helper_matrix_sc_pops)/pops_per_seg_sc
delta_fits_ypd_in_sc_means = numpy.dot(delta_fits_ypd_in_sc,helper_matrix_ypd_pops)/pops_per_seg_ypd
#Delta fits inherit variance from the initial fitness and final fitness measurements, in addition to technical measurement error
# Sample variance across populations per segregant, plus the initial-fitness
# standard error squared (the measurement-error subtraction is commented out).
delta_fits_sc_vars = numpy.dot((delta_fits_sc - numpy.dot(helper_matrix_sc_pops,delta_fits_sc_means))**2, helper_matrix_sc_pops)/(pops_per_seg_sc - 1.) + init_std_errs_sc**2 #- measurement_error_var_sc
delta_fits_sc_std_errs = numpy.sqrt(delta_fits_sc_vars/pops_per_seg_sc)
delta_fits_ypd_vars = numpy.dot((delta_fits_ypd - numpy.dot(helper_matrix_ypd_pops,delta_fits_ypd_means))**2, helper_matrix_ypd_pops)/(pops_per_seg_ypd - 1.) + init_std_errs_ypd**2 #- measurement_error_var_ypd
delta_fits_ypd_std_errs = numpy.sqrt(delta_fits_ypd_vars/pops_per_seg_ypd)
delta_fits_ypd_in_sc_vars = numpy.dot((delta_fits_ypd_in_sc - numpy.dot(helper_matrix_ypd_pops,delta_fits_ypd_in_sc_means))**2, helper_matrix_ypd_pops)/(pops_per_seg_ypd - 1.) + init_std_errs_sc**2 #- measurement_error_sc
delta_fits_ypd_in_sc_std_errs = numpy.sqrt(delta_fits_ypd_in_sc_vars/pops_per_seg_ypd)
delta_fits_sc_in_ypd_vars = numpy.dot((delta_fits_sc_in_ypd - numpy.dot(helper_matrix_sc_pops,delta_fits_sc_in_ypd_means))**2, helper_matrix_sc_pops)/(pops_per_seg_sc - 1.) + init_std_errs_ypd**2 #- measurement_error_ypd
delta_fits_sc_in_ypd_std_errs = numpy.sqrt(delta_fits_sc_in_ypd_vars/pops_per_seg_sc)
########QTL detection
########We output a table in the format 'marker number', 'chromosome location', 'additional fraction of variance explained', 'confidence intervals'. We will later combine this table with a .gff to add 'genes within confidence intervals'.
########For each trait, we will first calculate the QTL locations iteratively.
########We will then calculate confidence intervals for the QTL locations by bootstrapping over the 230 segregants and measuring the distribution of the location of the QTL peak
n_segs = len(pops_per_seg_ypd)
###Initial fitness qtl detection (linear model)
#Detect QTL locations
# detect_qtls_one_envt is defined earlier in this file; with an identity
# helper matrix and one 'population' per segregant it fits the segregants directly.
qtls_init_sc, beta_sc_init, intervals_sc = detect_qtls_one_envt(genotype_mat, init_fits_sc, helper_matrix = numpy.identity(n_segs), pops_per_seg =numpy.ones((n_segs,)))
qtls_init_ypd, beta_ypd_init, intervals_ypd = detect_qtls_one_envt(genotype_mat, init_fits_ypd, helper_matrix = numpy.identity(n_segs), pops_per_seg =numpy.ones((n_segs,)))
lower_CIs_sc = intervals_sc[:,0]
upper_CIs_sc = intervals_sc[:,1]
lower_CIs_ypd = intervals_ypd[:,0]
upper_CIs_ypd = intervals_ypd[:,1]
#Fraction of variance explained
# Add QTLs to the design matrix one at a time (first column is the intercept);
# each loop iteration refits OLS and records the cumulative R^2.
X_qtls_sc = numpy.ones((n_segs,1))
rsq_sc_list = []
for qtl in qtls_init_sc:
    X_qtls_sc = numpy.append(X_qtls_sc, genotype_mat[:, qtl].reshape((n_segs,1)), axis=1)
    beta_sc = numpy.dot(numpy.linalg.inv(numpy.dot(X_qtls_sc.T, X_qtls_sc)), numpy.dot(X_qtls_sc.T, init_fits_sc))
    rsq_sc_list.append(scipy.stats.pearsonr(numpy.dot(X_qtls_sc, beta_sc),init_fits_sc)[0]**2)
rsq_sc_list = numpy.array(rsq_sc_list)
# Difference the cumulative R^2 to get each QTL's *additional* variance explained.
rsq_sc_list_diff = numpy.append([rsq_sc_list[0]],rsq_sc_list[1:]-rsq_sc_list[0:-1],axis=0)
X_qtls_ypd = numpy.ones((n_segs,1))
rsq_ypd_list = []
for qtl in qtls_init_ypd:
    X_qtls_ypd = numpy.append(X_qtls_ypd, genotype_mat[:, qtl].reshape((n_segs,1)), axis=1)
    beta_ypd = numpy.dot(numpy.linalg.inv(numpy.dot(X_qtls_ypd.T, X_qtls_ypd)), numpy.dot(X_qtls_ypd.T, init_fits_ypd))
    rsq_ypd_list.append(scipy.stats.pearsonr(numpy.dot(X_qtls_ypd, beta_ypd),init_fits_ypd)[0]**2)
rsq_ypd_list = numpy.array(rsq_ypd_list)
rsq_ypd_list_diff = numpy.append([rsq_ypd_list[0]],rsq_ypd_list[1:]-rsq_ypd_list[0:-1],axis=0)
#Write everything in a table
# NOTE(review): join() places the ',' separator before the final '\n' element,
# so every row carries a trailing comma — presumably harmless for the
# downstream consumer, but confirm before changing.
filename_out1 = 'data/initial_fitness_qtl_table_2_5_2017.csv'
file_out1 = open(filename_out1,'w')
file_out1.write('#Trait: initial segregant fitness, 37 C' + '\n')
file_out1.write('#' +(',').join(('marker number','location','location lower bound','location upper bound','var. explained YPD 30C', 'var. explained SC 37C','\n')))
for i in range(len(qtls_init_sc)):
    marker = qtls_init_sc[i]
    loc = marker_locations[marker]
    loc_lower = marker_locations[lower_CIs_sc[i]]
    loc_upper = marker_locations[upper_CIs_sc[i]]
    var_sc = rsq_sc_list_diff[i]
    file_out1.write((',').join((str(marker), loc, loc_lower, loc_upper, 'NA', str(var_sc),'\n')))
file_out1.write('#Trait: initial segregant fitness, 30 C' + '\n')
for i in range(len(qtls_init_ypd)):
    marker = qtls_init_ypd[i]
    loc = marker_locations[marker]
    loc_lower = marker_locations[lower_CIs_ypd[i]]
    loc_upper = marker_locations[upper_CIs_ypd[i]]
    var_ypd = rsq_ypd_list_diff[i]
    file_out1.write((',').join((str(marker), loc, loc_lower, loc_upper, str(var_ypd),'NA','\n')))
file_out1.close()
##Delta fitness qtl detection (linear model)
# Same procedure as above, but the response is the per-segregant mean fitness
# *change* after evolution (delta_fits_*_means computed earlier).
qtls_delta_sc, beta_sc_delta, intervals_sc = detect_qtls_one_envt(genotype_mat, delta_fits_sc_means, helper_matrix = numpy.identity(n_segs), pops_per_seg = numpy.ones((n_segs,)))
qtls_delta_ypd, beta_ypd_delta, intervals_ypd = detect_qtls_one_envt(genotype_mat, delta_fits_ypd_means, helper_matrix = numpy.identity(n_segs), pops_per_seg = numpy.ones((n_segs,)))
lower_CIs_sc = intervals_sc[:,0]
upper_CIs_sc = intervals_sc[:,1]
lower_CIs_ypd = intervals_ypd[:,0]
upper_CIs_ypd = intervals_ypd[:,1]
#Fraction of variance explained
X_qtls_sc = numpy.ones((n_segs,1))
rsq_sc_list = []
for qtl in qtls_delta_sc:
    X_qtls_sc = numpy.append(X_qtls_sc, genotype_mat[:, qtl].reshape((n_segs,1)), axis=1)
    #X_qtls_sc_extended = numpy.dot(helper_matrix_sc_pops, X_qtls_sc)
    beta_sc = numpy.dot(numpy.linalg.inv(numpy.dot(X_qtls_sc.T, X_qtls_sc)), numpy.dot(X_qtls_sc.T, delta_fits_sc_means))
    rsq_sc_list.append(scipy.stats.pearsonr(numpy.dot(X_qtls_sc, beta_sc),delta_fits_sc_means)[0]**2)
rsq_sc_list = numpy.array(rsq_sc_list)
rsq_sc_list_diff = numpy.append([rsq_sc_list[0]],rsq_sc_list[1:]-rsq_sc_list[0:-1],axis=0)
X_qtls_ypd = numpy.ones((n_segs,1))
rsq_ypd_list = []
for qtl in qtls_delta_ypd:
    X_qtls_ypd = numpy.append(X_qtls_ypd, genotype_mat[:, qtl].reshape((n_segs,1)), axis=1)
    #X_qtls_ypd_extended = numpy.dot(helper_matrix_ypd_pops, X_qtls_ypd)
    beta_ypd = numpy.dot(numpy.linalg.inv(numpy.dot(X_qtls_ypd.T, X_qtls_ypd)), numpy.dot(X_qtls_ypd.T, delta_fits_ypd_means))
    rsq_ypd_list.append(scipy.stats.pearsonr(numpy.dot(X_qtls_ypd, beta_ypd),delta_fits_ypd_means)[0]**2)
rsq_ypd_list = numpy.array(rsq_ypd_list)
rsq_ypd_list_diff = numpy.append([rsq_ypd_list[0]],rsq_ypd_list[1:]-rsq_ypd_list[0:-1],axis=0)
#Write everything in a table
filename_out1 = 'data/delta_fitness_qtl_table_2_5_2017.csv'
file_out1 = open(filename_out1,'w')
file_out1.write('#Trait: mean delta segregant fitness 37C' + '\n')
file_out1.write('#' +(',').join(('marker number','location','location lower bound','location upper bound','var. explained YPD 30C', 'var. explained SC 37C','\n')))
for i in range(len(qtls_delta_sc)):
    marker = qtls_delta_sc[i]
    loc = marker_locations[marker]
    loc_lower = marker_locations[lower_CIs_sc[i]]
    loc_upper = marker_locations[upper_CIs_sc[i]]
    var_sc = rsq_sc_list_diff[i]
    file_out1.write((',').join((str(marker), loc, loc_lower, loc_upper, 'NA', str(var_sc),'\n')))
file_out1.write('#Trait: mean delta segregant fitness 30C' + '\n')
for i in range(len(qtls_delta_ypd)):
    marker = qtls_delta_ypd[i]
    loc = marker_locations[marker]
    loc_lower = marker_locations[lower_CIs_ypd[i]]
    loc_upper = marker_locations[upper_CIs_ypd[i]]
    var_ypd = rsq_ypd_list_diff[i]
    file_out1.write((',').join((str(marker), loc, loc_lower, loc_upper, str(var_ypd),'NA','\n')))
file_out1.close() | mit |
alexeyum/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for sklearn.linear_model.

    Registers the Cython-generated C extensions (cd_fast, sgd_fast, sag_fast)
    against the bundled CBLAS headers and whatever BLAS get_blas_info() found,
    plus the tests subpackage.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('linear_model', parent_package, top_path)
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        # link libm explicitly on POSIX systems
        cblas_libs.append('m')
    # NOTE: blas_info.pop(...) mutates blas_info, so each key is consumed by
    # the first add_extension call that pops it; cd_fast gets the popped
    # include_dirs/extra_compile_args, sgd_fast sees the already-reduced dict,
    # and the remaining blas_info entries are splatted via **blas_info.
    # Also note include_dirs contains the popped value as a nested list element.
    config.add_extension('cd_fast', sources=['cd_fast.c'],
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []), **blas_info)
    config.add_extension('sgd_fast',
                         sources=['sgd_fast.c'],
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)
    # sag_fast needs no BLAS, only the numpy headers
    config.add_extension('sag_fast',
                         sources=['sag_fast.c'],
                         include_dirs=numpy.get_include())
    # add other directories
    config.add_subpackage('tests')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone: python setup.py build_ext
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
Kruehlio/XSspec | spectrum.py | 1 | 24396 | #!/usr/bin/env python
import os
import pyfits
import numpy as np
from matplotlib import rc
from matplotlib.patheffects import withStroke
from scipy import interpolate, constants
from analysis.functions import (blur_image, ccmred)
from utils.astro import (airtovac, vactoair, absll, emll, isnumber, getebv, binspec)
from io.postproc import (checkWL, scaleSpec, applyScale, fluxCor,
telCor, applTel)
from analysis.onedspec import makeProf, extr1d, write1d, smooth1d, writeVP
from analysis.analysis import (setCont, fitCont, dlaAbs, stackLines, setMod,
setAscMod, scaleMod)
from utils.starlight import runStar, substarlight
# Speed of light in km/s (scipy gives m/s)
c = constants.c/1E3
# Reference line lists: absorption and emission lines, merged into one dict
# (Python-2 style dict.items() list concatenation)
abslist, emllist = absll(), emll()
linelist = dict(emllist.items() + abslist.items())
# Default matplotlib text outline used by plotting helpers
myeffect = withStroke(foreground="w", linewidth=3)
kwargs = dict(path_effects=[myeffect])
# Theoretical sky models, RADIA is Skylines, TRANS is transmission, both at airmass
# 1.3 and for Paranal. See
# https://www.eso.org/observing/etc/bin/gen/form?INS.MODE=swspectr+INS.NAME=SKYCALC
PAR_RADIA = os.path.join(os.path.dirname(__file__), "etc/paranal_radia_15_13.txt")
PAR_TRANS = os.path.join(os.path.dirname(__file__), "etc/paranal_trans_10_13_mod.txt")
class spectrum2d:
""" Spectrum class for data manipulation and analysis
Arguments:
inst: Instrument that produced the spectrum (optional, default=xs)
Methods:
set1dFiles (Read in data file from 1d ascii spsectrum)
setHead (Read in header from fits file)
setReso (Set spectral resolving power R)
binOneDSpec (Bin one-dimensional spectrum)
binTwoDSpec (Bin two-dimensional spectrum)
setFiles (Set input files - 2d fits files)
fluxCor (Do a flux calibration)
checkWL (Check wavelength solution via cross-correlation with skylines)
smooth2d (Smooth the 2d spectrum)
vacCor (Convert from air to vacuum wavelengths)
helioCor (Convert from observed to heliocentric)
ebvCal (Correct for Galactic E_B-V)
scaleSpec (Derive scale factor from spectrum to photometry)
applyScale (Apply scale factor to spectrum)
setMod (Define physical afterglow model)
setAscMod (Defines model from ascii file)
scaleMod (Scale spectrum to afterglow model)
makeProf (Create spatial profile in 2d-spectrum)
extr1d (Extract 1d spectrum from 2d using the trace profile)
write1d (Write out 1d spectrum into ascii file)
smooth1d (Smooth the 1d spectrum)
setCont (Set a simple continuum model)
sn (Calculate signal to noise ratio depending on wavelength)
wltopix (Convert wavelength to pixel)
fitCont (Fit continuum with afterglow model)
dlaabs (Add DLA absorber to continuum)
writevp (writeout VPFit files)
telcor (Create telluric correction using telluric star observations)
appltel (Apply telluric correction from telluric star observations)
stacklines (Under progress)
"""
def __init__(self, inst = 'xs', tex =''):
self.inst = inst
self.datfiles = {'uvb': '', 'vis': '', 'nir': '', 'all': ''}
self.redshift = 0
self.output = {'all': 'ALL','uvb': 'UVB', 'vis': 'VIS', 'nir': 'NIR'}
self.nh = ''
self.object = ''
self.mult = 1E17
self.wlmult = 10
self.dAxis = {'uvb': 1, 'vis': 1, 'nir': 1}
self.tAxis = {'uvb': 1, 'vis': 1, 'nir': 1}
self.ebv, self.rv, self.vhel = '', '', 0
self.profile = {'uvb': 'moffat', 'vis': 'moffat', 'nir': 'moffat'}
self.reso = {'uvb': c/5100., 'vis': c/8800., 'nir': c/5100.}
# Intervening systems
self.intsys = {}
# Wavelength array
self.wave = {'uvb': '', 'vis': '', 'nir': ''}
# 2d data array
self.data = {'uvb': '', 'vis': '', 'nir': ''}
# 2d error array
self.erro = {'uvb': '', 'vis': '', 'nir': ''}
# 2d flag array
self.flag = {'uvb': '', 'vis': '', 'nir': ''}
# Fits header
self.head = {'uvb': '', 'vis': '', 'nir': ''}
# Optimal extraction profile
self.prof = {'uvb': '', 'vis': '', 'nir': ''}
# Trace parameters
self.trace = {'uvb': '', 'vis': '', 'nir': ''}
# Smoothed 2d data
self.smooth = {'uvb': '', 'vis': '', 'nir': ''}
# Data range in 2d fits frame, this is pixel
self.datarange = {'uvb': [], 'vis': [], 'nir': []}
# Background pixel in datarange
self.backrange = {'uvb': [5, 5], 'vis': [5, 5], 'nir': [4, 4]}
# WL range for different arms
self.wlrange = {'uvb': [3000., 5850.], 'vis': [5500, 10200],
'nir': [10000, 24500], 'all': [3000, 24500]}
# 1d data after optimal extraction
self.oneddata = {'uvb': '', 'vis': '', 'nir': ''}
self.onedback = {'uvb': '', 'vis': '', 'nir': ''}
# Smoothed 1d data after optimal extraction
self.soneddata = {'uvb': '', 'vis': '', 'nir': ''}
# 1d error after optimal extraction
self.onederro = {'uvb': '', 'vis': '', 'nir': ''}
# 1d SKY rms
self.skyrms = {'uvb': '', 'vis': '', 'nir': ''}
# 1d afterglow model based on supplied beta, AV and z
self.model = {'uvb': '', 'vis': '', 'nir': ''}
# Matching factor to Afterglow molde
self.match = {'uvb': '', 'vis': '', 'nir': ''}
# Correction factor for Gal. E_B-V
self.ebvcorr = {'uvb': '', 'vis': '', 'nir': ''}
# 1d continuum (without absorption lines)
self.cont = {'uvb': '', 'vis': '', 'nir': ''}
self.woabs = {'uvb': '', 'vis': '', 'nir': ''}
# 1d data including DLA absorption
self.oneddla = {'uvb': '', 'vis': '', 'nir': ''}
# Correction factor to photometry
self.slitcorr = {'all': '', 'uvb': '', 'vis': '', 'nir': ''}
# Telluric correction
self.telcorr = {'uvb': '', 'vis': '', 'nir': ''}
self.telwave = {'uvb': '', 'vis': '', 'nir': ''}
# Cleaned means no tellurics, no absorption lines
self.cleanwav = {'uvb': '', 'vis': '', 'nir': ''}
self.cleandat = {'uvb': '', 'vis': '', 'nir': ''}
self.cleanerr = {'uvb': '', 'vis': '', 'nir': ''}
self.lineflux = {}
self.skytel = {}
self.skyrad = {}
self.linepars = {}
# Luminosity spectrum
self.lumspec = {'uvb': '', 'vis': '', 'nir': ''}
self.lumerr = {'uvb': '', 'vis': '', 'nir': ''}
self.restwave = {'uvb': '', 'vis': '', 'nir': ''}
print '\n\t######################'
print '\tSpectrum class'
self.setSkySpec()
#self.setSkyEml()
print '\t######################'
if tex in ['yes', True, 1, 'Y', 'y']:
rc('text', usetex=True)
################################################################################
def setSkySpec(self):
skywl, skytrans, skyradia = [], [], []
filen = os.path.expanduser(PAR_TRANS)
fin = open(filen, 'r')
lines = [line.strip().split() for line in fin.readlines() if not line.strip().startswith('#')]
fin.close()
for line in lines:
if line != [] and isnumber(line[0]):
if float(line[0]) > 0.2999 and float(line[0]) < 2.5:
skywl.append(float(line[0])*1E4)
skytrans.append(float(line[1]))
self.skywl, self.skytrans = (np.array(skywl)), np.array(skytrans)
self.skywlair = vactoair(self.skywl)
filen = os.path.expanduser(PAR_RADIA)
fin = open(filen, 'r')
lines = [line.strip().split() for line in fin.readlines() if not line.strip().startswith('#')]
fin.close()
for line in lines:
if line != [] and isnumber(line[0]):
if float(line[0]) > 0.2999 and float(line[0]) < 2.5:
skyradia.append(float(line[1]))
self.skyradia = np.array(skyradia)
print '\tTheoretical Sky Spectrum set'
################################################################################
def set1dFiles(self, arm, filen, ymult = 1E-17,
mult = 10, errsc = 1., mode = 'txt', dAxis=1):
wlkey, wlstart = 'NAXIS%i'%dAxis, 'CRVAL%i'%dAxis
wlinc, wlpixst = 'CDELT%i'%dAxis, 'CRPIX%i'%dAxis
if self.datfiles.has_key(arm) and mode == 'txt':
print '\t1d-data file %s as arm %s added' %(filen, arm)
lines = [line.strip() for line in open(filen)]
wave, data, erro = np.array([]), np.array([]), np.array([])
for line in lines:
if line != [] and line.split() != [] and isnumber(line.split()[0]):
wave = np.append(wave, float(line.split()[0]))
data = np.append(data, float(line.split()[1]))
erro = np.append(erro, float(line.split()[2]))
self.wave[arm] = wave
self.oneddata[arm] = data*ymult
self.onederro[arm] = erro*errsc*ymult
self.skyrms[arm] = erro*errsc*ymult
elif self.datfiles.has_key(arm) and mode == 'fits':
print '\t1d-data fits file %s as arm %s added' %(filen, arm)
hdulist = pyfits.open(filen)
self.oneddata[arm] = hdulist[0].data
erro = abs(hdulist[1].data)
erro[erro < 1e-22] = 1e-22
self.onederro[arm] = erro*errsc
self.skyrms[arm] = erro*errsc
self.head[arm] = hdulist[0].header
pix = np.arange(self.head[arm][wlkey]) + 1
self.wave[arm] = self.head[arm][wlstart] \
+ (pix-self.head[arm][wlpixst])*self.head[arm][wlinc]*mult
hdulist.close()
else:
print 'Arm %s not known' %arm
tck1 = interpolate.InterpolatedUnivariateSpline(self.skywlair, self.skytrans)
tck2 = interpolate.InterpolatedUnivariateSpline(self.skywlair, self.skyradia)
self.skytel[arm] = tck1(self.wave[arm])
self.skyrad[arm] = tck2(self.wave[arm])
self.output[arm] = os.path.splitext(filen)[0]
################################################################################
def setHead(self, arm, filen):
if self.datfiles.has_key(arm):
hdulist = pyfits.open(filen)
self.head[arm] = hdulist[0].header
hdulist.close()
################################################################################
def setReso(self, arm, reso):
print '\tResolving power in arm %s is R = %.0f' %(arm, reso)
print '\tResolution in arm %s set to %.2f km/s' %(arm, c/reso)
self.reso[arm] = c/reso
def setWLRange(self, arm, wlrange):
self.wlrange[arm] = wlrange
def setBackRange(self, arm, back):
self.backrange[arm] = back
def setDatarange(self, arm, y1, y2):
print '\tUsing datarange from y1 = %i to y2 = %i pixels' %(y1, y2)
self.datarange[arm] = [y1, y2]
def setObject(self, objectn):
print '\tNew object added: %s' %objectn
self.object = objectn
def setVhelio(self, vhel):
self.vhel = vhel
def intSys(self, red, lines):
self.intsys[red] = lines
def showData(self, arm):
print self.data[arm]
def show1dData(self, arm):
print self.oneddata[arm]
def showWave(self, arm):
print self.wave[arm]
def showOneDData(self, arm, x = 14000):
print self.oneddata[arm][x], self.onederro[arm][x]
def showErro(self, arm):
print self.erro[arm]
def showHead(self, arm):
print self.head[arm]
def setRed(self, z):
print '\tRedshift set to %s' %z
self.redshift = z
def setOut(self, out):
print '\tOutput set to %s' %out
self.output = out
def setMult(self, mult):
self.mult = mult
def setInt(self, z, lines):
print '\tRedshift for intervening system set to %s' %z
self.intsys[z] = lines
################################################################################
def binOneDSpec(self, arm, binr = 40, meth = 'average', clip = 3, do_weight = 1):
print '\tBinning 1d spectrum by factor %i' %binr
if do_weight not in [0, 'False', False, 'N', 'no', 'n', 'No']:
print '\tUsing error-weighted average in binned spectrum'
if self.data[arm] == '':
print '\tNeed an extracted 1d spectrum first'
print '\tWill not bin / doing nothing'
else:
self.wave[arm+'o'], self.oneddata[arm+'o'], self.onederro[arm+'o'] = \
self.wave[arm], self.oneddata[arm], self.onederro[arm]
self.wave[arm], self.oneddata[arm], self.onederro[arm] = \
binspec(self.wave[arm], self.oneddata[arm], self.onederro[arm],
wl = binr, meth = meth, clip = clip, do_weight = do_weight)
################################################################################
def binTwoDSpec(self, arm, binr = 40, meth = 'average', clip = 3, do_weight = 1):
shapebin = (len(self.data[arm][0]), len(self.data[arm])/binr)
bindata, binerro = np.zeros(shapebin), np.zeros(shapebin)
print '\tBinning 2d spectrum by factor %i' %binr
if self.data[arm] == '':
print '\tNeed to provide the 2d spectrum first'
print '\tWill not bin / doing nothing'
else:
for i in range(len(self.data[arm][0])):
dataline = self.data[arm].transpose()[i]
erroline = self.erro[arm].transpose()[i]
binwave, binlinedata, binlineerro = \
binspec(self.wave[arm], dataline, erroline,
wl = binr, meth = meth, clip = clip, do_weight = do_weight)
bindata[i] = binlinedata
binerro[i] = binlineerro
self.data[arm] = bindata.transpose()
self.wave[arm] = binwave
self.erro[arm] = binerro.transpose()
################################################################################
def setFiles(self, arm, filen, filesig = '', dAxis = 1,
mult = 10, const = 0, fluxmult=1):
'''Input Files uses by default columns dispersion axis (keyword dAxis),
and assumes nm as wavelength unit (multiplies by 10, keyword mult).
Uses header keywords NAXIS, CRVAL, CDELT, CRPIX, and by default a MEF
file with the first extension data, second error. Error spectrum can
also be given as wih keyword filesig. If both are absent, we shall
assume sqrt(data) as error. No bad pixel masking is implemented yet.
Replaces errors 0 with 1E-31. Write the fits header, data, error an
wavelenths into class attributes head[arm], data[arm], erro[arm],
and wave [arm]'''
self.dAxis[arm] = dAxis
if self.dAxis[arm] == 1:
self.tAxis[arm], tAxis = 2, 2
elif self.dAxis[arm] == 2:
self.tAxis[arm], tAxis = 1, 1
self.wlmult = mult
if self.datfiles.has_key(arm):
self.head[arm] = pyfits.getheader(filen, 0)
self.inst = self.head[arm]['INSTRUME']
skl = self.head[arm]['NAXIS%i'%tAxis]
skinc = self.head[arm]['CDELT%i'%tAxis]
if self.datarange[arm] == []:
if arm in ['uvb', 'vis']: dy = 3.3/skinc
elif arm in ['nir']: dy = 3.1/skinc
self.datarange[arm] = [max(7,int(skl/2-dy)),
min(skl-7, int(skl/2+dy))]
print '\t%s datarange %i to %i pixels' \
%(arm.upper(), self.datarange[arm][0], self.datarange[arm][1])
if self.object == '':
self.output[arm] = os.path.splitext(filen)[0]
else:
self.output[arm] = self.object+'_%s' %arm
self.datfiles[arm] = filen
yminpix, ymaxpix = self.datarange[arm][0] - 1, self.datarange[arm][1] - 1
print '\tData file %s as arm %s added' %(filen, arm)
wlkey, wlstart = 'NAXIS%i'%dAxis, 'CRVAL%i'%dAxis
wlinc, wlpixst = 'CDELT%i'%dAxis, 'CRPIX%i'%dAxis
if dAxis == 1:
y = pyfits.getdata(filen, 0)[yminpix : ymaxpix].transpose()
else:
ytmp = pyfits.getdata(filen, 0).transpose()[yminpix : ymaxpix]
y = ytmp.transpose()
y[abs(y) < 1E-4/self.mult] = 1E-4/self.mult
self.data[arm] = y
if filesig != '':
if dAxis == 1:
yerr = pyfits.getdata(filesig, 0)[yminpix : ymaxpix].transpose()
else:
yerrtmp = pyfits.getdata(filesig, 0).transpose()[yminpix : ymaxpix]
yerr = yerrtmp.transpose()
if len(np.shape(yerr)) == 1:
print '\t\t1d error spectrum'
pass
else:
try:
yerr = pyfits.getdata(filen, 1)[yminpix : ymaxpix].transpose()
print '\t\tError extension found'
except IndexError:
ytmp = []
print '\t\tCan not find error spectrum -> Using std(data)'
for i in range(len(y)):
ytmp.append(np.std(y[i][yminpix:ymaxpix]))
print y
yerr = ((0*np.abs(y)**0.5).transpose() + np.array(ytmp)).transpose()
try:
yflag = pyfits.getdata(filen, 2)[yminpix : ymaxpix].transpose()
print '\t\tFlag extension found'
except IndexError:
yflag = yerr*0
self.flag[arm] = np.array(yflag)
yerr[abs(yerr) < 1E-5/self.mult] = 1E-5/self.mult
self.erro[arm] = yerr
pix = np.arange(self.head[arm][wlkey]) + 1
self.wave[arm] = (self.head[arm][wlstart] \
+ (pix-self.head[arm][wlpixst])*self.head[arm][wlinc])*mult
wlsel = (self.wlrange[arm][0] < self.wave[arm]) * (self.wave[arm] <self.wlrange[arm][1])
self.wave[arm] = np.array(self.wave[arm][wlsel])
self.erro[arm] = np.array(self.erro[arm][wlsel])
self.data[arm] = np.array(self.data[arm][wlsel]*fluxmult) + const
self.flag[arm] = np.array(self.flag[arm][wlsel])
#print '\t\tSky spectrum to spectrum grid'
tck1 = interpolate.InterpolatedUnivariateSpline(self.skywlair, self.skytrans)
tck2 = interpolate.InterpolatedUnivariateSpline(self.skywlair, self.skyradia)
self.skytel[arm] = tck1(self.wave[arm])
self.skyrad[arm] = tck2(self.wave[arm])
else:
print 'Arm %s not known' %arm
print 'Known arms: uvb, vis, nir, all'
#################################################
def smooth2d(self, arm, smoothx, smoothy = 3):
if len(self.data[arm]) != 0:
self.smooth[arm] = blur_image(self.data[arm], smoothx, smoothy)
else:
print '\t2d data not available'
################################################################################
def vacCor(self, arms):
''' Convert wavelength scale from air (default all ESO instrument incl.
X-shooter) to vacuum, using spec.astro.airtovac '''
print '\tConverting air to vacuum wavelengths'
for arm in arms:
self.wave[arm] = airtovac(self.wave[arm])
self.restwave[arm] = self.wave[arm]/(1+self.redshift)
self.output[arm] = self.output[arm]+'_vac'
################################################################################
def helioCor(self, arms, vhel = 0):
''' Corrects the wavelength scale based on a given helio-centric *correction*
value. This is not the Earth's heliocentric velocity. Using ESO keyword
HIERARCH ESO QC VRAD HELICOR. Alternatively, get the correction value via
IRAF rvcorrect'''
if self.vhel != 0:
vhel = self.vhel
if vhel == 0:
# ESO Header gives the heliocentric radial velocity correction
vhel = self.head[arms[0]]['HIERARCH ESO QC VRAD HELICOR']
self.vhel = vhel
print '\tHeliocentric velocity correction: %.2f km/s:' %vhel
# Relativistic version of 1 + vhelcor/c
lamscale = ((1 + vhel/c) / (1 - vhel/c) )**0.5
print '\tScaling wavelength by: %.6f' %(lamscale)
for arm in arms:
self.wave[arm] *= lamscale
self.restwave[arm] = self.wave[arm]/(1+self.redshift)
self.output[arm] = self.output[arm]+'_helio'
self.skywlair *= lamscale
self.skywl *= lamscale
################################################################################
def ebvCal(self, arms, ebv = '', rv = 3.08):
if ebv == '':
ra, dec = self.head[arms[0]]['RA'], self.head[arms[0]]['DEC']
ebv, std, ref, av = getebv(ra, dec, rv)
if ebv != '':
print '\t\tQueried E_B-V %.3f' %ebv
else:
ebv = 0.
self.ebv = ebv
self.rv = rv
for arm in arms:
self.ebvcorr[arm] = ccmred(self.wave[arm], ebv, rv)
################################################################################
def sn(self, arm, x1 = 1, x2 = -1):
if x2 == -1:
sn = np.median(self.oneddata[arm]/self.onederro[arm])
mfl = [np.median(self.oneddata[arm]),
np.average(self.oneddata[arm])]
else:
x1 = self.wltopix(arm, x1)
x2 = self.wltopix(arm, x2)
sn = np.median(self.oneddata[arm][x1:x2]/self.onederro[arm][x1:x2])
mfl = [np.median(self.oneddata[arm][x1:x2]),
np.average(self.oneddata[arm][x1:x2])]
return sn, mfl
################################################################################
def wltopix(self, arm, wl):
dl = (self.wave[arm][-1]-self.wave[arm][0]) / (len(self.wave[arm]) - 1)
pix = ((wl - self.wave[arm][0]) / dl)
return max(0, int(round(pix)))
################################################################################
def starlight(self, ascii, **kwargs):
return runStar(self, ascii)
def substarlight(self, arm, **kwargs):
substarlight(self, arm, **kwargs)
def scaleSpec(self, arm, **kwargs):
scaleSpec(self, arm, **kwargs)
def applyScale(self, arms, **kwargs):
applyScale(self, arms, **kwargs)
def setMod(self, arms, norm, **kwargs):
setMod(self, arms, norm, **kwargs)
def setAscMod(self, arms, mfile, **kwargs):
setAscMod(self, arms, mfile, **kwargs)
def scaleMod(self, arms, p = ''):
scaleMod(self, arms, p = '')
def makeProf(self, arm, **kwargs):
makeProf(self, arm, **kwargs)
def extr1d(self, arms, **kwargs):
extr1d(self, arms, **kwargs)
def write1d(self, arms, **kwargs):
write1d(self, arms, **kwargs)
def smooth1d(self, arm, **kwargs):
smooth1d(self, arm, **kwargs)
def setCont(self, arm, **kwargs):
setCont(self, arm, **kwargs)
def fitCont(self, arm, **kwargs):
fitCont(self, arm, **kwargs)
def dlaabs(self, arm, nh, **kwargs):
chim = dlaAbs(self, arm, nh, **kwargs)
def fluxCor(self, arm, fluxf, countf):
fluxCor(self, arm, fluxf, countf)
def checkWL(self, arms, **kwargs):
checkWL(self, arms, **kwargs)
def writevp(self, arm, **kwargs):
fname = writeVP(self, arm, **kwargs)
def telcor(self, arms):
telCor(self, arms)
def appltel(self, arms):
applTel(self, arms)
def stacklines(self, lines, **kwargs):
stackLines(self, lines, **kwargs)
| mit |
hdmetor/scikit-learn | sklearn/ensemble/tests/test_forest.py | 20 | 35216 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
# Tiny linearly-separable two-feature problem shared by the "toy" and
# class-shape tests below; T holds held-out points, true_result their labels.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)  # fixed seed -> reproducible permutation
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it (note: reuses the same RNG stream as above)
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# Registries mapping estimator name -> class; the yield-style tests iterate
# over these so every check runs against each forest variant.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}
# Union of all three registries, for checks that apply to every estimator.
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset.

    Fits the named forest classifier twice (default features, then
    max_features=1) and verifies predictions, ensemble length, and the
    shape of the per-tree leaf indices returned by apply().
    """
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Same assertions for both configurations; the second restricts the
    # number of features considered per split.
    for extra_params in ({}, {"max_features": 1}):
        clf = ForestClassifier(n_estimators=10, random_state=1, **extra_params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(10, len(clf))

    # also test apply (on the last fitted forest)
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
    """Regression models should not grow a classes_/n_classes_ attribute."""
    reg = FOREST_REGRESSORS[name](random_state=0)

    # Absent both before ...
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(reg, attr))

    # ... and after fitting.
    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(reg, attr))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
    """Check variable importances for the named classifier on (X, y).

    The caller builds X/y with make_classification(n_features=10,
    n_informative=3), so exactly 3 importances should exceed 0.1.
    Also checks that transform() selects a strict, non-empty feature
    subset and that importances are invariant to a constant rescaling
    of sample_weight.
    """
    ForestClassifier = FOREST_CLASSIFIERS[name]
    for n_jobs in [1, 2]:
        clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
        clf.fit(X, y)
        importances = clf.feature_importances_
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10)
        assert_equal(n_important, 3)

        X_new = clf.transform(X, threshold="mean")
        # BUG FIX: the original `assert_less(0 < X_new.shape[1], X.shape[1])`
        # compared the *boolean* (0 < X_new.shape[1]) -- i.e. 0 or 1 --
        # against X.shape[1], so it could essentially never fail.  The
        # intent was two separate bounds on the selected feature count.
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])

        # Check with sample weights: scaling all weights by a constant
        # must leave the (normalized) importances unchanged.
        sample_weight = np.ones(y.shape)
        sample_weight[y == 1] *= 100

        clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
        clf.fit(X, y, sample_weight=sample_weight)
        importances = clf.feature_importances_
        assert_true(np.all(importances >= 0.0))

        clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
        clf.fit(X, y, sample_weight=3 * sample_weight)
        importances_bis = clf.feature_importances_
        assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
    """Round-trip the named estimator through pickle; score must survive."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    estimator = ForestEstimator(random_state=0)
    estimator.fit(X, y)
    original_score = estimator.score(X, y)

    restored = pickle.loads(pickle.dumps(estimator))
    assert_equal(type(restored), estimator.__class__)
    assert_equal(original_score, restored.score(X, y))
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
    """Predicted probabilities must not depend on n_jobs used for training."""
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    # Draw order (X_train, y_train, X_test) preserved so the RNG stream
    # matches the original test exactly.
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)

    forests = []
    for n_jobs in [1, 2, 3, 8, 16, 32]:
        forest = RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                                        random_state=12345)
        forests.append(forest.fit(X_train, y_train))

    X_test = rng.randn(n_samples, n_features)
    probas = [forest.predict_proba(X_test) for forest in forests]
    for prev, curr in zip(probas, probas[1:]):
        assert_array_almost_equal(prev, curr)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test class_weight works for multi-output"""
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
    """A warm-started refit with fewer trees than already grown must raise."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    est = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1, warm_start=True)
    est.fit(X, y)
    # Shrinking the ensemble is not supported under warm_start.
    est.set_params(n_estimators=4)
    assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
    """String class labels must survive fit/predict unchanged."""
    # FIX: the source had dataset residue ("| bsd-3-clause |") fused onto the
    # final statement, making the function syntactically invalid; removed.
    classifier = RandomForestClassifier()
    CLASSES = 15
    X = np.eye(CLASSES)
    y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]

    result = classifier.fit(X, y).predict(X)
    assert_array_equal(result, y)
sniemi/SamPy | sandbox/src1/pviewer/plot1d.py | 1 | 39992 | #!/usr/bin/env python
from Tkinter import *
import Pmw
import AppShell
import sys, os
import string
from plotAscii import xdisplayfile
from tkSimpleDialog import Dialog
from pylab import *
#from fit import *
import MLab
# Matplotlib single-character color codes, cycled across curves.
colors = ['b','g','r','c','m','y','k','w']
# Line-style strings cycled across curves.
# NOTE(review): '.-.' and '-,' are not standard matplotlib line styles --
# presumably why plotcurves() wraps plot() in a try/except that falls back
# to appending '-'; confirm against the matplotlib format-string spec.
linestyles = ['-','--','-.',':','.-.','-,']
# Marker symbols cycled across curves.
symbols = ['o','^','v','<','>','s','+','x','D','d','1','2','3','4','h','H','p','|','_']
# Legend location names accepted by legend(loc=...); indexed by self.legloc.
legends=['best','upper right','upper left', 'lower left','lower right','right','center left','center right','lower center','upper center','center']
def subplot_i(t, s, id, xlog, ylog):
    """
    subplot_i(t,s,id,xlog,ylog) - create subplot as linear or log scale
    where
      t    - X axis value
      s    - Y variable
      id   - 3-digit RC# subplot position code (row, column, sequence
             number; at most 9 subplots per figure)
      xlog - X axis scale (0 linear, 1 logarithmic)
      ylog - Y axis scale (0 linear, 1 logarithmic)
    """
    subplot(id)
    # Pick the pylab plotting call matching the requested axis scales.
    if xlog and ylog:
        loglog(t, s)
    elif ylog:
        semilogy(t, s)
    elif xlog:
        semilogx(t, s)
    else:
        plot(t, s)
def minmax(y):
    """minmax(y) - return (minimum, maximum) over all values of 2D array y.

    Parameters:
        y - a sequence of non-empty numeric sequences (rows).
    Returns:
        (ymin, ymax) - the global minimum and maximum across every row.
    Raises:
        ValueError - if y or any of its rows is empty (same as the
        original loop-based implementation, via builtin min()/max()).
    """
    # The global extrema are the extrema of the per-row extrema; the
    # builtins replace the original manual accumulation loop.
    ymin = min(min(row) for row in y)
    ymax = max(max(row) for row in y)
    return ymin, ymax
class plot1d(AppShell.AppShell):
usecommandarea = 1
appname = 'plot1d Python Program'
frameWidth = 500
frameHeight = 500
def createButtons(self):
    """createButtons(self) - create command area buttons.

    Registers the four command-area buttons on the AppShell frame; each
    buttonAdd call supplies a balloon help message, a status-bar message,
    and the callback method invoked on click.
    """
    # Terminate the application.
    self.buttonAdd('Close', helpMessage='Close program',
                   statusMessage='Close and terminate this program',
                   command=self.closeup)
    # Overlay all selected curves in a single axes (plotcurves).
    self.buttonAdd('Plot Curves', helpMessage='Use matplotlib plot',
                   statusMessage='Accept input and pass to matplotlib',
                   command=self.plotcurves)
    # One subplot per selected curve, up to 9 (plotsubplot).
    self.buttonAdd('Subplots', helpMessage='Use matplotlib subplot',
                   statusMessage='Plot first 9 selected curves as separate subplots',
                   command=self.plotsubplot)
    # Dismiss every open matplotlib figure window.
    self.buttonAdd('CloseFigures', helpMessage='Close All Figures Windows',
                   statusMessage='Close all figure plot windows',
                   command=self.closeall)
def plotsubplot(self):
    """plotsubplot(self) - plot selected curves as subplots (at most 9).

    Reads the curve-selection checkboxes, opens a new figure, and draws
    each selected curve in its own subplot cell via subplot_i(), honoring
    the current X/Y log-scale toggles.  Silently returns if no data has
    been loaded yet (AttributeError) or nothing is selected.
    """
    try:
        CB = self.getCB()
        if max(CB) < 1:
            return
        xlog = self.xlog.get()
        ylog = self.ylog.get()
        t = self.x
        sl = self.y
        NC = len(sl)
        # NOTE(review): 43# codes lay the cells out on a 4x3 grid even
        # though only 9 subplots are ever drawn -- presumably intentional
        # for window proportions; a 3x3 grid would use 33# codes.
        wid = [431, 432, 433, 434, 435, 436, 437, 438, 439]
        self.fig = self.fig + 1
        figure(self.fig)
        ndd = 0  # number of subplots drawn so far (capped at 9)
        for i in range(0, NC):
            if CB[i] and ndd < 9:
                subplot_i(t, sl[i], wid[ndd], xlog, ylog)
                ndd = ndd + 1
        connect('button_press_event', self.closewin)
        show()
    except ValueError:
        self.message()
        return
    # BUG FIX: the original read `except AttributeError or ValueError:`,
    # which Python evaluates as `except AttributeError:` (the `or` picks
    # the first truthy operand) -- ValueError was never caught here.  Use
    # the tuple form to match the evident intent.
    except (AttributeError, ValueError):
        return
def plotcurves(self):
"plotcurves(self) - plot all selected curves in a figure"
try:
CB = self.getCB()
if max(CB) < 1: return
ylog=self.ylog.get()
xlog=self.xlog.get()
t = self.x
sl = self.y
NC = len(sl)
self.fig = self.fig+1
# close(self.fig)
figure(self.fig,figsize=(5.5,4))
subplot(111)
nc = len(colors)-1
ns = len(symbols)
nt = len(linestyles)
t0 = []
NP = self.spp # 20
for k in range(len(t)):
if k % NP == 0:
t0.append(t[k])
labels =[]
for i in range(0,NC):
if CB[i]:
icl = i % nc
ism = i % ns
sty = colors[icl]
if self.symOn or self.styOn:
if self.symOn: sty = sty + symbols[i%ns]
if self.styOn:
sty = sty + linestyles[i%nt]
else:
sty = sty + '-'
ty = sty
if (self.styOn+self.symOn) == 0: ty=ty+'-'
lbl = self.pvs[i]
labels.append(lbl)
if self.symOn == 0:
t0 = t
s0 = sl[i]
else:
s0 = []
for k in range(len(t)):
if k % NP == 0:
s0.append(sl[i][k])
if ylog or xlog:
if ylog * xlog :
loglog(t0,s0,ty,linewidth=1,label=lbl)
else:
if ylog:
semilogy(t0,s0,ty,linewidth=1,label=lbl)
else:
semilogx(t0,s0,ty,linewidth=1,label=lbl)
else:
if self.symOn or self.styOn:
try:
plot(t0,s0,ty,linewidth=1,label=lbl)
except:
ty = sty + '-'
plot(t0,s0,ty,linewidth=1,label=lbl)
else:
plot(t,sl[i],linewidth=1,label=lbl)
self.labels = labels
gl = self.toggleGridVar.get()
if gl: grid()
xlabel(self.xlabel)
ylabel(self.ylabel)
title(self.title)
if self.legOn: legend(loc=legends[self.legloc])
connect('button_press_event',self.closewin)
show()
except ValueError:
self.message(nm='self.plotcurves()')
return
except AttributeError :
return
def message(self,nm=None,info=None):
"message(self) - display message info"
dialog = Pmw.Dialog(self.form,buttons=('OK','Cancel'),
defaultbutton='OK', title='plot1d-info')
if nm == None:
w = Label(dialog.interior(),text='First, You have to use File->Load\nto load data array from an ascii data file ',pady=20)
else:
if info == None:
w = Label(dialog.interior(),text='Warning: ValueError detected in\n\n --> '+nm,pady=20)
else:
w = Label(dialog.interior(),text= info +'\n\n --> '+nm,pady=20)
w.pack(expand=1,fill=BOTH,padx=4,pady=4)
dialog.activate()
def closeup(self):
"closeup(self) - close window and exit the plot1d program"
close('all')
fo = open('plot1d.config','w')
self.title = self.W[0].get()
self.xlabel = self.W[1].get()
self.ylabel = self.W[2].get()
st = [ self.fname,self.title,self.xlabel,self.ylabel,self.mdapath]
fo.write(str(st))
fo.close()
self.root.destroy()
self.quit()
    def setxlimit(self):
        "setxlimit(self) - set and update xlim for plot figure"
        # Pull the user-edited xmin/xmax entry fields into the range cache
        # and apply them to the current matplotlib figure.
        self.Rg[0] = self.xmin.get()
        self.Rg[1] = self.xmax.get()
        xlim(self.Rg[0],self.Rg[1])
    def setylimit(self):
        "setylimit(self) - set and update ylim for plot figure"
        # (docstring fixed: it previously said 'setxlimit ... xlim')
        # Pull the user-edited ymin/ymax entry fields into the range cache
        # and apply them to the current matplotlib figure.
        self.Rg[2] = self.ymin.get()
        self.Rg[3] = self.ymax.get()
        ylim(self.Rg[2],self.Rg[3])
def setxcid(self):
"setxcid(self) - reset data as column array"
self.columndata()
def setxrid(self):
"setxrid(self) - reset data as row array"
self.rowdata()
def addMoreMenuBar(self):
"addMoreMenuBar(self) - create menubar user interface"
self.menuBar.addmenuitem('File','command',
'Read a MDA 1D array',
label='MDA 1D Data...',
command = self.pickMDA)
self.menuBar.addmenuitem('File','command',
'Read a column or row oriented ASCII file',
label='Load Ascii Data...',
command = self.pickFile)
self.menuBar.addmenuitem('File','command',label='------------------')
self.menuBar.addmenuitem('File','command',
'Print the plot',
label='Print plot1d.jpg',
command = self.Print)
self.menuBar.addmenuitem('File','command',
'Setup printer',
label='Printer... ',
command = self.printer)
self.menuBar.addmenuitem('File','command',label='------------------')
self.menuBar.addmenuitem('File','command',
'Quit this program',
label='Quit',
command = self.closeup)
self.menuBar.addmenuitem('Setup','command',
'Display the loaded file',
label='Display Ascii file...',
command = self.displayFile)
self.menuBar.addmenuitem('Setup','command',label='------------------')
self.menuBar.addmenuitem('Setup','command',
'Row Oriented Data Array',
label='Row Oriented',
command = self.rowdata)
self.menuBar.addmenuitem('Setup','command',
'Column Oriented Data Array',
label='Column Oriented',
command = self.columndata)
self.menuBar.addmenuitem('Setup','command',label='------------------')
self.menuBar.addmenuitem('Setup','command',
'Initialize X data range',
label='Set Full X Range...',
command = self.setupXrange)
self.menuBar.addmenuitem('Setup','command',label='------------------')
self.menuBar.addmenuitem('Setup','command',
'Select All buttons for defined curves',
label='Select All CheckButton',
command = self.CBallon)
self.menuBar.addmenuitem('Setup','command',
'Select None curves',
label='Select None CheckButton',
command = self.CBalloff)
self.menuBar.addmenuitem('Help','command',
'Help Info about plot1d',
label='Help Info...',
command = self.helpinfo)
self.menuBar.addmenu('PlotOption',' ')
self.toggleStyVar = IntVar()
self.menuBar.addmenuitem('PlotOption', 'checkbutton',
'Toggle Line style on or off',
label='Line Style On',
variable = self.toggleStyVar,
command=self.toggleSty)
self.toggleGridVar = IntVar()
self.menuBar.addmenuitem('PlotOption', 'checkbutton',
'Toggle grid lines on or off',
label='Grid Line On',
variable = self.toggleGridVar,
command=self.toggleGrid)
self.menuBar.addmenuitem('PlotOption','command',label='------------------')
self.xlog = IntVar()
self.menuBar.addmenuitem('PlotOption', 'checkbutton',
'Toggle Xlog plot on or off',
label='Log Xaxis On',
variable = self.xlog)
self.ylog = IntVar()
self.menuBar.addmenuitem('PlotOption', 'checkbutton',
'Toggle Ylog plot on or off',
label='Log Yaxis On',
variable = self.ylog)
self.menuBar.addmenuitem('PlotOption','command',label='------------------')
self.toggleSymVar = IntVar()
self.menuBar.addmenuitem('PlotOption', 'checkbutton',
'Toggle symbol on or off',
label='Symbol On',
variable = self.toggleSymVar,
command=self.toggleSym)
self.menuBar.addmenuitem('PlotOption','command',
'Dialog to setup up symbols',
label='Setup Symbols...',
command=self.setsymbols)
self.menuBar.addmenu('Legend','set up legend ')
self.toggleLegVar = IntVar()
self.menuBar.addmenuitem('Legend', 'checkbutton',
'Toggle legend on or off',
label='Legend On',
variable = self.toggleLegVar,
command=self.toggleLegend)
self.menuBar.addmenuitem('Legend', 'command',
'pick desired location',
label='Default Legend Location',
command=self.pickLegpos)
self.menuBar.addmenuitem('Legend','command',label='------------------')
self.menuBar.addmenuitem('Legend','command',
'Dialog to setup up new legend position',
label='User Legend Location...',
command=self.setlegpos)
self.menuBar.addmenuitem('Legend','command',label='------------------')
self.menuBar.addmenuitem('Legend','command',
'Setup legend names',
label='Setup Legend Labels...',
command = self.getlegend)
self.menuBar.addmenu('Analysis','set up polynomial fitting ')
self.menuBar.addmenuitem('Analysis','command',
'Setup curve # for statistic plot',
label='Statistic ...',
command = self.statisticDialog)
self.menuBar.addmenuitem('Analysis','command',
'Setup line # for least square fitting',
label='Fitting ...',
command = self.fittingNumDialog)
self.menuBar.addmenuitem('Analysis','command',
'Setup bin numbers # for histogram plot',
label='Histogram ...',
command = self.histDialog)
self.menuBar.addmenuitem('Analysis','command',
'Setup curve # for errorbar plot',
label='Errorbar ...',
command = self.errorbarDialog)
def statisticDialog(self):
"statisticDialog(self) - statistic calculation"
import Tkinter
if self.stdFrame != None: self.statisticDone()
top=Toplevel()
self.stdFrame=top
fm = Frame(top,borderwidth=0)
top.title('Statistic Dialog...')
Label(fm,text='Curve # [1-'+str(self.nc)+']').grid(row=1,column=1,sticky=W)
asply = tuple(range(1,self.nc+1))
self.stdVar = Pmw.ComboBox(fm,label_text='Pick:',
labelpos=W, listbox_width=5,dropdown=1,
scrolledlist_items=asply)
self.stdVar.selectitem(0)
self.stdVar.grid(row=1,column=2,sticky=W)
Tkinter.Button(fm,text='Close',command=self.statisticDone).grid(row=4,column=1,stick=W)
Tkinter.Button(fm,text='Accept',command=self.statisticCalc).grid(row=4,column=2,stick=W)
Tkinter.Button(fm,text='Next',command=self.statisticNext).grid(row=4,column=3,stick=W)
Tkinter.Button(fm,text='Prev',command=self.statisticPrev).grid(row=4,column=4,stick=W)
Tkinter.Button(fm,text='All...',command=self.statisticAll).grid(row=4,column=5,stick=W)
fm.pack()
fm2 = Frame(top,borderwidth=0)
self.stdL = [StringVar(),StringVar(),StringVar(),StringVar(),StringVar(),StringVar()]
Label(fm2, textvariable=self.stdL[0]).pack(side=TOP)
Label(fm2, textvariable=self.stdL[1]).pack(side=TOP)
Label(fm2, textvariable=self.stdL[2]).pack(side=TOP)
Label(fm2, textvariable=self.stdL[3]).pack(side=TOP)
Label(fm2, textvariable=self.stdL[4]).pack(side=TOP)
Label(fm2, textvariable=self.stdL[5]).pack(side=TOP)
self.stdL[0].set('Mean:')
self.stdL[1].set('Standard Deviation:')
self.stdL[2].set('Ymin,Ymax:')
self.stdL[3].set('Ymax @ Xpos:')
self.stdL[4].set('Y-hpeak @ X-hpeak:')
self.stdL[5].set('FWHM:')
fm2.pack()
def statisticAll(self):
"statisticAll(self) - calculate statistic for all curves"
out = []
for id in range(self.nc):
y = self.y[id-1]
ymin,ymax = min(y),max(y)
y_hpeak = ymin + .5 *(ymax-ymin)
x = self.x
x_hpeak = []
for i in range(self.NPT):
if y[i] >= y_hpeak:
i1 = i
break
for i in range(i1+1,self.NPT):
if y[i] <= y_hpeak:
i2 = i
break
if i == self.NPT-1: i2 = i
x_hpeak = [x[i1],x[i2]]
fwhm = abs(x_hpeak[1]-x_hpeak[0])
for i in range(self.NPT):
if y[i] == ymax:
jmax = i
break
xpeak = x[jmax]
out.append([MLab.mean(y),MLab.std(y),ymin,ymax,xpeak,jmax,y_hpeak,x_hpeak,fwhm])
fo = open('fwhm.txt','w')
fo.write('File: '+self.fname)
for id in range(self.nc):
list = out[id]
fo.write('\n\nCurve #'+str(id+1))
fo.write('\nMean: '+ str(list[0]))
fo.write('\nStandard Deviation: '+ str(list[1]))
fo.write('\nYmin, Ymax: '+ str(list[2]) + ', '+ str(list[3]))
fo.write('\nYmax @ Xpos[i]: ' + str(list[4]) +'[i='+str(list[5])+']')
fo.write('\nY-hpeak @ X-hpeak: ' + str(list[6]) +' @ '+str(list[7]))
fo.write('\nFWHM: ' + str(list[8]))
fo.close()
xdisplayfile('fwhm.txt')
def statisticPrev(self):
"statisticPrev(self) - statistic calculation for previous curve"
id = string.atoi(self.stdVar.get())
id = id - 1
if id < 1: id = self.nc
self.stdVar.selectitem(id-1)
self.statisticCalc()
def statisticNext(self):
"statisticNext(self) - statistic calculation for next curve"
id = string.atoi(self.stdVar.get())
id = id+1
if id > self.nc: id = 1
self.stdVar.selectitem(id-1)
self.statisticCalc()
def statisticCalc(self):
"statisticCalc(self) - statistic calculation "
id = string.atoi(self.stdVar.get())
if id <1 or id > self.nc: return
y = self.y[id-1]
ymin,ymax = min(y),max(y)
y_hpeak = ymin + .5 *(ymax-ymin)
x = self.x
x_hpeak = []
for i in range(self.NPT):
if y[i] >= y_hpeak:
i1 = i
break
for i in range(i1+1,self.NPT):
if y[i] <= y_hpeak:
i2 = i
break
if i == self.NPT-1: i2 = i
if y[i1] == y_hpeak: x_hpeak_l = x[i1]
else:
x_hpeak_l = (y_hpeak-y[i1-1])/(y[i1]-y[i1-1])*(x[i1]-x[i1-1])+x[i1-1]
if y[i2] == y_hpeak: x_hpeak_r = x[i2]
else:
x_hpeak_r = (y_hpeak-y[i2-1])/(y[i2]-y[i2-1])*(x[i2]-x[i2-1])+x[i2-1]
x_hpeak = [x_hpeak_l,x_hpeak_r]
self.fwhm = abs(x_hpeak[1]-x_hpeak[0])
for i in range(self.NPT):
if y[i] == ymax:
jmax = i
break
xpeak = x[jmax]
self.stdL[0].set('Curve #'+str(id)+' Mean: '+ str(MLab.mean(y)))
self.stdL[1].set('Standard Deviation: '+ str(MLab.std(y)))
self.stdL[2].set('Ymin, Ymax: '+ str(ymin) + ', '+ str(ymax))
self.stdL[3].set('Ymax @ Xpos[i]: ' + str(xpeak) +'[i='+str(jmax)+']')
self.stdL[4].set('Y-hpeak @ X-hpeak: ' + str(y_hpeak) +' @ '+str(x_hpeak))
self.stdL[5].set('FWHM: ' + str(self.fwhm))
def statisticDone(self):
"statisticDone(self) - close statistic dialog"
self.stdFrame.destroy()
self.stdFrame = None
def errorbarDialog(self):
"errorbarDialog(self) - dialog to setup errorbar plot"
import Tkinter
if self.errFrame != None: self.errDone()
top=Toplevel()
self.errFrame=top
fm = Frame(top,borderwidth=0)
top.title('Errorbar Dialog...')
self.errVar = [IntVar(),StringVar(),IntVar()]
Label(fm,text='Curve # [1-'+str(self.nc)+']:').grid(row=1,column=1,sticky=W)
asply = tuple(range(1,self.nc+1))
self.errVar[0] = Pmw.ComboBox(fm,label_text='Pick:',
labelpos=W, listbox_width=5,dropdown=1,
scrolledlist_items=asply)
self.errVar[0].grid(row=1,column=2,sticky=W)
self.errVar[0].selectitem(0)
Label(fm,text='Relative Y Errorbar:').grid(row=2,column=1,sticky=W)
Entry(fm,width=10,textvariable=self.errVar[1]).grid(row=2,column=2,sticky=W)
Label(fm,text='Plot Horizontally:').grid(row=3,column=1,sticky=W)
Checkbutton(fm,variable=self.errVar[2],state=NORMAL).grid(row=3,column=2,sticky=W)
self.errVar[1].set('.1')
Tkinter.Button(fm,text='Accept',command=self.errRun).grid(row=4,column=1,stick=W)
Tkinter.Button(fm,text='Close',command=self.errDone).grid(row=4,column=2,stick=W)
fm.pack(fill=BOTH)
def errDone(self):
"errDone(self) - close error bar dialog"
self.errFrame.destroy()
self.errFrame = None
def errRun(self):
"errRun(self) - plot error bar for selected curve"
ic = string.atoi(self.errVar[0].get())
if ic > 0 and ic < self.nc:
err = string.atof(self.errVar[1].get())
hz = self.errVar[2].get()
self.fig = self.fig+1
figure(self.fig,figsize=(5.5,4))
x = self.x
y = self.y
from Numeric import multiply
yerr = multiply(y[ic-1],err)
if hz:
errorbar(y[ic-1],x,xerr=yerr)
else:
errorbar(x,y[ic-1],yerr=yerr)
title('Curve # '+str(ic))
connect('button_press_event',self.closewin)
show()
def histDialog(self):
"histogramDialog(self) - dialog to setup histogram plot"
import Tkinter
if self.histFrame != None: self.histDone()
top=Toplevel()
self.histFrame=top
fm = Frame(top,borderwidth=0)
top.title('Histogram Dialog...')
self.histVar = [StringVar(),IntVar(),IntVar()]
Label(fm,text='Curve # [1-'+str(self.nc)+']:').grid(row=1,column=1,sticky=W)
asply = tuple(range(1,self.nc+1))
self.histVar[0] = Pmw.ComboBox(fm,label_text='Pick:',
labelpos=W, listbox_width=5,dropdown=1,
scrolledlist_items=asply)
self.histVar[0].grid(row=1,column=2,sticky=W)
self.histVar[0].selectitem(0)
Label(fm,text='Number of bins:').grid(row=2,column=1,sticky=W)
Entry(fm,width=10,textvariable=self.histVar[1]).grid(row=2,column=2,sticky=W)
Label(fm,text='Horizontal:').grid(row=3,column=1,sticky=W)
Checkbutton(fm,variable=self.histVar[2],state=NORMAL).grid(row=3,column=2,sticky=W)
self.histVar[1].set(20)
Tkinter.Button(fm,text='Accept',command=self.histRun).grid(row=4,column=1,stick=W)
Tkinter.Button(fm,text='Close',command=self.histDone).grid(row=4,column=2,stick=W)
fm.pack(fill=BOTH)
def histRun(self):
"histRun(self) - do histogram plot for selected curve"
ic = string.atoi(self.histVar[0].get())
if ic > 0 and ic < self.nc:
nbin = self.histVar[1].get()
hz = self.histVar[2].get()
self.fig = self.fig+1
figure(self.fig,figsize=(5.5,4))
y = self.y
if hz:
n,bins,patches = hist(y[ic-1],nbin,orientation='horizontal')
xlabel('Occurance')
else:
n,bins,patches = hist(y[ic-1], nbin)
ylabel('Occurance')
title('Curve # '+str(ic)+': Histogram for bin='+str(nbin))
connect('button_press_event',self.closewin)
show()
print n
def histDone(self):
"histDone(self) - close histogram dialog"
self.histFrame.destroy()
self.histFrame = None
def setupXrange(self,title=None):
"setupXrange(self) - dialog to reset X axis data range"
import Tkinter
top=Toplevel()
self.setXFrame=top
fm = Frame(top,borderwidth=0)
if title == None: ntitle='Set New Data XRange...'
else: ntitle = title
top.title(ntitle)
self.xVar = [StringVar(),StringVar()]
Label(fm,text='Plot Start Coordinate X[0]:').grid(row=1,column=1,sticky=W)
Entry(fm,width=20,textvariable=self.xVar[0]).grid(row=1,column=2,sticky=W)
sz = len(self.x)
Label(fm,text='Plot Stop Coordinate X['+str(sz-1)+']:').grid(row=2,column=1,sticky=W)
Entry(fm,width=20,textvariable=self.xVar[1]).grid(row=2,column=2,sticky=W)
self.xVar[0].set(0)
self.xVar[1].set(sz-1)
Tkinter.Button(fm,text='Close',command=self.setupXrangeDone).grid(row=4,column=1,stick=W)
Tkinter.Button(fm,text='Accept',command=self.setupXrangeRun).grid(row=4,column=2,stick=W)
Tkinter.Button(fm,text='Reset',command=self.setupXrangeReset).grid(row=2,column=3,stick=W)
# if title != None:
# Tkinter.Button(fm,text='Functions Fit...',command=self.otherfit).grid(row=4,column=3,stick=W)
fm.pack(fill=BOTH)
def setupXrangeReset(self):
"setupXrangeReset(self) - set X range value"
self.xVar[0].set(str(self.xcord[0]))
self.xVar[1].set(str(self.xcord[self.NPT-1]))
def setupXrangeDone(self):
"setupXrangeDone(self) - close X range dialog"
self.setXFrame.destroy()
def setupXrangeRun(self):
"setupXrangeRun(self) - accept and setup X range"
x1 = string.atof(self.xVar[0].get())
x2 = string.atof(self.xVar[1].get())
dx = (x2-x1)/(self.NPT-1)
x = arange(x1,x2+.001,dx)
y = self.y
self.initfields(x,y)
def fittingNumDialog(self):
"fittingNumDialog(self) - dialog to enter curve # for fitting"
import Tkinter
if self.fitFrame != None: self.fittingDone()
top=Toplevel()
self.fitFrame=top
fm = Frame(top,borderwidth=0)
top.title('Least Square Fitting Dialog...')
self.fitVar = [IntVar(),IntVar(),StringVar(),StringVar(),StringVar(),StringVar()]
Label(fm,text='Curve # to be fitted').grid(row=1,column=1,sticky=W)
asply = tuple(range(1,self.nc+1))
self.fitVar[0] = Pmw.ComboBox(fm,label_text='[1-'+str(self.nc)+'] Pick:',
labelpos=W, listbox_width=5,dropdown=1,
scrolledlist_items=asply)
self.fitVar[0].grid(row=1,column=2,sticky=W)
self.fitVar[0].selectitem(0)
Label(fm,text='Polynomial order #:').grid(row=2,column=1,sticky=W)
Entry(fm,width=10,textvariable=self.fitVar[1]).grid(row=2,column=2,sticky=W)
Tkinter.Button(fm,text='Polynomial Fit...',command=self.fittingRun).grid(row=4,column=1,stick=W)
# Tkinter.Button(fm,text='Functions Fit...',command=self.otherfit).grid(row=4,column=2,stick=W)
Tkinter.Button(fm,text='Close',command=self.fittingDone).grid(row=4,column=3,stick=W)
# Tkinter.Button(fm,text='Help...',command=self.fittingHelp).grid(row=5,column=1,stick=W)
# Tkinter.Button(fm,text='Try New Fit Xrange...',command=self.otherxfit).grid(row=5,column=2,stick=W)
Label(fm,text='Output Title:').grid(row=15,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[2]).grid(row=15,column=2,sticky=W)
Label(fm,text='Output Xlabel:').grid(row=16,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[3]).grid(row=16,column=2,sticky=W)
Label(fm,text='Output Ylabel:').grid(row=17,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[4]).grid(row=17,column=2,sticky=W)
Label(fm,text='Ouput Fitting Coeffs:').grid(row=18,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[5]).grid(row=18,column=2,sticky=W)
# self.fitVar[0].set(1)
self.fitVar[1].set(2)
self.fitVar[2].set('Fitting Result Curve #')
self.fitVar[3].set('Polynomial Power')
self.fitVar[4].set('Polynomial Regression')
fm.pack(fill=BOTH)
def fittingDone(self):
"fittingDone(self) - close fitting dialog"
self.fitFrame.destroy()
self.fitFrame = None
def fittingHelp(self):
'fittingHelp(self) - help on fitting dialog'
text = 'Polynomial Fit... - use curve # and order # to do polynomial fit\n-->Functions Fit... - use default X value in various fitting functions\n --> Try New Fit Xrange... - if fit failed with default X values use the Xrange dialog to try oher X range'
self.message(nm=text,info='Fitting Info')
def otherxfit(self):
'otherxfit(self) - try fit with different X range'
if self.setXFrame != None: self.setXFrame.destroy()
self.setupXrange(title='Try New Fitting X Range')
self.xVar[0].set('-'+self.xVar[1].get())
def otherfit(self):
'otherfit(self) - pop up Least Square fit dialog'
id = string.atoi(self.fitVar[0].get())
if id > 0 and id <= self.nc:
x = self.x
y = self.y[id-1]
x1 = string.atof(self.xmin.get())
x2 = string.atof(self.xmax.get())
i1 = 0
for i in range(self.NPT):
if x[i] <= x1:
i1 = i
else:
break
for i in range(i1,self.NPT):
if x[i] <= x2:
i2 = i
else:
break
data = []
for k in range(i1,i2+1):
data.append( (x[k],y[k]) )
Fit = FitDialog(self.fitFrame)
Fit.x = x
Fit.y = y
Fit.data = data
Fit.legd = 0
Fit.Wtitle = 'Curve # '+str(id)
Fit.createDialog(self.fitFrame)
def fittingRun(self):
"fittingRun(self) - accept power and curve # to do polynomial fit"
id = string.atoi(self.fitVar[0].get())
pow = self.fitVar[1].get()
if id > 0 and id < self.nc:
x = self.x
y = self.y[id-1]
x1 = string.atof(self.xmin.get())
x2 = string.atof(self.xmax.get())
i1 = 0
for i in range(self.NPT):
if x[i] <= x1:
i1 = i
else:
break
for i in range(i1,self.NPT):
if x[i] <= x2:
i2 = i
else:
break
x = x[i1:i2+1]
y = y[i1:i2+1]
# linear polynomial fit
coeffs = polyfit(x,y,pow)
self.fitVar[5].set(str(coeffs))
z = polyval(coeffs,x)
self.fig=self.fig+1
figure(self.fig,figsize=(5.5,4))
plot(x,y,'b+', x, z ,'-k',linewidth=1)
tit = self.fitVar[2].get() +' ' + str(id)
title(tit)
xtit = self.fitVar[3].get() + ' '+str(pow)
xlabel(xtit)
ytit = self.fitVar[4].get()
ylabel(ytit)
gl = self.toggleGridVar.get()
grid(gl)
connect('button_press_event',self.closewin)
show()
    def closewin(self,event):
        'closewin(self,event) - right mouse button to close plot window'
        # matplotlib mouse-event button 3 is the right button; close the
        # current figure on right-click.
        if event.button == 3: close()
    def closeall(self):
        'closeall(self) - close all plot windows'
        close('all')
        # reset the running figure counter used by the plotting methods
        self.fig = 0
def printer(self):
'printer(self) - dialog to set up printer'
from tv import setupPrinter
root=self.interior()
dialog = setupPrinter(root)
def Print(self):
'Print(self) - save plot to plot1d.png and send to PS printer'
from plot2d import printPicture
savefig('plot1d.png')
ptr = self.SH['printer']
printPicture('plot1d.png',ptr)
def doneLeg(self):
'doneLeg(self) - close setup legend dialog'
self.legFrame.destroy()
self.legFrame = None
def toggleLeg(self):
"toggleLeg(self) - get default legend position"
self.legloc = self.legVar.get()
self.doneLeg()
try:
if self.legOn: legend(self.labels,loc=legends[self.legloc])
except:
pass
def pickLegpos(self):
"pickLegpos(self) - dialog to pick legend position"
import Tkinter
if self.legFrame != None: self.doneLeg()
top=Toplevel()
self.legFrame=top
fm = Frame(top,borderwidth=0)
var = IntVar()
for i in range(len(legends)):
Radiobutton(fm,text=legends[i],value=i,variable=var,
command=self.toggleLeg,
indicatoron=1).pack(anchor=W)
var.set(0)
self.legVar= var
fm.pack(fill=BOTH)
def toggleLegend(self):
"toggleLegend(self) - set Legend on or off"
self.legOn = self.toggleLegVar.get()
def toggleSym(self):
"toggleSym(self) - set symbols on or off"
self.symOn = self.toggleSymVar.get()
def toggleGrid(self):
"toggleGrid(self) - set grid line on or off"
gl = self.toggleGridVar.get()
grid(gl)
def toggleSty(self):
"toggleSty(self) - set line style on or off"
self.styOn = self.toggleStyVar.get()
def getlegpos(self):
"getlegpos(self) - get and set new legposition"
locx = self.locx.get()
locy = self.locy.get()
self.legFrame.destroy()
self.legFrame = None
try:
loc = (string.atof(locx),string.atof(locy))
if self.legOn: legend(self.labels, loc=loc)
except:
pass
def setlegpos(self):
"setlegpos(self) - dialog to set legend position"
import Tkinter
if self.legFrame != None: return
top=Toplevel()
top.title('Enter Legend Location')
self.legFrame=top
fm = Frame(top,borderwidth=0)
self.locx,self.locy = StringVar(), StringVar()
Label(fm,text='ENTER LEGEND LOCATION').grid(row=0,column=1,sticky=W)
Label(fm,text='Normalized X loc[0-1]:').grid(row=1,column=1,sticky=W)
Entry(fm,width=5,textvariable=self.locx).grid(row=1,column=2,sticky=W)
self.locx.set(0.8)
Label(fm,text='Normalized Y loc[0-1]:').grid(row=2,column=1,sticky=W)
Entry(fm,width=5,textvariable=self.locy).grid(row=2,column=2,sticky=W)
self.locy.set(0.8)
fm.pack(fill=BOTH)
Tkinter.Button(top,text='OK',command=self.getlegpos).pack()
def getsymbols(self):
"getsymbols(self) - get and set new symbols"
self.spp = string.atoi(self.sppVar.get())
if self.spp < 1: self.spp = 1
for i in range(len(symbols)):
symbols[i] = self.sym[i].get()
def getsymbolClose(self):
'getsymbolClose(self) - close symbol dialog'
self.SymFrame.destroy()
def setsymbols(self):
"setsymbols(self) - dialog to modify and set new symbols"
import Tkinter
top=Toplevel()
top.title('Symbol Definition')
self.SymFrame=top
sym=[]
for i in range(len(symbols)):
sym.append(StringVar())
fm = Frame(top,borderwidth=0)
for i in range(len(symbols)):
Label(fm,text='symbol for line '+str(i+1)).grid(row=i,column=1,sticky=W)
Entry(fm,width=1,textvariable=sym[i]).grid(row=i,column=2,sticky=W)
sym[i].set(symbols[i])
self.sym = sym
Label(fm,text='DataSteps/symbol').grid(row=20,column=1,sticky=W)
self.sppVar = StringVar()
Entry(fm,width=5,textvariable=self.sppVar).grid(row=20,column=2,sticky=W)
self.sppVar.set(str(self.spp))
fm.pack(fill=BOTH)
fm1 = Frame(top,borderwidth=1)
Tkinter.Button(fm1,text=' OK ',command=self.getsymbols).pack(side=LEFT)
Tkinter.Button(fm1,text='Cancel',command=self.getsymbolClose).pack(side=LEFT)
fm1.pack(fill=BOTH)
def helpinfo(self):
"helpinfo(self) - display plot1d_help.txt with scrolled text"
fname = os.environ['PYTHONSTARTUP']+os.sep+'plot1d_help.txt'
top = Toplevel()
st = Pmw.ScrolledText(top,borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=600,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
def getlegend(self):
"getlegend(self) - dialog to set legends for plot at most 85"
from plotAscii import GetLegends,loadpvs
V = 85*['']
for i in range(self.nc):
V[i] = 'D_'+str(i+1)
file='pvs'
fd = open(file,'w')
fd.write(str(V))
fd.close()
top = self.form
GetLegends(top)
self.pvs = loadpvs()
def displayFile(self):
"displayFile(self) - display picked text file"
if self.fname != '': xdisplayfile(self.fname)
def rowdata(self):
"rowdata(self) - extract x,y vectors from row oriented text array"
try:
data = self.data
nc = len(data)
NPT = len(data[0])
self.NPT = NPT
try:
xid = int(self.xrid.get())
yid = int(self.yrid.get())
except ValueError:
self.message(nm='Row data array - X row #:\nonly single integer # allowed')
return
if xid < 0 or xid >= nc:
x = range(NPT)
y = data[0:nc]
else:
x = data[xid]
y = []
for i in range(nc):
if i >= yid:
y.append(data[i])
self.initfields(x,y)
except AttributeError:
self.message()
return
def columndata(self):
"columndata(self) - extract x,y vectors from column oriented text array"
try:
from plotAscii import transposeA
data = self.data
NPT = len(data)
self.NPT = NPT
NC = len(data[0])
if NC <= 1:
print 'bad file'
return
self.W[0].setentry(self.fname)
da = transposeA(data)
try:
xid = int(self.xcid.get())
yid = int(self.ycid.get())
except ValueError:
self.message(nm='Column: X col #:\nonly single integer # allowed')
return
if xid < 0:
x = range(NPT)
y = da[0:NC]
else:
x = da[xid]
y=[]
for i in range(NC):
if i >= yid:
y.append(da[i])
self.initfields(x,y)
self.xcord = x
except AttributeError:
self.message()
return
def pickMDA(self):
'pickMDA(self) - dialog to pick MDA file and load 1D array into memory'
import tkFileDialog
from readMDA import *
fname = tkFileDialog.askopenfilename( initialdir=self.mdapath,
filetypes=[("MDA File", '.mda'),
("All Files","*")])
if fname == (): return
(self.mdapath, fn) = os.path.split(fname)
self.mdafile = fn # fname
self.W[0].setentry(fname)
d = readMDA(fname,maxdim=1)
try:
if d[1].nd> 0:
# print '1D data found'
self.W[1].setentry(d[1].p[0].fieldName)
x = d[1].p[0].data
self.NPT = len(x)
data = []
labels = []
for i in range(d[1].nd):
data.append(array(d[1].d[i].data))
labels.append(d[1].d[i].name)
self.pvs = labels
self.initfields(x,data)
self.xcord = x
except IndexError:
pass
def pickFile(self):
"pickFile(self) - dialog to pick a text data file"
from plotAscii import readArray
import tkFileDialog
fname = tkFileDialog.askopenfilename(initialdir=self.txtpath,
initialfile='*.txt')
if fname ==(): return
self.fname = fname
data = readArray(fname)
self.data = data
self.columndata()
def initfields(self,x,y):
"initfields(self,x,y) - initialize X,Y ranges fields from x,y vectors"
self.x = x
self.y = y
self.nc = len(y)
xmax = max(x)
xmin = min(x)
self.Rg[0] = xmin
self.Rg[1] = xmax
ymin,ymax = minmax(y)
self.Rg[2] = ymin
self.Rg[3] = ymax
self.xmin.setentry(str(xmin))
self.xmax.setentry(str(xmax))
self.ymin.setentry(str(ymin))
self.ymax.setentry(str(ymax))
self.createCB()
def createCB(self):
"createCB(self) - update CheckButtons to reflect the defined y vectors"
nc = self.nc # 85
checkD=[]
var =[]
for i in range(nc):
di = str(i+1)
var.append(IntVar())
if i < 2:
var[i].set(1)
if i > 9:
ii = i % 10
ij = i / 10
checkD.append((di,ij,ii,NORMAL))
else:
checkD.append((di,0,i,NORMAL))
self.var = var
if self.CBframe != -1: self.CBframe.destroy()
frame = Frame(self.form,borderwidth=0)
for i in range(nc):
Checkbutton(frame,text=checkD[i][0],state=checkD[i][3], anchor=W,
variable=var[i]).grid(row=checkD[i][1],column=checkD[i][2],sticky=W)
frame.pack()
self.CBframe = frame
# self.getCB()
def getCB(self):
"getCB(self) - get the state of all checked buttons"
value=[]
for i in range(self.nc):
value.append(self.var[i].get())
return value
def CBallon(self):
"CBallon(self) - select all check buttons for Y vectors"
if self.nc > 1:
for i in range(self.nc):
self.var[i].set(1)
def CBalloff(self):
"CBalloff(self) - unselect all check buttons for Y vectors"
if self.nc > 1:
for i in range(self.nc):
self.var[i].set(0)
def settitle(self):
"settitle(self) - update the title of plot figure"
title(self.W[0].get())
def setxlabel(self):
"setxlabel(self) - update the xlabel of plot figure"
xlabel(self.W[1].get())
def setylabel(self):
"setylabel(self) - update the ylabel of plot figure"
ylabel(self.W[2].get())
def createFields(self):
"createFields(self) - create modifiable control fields for plot"
self.form = self.interior()
self.W = [StringVar(),StringVar(),StringVar()]
self.W[0] = Pmw.EntryField(self.form,labelpos=W,value=self.title,
label_text = 'Title', command=self.settitle)
self.W[1] = Pmw.EntryField(self.form,labelpos=W,value=self.xlabel,
label_text = 'Xlabel', command=self.setxlabel)
self.W[2] = Pmw.EntryField(self.form,labelpos=W,value=self.ylabel,
label_text = 'Ylabel', command=self.setylabel)
self.W[0].pack(fill=X)
self.W[1].pack(fill=X)
self.W[2].pack(fill=X)
frame = Frame(self.form,borderwidth=0)
self.xmin = Pmw.EntryField(frame,labelpos=W,value=self.Rg[0],
label_text = 'Xrange Xmin:', command=self.setxlimit,
# validate = {'validator':'real','min':0,'max':100,'minstrict':0}
)
self.xmax = Pmw.EntryField(frame,labelpos=W,value=self.Rg[1],
label_text = ' Xmax:', command=self.setxlimit
)
self.xmin.grid(row=1,column=1,sticky=W)
self.xmax.grid(row=1,column=2,sticky=W)
self.ymin = Pmw.EntryField(frame,labelpos=W,value=self.Rg[2],
label_text = 'Yrange Ymin:', command=self.setylimit,)
self.ymax = Pmw.EntryField(frame,labelpos=W,value=self.Rg[3],
label_text = ' Ymax:', command=self.setylimit)
self.ymin.grid(row=2,column=1,sticky=W)
self.ymax.grid(row=2,column=2,sticky=W)
self.xcid = Pmw.EntryField(frame,labelpos=W,value='0',
command=self.setxcid,
label_text = 'Column Data: X col #:')
self.ycid = Pmw.EntryField(frame,labelpos=W,value='1',
label_text = ' Start Y col #:')
self.xrid = Pmw.EntryField(frame,labelpos=W,value='-1',
command=self.setxrid,
label_text = ' Row data: X row #:')
self.yrid = Pmw.EntryField(frame,labelpos=W,value='0',
label_text = ' Start Y row #:')
self.xcid.grid(row=3,column=1,sticky=W)
self.ycid.grid(row=4,column=1,sticky=W)
self.xrid.grid(row=5,column=1,sticky=W)
self.yrid.grid(row=6,column=1,sticky=W)
frame.pack()
def startup(self):
"startup(self) - initialize variables at object plot1d creation"
from plotAscii import readST,loadpvs,initSH
self.CBframe = -1
self.nc = -1
self.fig = 0
self.symOn = 0
self.legOn = 1
self.spp = 1
self.styOn = 0
self.legloc = 0
self.pvs = loadpvs()
self.linestyles = linestyles
self.colors = colors
self.symbols = symbols
self.SH = initSH()
self.stdFrame = None
self.fitFrame = None
self.histFrame = None
self.errFrame = None
self.legFrame = None
self.Fit = None
self.setXFrame = None
if os.path.isfile('plot1d.config'):
lines = readST('plot1d.config')
self.fname = lines[0]
if len(lines[0]) >2:
pth,fnm = os.path.split(lines[0])
self.txtpath = pth
else :
self.txtpath='.'
self.title = lines[1]
self.xlabel = lines[2]
self.ylabel = lines[3]
self.mdapath = lines[4]
self.Rg=['0','100','0','100']
else:
self.fname=''
self.txtpath='.'
self.mdapath='.'
self.title=''
self.xlabel=''
self.ylabel=''
self.Rg=['0','100','0','100']
    def createInterface(self):
        """Build the plot1d GUI.

        Creates the base AppShell widgets, buttons, menus and entry fields,
        restores saved state via startup(), and — if the remembered data
        file still exists — loads it and plots its columns immediately.
        """
        AppShell.AppShell.createInterface(self)
        self.createButtons()
        self.addMoreMenuBar()
        self.startup()
        # Sync the menu checkbuttons with the restored flags.
        self.toggleSymVar.set(self.symOn)
        self.toggleLegVar.set(self.legOn)
        self.createFields()
        if os.path.isfile(self.fname):
            from plotAscii import readArray
            data = readArray(self.fname)
            self.data = data
            self.columndata()
if __name__ == '__main__':
    # NOTE: here 'plt' is the plot1d application instance, not
    # matplotlib.pyplot; run() enters the GUI event loop.
    plt = plot1d()
    plt.run()
| bsd-2-clause |
kimasx/smapp-toolkit | examples/plot_user_per_day_histogram.py | 2 | 3653 | """
Script makes users-per-day histogram going N days back.
Usage:
python plot_user_per_day_histograms.py -s smapp.politics.fas.nyu.edu -p 27011 -u smapp_readOnly -w SECRETPASSWORD -d USElection2016Hillary --days 10 --output-file hillary.png
@jonathanronen 2015/4
"""
import pytz
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
from datetime import datetime, timedelta
from smapp_toolkit.twitter import MongoTweetCollection
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--server', default='smapp.politics.fas.nyu.edu', help="Mongodb server address ['smapp.politics.fas.nyu.edu]")
    parser.add_argument('-p', '--port', type=int, default=27011, help='Mongodb server port [27011]')
    parser.add_argument('-u', '--user', help='Mongodb username [None]')
    parser.add_argument('-w', '--password', help='Mongodb password [None]')
    parser.add_argument('-d', '--database', help='Mongodb database name [None]')
    # BUGFIX: type=int -- without it '--days 10' arrives as the string '10',
    # and range(args.days) / timedelta(days=args.days) below raise TypeError.
    parser.add_argument('--days', type=int, default=7, help='How many days to go back [7]')
    parser.add_argument('--timezone', default='America/New_York', help='Time zone to consider [America/New_York]')
    parser.add_argument('--output-file', default='histogram.png', help='Output file [histogram.png]')
    args = parser.parse_args()

    print("Generating avg tweets/user/day histogram for {}".format(args.database))

    TIMEZONE = pytz.timezone(args.timezone)
    print("Days will be split according to time zone {}".format(args.timezone))
    # Midnight today in the chosen zone; the window considered is
    # [today - days, today).
    today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=TIMEZONE)
    n_days_ago = today - timedelta(days=args.days)
    print("The period being considered is {} to {}".format(
        n_days_ago.strftime('%Y-%m-%d'),
        today.strftime('%Y-%m-%d')))

    print("Connecting to database")
    collection = MongoTweetCollection(args.server, args.port, args.user, args.password, args.database)
    ntweets = collection.since(n_days_ago).until(today).count()
    print("Considering {} tweets".format(ntweets))

    # counts[day_start][user_id] -> number of tweets by that user that day.
    userids = set()
    counts = dict()
    for i in range(args.days):
        day_counts = defaultdict(int)
        day_start = n_days_ago + i*timedelta(days=1)
        day_end = n_days_ago + (i+1)*timedelta(days=1)
        print("Counting for {}".format(day_start.strftime('%Y-%m-%d')))
        for tweet in collection.since(day_start).until(day_end):
            day_counts[tweet['user']['id']] += 1
            userids.add(tweet['user']['id'])
        counts[day_start] = day_counts
    print("Done getting data from database.")

    #### AVERAGE TWEETS PER DAY COUNTS (how many users tweeted x times per day on average)
    user_avg_daily_tweets = { user: np.mean([counts[day][user] for day in counts]) for user in userids }

    fig = plt.figure(figsize=(10,8))
    plt.subplot(212)
    # BUGFIX: materialize dict_values before handing them to numpy (on
    # Python 3, np.log(dict.values()) fails), and pass an integer bin
    # count to np.linspace.
    log_avgs = np.log(list(user_avg_daily_tweets.values()))
    bins = np.linspace(0, max(log_avgs), int(max(log_avgs)*10+1))
    plt.hist(log_avgs, bins, color='r', alpha=.6)
    plt.ylabel('Num users')
    plt.xlabel('log(avg tweets per day)')

    plt.subplot(211)
    plt.title('Average number of tweets per day for users\n{}\n {} to {}'.format(
        args.database,
        n_days_ago.strftime('%Y-%m-%d'),
        today.strftime('%Y-%m-%d')))
    avgs = np.array(list(user_avg_daily_tweets.values()))
    bins = np.linspace(0, max(avgs), int(max(avgs)+1))
    plt.hist(avgs, bins, color='r', alpha=.6)
    plt.ylabel('Num users')
    plt.xlabel('avg tweets per day')

    plt.tight_layout()
    plt.savefig(args.output_file)
    print("Done.")
| gpl-2.0 |
weidel-p/nest-simulator | pynest/examples/spatial/grid_iaf_oc.py | 12 | 1781 | # -*- coding: utf-8 -*-
#
# grid_iaf_oc.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create three populations of iaf_psc_alpha neurons on a 4x3 grid, each with different center
-------------------------------------------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
# Plot the same 4x3 grid layer three times, once per center, to show how
# the `center` parameter shifts the population.
for ctr in [(0.0, 0.0), (-2.0, 2.0), (0.5, 1.0)]:
    plt.figure()
    nest.ResetKernel()

    l1 = nest.Create('iaf_psc_alpha',
                     positions=nest.spatial.grid(shape=[4, 3], extent=[2., 1.5],
                                                 center=ctr))

    nest.PlotLayer(l1, nodesize=50, fig=plt.gcf())

    # beautify: fixed limits and unit aspect so all three centers are
    # directly comparable.  Use gca() -- calling plt.axes() with no
    # arguments is deprecated and creates a *new* axes in recent
    # matplotlib, which would leave the layer plot untouched.
    ax = plt.gca()
    plt.axis([-3, 3, -3, 3])
    ax.set_aspect('equal', 'box')
    ax.set_xticks(np.arange(-3.0, 3.1, 1.0))
    ax.set_yticks(np.arange(-3.0, 3.1, 1.0))
    plt.grid(True)
    # BUGFIX: labels now describe the layer actually created above
    # (shape=[4, 3], extent=[2.0, 1.5]); the old text referred to a
    # different example configuration.
    plt.xlabel('4 Columns, Extent: 2.0, Center: %.1f' % ctr[0])
    plt.ylabel('3 Rows, Extent: 1.5, Center: %.1f' % ctr[1])

    plt.show()
    # plt.savefig('grid_iaf_oc_{}_{}.png'.format(ctr[0], ctr[1]))
| gpl-2.0 |
IshankGulati/scikit-learn | benchmarks/bench_plot_neighbors.py | 101 | 6469 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
    """Return an (N, D) benchmark data matrix.

    'dense' gives reproducible uniform-random data (seeded); 'digits'
    gives the first N digit samples restricted to the D pixel columns
    that are largest in the first sample.  Any other name raises
    ValueError.
    """
    if dataset == 'digits':
        X = datasets.load_digits().data
        # Order columns by decreasing value in the first sample.
        order = np.argsort(X[0])[::-1]
        return X[:, order][:N, :D]
    if dataset == 'dense':
        np.random.seed(0)
        return np.random.random((N, D))
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
                      Drange=2 ** np.arange(7),
                      krange=2 ** np.arange(10),
                      N=1000,
                      D=64,
                      k=5,
                      leaf_size=30,
                      dataset='digits'):
    """Benchmark nearest-neighbor build/query times and draw a bar plot.

    Three panels are produced, varying N (samples), D (dimensions) and
    k (neighbors) in turn while the other two stay at their fiducial
    values, for the 'kd_tree', 'brute' and 'ball_tree' algorithms.
    Construction time is drawn in red, query time stacked above in blue.
    """
    algorithms = ('kd_tree', 'brute', 'ball_tree')
    fiducial_values = {'N': N,
                       'D': D,
                       'k': k}

    #------------------------------------------------------------
    # varying N
    N_results_build = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])
    N_results_query = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])

    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            N_results_build[algorithm][i] = (t1 - t0)
            N_results_query[algorithm][i] = (t2 - t1)

    #------------------------------------------------------------
    # varying D
    D_results_build = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])
    D_results_query = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])

    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=k,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            D_results_build[algorithm][i] = (t1 - t0)
            D_results_query[algorithm][i] = (t2 - t1)

    #------------------------------------------------------------
    # varying k
    k_results_build = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])
    k_results_query = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])

    # BUGFIX: use the fiducial dimension D here.  The original passed DD,
    # the leftover loop variable from the "varying D" section above, so
    # the k benchmark silently ran at the largest D instead of D=64.
    X = get_data(N, D, dataset)

    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            k_results_build[algorithm][i] = (t1 - t0)
            k_results_query[algorithm][i] = (t2 - t1)

    plt.figure(figsize=(8, 11))

    # One subplot per varied quantity; bars are stacked build (red) +
    # query (blue) on a log scale.
    for (sbplt, vals, quantity,
         build_time, query_time) in [(311, Nrange, 'N',
                                      N_results_build,
                                      N_results_query),
                                     (312, Drange, 'D',
                                      D_results_build,
                                      D_results_query),
                                     (313, krange, 'k',
                                      k_results_build,
                                      k_results_query)]:
        ax = plt.subplot(sbplt, yscale='log')
        plt.grid(True)

        tick_vals = []
        tick_labels = []

        # Common baseline so the stacked log-scale bars line up.
        bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
                               for alg in algorithms])

        for i, alg in enumerate(algorithms):
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8

            c_bar = plt.bar(xvals, build_time[alg] - bottom,
                            width, bottom, color='r')
            q_bar = plt.bar(xvals, query_time[alg],
                            width, build_time[alg], color='b')

            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ['%i' % val for val in vals]

            plt.text((i + 0.02) / len(algorithms), 0.98, alg,
                     transform=ax.transAxes,
                     ha='left',
                     va='top',
                     bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))

            plt.ylabel('Time (s)')

        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))

        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)

        title_string = 'Varying %s' % quantity

        descr_string = ''

        for s in 'NDk':
            if s == quantity:
                pass
            else:
                descr_string += '%s = %i, ' % (s, fiducial_values[s])

        descr_string = descr_string[:-2]

        plt.text(1.01, 0.5, title_string,
                 transform=ax.transAxes, rotation=-90,
                 ha='left', va='center', fontsize=20)

        plt.text(0.99, 0.5, descr_string,
                 transform=ax.transAxes, rotation=-90,
                 ha='right', va='center')

    plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)

    plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
                  'upper right')
if __name__ == '__main__':
    # Benchmark both a real (digits) and a synthetic (dense) dataset.
    barplot_neighbors(dataset='digits')
    barplot_neighbors(dataset='dense')
    plt.show()
| bsd-3-clause |
herilalaina/scikit-learn | benchmarks/bench_lasso.py | 111 | 3364 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
    """Time Lasso and LassoLars fits over a grid of problem sizes.

    Parameters
    ----------
    alpha : float
        Regularization strength passed to both estimators.
    n_samples, n_features : sequences of int
        The cartesian product of the two defines the benchmarked problems.
    precompute : bool
        Whether the estimators precompute the Gram matrix.

    Returns
    -------
    (lasso_results, lars_lasso_results) : pair of lists of float
        Elapsed fit times in seconds, in iteration order.

    NOTE(review): ``Lasso`` and ``LassoLars`` are resolved as globals and
    are only imported in the ``__main__`` block below -- this function
    works when the module runs as a script; confirm before reusing it
    elsewhere.
    """
    lasso_results = []
    lars_lasso_results = []

    it = 0

    for ns in n_samples:
        for nf in n_features:
            it += 1
            print('==================')
            print('Iteration %s of %s' % (it, max(len(n_samples),
                                          len(n_features))))
            print('==================')
            n_informative = nf // 10   # 10% of features are informative
            X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
                                          n_informative=n_informative,
                                          noise=0.1, coef=True)

            X /= np.sqrt(np.sum(X ** 2, axis=0))  # Normalize data

            gc.collect()  # keep earlier garbage out of the timing
            print("- benchmarking Lasso")
            clf = Lasso(alpha=alpha, fit_intercept=False,
                        precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lasso_results.append(time() - tstart)

            gc.collect()
            print("- benchmarking LassoLars")
            clf = LassoLars(alpha=alpha, fit_intercept=False,
                            normalize=False, precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lars_lasso_results.append(time() - tstart)

    return lasso_results, lars_lasso_results
if __name__ == '__main__':
    from sklearn.linear_model import Lasso, LassoLars
    import matplotlib.pyplot as plt

    alpha = 0.01  # regularization parameter

    n_features = 10
    # BUGFIX: .astype(int) -- the np.int alias was deprecated in NumPy
    # 1.20 and removed in 1.24; the builtin int is the replacement.
    list_n_samples = np.linspace(100, 1000000, 5).astype(int)
    lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
                                                      [n_features], precompute=True)

    plt.figure('scikit-learn LASSO benchmark results')
    plt.subplot(211)
    plt.plot(list_n_samples, lasso_results, 'b-',
             label='Lasso')
    plt.plot(list_n_samples, lars_lasso_results, 'r-',
             label='LassoLars')
    plt.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features,
                                                                  alpha))
    plt.legend(loc='upper left')
    plt.xlabel('number of samples')
    plt.ylabel('Time (s)')
    plt.axis('tight')

    n_samples = 2000
    list_n_features = np.linspace(500, 3000, 5).astype(int)
    lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
                                                      list_n_features, precompute=False)
    plt.subplot(212)
    plt.plot(list_n_features, lasso_results, 'b-', label='Lasso')
    plt.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
    plt.title('%d samples, alpha=%s' % (n_samples, alpha))
    plt.legend(loc='upper left')
    plt.xlabel('number of features')
    plt.ylabel('Time (s)')
    plt.axis('tight')
    plt.show()
| bsd-3-clause |
jayflo/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
    """Dummy classifier to test recursive feature elimination.

    Implements just enough of the scikit-learn estimator API (fit,
    predict, score, get_params/set_params) for RFE/RFECV to drive it.
    ``score`` depends only on ``foo_param``, so grid scores are fully
    predictable.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # Plain assert instead of nose.tools.assert_true: same check,
        # without depending on the unmaintained nose package here.
        assert len(X) == len(Y)
        self.coef_ = np.ones(X.shape[1], dtype=np.float64)
        return self

    def predict(self, T):
        return T.shape[0]

    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Step function of foo_param: parameter searches have a
        # deterministic optimum.
        if self.foo_param > 1:
            score = 1.
        else:
            score = 0.
        return score

    def get_params(self, deep=True):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        return self
def test_rfe_set_params():
    """RFE must give identical predictions whether the estimator is
    configured directly or via the deprecated estimator_params arg."""
    generator = check_random_state(0)
    iris = load_iris()
    # Append 6 noise features so RFE has something to eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    y_pred = rfe.fit(X, y).predict(X)

    clf = SVC()
    with warnings.catch_warnings(record=True):
        # estimator_params is deprecated
        rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
                  estimator_params={'kernel': 'linear'})
        y_pred2 = rfe.fit(X, y).predict(X)
    assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
    """RFE driven by feature_importances_ (random forest) should select
    the same features as RFE driven by coef_ (linear SVC)."""
    generator = check_random_state(0)
    iris = load_iris()
    # Append 6 noise features so RFE has something to eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    clf = RandomForestClassifier(n_estimators=20,
                                 random_state=generator, max_depth=2)
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    clf_svc = SVC(kernel="linear")
    rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
    rfe_svc.fit(X, y)

    # Check if the supports are equal
    assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
    """Passing estimator_params to RFE/RFECV must raise the exact 0.16
    DeprecationWarning message."""
    deprecation_message = ("The parameter 'estimator_params' is deprecated as "
                           "of version 0.16 and will be removed in 0.18. The "
                           "parameter is no longer necessary because the "
                           "value is set via the estimator initialisation or "
                           "set_params method.")
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                             estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)

    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFECV(estimator=SVC(), step=1, cv=5,
                               estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)
def test_rfe():
    """RFE must filter out the appended noise features and behave
    identically on dense and sparse input."""
    generator = check_random_state(0)
    iris = load_iris()
    # Append 6 noise features; RFE should keep the 4 real ones.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target

    # dense model
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    # sparse model
    clf_sparse = SVC(kernel="linear")
    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)

    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])

    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
    """RFE should run to completion with the minimal MockClassifier and
    keep the expected number of features."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    # dense model
    clf = MockClassifier()
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
    """RFECV must recover exactly the real iris features for dense and
    sparse input, custom scorings, and step sizes 1 and 2."""
    generator = check_random_state(0)
    iris = load_iris()
    # Append 6 noise features; cross-validated RFE should drop them all.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported

    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)

    # All the noisy variable were filtered out
    assert_array_equal(X_r, iris.data)

    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)

    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test using a scorer
    scorer = get_scorer('accuracy')
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test fix on grid_scores: a constant scorer must yield constant
    # grid scores.
    def test_scorer(estimator, X, y):
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))

    # Same as the first two tests, but with step=2
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    rfecv.fit(X, y)
    assert_equal(len(rfecv.grid_scores_), 6)
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
    """RFECV should run with the minimal MockClassifier and report one
    grid score and one rank per input feature."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported

    # Test using the score function
    rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
    """RFE must forward the wrapped estimator's type so cross-validation
    stratifies, and still score well on iris."""
    rfe = RFE(SVC(kernel='linear'))
    assert_equal(rfe._estimator_type, "classifier")
    # make sure that cross-validation is stratified
    iris = load_iris()
    score = cross_val_score(rfe, iris.data, iris.target)
    assert_greater(score.min(), .7)
def test_rfe_min_step():
    """RFE step handling: tiny fractional, ordinary fractional and
    integer steps must all end at n_features // 2 selected features."""
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_samples, n_features = X.shape
    estimator = SVR(kernel="linear")

    # Test when floor(step * n_features) <= 0
    selector = RFE(estimator, step=0.01)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is between (0,1) and floor(step * n_features) > 0
    selector = RFE(estimator, step=0.20)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is an integer
    selector = RFE(estimator, step=5)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
    """Check that the closed-form iteration counts before and after
    optimization #4534 agree with max(ranking_) and grid_scores_ size."""
    # In RFE, 'number_of_subsets_of_features'
    # = the number of iterations in '_fit'
    # = max(ranking_)
    # = 1 + (n_features + step - n_features_to_select - 1) // step
    # After optimization #4534, this number
    # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # This test case is to test their equivalence, refer to #4534 and #3824

    def formula1(n_features, n_features_to_select, step):
        # pre-#4534 integer formula
        return 1 + ((n_features + step - n_features_to_select - 1) // step)

    def formula2(n_features, n_features_to_select, step):
        # post-#4534 ceiling formula; must agree with formula1
        return 1 + np.ceil((n_features - n_features_to_select) / float(step))

    # RFE
    # Case 1, n_features - n_features_to_select is divisible by step
    # Case 2, n_features - n_features_to_select is not divisible by step
    n_features_list = [11, 11]
    n_features_to_select_list = [3, 3]
    step_list = [2, 3]
    for n_features, n_features_to_select, step in zip(
            n_features_list, n_features_to_select_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfe = RFE(estimator=SVC(kernel="linear"),
                  n_features_to_select=n_features_to_select, step=step)
        rfe.fit(X, y)
        # this number also equals to the maximum of ranking_
        assert_equal(np.max(rfe.ranking_),
                     formula1(n_features, n_features_to_select, step))
        assert_equal(np.max(rfe.ranking_),
                     formula2(n_features, n_features_to_select, step))

    # In RFECV, 'fit' calls 'RFE._fit'
    # 'number_of_subsets_of_features' of RFE
    # = the size of 'grid_scores' of RFECV
    # = the number of iterations of the for loop before optimization #4534

    # RFECV, n_features_to_select = 1
    # Case 1, n_features - 1 is divisible by step
    # Case 2, n_features - 1 is not divisible by step

    n_features_to_select = 1
    n_features_list = [11, 10]
    step_list = [2, 2]
    for n_features, step in zip(n_features_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
        rfecv.fit(X, y)

        assert_equal(rfecv.grid_scores_.shape[0],
                     formula1(n_features, n_features_to_select, step))
        assert_equal(rfecv.grid_scores_.shape[0],
                     formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
dharmasam9/moose-core | python/rdesigneur/rmoogli.py | 1 | 6252 | # -*- coding: utf-8 -*-
#########################################################################
## rdesigneur0_4.py ---
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU General Public License version 2 or later.
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import matplotlib
import sys
import moose
import os
# Check if DISPLAY environment variable is properly set. If not, warn the user
# and continue.
# Graphics are optional: detect a usable X display first.
hasDisplay = True
display = os.environ.get('DISPLAY', '' )
if not display:
    hasDisplay = False
    print( "Warning: Environment variable DISPLAY is not set."
           " Did you forget to pass -X or -Y switch to ssh command?\n"
           "Anyway, MOOSE will continue without graphics.\n"
    )

# Moogli (and Qt) are optional too: if the import fails, hasMoogli stays
# False and every public function below degrades to a no-op so scripts
# still run headless.
hasMoogli = True
if hasDisplay:
    try:
        from PyQt4 import QtGui
        import moogli
        import moogli.extensions.moose
        app = QtGui.QApplication(sys.argv)
    except Exception as e:
        print( 'Warning: Moogli not found. All moogli calls will use dummy functions' )
        hasMoogli = False

# Module-level view state; displayMoogli() overwrites these before the
# viewers start.
runtime = 0.0
moogliDt = 1.0
rotation = math.pi / 500.0
def getComptParent( obj ):
    """Return the path of the nearest CompartmentBase ancestor of obj.

    Walks up the MOOSE element tree starting at obj itself; if the root
    element '/' is reached without finding a compartment, the root path
    is returned.
    """
    node = moose.element( obj )
    root = moose.element( '/' )
    while not node.isA[ "CompartmentBase" ]:
        if node == root:
            return node.path
        node = moose.element( node.parent )
    return node.path
#######################################################################
## Here we set up the callback functions for the viewer
def prelude( view ):
    """Set a viewer's initial camera: home position, pitched 90 degrees,
    zoomed out for an overview of the cell."""
    view.home()
    view.pitch( math.pi / 2.0 )
    view.zoom( 0.05 )
    #network.groups["soma"].set( "color", moogli.colors.RED )
#network.groups["soma"].set( "color", moogli.colors.RED )
# This func is used for the first viewer, it has to handle advancing time.
def interlude( view ):
    """Per-frame callback for the FIRST viewer: advance the MOOSE clock by
    one display step, recolor the watched objects from their current field
    values, rotate the scene, and stop once `runtime` is reached."""
    moose.start( moogliDt )
    val = [ moose.getField( i, view.mooField, "double" ) * view.mooScale for i in view.mooObj ]
    view.mooGroup.set("color", val, view.mapper)
    view.yaw( rotation )
    #print moogliDt, len( val ), runtime
    if moose.element("/clock").currentTime >= runtime:
        view.stop()
# This func is used for later viewers, that don't handle advancing time.
def interlude2( view ):
    """Per-frame callback for additional viewers: same recolor/rotate as
    interlude(), but does NOT advance the clock -- the first viewer's
    interlude() owns simulation time."""
    val = [ moose.getField( i, view.mooField, "double" ) * view.mooScale for i in view.mooObj ]
    view.mooGroup.set("color", val, view.mapper)
    view.yaw( rotation )
    if moose.element("/clock").currentTime >= runtime:
        view.stop()
def postlude( view ):
    """Called when the main view stops: hand control back to the
    rdesigneur instance so it can show its regular plots."""
    view.rd.display()
def makeMoogli( rd, mooObj, moogliEntry, fieldInfo ):
    """Build and return a moogli Viewer for one moogdisplay entry.

    Parameters:
        rd -- rdesigneur instance owning the cell (rd.elecid is read).
        mooObj -- list of MOOSE objects whose field is visualized.
        moogliEntry -- display spec; indices used here: [3] field name,
            [4] window title, [5]/[6] lower/upper color-scale bounds.
        fieldInfo -- field metadata; indices used here: [2] display
            scale factor, [3] color-bar title.

    Returns the configured moogli Viewer, or None when moogli is not
    available.
    """
    if not hasMoogli:
        return None
    mooField = moogliEntry[3]
    numMoogli = len( mooObj )
    network = moogli.extensions.moose.read( path = rd.elecid.path, vertices=15)
    #print len( network.groups["spine"].shapes )
    #print len( network.groups["dendrite"].shapes )
    #print len( network.groups["soma"].shapes )
    #soma = network.groups["soma"].shapes[ '/model/elec/soma']
    #print network.groups["soma"].shapes
    soma = network.groups["soma"].shapes[ rd.elecid.path + '/soma[0]']
    if ( mooField == 'n' or mooField == 'conc' ):
        # Chemical fields: show them on subdivisions of the soma shape,
        # one patch per watched object.
        updateGroup = soma.subdivide( numMoogli )
        displayObj = mooObj
    else:
        # Electrical fields: color each object's parent compartment.
        shell = moose.element( '/' )
        displayObj = [i for i in mooObj if i != shell ]
        cpa = [getComptParent( i ) for i in displayObj ]
        updateGroup = moogli.Group( "update" )
        updateShapes = [network.shapes[i] for i in cpa]
        #print "########### Len( cpa, mooObj ) = ", len( cpa ), len( mooObj ), len( updateShapes )
        updateGroup.attach_shapes( updateShapes )
    # Map field values in [moogliEntry[5], moogliEntry[6]] onto a rainbow
    # colormap, clipping out-of-range values at both ends.
    normalizer = moogli.utilities.normalizer(
                    moogliEntry[5], moogliEntry[6],
                    clipleft =True,
                    clipright = True )
    colormap = moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow)
    mapper = moogli.utilities.mapper(colormap, normalizer)

    viewer = moogli.Viewer("Viewer")
    viewer.setWindowTitle( moogliEntry[4] )
    if ( mooField == 'n' or mooField == 'conc' ):
        viewer.attach_shapes( updateGroup.shapes.values())
        viewer.detach_shape(soma)
    else:
        viewer.attach_shapes(network.shapes.values())

    # Only the first viewer created gets the clock-advancing interlude
    # (and the postlude); later viewers just recolor via interlude2.
    if len( rd.moogNames ) == 0:
        view = moogli.View("main-view",
                       prelude=prelude,
                       interlude=interlude,
                       postlude = postlude)
    else:
        view = moogli.View("main-view",
                       prelude=prelude,
                       interlude=interlude2)
    cb = moogli.widgets.ColorBar(id="cb",
            title=fieldInfo[3],
            text_color=moogli.colors.BLACK,
            position=moogli.geometry.Vec3f(0.975, 0.5, 0.0),
            size=moogli.geometry.Vec3f(0.30, 0.05, 0.0),
            text_font="/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-R.ttf",
            orientation=math.pi / 2.0,
            text_character_size=16,
            label_formatting_precision=0,
            colormap=moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow),
            color_resolution=100,
            scalar_range=moogli.geometry.Vec2f(
                moogliEntry[5],
                moogliEntry[6]))
    cb.set_num_labels(3)
    view.attach_color_bar(cb)
    # Stash everything the interlude callbacks need on the view itself.
    view.rd = rd
    view.mooObj = displayObj
    view.mooGroup = updateGroup
    view.mooField = mooField
    view.mooScale = fieldInfo[2]
    view.mapper = mapper
    viewer.attach_view(view)
    return viewer
def displayMoogli( rd, _dt, _runtime, _rotation ):
    """Show every moogli viewer registered on rd and run the Qt loop.

    Parameters:
        rd -- rdesigneur instance whose rd.moogNames lists the viewers.
        _dt -- simulation time advanced per display frame.
        _runtime -- total simulation time after which views stop.
        _rotation -- yaw per frame, in radians.

    Returns the Qt event-loop exit code, or None when moogli is
    unavailable.  Side effect: overwrites the module-level runtime,
    moogliDt and rotation used by the interlude callbacks.
    """
    if not hasMoogli:
        return None
    global runtime
    global moogliDt
    global rotation
    runtime = _runtime
    moogliDt = _dt
    rotation = _rotation
    for i in rd.moogNames:
        i.show()
        i.start()
    #viewer.showMaximized()
    #viewer.show()
    #viewer.start()
    return app.exec_()
| gpl-3.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/series/indexing/test_loc.py | 2 | 4378 | import numpy as np
import pytest
import pandas as pd
from pandas import Series, Timestamp
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize("val,expected", [(2 ** 63 - 1, 3), (2 ** 63, 4)])
def test_loc_uint64(val, expected):
    """.loc lookups at and beyond int64's maximum must work on a
    uint64-valued index (see gh-19399)."""
    # see gh-19399
    s = Series({2 ** 63 - 1: 3, 2 ** 63: 4})
    assert s.loc[val] == expected
def test_loc_getitem(test_data):
    """Basic .loc reads: label lists, positional iloc slices, label
    slices, boolean masks, and scalar label lookups."""
    inds = test_data.series.index[[3, 4, 7]]
    assert_series_equal(test_data.series.loc[inds], test_data.series.reindex(inds))
    assert_series_equal(test_data.series.iloc[5::2], test_data.series[5::2])

    # slice with indices
    d1, d2 = test_data.ts.index[[5, 15]]
    result = test_data.ts.loc[d1:d2]
    expected = test_data.ts.truncate(d1, d2)
    assert_series_equal(result, expected)

    # boolean
    mask = test_data.series > test_data.series.median()
    assert_series_equal(test_data.series.loc[mask], test_data.series[mask])

    # ask for index value
    assert test_data.ts.loc[d1] == test_data.ts[d1]
    assert test_data.ts.loc[d2] == test_data.ts[d2]
def test_loc_getitem_not_monotonic(test_data):
    """Label slicing a non-monotonic index must raise KeyError, for both
    reads and writes."""
    d1, d2 = test_data.ts.index[[5, 15]]

    ts2 = test_data.ts[::2][[1, 2, 0]]   # deliberately out of order

    msg = r"Timestamp\('2000-01-10 00:00:00'\)"
    with pytest.raises(KeyError, match=msg):
        ts2.loc[d1:d2]
    with pytest.raises(KeyError, match=msg):
        ts2.loc[d1:d2] = 0
def test_loc_getitem_setitem_integer_slice_keyerrors():
    """On an even-integer index, iloc slicing works positionally, .loc
    slicing works by label, and .loc on a non-monotonic index raises."""
    s = Series(np.random.randn(10), index=list(range(0, 20, 2)))

    # this is OK
    cp = s.copy()
    cp.iloc[4:10] = 0
    assert (cp.iloc[4:10] == 0).all()

    # so is this
    cp = s.copy()
    cp.iloc[3:11] = 0
    assert (cp.iloc[3:11] == 0).values.all()

    # positional slice and label slice select the same rows here
    result = s.iloc[2:6]
    result2 = s.loc[3:11]
    expected = s.reindex([4, 6, 8, 10])

    assert_series_equal(result, expected)
    assert_series_equal(result2, expected)

    # non-monotonic, raise KeyError
    s2 = s.iloc[list(range(5)) + list(range(9, 4, -1))]
    with pytest.raises(KeyError, match=r"^3$"):
        s2.loc[3:11]
    with pytest.raises(KeyError, match=r"^3$"):
        s2.loc[3:11] = 0
def test_loc_getitem_iterator(test_data):
    """.loc must accept an iterator of labels, not just list-likes."""
    idx = iter(test_data.series.index[:10])
    result = test_data.series.loc[idx]
    assert_series_equal(result, test_data.series[:10])
def test_loc_setitem_boolean(test_data):
    """Setting through .loc with a boolean mask must match setting
    through plain [] with the same mask."""
    mask = test_data.series > test_data.series.median()

    result = test_data.series.copy()
    result.loc[mask] = 0
    expected = test_data.series
    expected[mask] = 0
    assert_series_equal(result, expected)
def test_loc_setitem_corner(test_data):
    """.loc setitem with existing labels works; including a missing
    label must raise KeyError with the expected message."""
    inds = list(test_data.series.index[[5, 8, 12]])
    test_data.series.loc[inds] = 5
    msg = r"\['foo'\] not in index"
    with pytest.raises(KeyError, match=msg):
        test_data.series.loc[inds + ["foo"]] = 5
def test_basic_setitem_with_labels(test_data):
    """__setitem__ with label lists/slices must match .loc assignment."""
    indices = test_data.ts.index[[5, 10, 15]]

    cp = test_data.ts.copy()
    exp = test_data.ts.copy()
    cp[indices] = 0
    exp.loc[indices] = 0
    assert_series_equal(cp, exp)

    cp = test_data.ts.copy()
    exp = test_data.ts.copy()
    cp[indices[0] : indices[2]] = 0
    exp.loc[indices[0] : indices[2]] = 0
    assert_series_equal(cp, exp)

    # integer indexes, be careful
    s = Series(np.random.randn(10), index=list(range(0, 20, 2)))
    inds = [0, 4, 6]
    arr_inds = np.array([0, 4, 6])

    # BUG FIX: the original assigned into `s` (leaving `cp` and `exp`
    # untouched), so both assert_series_equal calls compared two identical
    # unmodified copies and were vacuous.  Assign into the copies, matching
    # the pattern used with `indices` above.
    cp = s.copy()
    exp = s.copy()
    cp[inds] = 0
    exp.loc[inds] = 0
    assert_series_equal(cp, exp)

    cp = s.copy()
    exp = s.copy()
    cp[arr_inds] = 0
    exp.loc[arr_inds] = 0
    assert_series_equal(cp, exp)

    # labels absent from the index raise
    inds_notfound = [0, 4, 5, 6]
    arr_inds_notfound = np.array([0, 4, 5, 6])
    msg = r"\[5\] not contained in the index"
    with pytest.raises(ValueError, match=msg):
        s[inds_notfound] = 0
    with pytest.raises(Exception, match=msg):
        s[arr_inds_notfound] = 0

    # GH12089: assigning tz-aware values must preserve the timezone
    s = Series(
        pd.date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"]
    )
    s2 = s.copy()
    expected = Timestamp("2011-01-03", tz="US/Eastern")
    s2.loc["a"] = expected
    result = s2.loc["a"]
    assert result == expected

    s2 = s.copy()
    s2.iloc[0] = expected
    result = s2.iloc[0]
    assert result == expected

    s2 = s.copy()
    s2["a"] = expected
    result = s2["a"]
    assert result == expected
| apache-2.0 |
juliusbierk/scikit-image | doc/ext/plot2rst.py | 13 | 20439 | """
Example generation from python files.
Generate the rst files for the examples by iterating over the python
example files. Files that generate images should start with 'plot'.
To generate your own examples, add this extension to the list of
``extensions`` in your Sphinx configuration file. In addition, make sure the
example directory(ies) in `plot2rst_paths` (see below) points to a directory
with examples named `plot_*.py` and include an `index.rst` file.
This code was adapted from scikit-image, which took it from scikit-learn.
Options
-------
The ``plot2rst`` extension accepts the following options:
plot2rst_paths : length-2 tuple, or list of tuples
Tuple or list of tuples of paths to (python plot, generated rst) files,
i.e. (source, destination). Note that both paths are relative to Sphinx
'source' directory. Defaults to ('../examples', 'auto_examples')
plot2rst_rcparams : dict
Matplotlib configuration parameters. See
http://matplotlib.sourceforge.net/users/customizing.html for details.
plot2rst_default_thumb : str
Path (relative to doc root) of default thumbnail image.
plot2rst_thumb_shape : float
Shape of thumbnail in pixels. The image is resized to fit within this shape
and the excess is filled with white pixels. This fixed size ensures that
that gallery images are displayed in a grid.
plot2rst_plot_tag : str
When this tag is found in the example file, the current plot is saved and
tag is replaced with plot path. Defaults to 'PLOT2RST.current_figure'.
Suggested CSS definitions
-------------------------
div.body h2 {
border-bottom: 1px solid #BBB;
clear: left;
}
/*---- example gallery ----*/
.gallery.figure {
float: left;
margin: 1em;
}
.gallery.figure img{
display: block;
margin-left: auto;
margin-right: auto;
width: 200px;
}
.gallery.figure .caption {
width: 200px;
text-align: center !important;
}
"""
import os
import re
import shutil
import token
import tokenize
import traceback
import itertools
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from skimage import io
from skimage import transform
from skimage.util.dtype import dtype_range
from notebook import Notebook
from docutils.core import publish_parts
from sphinx.domains.python import PythonDomain
# rst snippet: include the example source starting after the first text block.
LITERALINCLUDE = """
.. literalinclude:: {src_name}
:lines: {code_start}-
"""

# rst snippet: download link for the raw python source.
CODE_LINK = """
**Python source code:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""

# rst snippet: download link for the generated IPython notebook.
NOTEBOOK_LINK = """
**IPython Notebook:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""

# Hidden toctree entry written into the gallery index.
TOCTREE_TEMPLATE = """
.. toctree::
:hidden:
%s
"""

# Centered image directive for figures saved by an example.
IMAGE_TEMPLATE = """
.. image:: images/%s
:align: center
"""

# Thumbnail figure entry shown in the gallery grid.
GALLERY_IMAGE_TEMPLATE = """
.. figure:: %(thumb)s
:figclass: gallery
:target: ./%(source)s.html
:ref:`example_%(link_name)s`
"""
class Path(str):
    """Immutable path string with convenience filesystem helpers.

    Methods that produce new paths return `Path` instances (not plain
    `str`), so helper calls can be chained.
    """

    def __new__(cls, path):
        # BUG FIX (idiom): the first argument of __new__ is the class, not
        # an instance; it was misleadingly named `self`.
        return str.__new__(cls, path)

    @property
    def isdir(self):
        """Return True if this path is an existing directory."""
        return os.path.isdir(self)

    @property
    def exists(self):
        """Return True if path exists"""
        return os.path.exists(self)

    def pjoin(self, *args):
        """Join paths. `p` prefix prevents confusion with string method."""
        return self.__class__(os.path.join(self, *args))

    def psplit(self):
        """Split paths. `p` prefix prevents confusion with string method."""
        return [self.__class__(p) for p in os.path.split(self)]

    def makedirs(self):
        """Create this directory (and parents) if it does not exist yet."""
        if not self.exists:
            os.makedirs(self)

    def listdir(self):
        """Return the names of the entries in this directory."""
        return os.listdir(self)

    def format(self, *args, **kwargs):
        # str.format would return a plain str; re-wrap as Path.
        return self.__class__(super(Path, self).format(*args, **kwargs))

    def __add__(self, other):
        # Concatenation also stays a Path.
        return self.__class__(super(Path, self).__add__(other))

    def __iadd__(self, other):
        return self.__add__(other)
def setup(app):
    """Register the extension: hook gallery generation, declare options."""
    app.connect('builder-inited', generate_example_galleries)
    # (name, default) pairs; all are re-read on config change (True flag).
    option_defaults = (
        ('plot2rst_paths', ('../examples', 'auto_examples')),
        ('plot2rst_rcparams', {}),
        ('plot2rst_default_thumb', None),
        ('plot2rst_thumb_shape', (250, 300)),
        ('plot2rst_plot_tag', 'PLOT2RST.current_figure'),
        ('plot2rst_index_name', 'index'),
    )
    for option_name, default in option_defaults:
        app.add_config_value(option_name, default, True)
def generate_example_galleries(app):
    """Build every configured (examples dir -> rst dir) gallery pair."""
    cfg = app.builder.config
    # Sphinx >= 1.3 allows a list of source suffixes; use the first one.
    suffix = cfg.source_suffix
    cfg.source_suffix_str = suffix[0] if isinstance(suffix, list) else suffix
    doc_src = Path(os.path.abspath(app.builder.srcdir))  # path/to/doc/source
    # Normalize a single (src, dest) tuple into a list of tuples.
    if isinstance(cfg.plot2rst_paths, tuple):
        cfg.plot2rst_paths = [cfg.plot2rst_paths]
    for plot_path, rst_path in (tuple(Path(p) for p in pair)
                                for pair in cfg.plot2rst_paths):
        generate_examples_and_gallery(doc_src.pjoin(plot_path),
                                      doc_src.pjoin(rst_path), cfg)
def generate_examples_and_gallery(example_dir, rst_dir, cfg):
    """Generate rst from examples and create gallery to showcase examples."""
    if not example_dir.exists:
        print("No example directory found at", example_dir)
        return
    rst_dir.makedirs()
    # A single index file aggregates every gallery section.
    gallery_index = open(rst_dir.pjoin('index' + cfg.source_suffix_str), 'w')
    # Recurse exactly one level deep: flat is better than nested.
    write_gallery(gallery_index, example_dir, rst_dir, cfg)
    for entry in sorted(example_dir.listdir()):
        example_sub = example_dir.pjoin(entry)
        if not example_sub.isdir:
            continue
        rst_sub = rst_dir.pjoin(entry)
        rst_sub.makedirs()
        write_gallery(gallery_index, example_sub, rst_sub, cfg, depth=1)
    gallery_index.flush()
def write_gallery(gallery_index, src_dir, rst_dir, cfg, depth=0):
    """Generate the rst files for an example directory, i.e. gallery.

    Write rst files from python examples and add example links to gallery.

    Parameters
    ----------
    gallery_index : file
        Index file for plot gallery.
    src_dir : 'str'
        Source directory for python examples.
    rst_dir : 'str'
        Destination directory for rst files generated from python examples.
    cfg : config object
        Sphinx config object created by Sphinx.
    depth : int
        Nesting depth of `src_dir` below the top example directory; used
        to build relative link paths.
    """
    index_name = cfg.plot2rst_index_name + cfg.source_suffix_str
    gallery_template = src_dir.pjoin(index_name)
    if not os.path.exists(gallery_template):
        # A gallery directory must ship its own index template; skip others.
        print(src_dir)
        print(80*'_')
        print('Example directory %s does not have a %s file'
              % (src_dir, index_name))
        print('Skipping this directory')
        print(80*'_')
        return
    gallery_description = open(gallery_template).read()
    gallery_index.write('\n\n%s\n\n' % gallery_description)
    rst_dir.makedirs()
    # Sort so that plot_*.py examples appear first (see _plots_first).
    examples = [fname for fname in sorted(src_dir.listdir(), key=_plots_first)
                if fname.endswith('py')]
    ex_names = [ex[:-3] for ex in examples]  # strip '.py' extension
    if depth == 0:
        sub_dir = Path('')
    else:
        sub_dir_list = src_dir.psplit()[-depth:]
        sub_dir = Path('/'.join(sub_dir_list) + '/')
    joiner = '\n %s' % sub_dir
    gallery_index.write(TOCTREE_TEMPLATE % (sub_dir + joiner.join(ex_names)))
    for src_name in examples:
        try:
            write_example(src_name, src_dir, rst_dir, cfg)
        except Exception:
            # One broken example must not abort the whole gallery build:
            # report it and move on.
            print("Exception raised while running:")
            print("%s in %s" % (src_name, src_dir))
            print('~' * 60)
            traceback.print_exc()
            print('~' * 60)
            continue
        # Build the cross-reference label for the gallery thumbnail.
        link_name = sub_dir.pjoin(src_name)
        link_name = link_name.replace(os.path.sep, '_')
        if link_name.startswith('._'):
            link_name = link_name[2:]
        info = {}
        info['thumb'] = sub_dir.pjoin('images/thumb', src_name[:-3] + '.png')
        info['source'] = sub_dir + src_name[:-3]
        info['link_name'] = link_name
        gallery_index.write(GALLERY_IMAGE_TEMPLATE % info)
def _plots_first(fname):
"""Decorate filename so that examples with plots are displayed first."""
if not (fname.startswith('plot') and fname.endswith('.py')):
return 'zz' + fname
return fname
def write_example(src_name, src_dir, rst_dir, cfg):
    """Write rst file from a given python example.

    Parameters
    ----------
    src_name : str
        Name of example file.
    src_dir : 'str'
        Source directory for python examples.
    rst_dir : 'str'
        Destination directory for rst files generated from python examples.
    cfg : config object
        Sphinx config object created by Sphinx.
    """
    last_dir = src_dir.psplit()[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = Path('')
    else:
        last_dir += '_'
    src_path = src_dir.pjoin(src_name)
    example_file = rst_dir.pjoin(src_name)
    shutil.copyfile(src_path, example_file)
    # Output layout: images/, images/thumb/ and notebook/ under rst_dir.
    image_dir = rst_dir.pjoin('images')
    thumb_dir = image_dir.pjoin('thumb')
    notebook_dir = rst_dir.pjoin('notebook')
    image_dir.makedirs()
    thumb_dir.makedirs()
    notebook_dir.makedirs()
    base_image_name = os.path.splitext(src_name)[0]
    # Format string: one file per figure number.
    image_path = image_dir.pjoin(base_image_name + '_{0}.png')
    basename, py_ext = os.path.splitext(src_name)
    rst_path = rst_dir.pjoin(basename + cfg.source_suffix_str)
    notebook_path = notebook_dir.pjoin(basename + '.ipynb')
    # Skip regeneration when all outputs are newer than the source.
    if _plots_are_current(src_path, image_path) and rst_path.exists and \
            notebook_path.exists:
        return
    print('plot2rst: %s' % basename)
    blocks = split_code_and_text_blocks(example_file)
    if blocks[0][2].startswith('#!'):
        blocks.pop(0)  # don't add shebang line to rst file.
    rst_link = '.. _example_%s:\n\n' % (last_dir + src_name)
    figure_list, rst = process_blocks(blocks, src_path, image_path, cfg)
    has_inline_plots = any(cfg.plot2rst_plot_tag in b[2] for b in blocks)
    if has_inline_plots:
        example_rst = ''.join([rst_link, rst])
    else:
        # print first block of text, display all plots, then display code.
        first_text_block = [b for b in blocks if b[0] == 'text'][0]
        label, (start, end), content = first_text_block
        figure_list = save_all_figures(image_path)
        rst_blocks = [IMAGE_TEMPLATE % f.lstrip('/') for f in figure_list]
        example_rst = rst_link
        # `content` is the raw docstring literal; eval turns it into text.
        example_rst += eval(content)
        example_rst += ''.join(rst_blocks)
        code_info = dict(src_name=src_name, code_start=end)
        example_rst += LITERALINCLUDE.format(**code_info)
    example_rst += CODE_LINK.format(src_name)
    ipnotebook_name = src_name.replace('.py', '.ipynb')
    ipnotebook_name = './notebook/' + ipnotebook_name
    example_rst += NOTEBOOK_LINK.format(ipnotebook_name)
    f = open(rst_path, 'w')
    f.write(example_rst)
    f.flush()
    # First saved figure becomes the gallery thumbnail.
    thumb_path = thumb_dir.pjoin(src_name[:-3] + '.png')
    first_image_file = image_dir.pjoin(figure_list[0].lstrip('/'))
    if first_image_file.exists:
        first_image = io.imread(first_image_file)
        save_thumbnail(first_image, thumb_path, cfg.plot2rst_thumb_shape)
    if not thumb_path.exists:
        if cfg.plot2rst_default_thumb is None:
            print("WARNING: No plots found and default thumbnail not defined.")
            print("Specify 'plot2rst_default_thumb' in Sphinx config file.")
        else:
            shutil.copy(cfg.plot2rst_default_thumb, thumb_path)
    # Export example to IPython notebook
    nb = Notebook()
    # Add sphinx roles to the examples, otherwise docutils
    # cannot compile the ReST for the notebook
    sphinx_roles = PythonDomain.roles.keys()
    preamble = '\n'.join('.. role:: py:{0}(literal)\n'.format(role)
                         for role in sphinx_roles)
    # Grab all references to inject them in cells where needed
    ref_regexp = re.compile('\n(\.\. \[(\d+)\].*(?:\n[ ]{7,8}.*)+)')
    math_role_regexp = re.compile(':math:`(.*?)`')
    text = '\n'.join((content for (cell_type, _, content) in blocks
                      if cell_type != 'code'))
    references = re.findall(ref_regexp, text)
    for (cell_type, _, content) in blocks:
        if cell_type == 'code':
            nb.add_cell(content, cell_type='code')
        else:
            # Strip the docstring quotes; raw strings keep their backslashes.
            if content.startswith('r'):
                content = content.replace('r"""', '')
                escaped = False
            else:
                content = content.replace('"""', '')
                escaped = True
            if not escaped:
                content = content.replace("\\", "\\\\")
            content = content.replace('.. seealso::', '**See also:**')
            content = re.sub(math_role_regexp, r'$\1$', content)
            # Remove math directive when rendering notebooks
            # until we implement a smarter way of capturing and replacing
            # its content
            content = content.replace('.. math::', '')
            if not content.strip():
                continue
            content = (preamble + content).rstrip('\n')
            content = '\n'.join([line for line in content.split('\n') if
                                 not line.startswith('.. image')])
            # Remove reference links until we can figure out a better way to
            # preserve them
            for (reference, ref_id) in references:
                ref_tag = '[{0}]_'.format(ref_id)
                if ref_tag in content:
                    content = content.replace(ref_tag, ref_tag[:-1])
            html = publish_parts(content, writer_name='html')['html_body']
            nb.add_cell(html, cell_type='markdown')
    with open(notebook_path, 'w') as f:
        f.write(nb.json())
def save_thumbnail(image, thumb_path, shape):
    """Save `image` as a thumbnail with the specified `shape`.

    The image is first resized to fit within `shape` and then centered on
    a background canvas of exactly `shape` before saving.
    """
    # Uniform scale so the resized image fits inside the target shape.
    scale = min(float(target) / actual
                for target, actual in zip(shape, image.shape))
    small_shape = (scale * np.asarray(image.shape[:2])).astype(int)
    small_image = transform.resize(image, small_shape)
    if len(image.shape) == 3:
        # Keep the color channels of the original image.
        shape = shape + (image.shape[2],)
    # Fill with the dtype's maximum value, i.e. white.
    background_value = dtype_range[small_image.dtype.type][1]
    thumb = background_value * np.ones(shape, dtype=small_image.dtype)
    row = (shape[0] - small_shape[0]) // 2
    col = (shape[1] - small_shape[1]) // 2
    thumb[row:row + small_shape[0], col:col + small_shape[1]] = small_image
    io.imsave(thumb_path, thumb)
def _plots_are_current(src_path, image_path):
    """Return True when the first generated image is newer than the source."""
    first_image_file = Path(image_path.format(1))
    if not first_image_file.exists:
        return False
    return _mod_time(first_image_file) > _mod_time(src_path)
def _mod_time(file_path):
return os.stat(file_path).st_mtime
def split_code_and_text_blocks(source_file):
    """Return list with source file separated into code and text blocks.

    Returns
    -------
    blocks : list of (label, (start, end+1), content)
        List where each element is a tuple with the label ('text' or 'code'),
        the (start, end+1) line numbers, and content string of block.
    """
    block_edges, idx_first_text_block = get_block_edges(source_file)
    with open(source_file) as f:
        source_lines = f.readlines()
    # Every other block should be a text block
    idx_text_block = np.arange(idx_first_text_block, len(block_edges), 2)
    blocks = []
    # Consecutive edge pairs delimit the blocks.
    slice_ranges = zip(block_edges[:-1], block_edges[1:])
    for i, (start, end) in enumerate(slice_ranges):
        block_label = 'text' if i in idx_text_block else 'code'
        # subtract 1 from indices b/c line numbers start at 1, not 0
        content = ''.join(source_lines[start-1:end-1])
        blocks.append((block_label, (start, end), content))
    return blocks
def get_block_edges(source_file):
    """Return starting line numbers of code and text blocks.

    Returns
    -------
    block_edges : list of int
        Line number for the start of each block; the end row of the file
        is appended as a closing edge when needed.
    idx_first_text_block : {0 | 1}
        0 if first block is text then, else 1 (second block better be text).
    """
    block_edges = []
    with open(source_file) as f:
        for tok in tokenize.generate_tokens(f.readline):
            end_row = tok[3][0]
            # A module-level string literal (column 0) is a text block.
            if tok[0] == token.STRING and tok[2][1] == 0:
                # Record its start and the line after it (for later slicing).
                block_edges.extend((tok[2][0], end_row + 1))
    idx_first_text_block = 0
    if block_edges[0] != 1:
        # Example doesn't start with a text block: code begins at line 1.
        block_edges.insert(0, 1)
        idx_first_text_block = 1
    if block_edges[-1] != end_row:
        # Example doesn't end with a text block: close at the last row
        # seen by the tokenizer (end state of the loop above).
        block_edges.append(end_row)
    return block_edges, idx_first_text_block
def process_blocks(blocks, src_path, image_path, cfg):
    """Run source, save plots as images, and convert blocks to rst.

    Parameters
    ----------
    blocks : list of block tuples
        Code and text blocks from example. See `split_code_and_text_blocks`.
    src_path : str
        Path to example file.
    image_path : str
        Path where plots are saved (format string which accepts figure number).
    cfg : config object
        Sphinx config object created by Sphinx.

    Returns
    -------
    figure_list : list
        List of figure names saved by the example.
    rst_text : str
        Text with code wrapped code-block directives.
    """
    src_dir, src_name = src_path.psplit()
    # Only files named plot_*.py are executed; others produce no figures.
    if not src_name.startswith('plot'):
        return [], ''
    # index of blocks which have inline plots
    inline_tag = cfg.plot2rst_plot_tag
    idx_inline_plot = [i for i, b in enumerate(blocks)
                       if inline_tag in b[2]]
    image_dir, image_fmt_str = image_path.psplit()
    figure_list = []
    # Reset matplotlib state so examples don't leak settings into each other.
    plt.rcdefaults()
    plt.rcParams.update(cfg.plot2rst_rcparams)
    plt.close('all')
    # All code blocks execute in one shared namespace, like a script.
    example_globals = {}
    rst_blocks = []
    fig_num = 1
    for i, (blabel, brange, bcontent) in enumerate(blocks):
        if blabel == 'code':
            exec(bcontent, example_globals)
            rst_blocks.append(codestr2rst(bcontent))
        else:
            if i in idx_inline_plot:
                # Save the current figure and replace the tag with its link.
                plt.savefig(image_path.format(fig_num))
                figure_name = image_fmt_str.format(fig_num)
                fig_num += 1
                figure_list.append(figure_name)
                figure_link = os.path.join('images', figure_name)
                bcontent = bcontent.replace(inline_tag, figure_link)
            rst_blocks.append(docstr2rst(bcontent))
    return figure_list, '\n'.join(rst_blocks)
def codestr2rst(codestr):
    """Wrap a code string in an rst ``code-block:: python`` directive."""
    # Indent every line with a tab so it nests under the directive.
    indented = '\n'.join('\t' + line for line in codestr.split('\n'))
    return ".. code-block:: python\n\n" + indented
def docstr2rst(docstr):
    """Return reStructuredText from docstring.

    `docstr` is the raw source text of a module-level string literal
    (quotes included).  The literal is evaluated to its string value and
    any trailing whitespace outside the quotes is re-appended.
    """
    # SECURITY FIX: the literal comes from an example file on disk;
    # ast.literal_eval parses string literals without executing code,
    # unlike the original eval().
    import ast
    # BUG FIX: the original computed a signed offset
    # (len(rstrip) - len(docstr)) and sliced docstr[offset:], which
    # returned the *entire* string when there was no trailing whitespace.
    ws_start = len(docstr.rstrip())
    whitespace = docstr[ws_start:]
    return ast.literal_eval(docstr) + whitespace
def save_all_figures(image_path):
    """Save all matplotlib figures.

    Parameters
    ----------
    image_path : str
        Path where plots are saved (format string which accepts figure number).

    Returns
    -------
    figure_list : list of str
        File names (without directory) of the saved figures.
    """
    figure_list = []
    image_dir, image_fmt_str = image_path.psplit()
    # Enumerate every live figure via matplotlib's figure manager registry.
    fig_mngr = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
    for fig_num in (m.num for m in fig_mngr):
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        plt.figure(fig_num)
        plt.savefig(image_path.format(fig_num))
        figure_list.append(image_fmt_str.format(fig_num))
    return figure_list
| bsd-3-clause |
tosolveit/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c

# Load iris and keep only classes 0 and 1 (binary problem).
iris = datasets.load_iris()
X = iris.data
y = iris.target

X = X[y != 2]
y = y[y != 2]

# Center the features; the regularization path is sensitive to offsets.
X -= np.mean(X, 0)

###############################################################################
# Demo path functions

# Regularization grid: start at the smallest C that yields a non-trivial
# L1 model (l1_min_c) and sweep three decades upward.
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)

print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    # Record the fitted coefficients at this regularization strength.
    coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)

# One curve per coefficient versus log10(C).
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 30 | 4727 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
    """Stack the selected rows of `array` (in order) into a new 2-D array."""
    return np.vstack([array[i] for i in row_indices])
class FeedingQueueRunnerTestCase(tf.test.TestCase):
  """Tests for `FeedingQueueRunner`."""

  def testArrayFeeding(self):
    # Enqueue a numpy array and check that dequeued batches carry both the
    # row indices (component 0) and the matching rows (component 1),
    # wrapping around the array as feeding repeats.
    with tf.Graph().as_default():
      array = np.arange(32).reshape([16, 2])
      q = ff.enqueue_data(array, capacity=100)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          # Expected row indices for this batch (modulo the array length).
          indices = [j % array.shape[0]
                     for j in range(batch_size * i, batch_size * (i + 1))]
          expected_dq = get_rows(array, indices)
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(indices, dq[0])
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testArrayFeedingMultiThread(self):
    # With multiple shuffling threads the order is nondeterministic, so
    # only verify that each dequeued row matches the row at its reported
    # index.
    with tf.Graph().as_default():
      array = np.arange(256).reshape([128, 2])
      q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_dq = get_rows(array, indices)
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeeding(self):
    # Skip silently when pandas is not installed.
    if not HAS_PANDAS:
      return
    with tf.Graph().as_default():
      array1 = np.arange(32)
      array2 = np.arange(32, 64)
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
      q = ff.enqueue_data(df, capacity=100)
      batch_size = 5
      dq_op = q.dequeue_many(5)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          # Positional indices wrap around the DataFrame length.
          indices = [j % array1.shape[0]
                     for j in range(batch_size * i, batch_size * (i + 1))]
          expected_df_indices = df.index[indices]
          expected_rows = df.iloc[indices]
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(expected_df_indices, dq[0])
          # Column k of the DataFrame comes back as dequeue component k+1
          # (component 0 is the index).
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeedingMultiThread(self):
    # Shuffled multi-threaded variant: match rows via the dequeued index.
    if not HAS_PANDAS:
      return
    with tf.Graph().as_default():
      array1 = np.arange(128, 256)
      array2 = 2 * array1
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
      q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
      batch_size = 5
      dq_op = q.dequeue_many(batch_size)
      with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_rows = df.iloc[indices]
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)
if __name__ == "__main__":
    # Run the TensorFlow test harness when executed directly.
    tf.test.main()
| gpl-3.0 |
sumspr/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than tree times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
# Reproducible pseudo-random state for data generation.
rng = np.random.RandomState(0)

#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()

# Add strong noise to every fifth target.
# BUG FIX: with ``from __future__ import division`` in effect,
# ``X.shape[0] / 5`` is a float, which ``np.random.rand`` rejects;
# use floor division to get an integer count.
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))

X_plot = np.linspace(0, 5, 100000)[:, None]

#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
                   param_grid={"C": [1e0, 1e1, 1e2, 1e3],
                               "gamma": np.logspace(-2, 2, 5)})

kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
                  param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
                              "gamma": np.logspace(-2, 2, 5)})

t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
      % svr_fit)

t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
      % kr_fit)

# Fraction of training points retained as support vectors.
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)

t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
      % (X_plot.shape[0], svr_predict))

t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
      % (X_plot.shape[0], kr_predict))

#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
# BUG FIX: ``plt.hold('on')`` was removed from matplotlib (holding is the
# default behavior), so the call is dropped.
plt.plot(X_plot, y_svr, c='r',
         label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
         label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()

# Visualize training and prediction time
plt.figure()

# Generate sample data (same distribution as above).
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
                                           gamma=10),
                        "SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
    train_time = []
    test_time = []
    for train_test_size in sizes:
        # BUG FIX: np.logspace yields floats, but array slicing requires
        # integer bounds.
        n_samples = int(train_test_size)
        t0 = time.time()
        estimator.fit(X[:n_samples], y[:n_samples])
        train_time.append(time.time() - t0)

        t0 = time.time()
        estimator.predict(X_plot[:1000])
        test_time.append(time.time() - t0)

    plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
             label="%s (train)" % name)
    plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
             label="%s (test)" % name)

plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")

# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
    learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
                   scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
    learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
                   scoring="mean_squared_error", cv=10)

plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
         label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
         label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")

plt.show()
| bsd-3-clause |
dursobr/Pythics | pythics/start.py | 1 | 27005 | # -*- coding: utf-8 -*-
#
# Copyright 2008 - 2019 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import getopt
import inspect
import logging
import os, os.path
import multiprocessing
import pickle
import sys, traceback

from pythics.settings import _TRY_PYSIDE

# Qt binding selection: prefer PySide2 when enabled via settings, otherwise
# (or when PySide2 is not installed) fall back to PyQt5. The chosen modules
# and the Signal/Slot/Property names are re-exported under binding-agnostic
# aliases so the rest of the code never needs to know which binding is active.
try:
    if not _TRY_PYSIDE:
        # settings ask us to skip PySide2 — force the PyQt5 branch below
        raise ImportError()
    import PySide2.QtCore as _QtCore
    import PySide2.QtGui as _QtGui
    import PySide2.QtWidgets as _QtWidgets
    import PySide2.QtPrintSupport as _QtPrintSupport
    QtCore = _QtCore
    QtGui = _QtGui
    QtWidgets = _QtWidgets
    QtPrintSupport = _QtPrintSupport
    Signal = QtCore.Signal
    Slot = QtCore.Slot
    Property = QtCore.Property
    USES_PYSIDE = True
except ImportError:
    import PyQt5.QtCore as _QtCore
    import PyQt5.QtGui as _QtGui
    import PyQt5.QtWidgets as _QtWidgets
    import PyQt5.QtPrintSupport as _QtPrintSupport
    QtCore = _QtCore
    QtGui = _QtGui
    QtWidgets = _QtWidgets
    QtPrintSupport = _QtPrintSupport
    # PyQt5 spells these pyqtSignal/pyqtSlot/pyqtProperty
    Signal = QtCore.pyqtSignal
    Slot = QtCore.pyqtSlot
    Property = QtCore.pyqtProperty
    USES_PYSIDE = False

import pythics.html
import pythics.libcontrol
import pythics.parent
#
# Application top level window
# one for the whole application
# parent of all TabFrame instances
#
class MainWindow(QtWidgets.QMainWindow):
    """Application top-level window.

    There is one instance for the whole application; it is the parent of all
    TabHtmlWindow instances, owns the menu/status bars, the tab frame, and
    the printer used by the print actions.
    """

    def __init__(self, parent_process, app, parent=None, compact=False):
        """Build the main window.

        Parameters
        ----------
        parent_process : pythics.parent.Parent
            Manager of the child processes that run the apps.
        app : QApplication
            The running Qt application (used for clipboard/focus access).
        parent : QWidget, optional
            Qt parent widget.
        compact : bool, optional
            Simplified mode for small screens: no status bar, fixed tabs.
        """
        super(MainWindow, self).__init__(parent)
        # pythics data
        self.parent_process = parent_process
        self.app = app
        self.compact = compact
        self.fixed_tabs = compact
        self.workspace = ''
        self.shutdown_on_exit = False
        # setup window basics
        #self.resize(900, 560)
        # match raspberry pi touchscreen size
        self.resize(800, 480)
        self.setWindowTitle('Pythics')
        self.clipboard = QtWidgets.QApplication.clipboard()
        # set the corner icon
        icon = QtGui.QIcon(os.path.join(sys.path[0], 'pythics_icon.ico'))
        self.setWindowIcon(icon)
        # add the menu bar
        self.new_menu_bar()
        # fill in the main window
        self.new_tab_frame()
        # add the status bar
        self.new_status_bar()
        # for printing later
        self.printer = QtPrintSupport.QPrinter()

    def _confirm(self, question):
        """Show a Yes/No confirmation dialog; return True on Yes."""
        reply = QtWidgets.QMessageBox.question(self, 'Confirm',
            question, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        return reply == QtWidgets.QMessageBox.Yes

    def confirm_exit(self):
        """Ask the user to confirm quitting Pythics."""
        return self._confirm('Are you sure you want to exit?')

    def confirm_close(self):
        """Ask the user to confirm closing the current app."""
        return self._confirm('Are you sure you want to close the app?')

    def closeEvent(self, event):
        # called when the close button on the window is pushed
        if self.confirm_exit():
            self.shutdown()
            event.accept()
        else:
            event.ignore()

    def new_status_bar(self):
        """Create the status bar label (skipped in compact mode)."""
        if not self.compact:
            self.status_text = QtWidgets.QLabel('')
            self.statusBar().addWidget(self.status_text, 1)

    def set_status_text(self, value):
        """Show `value` in the status bar (no-op in compact mode)."""
        if not self.compact:
            self.status_text.setText(value)

    def new_tab_frame(self):
        """Create the central QTabWidget that holds one tab per open app."""
        self.tab_frame = QtWidgets.QTabWidget()
        self.tab_frame.setDocumentMode(True)
        self.tab_frame.setTabsClosable(not self.fixed_tabs)
        self.tab_frame.setMovable(not self.fixed_tabs)
        self.tab_frame.currentChanged.connect(self.redraw)
        self.tab_frame.tabCloseRequested.connect(self.close_tab)
        self.setCentralWidget(self.tab_frame)

    def redraw(self, index):
        """Refresh the tab at `index` and update the window title.

        `index` is -1 (widget is None) after the last tab is closed.
        """
        widget = self.tab_frame.widget(index)
        if widget is None:
            # BUGFIX: previously .redraw() was called on None here, which
            # raised AttributeError when the last tab was closed
            title = 'Pythics'
        else:
            title = widget.title
            widget.redraw()
        self.setWindowTitle(title)

    def get_active_tab(self):
        """Return the currently selected TabHtmlWindow (or None)."""
        return self.tab_frame.currentWidget()

    def close_tab(self, i):
        """Close tab `i` after confirmation; disable menus if none remain."""
        if self.confirm_close():
            self.tab_frame.widget(i).close()
            self.tab_frame.removeTab(i)
            if self.tab_frame.count() == 0:
                self.disable_menu_items()

    def get_open_filename(self, name_filter='*.*', directory='', title='Select a file to open'):
        """Show an open-file dialog; raise IOError if the user cancels."""
        filename = QtWidgets.QFileDialog.getOpenFileName(self, title, directory, name_filter)[0]
        if filename == '':
            raise IOError('No file selected.')
        return filename

    def get_save_filename(self, name_filter='*.*', directory='', title='Select a filename for saving'):
        """Show a save-file dialog; raise IOError if the user cancels."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, title, directory, name_filter)[0]
        if filename == '':
            raise IOError('No filename selected.')
        return filename

    def add_menu(self, name):
        """Add a new top-level menu and remember it for add_menu_item()."""
        self.last_menu = self.menuBar().addMenu(name)
        return self.last_menu

    def add_menu_item(self, item_string, item_function, shortcut=0, tip=''):
        """Append an action to the most recently added menu."""
        action = self.last_menu.addAction(item_string, item_function, shortcut)
        action.setStatusTip(tip)
        return action

    def add_menu_seperator(self):
        """Append a separator to the most recently added menu."""
        self.last_menu.addSeparator()

    def new_menu_bar(self):
        """Build the File/Edit/Parameters/Help menus.

        NOTE: disable_menu_items() and enable_menu_items() address actions by
        index, so the order of the add_menu_item() calls below matters.
        """
        # File menu
        self.file_menu = self.add_menu('&File')
        self.add_menu_item('&Open...', self.menu_open, 'Ctrl+O',
                           'Open an app file.')
        self.add_menu_item('&Close', self.menu_close, 'Ctrl+W',
                           'Close the current app.')
        self.add_menu_item('Close All', self.menu_close_all, 0,
                           'Close all open files.')
        self.add_menu_item('&Reload', self.menu_reload, 'Ctrl+R',
                           'Reload the app.')
        self.add_menu_seperator()
        self.add_menu_item('Open Workspace...', self.menu_open_workspace, 0,
                           'Open a group of files (a workspace).')
        self.add_menu_item('Save Workspace', self.menu_save_workspace, 0,
                           'Save open workspace.')
        self.add_menu_item('Save Workspace As...', self.menu_save_workspace_as,
                           0, 'Save open files as a workspace.')
        self.add_menu_seperator()
        self.add_menu_item('Page Set&up...', self.menu_page_setup, 0,
                           'Page setup for printing.')
        self.add_menu_item('Print Pre&view', self.menu_print_preview, 0,
                           'Preview pages to be printed.')
        self.add_menu_item('&Print...', self.menu_print, 0,
                           'Print the current html.')
        self.add_menu_seperator()
        self.add_menu_item('E&xit', self.menu_quit, 0, 'Quit Pythics')
        # Edit menu
        self.edit_menu = self.add_menu('&Edit')
        self.add_menu_item('Cu&t', self.menu_cut, 'Ctrl+X',
                           'Cut text to clipboard.')
        self.add_menu_item('&Copy', self.menu_copy, 'Ctrl+C',
                           'Copy text to clipboard.')
        self.add_menu_item('&Paste', self.menu_paste, 'Ctrl+V',
                           'Paste text from clipboard.')
        self.add_menu_item('Delete', self.menu_delete, 0,
                           'Delete selected text.')
        # Parameters menu
        self.param_menu = self.add_menu('&Parameters')
        self.add_menu_item('Load Defaults', self.menu_load_parameters_defaults,
                           0, 'Load default parameters.')
        self.add_menu_item('Load...', self.menu_load_parameters, 0,
                           'Load parameter file')
        self.add_menu_seperator()
        self.add_menu_item('Save As Defaults',
                           self.menu_save_parameters_as_defaults,
                           0, 'Save parameters to default location.')
        self.add_menu_item('Save As...', self.menu_save_parameters_as, 0,
                           'Save parameter file.')
        # Help menu
        if not self.fixed_tabs:
            self.help_menu = self.add_menu('&Help')
            self.add_menu_item('About Pythics...', self.menu_about,
                               0, '')
            self.add_menu_item('Open Help', self.menu_help,
                               0, '')
        self.disable_menu_items()

    def disable_menu_items(self):
        """Grey out actions that need an open tab (indexes match new_menu_bar)."""
        if self.fixed_tabs:
            self.file_menu.actions()[0].setEnabled(False)
            self.file_menu.actions()[5].setEnabled(False)
        # disable menu items that require an open tab
        self.file_menu.actions()[1].setEnabled(False)
        self.file_menu.actions()[2].setEnabled(False)
        self.file_menu.actions()[3].setEnabled(False)
        self.file_menu.actions()[6].setEnabled(False)
        self.file_menu.actions()[7].setEnabled(False)
        self.file_menu.actions()[10].setEnabled(False)
        self.file_menu.actions()[11].setEnabled(False)
        self.param_menu.actions()[0].setEnabled(False)
        self.param_menu.actions()[1].setEnabled(False)
        self.param_menu.actions()[3].setEnabled(False)
        self.param_menu.actions()[4].setEnabled(False)

    def enable_menu_items(self):
        """Re-enable actions that need an open tab (indexes match new_menu_bar)."""
        # enable menu items that require an open tab
        if self.fixed_tabs:
            self.file_menu.actions()[3].setEnabled(True)
            self.file_menu.actions()[10].setEnabled(True)
            self.file_menu.actions()[11].setEnabled(True)
            self.param_menu.actions()[0].setEnabled(True)
            self.param_menu.actions()[1].setEnabled(True)
            self.param_menu.actions()[3].setEnabled(True)
            self.param_menu.actions()[4].setEnabled(True)
        else:
            self.file_menu.actions()[1].setEnabled(True)
            self.file_menu.actions()[2].setEnabled(True)
            self.file_menu.actions()[3].setEnabled(True)
            self.file_menu.actions()[6].setEnabled(True)
            self.file_menu.actions()[7].setEnabled(True)
            self.file_menu.actions()[10].setEnabled(True)
            self.file_menu.actions()[11].setEnabled(True)
            self.param_menu.actions()[0].setEnabled(True)
            self.param_menu.actions()[1].setEnabled(True)
            self.param_menu.actions()[3].setEnabled(True)
            self.param_menu.actions()[4].setEnabled(True)

    def menu_open(self):
        """File > Open: prompt for an html/xml app file and open it."""
        try:
            filename = self.get_open_filename('xml (*.htm *.html *.xml)')
        except IOError:
            # user cancelled the dialog
            pass
        else:
            self.open_html_file(filename)

    def menu_close(self):
        """File > Close: close the current tab after confirmation."""
        if self.confirm_close():
            self.get_active_tab().close()
            self.tab_frame.removeTab(self.tab_frame.currentIndex())
            if self.tab_frame.count() == 0:
                self.disable_menu_items()

    def menu_close_all(self):
        """File > Close All: close every open tab after one confirmation."""
        if self._confirm('Are you sure you want to close all tabs?'):
            while self.tab_frame.count() > 0:
                self.get_active_tab().close()
                self.tab_frame.removeTab(self.tab_frame.currentIndex())
            if self.tab_frame.count() == 0:
                self.disable_menu_items()

    def menu_quit(self):
        """File > Exit: shut down and quit; optionally power off the machine."""
        if self.confirm_exit():
            self.shutdown()
            self.app.quit()
            if self.shutdown_on_exit:
                # requested via the -s/--shutdown command-line option
                os.system("shutdown -h now")

    def menu_reload(self):
        """File > Reload: restart the current app from its file."""
        if self._confirm('Are you sure you want to reload the app?'):
            tab_window = self.get_active_tab()
            title = tab_window.reload_file()
            index = self.tab_frame.currentIndex()
            self.tab_frame.setTabText(index, title)

    def menu_open_workspace(self):
        """File > Open Workspace: open a pickled list of app files."""
        try:
            filename = self.get_open_filename('pickle file (*.pkl *.txt)')
        except IOError:
            pass
        else:
            self.open_workspace(filename)
            self.workspace = filename
            self.enable_menu_items()

    def menu_save_workspace(self):
        """File > Save Workspace: save to the current workspace file,
        prompting for a filename if none is set yet."""
        if self.workspace == '':
            try:
                filename = self.get_save_filename('*.pkl')
            except IOError:
                pass
            else:
                self.save_workspace(filename)
                self.workspace = filename
        else:
            self.save_workspace(filename=self.workspace)

    def menu_save_workspace_as(self):
        """File > Save Workspace As: prompt for a filename and save."""
        try:
            filename = self.get_save_filename('*.pkl')
        except IOError:
            pass
        else:
            self.save_workspace(filename)
            self.workspace = filename

    def menu_page_setup(self):
        """File > Page Setup: configure self.printer."""
        dialog = QtPrintSupport.QPageSetupDialog(self.printer)
        dialog.exec_()

    def menu_print_preview(self):
        """File > Print Preview: preview the current tab's rendering."""
        dialog = QtPrintSupport.QPrintPreviewDialog(self.printer)
        dialog.paintRequested.connect(self.print_current_tab)
        dialog.exec_()

    def menu_print(self):
        """File > Print: print the current tab."""
        dialog = QtPrintSupport.QPrintDialog(self.printer)
        dialog.setWindowTitle('Print Document')
        if dialog.exec_() == QtWidgets.QDialog.Accepted:
            self.set_status_text('Printing...')
            self.print_current_tab(self.printer)
            self.set_status_text('')

    def print_current_tab(self, printer):
        """Render the active tab to `printer`, scaled to fill the page width.

        Rendering directly to the printer produced fuzzy output for many
        controls, so we render to an intermediate QPicture first and then
        draw that picture on the printer for sharper results.
        """
        scroll_area = self.get_active_tab()
        # overall scale: set to fill width of page
        page_width = printer.pageRect().width()
        hsb = scroll_area.horizontalScrollBar()
        frame_width = hsb.maximum() + hsb.pageStep()
        scale = float(page_width)/float(frame_width)
        x_offset = 0
        # start printing from the current vertical scroll position
        sb = scroll_area.verticalScrollBar()
        y_offset = - scale*sb.sliderPosition()
        # first draw to the QPicture
        picture = QtGui.QPicture()
        picture_painter = QtGui.QPainter(picture)
        picture_painter.translate(x_offset, y_offset)
        picture_painter.scale(scale, scale)
        scroll_area.frame.render(picture_painter, QtCore.QPoint(0, 0))
        picture_painter.end()
        # then draw the QPicture to the printer
        painter = QtGui.QPainter(printer)
        painter.drawPicture(QtCore.QPoint(0, 0), picture)
        painter.end()

    def menu_cut(self):
        """Edit > Cut: forward to the focused widget if it supports cut()."""
        w = self.app.focusWidget()
        try:
            w.cut()
        except AttributeError:
            # focused widget has no cut() (or nothing has focus)
            pass

    def menu_copy(self):
        """Edit > Copy: forward to the focused widget if it supports copy()."""
        w = self.app.focusWidget()
        try:
            w.copy()
        except AttributeError:
            pass

    def menu_paste(self):
        """Edit > Paste: forward to the focused widget if it supports paste()."""
        w = self.app.focusWidget()
        try:
            w.paste()
        except AttributeError:
            pass

    def menu_delete(self):
        """Edit > Delete: cut the selection but restore the old clipboard,
        so the deleted text does not end up on the clipboard."""
        w = self.app.focusWidget()
        t = self.clipboard.text()
        try:
            w.cut()
        except AttributeError:
            pass
        self.clipboard.setText(t)

    def menu_load_parameters_defaults(self):
        """Parameters > Load Defaults: reload default parameters after confirm."""
        tab_window = self.get_active_tab()
        reply = QtWidgets.QMessageBox.question(self, 'Confirm Load Parameters',
            'Are you sure you want to replace current parameters?',
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            tab_window.load_parameters(default=True)

    def menu_load_parameters(self):
        """Parameters > Load...: load a parameter file into the active tab."""
        self.get_active_tab().load_parameters()

    def menu_save_parameters_as_defaults(self):
        """Parameters > Save As Defaults: save to the default location."""
        self.get_active_tab().save_parameters(default=True)

    def menu_save_parameters_as(self):
        """Parameters > Save As...: save parameters to a chosen file."""
        self.get_active_tab().save_parameters()

    def shutdown(self):
        # stop all action threads then exit
        self.set_status_text('Waiting for threads and subprocesses to die...')
        self.parent_process.stop()

    def open_html_file(self, filename):
        """Open `filename` in a new tab and make it current."""
        self.tab_frame.setUpdatesEnabled(False)
        new_tab_window = TabHtmlWindow(self, self.parent_process)
        # set current working directory so relative paths in the app resolve
        directory = os.path.dirname(filename)
        if directory != '':
            os.chdir(directory)
        title = new_tab_window.open_file(filename)
        index = self.tab_frame.addTab(new_tab_window, title)
        self.tab_frame.setCurrentIndex(index)
        self.tab_frame.setUpdatesEnabled(True)
        self.enable_menu_items()

    def open_workspace(self, filename):
        """Open every app file listed in the pickled workspace `filename`."""
        # BUGFIX: pickle.load requires a binary-mode file in Python 3
        # (text mode 'r' raised TypeError/UnicodeDecodeError here)
        with open(filename, 'rb') as file:
            file_list = pickle.load(file)
        for f in file_list:
            # set current working directory
            os.chdir(os.path.dirname(f))
            # open the file
            self.open_html_file(f)
        self.enable_menu_items()

    def save_workspace(self, filename):
        """Pickle the list of open app files to `filename`."""
        tf = self.tab_frame
        l = list([])
        initial_index = tf.currentIndex()
        n_pages = tf.count()
        self.tab_frame.setUpdatesEnabled(False)
        for i in range(n_pages):
            tf.setCurrentIndex(i)
            html_filename = tf.currentWidget().html_file
            l.append(html_filename)
        # BUGFIX: pickle.dump requires a binary-mode file in Python 3
        with open(filename, 'wb') as file:
            pickle.dump(l, file, 0)
        tf.setCurrentIndex(initial_index)
        self.tab_frame.setUpdatesEnabled(True)

    def menu_about(self):
        """Help > About: show license/version information."""
        QtWidgets.QMessageBox.about(self, 'About Pythics',
            """Python Instrument Control System, also known as Pythics
version 1.0.0
Copyright 2008 - 2019 Brian R. D'Urso
Pythics is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
Pythics is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with Pythics. If not, see
<http://www.gnu.org/licenses/>.""")

    def menu_help(self):
        """Help > Open Help: open the bundled help file in a new tab."""
        # build the path to the help file
        directory = os.path.dirname(inspect.getfile(pythics))
        filename = os.path.join(directory, 'help', 'help.xml')
        # open it
        self.open_html_file(filename)
        self.enable_menu_items()
#
# TabHtmlWindow - one for each primary html file
#
class TabHtmlWindow(pythics.html.HtmlWindow):
    """One tab of the main window: displays a primary html/xml app file and
    owns the child process that runs the app's code.
    """

    def __init__(self, parent, parent_process):
        self.main_window = parent
        self.parent_process = parent_process
        self.title = None
        super(TabHtmlWindow, self).__init__(parent, 'pythics.controls',
                                            multiprocessing.get_logger())
        # force widgets to redraw when the scrollbars are released
        # this is needed for animated matplotlib widgets
        self.verticalScrollBar().sliderReleased.connect(self.redraw)
        self.horizontalScrollBar().sliderReleased.connect(self.redraw)

    def redraw(self):
        """Ask the child process to repaint its controls (no-op after a
        failed load)."""
        if not self.error:
            self.child_process.redraw()

    def close(self):
        """Stop this tab's child process."""
        if not self.error:
            try:
                self.parent_process.stop_child_process(self.child_process)
            except Exception:
                self.logger.exception('Error while closing process.')

    def set_title(self, title):
        """Record the tab title (shown by MainWindow.redraw)."""
        self.title = title

    def open_file(self, filename):
        """Load app `filename`, start its child process, and return the
        tab title. On failure a dialog is shown and self.error is set."""
        self.error = False
        # BUGFIX: compute the short name up front so the except branch can
        # always reference it, even if an exception fires before
        # os.path.split below runs
        file_name_only = os.path.basename(filename)
        try:
            self.main_window.set_status_text('Loading file %s.' % filename)
            self.html_file = filename
            self.html_path, file_name_only = os.path.split(filename)
            self.default_parameter_filename = 'defaults.txt'
            anonymous_controls, controls = pythics.html.HtmlWindow.open_file(self, filename)
            self.child_process = self.parent_process.new_child_process(self.html_path, file_name_only, anonymous_controls, controls)
            self.child_process.start()
        except Exception:
            message = 'Error while opening xml file %s\n' % file_name_only + traceback.format_exc(0)
            QtWidgets.QMessageBox.critical(self, 'Error', message, QtWidgets.QMessageBox.Ok)
            self.logger.exception('Error while opening xml file.')
            self.error = True
        self.main_window.set_status_text('')
        if self.title is None:
            self.title = file_name_only
        self.set_title(self.title)
        return self.title

    def reload_file(self):
        """Stop the child process and reopen the same app file."""
        self.close()
        self.reset()
        # set current working directory
        os.chdir(os.path.dirname(self.html_file))
        return self.open_file(self.html_file)

    # parameter save and recall functions for internal use
    def load_parameters(self, filename='', default=False):
        """Load control parameters from `filename`, or from the default
        parameter file when `default` is True. Prompts when `filename` is
        empty and `default` is False."""
        if not self.error:
            try:
                if default:
                    if not os.path.isabs(self.default_parameter_filename):
                        filename = os.path.join(self.html_path,
                                                self.child_process.default_parameter_filename)
                    else:
                        # BUGFIX: an absolute default path previously left
                        # `filename` empty, so defaults silently never loaded
                        filename = self.default_parameter_filename
                else:
                    if filename == '':
                        filename = self.main_window.get_open_filename('data (*.*)')
                    elif not os.path.isabs(filename):
                        filename = os.path.join(self.html_path, filename)
                if filename != '':
                    try:
                        self.child_process.load_parameters(filename)
                    except IOError as error:
                        (errno, strerror) = error.args
                        self.logger.error('Error (%s) opening parameter file: %s.' % (errno, strerror))
            except Exception:
                self.logger.exception('Error while loading parameters.')

    def save_parameters(self, filename='', default=False):
        """Save control parameters to `filename`, or to the default
        parameter file when `default` is True. Prompts when `filename` is
        empty and `default` is False."""
        if not self.error:
            try:
                if default:
                    if not os.path.isabs(self.default_parameter_filename):
                        filename = os.path.join(self.html_path,
                                                self.child_process.default_parameter_filename)
                    else:
                        # BUGFIX: same missing-else defect as load_parameters
                        filename = self.default_parameter_filename
                else:
                    if filename == '':
                        filename = self.main_window.get_save_filename('data (*.*)')
                    elif not os.path.isabs(filename):
                        filename = os.path.join(self.html_path, filename)
                if filename != '':
                    self.child_process.save_parameters(filename)
            except Exception:
                # BUGFIX: message previously said "loading"
                self.logger.exception('Error while saving parameters.')
class OptionsProcessor(object):
    """Parses the pythics command line and holds the resulting settings."""

    def __init__(self):
        # route log records from all processes to stderr; default level is
        # WARNING, raised to INFO/DEBUG by the -v/-d options
        self.logger = multiprocessing.log_to_stderr()
        #self.logger.setLevel(logging.DEBUG)
        #self.logger.setLevel(logging.INFO)
        self.logger.setLevel(logging.WARNING)
        self.first_app = ""
        self.first_workspace = ""
        self.compact = False
        self.shutdown_on_exit = False

    def usage(self):
        """Print command-line help text."""
        print("""\
Usage: pythics-run.py [options]
Options:
-h | --help show help text then exit
-a | --app selects startup app
-w | --workspace selects startup workspace
-c | --compact run in compact mode with simplified controls for small screens
-s | --shutdown shutdown computer on exit (*nix only)
-v | --verbose selects verbose mode
-d | --debug selects debug mode""")

    def options(self):
        """Read sys.argv and update the settings attributes.

        Exits the process on --help (status 0) or on a bad option (status 2).
        """
        short_flags = 'ha:w:csvd'
        long_flags = ['help', 'app=', 'workspace=', 'compact', 'shutdown',
                      'verbose', 'debug']
        try:
            parsed, _args = getopt.getopt(sys.argv[1:], short_flags, long_flags)
        except getopt.GetoptError as err:
            # err reads something like "option -a not recognized"
            print(err)
            self.usage()
            sys.exit(2)
        for flag, value in parsed:
            if flag in ('-h', '--help'):
                self.usage()
                sys.exit(0)
            elif flag in ('-v', '--verbose'):
                self.logger.setLevel(logging.INFO)
            elif flag in ('-d', '--debug'):
                self.logger.setLevel(logging.DEBUG)
            elif flag in ('-a', '--app'):
                self.logger.info('opening app ' + value)
                self.first_app = value
            elif flag in ('-w', '--workspace'):
                self.logger.info('opening workspace ' + value)
                self.first_workspace = value
            elif flag in ('-c', '--compact'):
                self.logger.info('compact mode')
                self.compact = True
            elif flag in ('-s', '--shutdown'):
                self.logger.info('shutdown on exit')
                self.shutdown_on_exit = True
            else:
                assert False, 'unhandled option'
#
# create and start the application
#
if __name__ == '__main__':
    # shared state manager for communication with child processes
    manager = multiprocessing.Manager()
    application = QtWidgets.QApplication(sys.argv)
    parent_process = pythics.parent.Parent(manager)
    # parse the command line before building the window (compact mode
    # changes the window layout)
    cl_options_processor = OptionsProcessor()
    cl_options_processor.options()
    window = MainWindow(parent_process, application, compact=cl_options_processor.compact)
    window.show()
    parent_process.start()
    # a workspace takes precedence over a single startup app
    if os.path.isfile(cl_options_processor.first_workspace):
        window.open_workspace(cl_options_processor.first_workspace)
    elif os.path.isfile(cl_options_processor.first_app):
        window.open_html_file(cl_options_processor.first_app)
    window.shutdown_on_exit = cl_options_processor.shutdown_on_exit
    # enter the Qt event loop; returns when the application quits
    application.exec_()
| gpl-3.0 |
billy-inn/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
    """Return the score of the final estimator's prediction on (X, y).

    Thin wrapper around ``self.estimator_.score``; :meth:`fit` must
    have been called first.

    Parameters
    ----------
    X : numpy array or sparse matrix of shape [n_samples, n_features]
        Training data.
    y : array, shape = [n_samples] or [n_samples, n_targets]
        Target values.

    Returns
    -------
    z : float
        Score of the prediction.
    """
    # Raises NotFittedError if fit() has not populated estimator_.
    check_is_fitted(self, 'estimator_')
    final_model = self.estimator_
    return final_model.score(X, y)
| bsd-3-clause |
cjermain/numpy | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/test_panel.py | 7 | 93726 | # -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
import numpy as np
import pandas as pd
from pandas.types.common import is_float_dtype
from pandas import (Series, DataFrame, Index, date_range, isnull, notnull,
pivot, MultiIndex)
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
from pandas.formats.printing import pprint_thing
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas.tseries.offsets import BDay, MonthEnd
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
ensure_clean, assertRaisesRegexp,
makeCustomDataframe as mkdf,
makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
class PanelTests(object):
    """Behavioral checks shared by Panel-based test fixtures.

    Subclasses supply ``self.panel`` (a pandas Panel) via setUp.
    """

    panel = None

    def test_pickle(self):
        # A pickle round-trip must preserve the panel's contents.
        restored = self.round_trip_pickle(self.panel)
        assert_frame_equal(restored['ItemA'], self.panel['ItemA'])

    def test_rank(self):
        # Panel.rank() is intentionally unimplemented.
        self.assertRaises(NotImplementedError, lambda: self.panel.rank())

    def test_cumsum(self):
        # Item-wise cumsum must match the per-frame cumsum.
        running = self.panel.cumsum()
        assert_frame_equal(running['ItemA'], self.panel['ItemA'].cumsum())

    def not_hashable(self):
        # Panels (empty or nested) must not be hashable.
        empty_panel = Panel()
        nested_panel = Panel(Panel([[[1]]]))
        self.assertRaises(TypeError, hash, empty_panel)
        self.assertRaises(TypeError, hash, nested_panel)
class SafeForLongAndSparse(object):
    """Reduction/statistics tests safe to share across Panel variants.

    Mixin: subclasses provide ``self.panel``. ``_check_stat_op`` compares
    each Panel reduction against a reference NumPy implementation applied
    via ``Panel.apply``.
    """

    _multiprocess_can_split_ = True

    def test_repr(self):
        # Smoke test: repr must not raise.
        repr(self.panel)

    def test_copy_names(self):
        # A copy's axis-name changes must not leak back to the original.
        for attr in ('major_axis', 'minor_axis'):
            getattr(self.panel, attr).name = None
            cp = self.panel.copy()
            getattr(cp, attr).name = 'foo'
            self.assertIsNone(getattr(self.panel, attr).name)

    def test_iter(self):
        # Iterating a Panel yields its items.
        tm.equalContents(list(self.panel), self.panel.items)

    def test_count(self):
        # count ignores skipna, so has_skipna=False here.
        f = lambda s: notnull(s).sum()
        self._check_stat_op('count', f, obj=self.panel, has_skipna=False)

    def test_sum(self):
        self._check_stat_op('sum', np.sum)

    def test_mean(self):
        self._check_stat_op('mean', np.mean)

    def test_prod(self):
        self._check_stat_op('prod', np.prod)

    def test_median(self):
        # Reference median: NaN if any NaN present (non-skipna semantics).
        def wrapper(x):
            if isnull(x).any():
                return np.nan
            return np.median(x)

        self._check_stat_op('median', wrapper)

    def test_min(self):
        self._check_stat_op('min', np.min)

    def test_max(self):
        self._check_stat_op('max', np.max)

    def test_skew(self):
        try:
            from scipy.stats import skew
        except ImportError:
            raise nose.SkipTest("no scipy.stats.skew")

        # skew is undefined for fewer than 3 observations.
        def this_skew(x):
            if len(x) < 3:
                return np.nan
            return skew(x, bias=False)

        self._check_stat_op('skew', this_skew)

    # def test_mad(self):
    #     f = lambda x: np.abs(x - x.mean()).mean()
    #     self._check_stat_op('mad', f)

    def test_var(self):
        # Sample variance (ddof=1); NaN for a single observation.
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.var(x, ddof=1)

        self._check_stat_op('var', alt)

    def test_std(self):
        # Sample standard deviation (ddof=1).
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.std(x, ddof=1)

        self._check_stat_op('std', alt)

    def test_sem(self):
        # Standard error of the mean.
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.std(x, ddof=1) / np.sqrt(len(x))

        self._check_stat_op('sem', alt)

    def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
        # Compare the named Panel reduction to `alternative` applied
        # frame-wise over every axis, with and without skipna.
        if obj is None:
            obj = self.panel

        # # set some NAs
        # obj.ix[5:10] = np.nan
        # obj.ix[15:20, -2:] = np.nan

        f = getattr(obj, name)

        if has_skipna:
            # skipna=True reference: drop NAs first, NaN if nothing left.
            def skipna_wrapper(x):
                nona = remove_na(x)
                if len(nona) == 0:
                    return np.nan
                return alternative(nona)

            def wrapper(x):
                return alternative(np.asarray(x))

            for i in range(obj.ndim):
                result = f(axis=i, skipna=False)
                assert_frame_equal(result, obj.apply(wrapper, axis=i))
        else:
            skipna_wrapper = alternative
            wrapper = alternative

        for i in range(obj.ndim):
            result = f(axis=i)
            # NOTE(review): comparison skipped for ops known to disagree
            # with the installed bottleneck version.
            if not tm._incompat_bottleneck_version(name):
                assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))

        # An out-of-range axis must raise.
        self.assertRaises(Exception, f, axis=obj.ndim)

        # Unimplemented numeric_only parameter.
        if 'numeric_only' in signature(f).args:
            self.assertRaisesRegexp(NotImplementedError, name, f,
                                    numeric_only=True)
class SafeForSparse(object):
    """Axis accessors, arithmetic, and selection tests shared by Panel
    variants.

    Mixin: subclasses provide ``self.panel``.
    """

    _multiprocess_can_split_ = True

    @classmethod
    def assert_panel_equal(cls, x, y):
        assert_panel_equal(x, y)

    def test_get_axis(self):
        # _get_axis(i) must return the exact axis objects (identity).
        assert (self.panel._get_axis(0) is self.panel.items)
        assert (self.panel._get_axis(1) is self.panel.major_axis)
        assert (self.panel._get_axis(2) is self.panel.minor_axis)

    def test_set_axis(self):
        new_items = Index(np.arange(len(self.panel.items)))
        new_major = Index(np.arange(len(self.panel.major_axis)))
        new_minor = Index(np.arange(len(self.panel.minor_axis)))

        # ensure propagate to potentially prior-cached items too
        item = self.panel['ItemA']
        self.panel.items = new_items

        if hasattr(self.panel, '_item_cache'):
            # Reassigning items must invalidate the item cache.
            self.assertNotIn('ItemA', self.panel._item_cache)
        self.assertIs(self.panel.items, new_items)

        # TODO: unused?
        item = self.panel[0]  # noqa

        self.panel.major_axis = new_major
        self.assertIs(self.panel[0].index, new_major)
        self.assertIs(self.panel.major_axis, new_major)

        # TODO: unused?
        item = self.panel[0]  # noqa

        self.panel.minor_axis = new_minor
        self.assertIs(self.panel[0].columns, new_minor)
        self.assertIs(self.panel.minor_axis, new_minor)

    def test_get_axis_number(self):
        self.assertEqual(self.panel._get_axis_number('items'), 0)
        self.assertEqual(self.panel._get_axis_number('major'), 1)
        self.assertEqual(self.panel._get_axis_number('minor'), 2)

        # Unknown axis names must raise with a clear message.
        with tm.assertRaisesRegexp(ValueError, "No axis named foo"):
            self.panel._get_axis_number('foo')

        with tm.assertRaisesRegexp(ValueError, "No axis named foo"):
            self.panel.__ge__(self.panel, axis='foo')

    def test_get_axis_name(self):
        self.assertEqual(self.panel._get_axis_name(0), 'items')
        self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
        self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')

    def test_get_plane_axes(self):
        # what to do here?
        index, columns = self.panel._get_plane_axes('items')
        index, columns = self.panel._get_plane_axes('major_axis')
        index, columns = self.panel._get_plane_axes('minor_axis')
        index, columns = self.panel._get_plane_axes(0)

    def test_truncate(self):
        # Truncation on the major axis must match per-frame truncation.
        dates = self.panel.major_axis
        start, end = dates[1], dates[5]

        trunced = self.panel.truncate(start, end, axis='major')
        expected = self.panel['ItemA'].truncate(start, end)
        assert_frame_equal(trunced['ItemA'], expected)

        trunced = self.panel.truncate(before=start, axis='major')
        expected = self.panel['ItemA'].truncate(before=start)
        assert_frame_equal(trunced['ItemA'], expected)

        trunced = self.panel.truncate(after=end, axis='major')
        expected = self.panel['ItemA'].truncate(after=end)
        assert_frame_equal(trunced['ItemA'], expected)

        # XXX test other axes

    def test_arith(self):
        # Binary ops with a scalar, in both operand orders.
        self._test_op(self.panel, operator.add)
        self._test_op(self.panel, operator.sub)
        self._test_op(self.panel, operator.mul)
        self._test_op(self.panel, operator.truediv)
        self._test_op(self.panel, operator.floordiv)
        self._test_op(self.panel, operator.pow)

        self._test_op(self.panel, lambda x, y: y + x)
        self._test_op(self.panel, lambda x, y: y - x)
        self._test_op(self.panel, lambda x, y: y * x)
        self._test_op(self.panel, lambda x, y: y / x)
        self._test_op(self.panel, lambda x, y: y ** x)

        self._test_op(self.panel, lambda x, y: x + y)  # panel + 1
        self._test_op(self.panel, lambda x, y: x - y)  # panel - 1
        self._test_op(self.panel, lambda x, y: x * y)  # panel * 1
        self._test_op(self.panel, lambda x, y: x / y)  # panel / 1
        self._test_op(self.panel, lambda x, y: x ** y)  # panel ** 1

        # Adding a DataFrame to a Panel is not supported.
        self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])

    @staticmethod
    def _test_op(panel, op):
        # Apply op(panel, 1) and verify one item against the frame op.
        result = op(panel, 1)
        assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))

    def test_keys(self):
        tm.equalContents(list(self.panel.keys()), self.panel.items)

    def test_iteritems(self):
        # Test panel.iteritems(), aka panel.iteritems()

        # just test that it works
        for k, v in self.panel.iteritems():
            pass

        self.assertEqual(len(list(self.panel.iteritems())),
                         len(self.panel.items))

    def test_combineFrame(self):
        def check_op(op, name):
            # items
            df = self.panel['ItemA']

            func = getattr(self.panel, name)

            result = func(df, axis='items')

            assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))

            # major
            xs = self.panel.major_xs(self.panel.major_axis[0])
            result = func(xs, axis='major')

            idx = self.panel.major_axis[1]

            assert_frame_equal(result.major_xs(idx),
                               op(self.panel.major_xs(idx), xs))

            # minor
            xs = self.panel.minor_xs(self.panel.minor_axis[0])
            result = func(xs, axis='minor')

            idx = self.panel.minor_axis[1]

            assert_frame_equal(result.minor_xs(idx),
                               op(self.panel.minor_xs(idx), xs))

        ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow', 'mod']
        if not compat.PY3:
            # operator.div only exists on Python 2.
            ops.append('div')

        for op in ops:
            try:
                check_op(getattr(operator, op), op)
            except:
                # Report which operation failed before re-raising.
                pprint_thing("Failing operation: %r" % op)
                raise
        if compat.PY3:
            try:
                check_op(operator.truediv, 'div')
            except:
                pprint_thing("Failing operation: %r" % 'div')
                raise

    def test_combinePanel(self):
        result = self.panel.add(self.panel)
        self.assert_panel_equal(result, self.panel * 2)

    def test_neg(self):
        self.assert_panel_equal(-self.panel, self.panel * -1)

    # issue 7692
    def test_raise_when_not_implemented(self):
        p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
                  items=['ItemA', 'ItemB', 'ItemC'],
                  major_axis=pd.date_range('20130101', periods=4),
                  minor_axis=list('ABCDE'))
        d = p.sum(axis=1).ix[0]
        ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
        for op in ops:
            with self.assertRaises(NotImplementedError):
                getattr(p, op)(d, axis=0)

    def test_select(self):
        p = self.panel

        # select items
        result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
        expected = p.reindex(items=['ItemA', 'ItemC'])
        self.assert_panel_equal(result, expected)

        # select major_axis
        result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
        new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
        expected = p.reindex(major=new_major)
        self.assert_panel_equal(result, expected)

        # select minor_axis
        result = p.select(lambda x: x in ('D', 'A'), axis=2)
        expected = p.reindex(minor=['A', 'D'])
        self.assert_panel_equal(result, expected)

        # corner case, empty thing
        result = p.select(lambda x: x in ('foo', ), axis='items')
        self.assert_panel_equal(result, p.reindex(items=[]))

    def test_get_value(self):
        # get_value must agree with chained item/minor/major lookup.
        for item in self.panel.items:
            for mjr in self.panel.major_axis[::2]:
                for mnr in self.panel.minor_axis:
                    result = self.panel.get_value(item, mjr, mnr)
                    expected = self.panel[item][mnr][mjr]
                    assert_almost_equal(result, expected)

    def test_abs(self):
        # abs() at Panel, DataFrame, and Series granularity.
        result = self.panel.abs()
        result2 = abs(self.panel)
        expected = np.abs(self.panel)
        self.assert_panel_equal(result, expected)
        self.assert_panel_equal(result2, expected)

        df = self.panel['ItemA']
        result = df.abs()
        result2 = abs(df)
        expected = np.abs(df)
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)

        s = df['A']
        result = s.abs()
        result2 = abs(s)
        expected = np.abs(s)
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        # abs() must preserve the Series name.
        self.assertEqual(result.name, 'A')
        self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
    """Item access, assignment, cross-section, and fancy-indexing tests
    for Panel.

    Mixin: subclasses provide ``self.panel``. Many tests mutate
    ``self.panel`` in place and rely on a fresh copy per test from setUp.
    """

    _multiprocess_can_split_ = True

    def test_getitem(self):
        # Missing item lookup must raise.
        self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')

    def test_delitem_and_pop(self):
        # pop returns the removed item and removes it from the panel.
        expected = self.panel['ItemA']
        result = self.panel.pop('ItemA')
        assert_frame_equal(expected, result)
        self.assertNotIn('ItemA', self.panel.items)

        del self.panel['ItemB']
        self.assertNotIn('ItemB', self.panel.items)
        # Deleting an already-deleted item must raise.
        self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')

        values = np.empty((3, 3, 3))
        values[0] = 0
        values[1] = 1
        values[2] = 2

        panel = Panel(values, lrange(3), lrange(3), lrange(3))

        # did we delete the right row?
        panelc = panel.copy()
        del panelc[0]
        assert_frame_equal(panelc[1], panel[1])
        assert_frame_equal(panelc[2], panel[2])

        panelc = panel.copy()
        del panelc[1]
        assert_frame_equal(panelc[0], panel[0])
        assert_frame_equal(panelc[2], panel[2])

        panelc = panel.copy()
        del panelc[2]
        assert_frame_equal(panelc[1], panel[1])
        assert_frame_equal(panelc[0], panel[0])

    def test_setitem(self):
        # LongPanel with one item
        lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
        with tm.assertRaises(ValueError):
            self.panel['ItemE'] = lp

        # DataFrame
        df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
        self.panel['ItemF'] = df
        self.panel['ItemE'] = df

        df2 = self.panel['ItemF']

        assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))

        # scalar
        self.panel['ItemG'] = 1
        self.panel['ItemE'] = True
        self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
        self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)

        # object dtype
        self.panel['ItemQ'] = 'foo'
        self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)

        # boolean dtype
        self.panel['ItemP'] = self.panel['ItemA'] > 0
        self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)

        # Assigning a Panel as a single item is a TypeError.
        self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
                          self.panel.ix[['ItemP']])

        # bad shape
        p = Panel(np.random.randn(4, 3, 2))
        with tm.assertRaisesRegexp(ValueError,
                                   r"shape of value must be \(3, 2\), "
                                   r"shape of given object was \(4, 2\)"):
            p[0] = np.random.randn(4, 2)

    def test_setitem_ndarray(self):
        # Assigning a raw ndarray as an item must store it unchanged.
        timeidx = date_range(start=datetime(2009, 1, 1),
                             end=datetime(2009, 12, 31),
                             freq=MonthEnd())
        lons_coarse = np.linspace(-177.5, 177.5, 72)
        lats_coarse = np.linspace(-87.5, 87.5, 36)
        P = Panel(items=timeidx, major_axis=lons_coarse,
                  minor_axis=lats_coarse)
        data = np.random.randn(72 * 36).reshape((72, 36))
        key = datetime(2009, 2, 28)
        P[key] = data

        assert_almost_equal(P[key].values, data)

    def test_set_minor_major(self):
        # GH 11014
        df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
        df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
        panel = Panel({'Item1': df1, 'Item2': df2})

        newminor = notnull(panel.iloc[:, :, 0])
        panel.loc[:, :, 'NewMinor'] = newminor
        assert_frame_equal(panel.loc[:, :, 'NewMinor'],
                           newminor.astype(object))

        newmajor = notnull(panel.iloc[:, 0, :])
        panel.loc[:, 'NewMajor', :] = newmajor
        assert_frame_equal(panel.loc[:, 'NewMajor', :],
                           newmajor.astype(object))

    def test_major_xs(self):
        ref = self.panel['ItemA']

        idx = self.panel.major_axis[5]
        xs = self.panel.major_xs(idx)

        result = xs['ItemA']
        assert_series_equal(result, ref.xs(idx), check_names=False)
        self.assertEqual(result.name, 'ItemA')

        # not contained
        idx = self.panel.major_axis[0] - BDay()
        self.assertRaises(Exception, self.panel.major_xs, idx)

    def test_major_xs_mixed(self):
        # A cross-section over mixed dtypes keeps per-item dtypes.
        self.panel['ItemD'] = 'foo'
        xs = self.panel.major_xs(self.panel.major_axis[0])
        self.assertEqual(xs['ItemA'].dtype, np.float64)
        self.assertEqual(xs['ItemD'].dtype, np.object_)

    def test_minor_xs(self):
        ref = self.panel['ItemA']

        idx = self.panel.minor_axis[1]
        xs = self.panel.minor_xs(idx)

        assert_series_equal(xs['ItemA'], ref[idx], check_names=False)

        # not contained
        self.assertRaises(Exception, self.panel.minor_xs, 'E')

    def test_minor_xs_mixed(self):
        self.panel['ItemD'] = 'foo'

        xs = self.panel.minor_xs('D')
        self.assertEqual(xs['ItemA'].dtype, np.float64)
        self.assertEqual(xs['ItemD'].dtype, np.object_)

    def test_xs(self):
        itemA = self.panel.xs('ItemA', axis=0)
        expected = self.panel['ItemA']
        assert_frame_equal(itemA, expected)

        # get a view by default
        itemA_view = self.panel.xs('ItemA', axis=0)
        itemA_view.values[:] = np.nan
        self.assertTrue(np.isnan(self.panel['ItemA'].values).all())

        # mixed-type yields a copy
        self.panel['strings'] = 'foo'
        result = self.panel.xs('D', axis=2)
        self.assertIsNotNone(result.is_copy)

    def test_getitem_fancy_labels(self):
        p = self.panel

        items = p.items[[1, 0]]
        dates = p.major_axis[::2]
        cols = ['D', 'C', 'F']

        # all 3 specified
        assert_panel_equal(p.ix[items, dates, cols],
                           p.reindex(items=items, major=dates, minor=cols))

        # 2 specified
        assert_panel_equal(p.ix[:, dates, cols],
                           p.reindex(major=dates, minor=cols))

        assert_panel_equal(p.ix[items, :, cols],
                           p.reindex(items=items, minor=cols))

        assert_panel_equal(p.ix[items, dates, :],
                           p.reindex(items=items, major=dates))

        # only 1
        assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))

        assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))

        assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))

    def test_getitem_fancy_slice(self):
        pass

    def test_getitem_fancy_ints(self):
        p = self.panel

        # #1603
        result = p.ix[:, -1, :]
        expected = p.ix[:, p.major_axis[-1], :]
        assert_frame_equal(result, expected)

    def test_getitem_fancy_xs(self):
        p = self.panel
        item = 'ItemB'

        date = p.major_axis[5]
        col = 'C'

        # get DataFrame
        # item
        assert_frame_equal(p.ix[item], p[item])
        assert_frame_equal(p.ix[item, :], p[item])
        assert_frame_equal(p.ix[item, :, :], p[item])

        # major axis, axis=1
        assert_frame_equal(p.ix[:, date], p.major_xs(date))
        assert_frame_equal(p.ix[:, date, :], p.major_xs(date))

        # minor axis, axis=2
        assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))

        # get Series
        assert_series_equal(p.ix[item, date], p[item].ix[date])
        assert_series_equal(p.ix[item, date, :], p[item].ix[date])
        assert_series_equal(p.ix[item, :, col], p[item][col])
        assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])

    def test_getitem_fancy_xs_check_view(self):
        item = 'ItemB'
        date = self.panel.major_axis[5]

        # make sure it's always a view
        NS = slice(None, None)

        # DataFrames
        comp = assert_frame_equal
        self._check_view(item, comp)
        self._check_view((item, NS), comp)
        self._check_view((item, NS, NS), comp)
        self._check_view((NS, date), comp)
        self._check_view((NS, date, NS), comp)
        self._check_view((NS, NS, 'C'), comp)

        # Series
        comp = assert_series_equal
        self._check_view((item, date), comp)
        self._check_view((item, date, NS), comp)
        self._check_view((item, NS, 'C'), comp)
        self._check_view((NS, date, 'C'), comp)

    def test_getitem_callable(self):
        p = self.panel
        # GH 12533

        assert_frame_equal(p[lambda x: 'ItemB'], p.loc['ItemB'])
        assert_panel_equal(p[lambda x: ['ItemB', 'ItemC']],
                           p.loc[['ItemB', 'ItemC']])

    def test_ix_setitem_slice_dataframe(self):
        a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
                  minor_axis=[111, 222, 333])
        b = DataFrame(np.random.randn(2, 3), index=[111, 333],
                      columns=[1, 2, 3])

        a.ix[:, 22, [111, 333]] = b

        assert_frame_equal(a.ix[:, 22, [111, 333]], b)

    def test_ix_align(self):
        # Series assignment via .ix must align on index, not position.
        from pandas import Series
        b = Series(np.random.randn(10), name=0)
        b.sort()
        df_orig = Panel(np.random.randn(3, 10, 2))
        df = df_orig.copy()

        df.ix[0, :, 0] = b
        assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)

        df = df_orig.swapaxes(0, 1)
        df.ix[:, 0, 0] = b
        assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)

        df = df_orig.swapaxes(1, 2)
        df.ix[0, 0, :] = b
        assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)

    def test_ix_frame_align(self):
        # Assigning a frame back into its own slot, through every
        # indexer flavor, must be a no-op.
        p_orig = tm.makePanel()
        df = p_orig.ix[0].copy()
        assert_frame_equal(p_orig['ItemA'], df)

        p = p_orig.copy()
        p.ix[0, :, :] = df
        assert_panel_equal(p, p_orig)

        p = p_orig.copy()
        p.ix[0] = df
        assert_panel_equal(p, p_orig)

        p = p_orig.copy()
        p.iloc[0, :, :] = df
        assert_panel_equal(p, p_orig)

        p = p_orig.copy()
        p.iloc[0] = df
        assert_panel_equal(p, p_orig)

        p = p_orig.copy()
        p.loc['ItemA'] = df
        assert_panel_equal(p, p_orig)

        p = p_orig.copy()
        p.loc['ItemA', :, :] = df
        assert_panel_equal(p, p_orig)

        p = p_orig.copy()
        p['ItemA'] = df
        assert_panel_equal(p, p_orig)

        p = p_orig.copy()
        p.ix[0, [0, 1, 3, 5], -2:] = df
        out = p.ix[0, [0, 1, 3, 5], -2:]
        assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])

        # GH3830, panel assignent by values/frame
        for dtype in ['float64', 'int64']:

            panel = Panel(np.arange(40).reshape((2, 4, 5)),
                          items=['a1', 'a2'], dtype=dtype)
            df1 = panel.iloc[0]
            df2 = panel.iloc[1]

            tm.assert_frame_equal(panel.loc['a1'], df1)
            tm.assert_frame_equal(panel.loc['a2'], df2)

            # Assignment by Value Passes for 'a2'
            panel.loc['a2'] = df1.values
            tm.assert_frame_equal(panel.loc['a1'], df1)
            tm.assert_frame_equal(panel.loc['a2'], df1)

            # Assignment by DataFrame Ok w/o loc 'a2'
            panel['a2'] = df2
            tm.assert_frame_equal(panel.loc['a1'], df1)
            tm.assert_frame_equal(panel.loc['a2'], df2)

            # Assignment by DataFrame Fails for 'a2'
            panel.loc['a2'] = df2
            tm.assert_frame_equal(panel.loc['a1'], df1)
            tm.assert_frame_equal(panel.loc['a2'], df2)

    def _check_view(self, indexer, comp):
        # Helper: verify that cp.ix[indexer] is a view — zeroing the
        # result must zero the underlying panel data too.
        cp = self.panel.copy()
        obj = cp.ix[indexer]
        obj.values[:] = 0
        self.assertTrue((obj.values == 0).all())
        comp(cp.ix[indexer].reindex_like(obj), obj)

    def test_logical_with_nas(self):
        d = Panel({'ItemA': {'a': [np.nan, False]},
                   'ItemB': {'a': [True, True]}})

        result = d['ItemA'] | d['ItemB']
        expected = DataFrame({'a': [np.nan, True]})
        assert_frame_equal(result, expected)

        # this is autodowncasted here
        result = d['ItemA'].fillna(False) | d['ItemB']
        expected = DataFrame({'a': [True, True]})
        assert_frame_equal(result, expected)

    def test_neg(self):
        # what to do?
        assert_panel_equal(-self.panel, -1 * self.panel)

    def test_invert(self):
        assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))

    def test_comparisons(self):
        p1 = tm.makePanel()
        p2 = tm.makePanel()

        tp = p1.reindex(items=p1.items + ['foo'])
        df = p1[p1.items[0]]

        def test_comp(func):

            # versus same index
            result = func(p1, p2)
            self.assert_numpy_array_equal(result.values,
                                          func(p1.values, p2.values))

            # versus non-indexed same objs
            self.assertRaises(Exception, func, p1, tp)

            # versus different objs
            self.assertRaises(Exception, func, p1, df)

            # versus scalar
            result3 = func(self.panel, 0)
            self.assert_numpy_array_equal(result3.values,
                                          func(self.panel.values, 0))

        with np.errstate(invalid='ignore'):
            test_comp(operator.eq)
            test_comp(operator.ne)
            test_comp(operator.lt)
            test_comp(operator.gt)
            test_comp(operator.ge)
            test_comp(operator.le)

    def test_get_value(self):
        for item in self.panel.items:
            for mjr in self.panel.major_axis[::2]:
                for mnr in self.panel.minor_axis:
                    result = self.panel.get_value(item, mjr, mnr)
                    expected = self.panel[item][mnr][mjr]
                    assert_almost_equal(result, expected)

        # Too few arguments must raise with a descriptive message.
        with tm.assertRaisesRegexp(TypeError,
                                   "There must be an argument for each axis"):
            self.panel.get_value('a')

    def test_set_value(self):
        for item in self.panel.items:
            for mjr in self.panel.major_axis[::2]:
                for mnr in self.panel.minor_axis:
                    self.panel.set_value(item, mjr, mnr, 1.)
                    assert_almost_equal(self.panel[item][mnr][mjr], 1.)

        # resize
        res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
        tm.assertIsInstance(res, Panel)
        self.assertIsNot(res, self.panel)
        self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)

        res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
        self.assertTrue(is_float_dtype(res3['ItemE'].values))
        with tm.assertRaisesRegexp(TypeError,
                                   "There must be an argument for each axis"
                                   " plus the value provided"):
            self.panel.set_value('a')
# Shared module-level fixture: a sample Panel with NaNs injected.
# Test cases copy it in setUp rather than mutating it directly.
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled prop
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
    """Passing a 2-D ndarray must raise with a dimension-count message."""
    with tm.assertRaisesRegexp(ValueError,
                               "The number of dimensions required is 3"):
        Panel(np.random.randn(10, 2))
def test_consolidate(self):
    """Adding an item de-consolidates the BlockManager; consolidate() restores it."""
    self.assertTrue(self.panel._data.is_consolidated())

    # assigning a new item creates a separate block
    self.panel['foo'] = 1.
    self.assertFalse(self.panel._data.is_consolidated())

    panel = self.panel.consolidate()
    self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
    """Panel.from_dict / Panel(dict) accept DataFrames, nested series dicts,
    and None entries; union vs. intersect of indexes and dtype casting work.
    """
    itema = self.panel['ItemA']
    itemb = self.panel['ItemB']

    # d: plain DataFrames (itemb truncated so the major axes differ)
    # d2: nested dicts of Series; d3: includes a None entry
    d = {'A': itema, 'B': itemb[5:]}
    d2 = {'A': itema._series, 'B': itemb[5:]._series}
    d3 = {'A': None,
          'B': DataFrame(itemb[5:]._series),
          'C': DataFrame(itema._series)}

    wp = Panel.from_dict(d)
    wp2 = Panel.from_dict(d2)  # nested Dict

    # TODO: unused?
    wp3 = Panel.from_dict(d3)  # noqa

    # default is the union of the major axes
    self.assert_index_equal(wp.major_axis, self.panel.major_axis)
    assert_panel_equal(wp, wp2)

    # intersect: only the rows common to all items survive
    wp = Panel.from_dict(d, intersect=True)
    self.assert_index_equal(wp.major_axis, itemb.index[5:])

    # use constructor: Panel(dict) must match Panel.from_dict(dict)
    assert_panel_equal(Panel(d), Panel.from_dict(d))
    assert_panel_equal(Panel(d2), Panel.from_dict(d2))
    assert_panel_equal(Panel(d3), Panel.from_dict(d3))

    # a pathological case: all values None still yields the item labels
    d4 = {'A': None, 'B': None}

    # TODO: unused?
    wp4 = Panel.from_dict(d4)  # noqa

    assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))

    # cast: dtype argument applies to dict input too (NaNs filled first so
    # the int casts are well defined)
    dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
                   for k, v in compat.iteritems(d))
    result = Panel(dcasted, dtype=int)
    expected = Panel(dict((k, v.astype(int))
                          for k, v in compat.iteritems(dcasted)))
    assert_panel_equal(result, expected)

    result = Panel(dcasted, dtype=np.int32)
    expected = Panel(dict((k, v.astype(np.int32))
                          for k, v in compat.iteritems(dcasted)))
    assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
    """A dict of raw ndarrays builds a Panel with default integer axes;
    mixing in a DataFrame works, but mismatched shapes must blow up.
    """
    data = dict((k, v.values) for k, v in self.panel.iteritems())
    result = Panel(data)

    # raw ndarrays carry no labels, so the major axis defaults to a range
    exp_major = Index(np.arange(len(self.panel.major_axis)))
    self.assert_index_equal(result.major_axis, exp_major)

    # supplying the axes explicitly reconstructs the original panel
    result = Panel(data, items=self.panel.items,
                   major_axis=self.panel.major_axis,
                   minor_axis=self.panel.minor_axis)
    assert_panel_equal(result, self.panel)

    # a labeled DataFrame mixed with ndarrays is still accepted
    data['ItemC'] = self.panel['ItemC']
    result = Panel(data)
    assert_panel_equal(result, self.panel)

    # corner, blow up: inconsistent shapes must raise
    data['ItemB'] = data['ItemB'][:-1]
    self.assertRaises(Exception, Panel, data)

    data['ItemB'] = self.panel['ItemB'].values[:, :-1]
    self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
    """An OrderedDict input must preserve its insertion order in `items`."""
    keys = list(set(np.random.randint(0, 5000, 100)))[
        :50]  # unique random int keys
    d = OrderedDict([(k, mkdf(10, 5)) for k in keys])

    p = Panel(d)
    self.assertTrue(list(p.items) == keys)

    p = Panel.from_dict(d)
    self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
    """Passing a BlockManager plus shorter axes reindexes to those axes;
    omitted axes are kept as-is.
    """
    data = self.panel._data
    items = self.panel.items[:-1]
    major = self.panel.major_axis[:-1]
    minor = self.panel.minor_axis[:-1]

    result = Panel(data, items=items, major_axis=major, minor_axis=minor)
    expected = self.panel.reindex(items=items, major=major, minor=minor)
    assert_panel_equal(result, expected)

    result = Panel(data, items=items, major_axis=major)
    expected = self.panel.reindex(items=items, major=major)
    assert_panel_equal(result, expected)

    result = Panel(data, items=items)
    expected = self.panel.reindex(items=items)
    assert_panel_equal(result, expected)

    result = Panel(data, minor_axis=minor)
    expected = self.panel.reindex(minor=minor)
    assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
    """orient='minor' keeps per-column dtypes: the object column stays
    object while numeric columns stay float64.
    """
    df = tm.makeDataFrame()
    df['foo'] = 'bar'

    data = {'k1': df, 'k2': df}

    panel = Panel.from_dict(data, orient='minor')

    self.assertEqual(panel['foo'].values.dtype, np.object_)
    self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
    """Axis labels that disagree with the (3, 4, 5) data shape raise a
    ValueError whose message reports both the passed and the implied shape.

    The original spelled this as three identical nested ``testit`` closures
    (each shadowing the previous); they are deduplicated into one loop over
    the mismatched axis-length combinations.
    """
    # (items, major, minor) lengths and the shape message they imply.
    cases = [
        ((4, 5, 5), r"\(4, 5, 5\)"),
        ((5, 4, 5), r"\(5, 4, 5\)"),
        ((5, 5, 4), r"\(5, 5, 4\)"),
    ]
    for (n_items, n_major, n_minor), implied in cases:
        assertRaisesRegexp(ValueError,
                           r"Shape of passed values is \(3, 4, 5\), "
                           r"indices imply " + implied,
                           Panel, np.random.randn(3, 4, 5),
                           lrange(n_items), lrange(n_major),
                           lrange(n_minor))
def test_conform(self):
    """conform() reindexes a frame onto the panel's major/minor axes."""
    df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
    conformed = self.panel.conform(df)

    tm.assert_index_equal(conformed.index, self.panel.major_axis)
    tm.assert_index_equal(conformed.columns, self.panel.minor_axis)
def test_convert_objects(self):
    """_convert(numeric=True, coerce=True) turns numeric strings into numbers."""
    # GH 4937
    p = Panel(dict(A=dict(a=['1', '1.0'])))
    expected = Panel(dict(A=dict(a=[1, 1.0])))
    result = p._convert(numeric=True, coerce=True)
    assert_panel_equal(result, expected)
def test_dtypes(self):
    """The all-float fixture panel reports float64 for every item."""
    expected = Series(np.dtype('float64'), index=self.panel.items)
    result = self.panel.dtypes
    assert_series_equal(result, expected)
def test_astype(self):
    """astype(str) converts element-wise; per-item dtype dicts are rejected."""
    # GH7271
    data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
    panel = Panel(data, ['a', 'b'], ['c', 'd'], ['e', 'f'])

    str_data = np.array([[['1', '2'], ['3', '4']],
                         [['5', '6'], ['7', '8']]])
    expected = Panel(str_data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
    assert_panel_equal(panel.astype(str), expected)

    # dict-valued dtype arguments are not implemented for Panel
    self.assertRaises(NotImplementedError, panel.astype, {0: str})
def test_apply(self):
    """apply() supports ufuncs, shape-preserving lambdas on each axis,
    per-slice reductions to DataFrames, and extra keyword arguments.
    """
    # GH1148

    # ufunc applied element-wise
    applied = self.panel.apply(np.sqrt)
    with np.errstate(invalid='ignore'):
        # fixture contains negatives, so sqrt produces NaNs; silence them
        expected = np.sqrt(self.panel.values)
    assert_almost_equal(applied.values, expected)

    # ufunc same shape: result has the panel's shape for every axis choice
    result = self.panel.apply(lambda x: x * 2, axis='items')
    expected = self.panel * 2
    assert_panel_equal(result, expected)
    result = self.panel.apply(lambda x: x * 2, axis='major_axis')
    expected = self.panel * 2
    assert_panel_equal(result, expected)
    result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
    expected = self.panel * 2
    assert_panel_equal(result, expected)

    # reduction to DataFrame: scalar per slice, indexed by remaining axes
    result = self.panel.apply(lambda x: x.dtype, axis='items')
    expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
                         columns=self.panel.minor_axis)
    assert_frame_equal(result, expected)
    result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
    expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
                         columns=self.panel.items)
    assert_frame_equal(result, expected)
    result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
    expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
                         columns=self.panel.items)
    assert_frame_equal(result, expected)

    # reductions via other dims: apply(sum) matches panel.sum(axis)
    expected = self.panel.sum(0)
    result = self.panel.apply(lambda x: x.sum(), axis='items')
    assert_frame_equal(result, expected)
    expected = self.panel.sum(1)
    result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
    assert_frame_equal(result, expected)
    expected = self.panel.sum(2)
    result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
    assert_frame_equal(result, expected)

    # pass kwargs through to the applied function
    result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
    expected = self.panel.sum(0) + 5
    assert_frame_equal(result, expected)
def test_apply_slabs(self):
    """apply() over 2-D slabs (a pair of axes): shape-preserving lambdas,
    reductions, per-slab transforms, and MultiIndex-labelled panels.
    """
    # same shape as original; result is transposed so the slab axes lead
    result = self.panel.apply(lambda x: x * 2,
                              axis=['items', 'major_axis'])
    expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
                                          'items')
    assert_panel_equal(result, expected)
    # axis pair order does not matter
    result = self.panel.apply(lambda x: x * 2,
                              axis=['major_axis', 'items'])
    assert_panel_equal(result, expected)

    result = self.panel.apply(lambda x: x * 2,
                              axis=['items', 'minor_axis'])
    expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
                                          'items')
    assert_panel_equal(result, expected)
    result = self.panel.apply(lambda x: x * 2,
                              axis=['minor_axis', 'items'])
    assert_panel_equal(result, expected)

    result = self.panel.apply(lambda x: x * 2,
                              axis=['major_axis', 'minor_axis'])
    expected = self.panel * 2
    assert_panel_equal(result, expected)
    result = self.panel.apply(lambda x: x * 2,
                              axis=['minor_axis', 'major_axis'])
    assert_panel_equal(result, expected)

    # reductions: summing within the slab reduces one axis
    result = self.panel.apply(lambda x: x.sum(0), axis=[
        'items', 'major_axis'
    ])
    expected = self.panel.sum(1).T
    assert_frame_equal(result, expected)

    result = self.panel.apply(lambda x: x.sum(1), axis=[
        'items', 'major_axis'
    ])
    expected = self.panel.sum(0)
    assert_frame_equal(result, expected)

    # transforms: per-slab standardization
    f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T

    # make sure that we don't trigger any warnings
    with tm.assert_produces_warning(False):
        result = self.panel.apply(f, axis=['items', 'major_axis'])
        expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
                               for ax in self.panel.minor_axis]))
        assert_panel_equal(result, expected)

    result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
    expected = Panel(dict([(ax, f(self.panel.loc[ax]))
                           for ax in self.panel.items]))
    assert_panel_equal(result, expected)

    result = self.panel.apply(f, axis=['minor_axis', 'items'])
    expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
                           for ax in self.panel.major_axis]))
    assert_panel_equal(result, expected)

    # with multi-indexes
    # GH7469
    index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
        'two', 'a'), ('two', 'b')])
    dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
        4, 3), columns=list("ABC"), index=index)
    dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
        4, 3), columns=list("ABC"), index=index)
    p = Panel({'f': dfa, 'g': dfb})
    result = p.apply(lambda x: x.sum(), axis=0)

    # on windows this will be int32, so normalize before comparing
    result = result.astype('int64')
    expected = p.sum(0)
    assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
    """Scalar-returning functions over 2-D slabs reduce to a Series,
    for Python and numpy scalar types alike.
    """
    # GH10332
    self.panel = Panel(np.random.rand(5, 5, 5))

    result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
    result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
    result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
    result_float64 = self.panel.apply(lambda df: np.float64(0.0),
                                      axis=[1, 2])

    expected_int = expected_int64 = Series([0] * 5)
    expected_float = expected_float64 = Series([0.0] * 5)

    assert_series_equal(result_int, expected_int)
    assert_series_equal(result_int64, expected_int64)
    assert_series_equal(result_float, expected_float)
    assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
    """reindex() on each axis, duplicate-keyword rejection, no-op copy
    semantics, padding fills, and copy=False identity.
    """
    ref = self.panel['ItemB']

    # items
    result = self.panel.reindex(items=['ItemA', 'ItemB'])
    assert_frame_equal(result['ItemB'], ref)

    # major
    new_major = list(self.panel.major_axis[:10])
    result = self.panel.reindex(major=new_major)
    assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))

    # raise exception put both major and major_axis
    self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
                      major=new_major)

    # minor
    new_minor = list(self.panel.minor_axis[:2])
    result = self.panel.reindex(minor=new_minor)
    assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))

    # this ok: no-argument reindex returns an equal but distinct panel
    result = self.panel.reindex()
    assert_panel_equal(result, self.panel)
    self.assertFalse(result is self.panel)

    # with filling: method='pad' forward-fills the reinstated rows
    smaller_major = self.panel.major_axis[::5]
    smaller = self.panel.reindex(major=smaller_major)
    larger = smaller.reindex(major=self.panel.major_axis, method='pad')
    assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
                       smaller.major_xs(smaller_major[0]))

    # don't necessarily copy: copy=False with identical axes returns self
    result = self.panel.reindex(major=self.panel.major_axis, copy=False)
    assert_panel_equal(result, self.panel)
    self.assertTrue(result is self.panel)
def test_reindex_multi(self):
    """Full three-axis reindex with copy=False returns the same axis
    objects; partial multi-axis reindexes agree regardless of the copy
    flag or which axes are given (GH 5900).
    """
    # with and without copy full reindexing
    result = self.panel.reindex(items=self.panel.items,
                                major=self.panel.major_axis,
                                minor=self.panel.minor_axis, copy=False)

    # identical axes + copy=False must hand back the very same Index objects
    self.assertIs(result.items, self.panel.items)
    self.assertIs(result.major_axis, self.panel.major_axis)
    self.assertIs(result.minor_axis, self.panel.minor_axis)

    result = self.panel.reindex(items=self.panel.items,
                                major=self.panel.major_axis,
                                minor=self.panel.minor_axis, copy=False)
    assert_panel_equal(result, self.panel)

    # multi-axis indexing consistency
    # GH 5900
    df = DataFrame(np.random.randn(4, 3))
    p = Panel({'Item1': df})
    expected = Panel({'Item1': df})
    expected['Item2'] = np.nan  # new item fills with NaN

    items = ['Item1', 'Item2']
    major_axis = np.arange(4)
    minor_axis = np.arange(3)

    results = []
    results.append(p.reindex(items=items, major_axis=major_axis,
                             copy=True))
    results.append(p.reindex(items=items, major_axis=major_axis,
                             copy=False))
    results.append(p.reindex(items=items, minor_axis=minor_axis,
                             copy=True))
    results.append(p.reindex(items=items, minor_axis=minor_axis,
                             copy=False))
    results.append(p.reindex(items=items, major_axis=major_axis,
                             minor_axis=minor_axis, copy=True))
    results.append(p.reindex(items=items, major_axis=major_axis,
                             minor_axis=minor_axis, copy=False))

    # the original looped with enumerate but never used the index
    for r in results:
        assert_panel_equal(expected, r)
def test_reindex_like(self):
    """reindex_like(other) matches an explicit reindex onto other's axes."""
    target = self.panel.reindex(items=self.panel.items[:-1],
                                major=self.panel.major_axis[:-1],
                                minor=self.panel.minor_axis[:-1])
    mirrored = self.panel.reindex_like(target)
    assert_panel_equal(target, mirrored)
def test_take(self):
    """take() by positional indices on axes 0 and 2, including negative
    positions; out-of-bounds positions must raise.
    """
    # axis == 0
    result = self.panel.take([2, 0, 1], axis=0)
    expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
    assert_panel_equal(result, expected)

    # axis >= 1
    result = self.panel.take([3, 0, 1, 2], axis=2)
    expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
    assert_panel_equal(result, expected)

    # neg indices ok: -1 wraps to the last minor label
    expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
    result = self.panel.take([3, -1, 1, 2], axis=2)
    assert_panel_equal(result, expected)

    self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
    """sort_index() restores shuffled order on each axis; ascending=False
    reverses it.
    """
    import random

    ritems = list(self.panel.items)
    rmajor = list(self.panel.major_axis)
    rminor = list(self.panel.minor_axis)
    random.shuffle(ritems)
    random.shuffle(rmajor)
    random.shuffle(rminor)

    random_order = self.panel.reindex(items=ritems)
    sorted_panel = random_order.sort_index(axis=0)
    assert_panel_equal(sorted_panel, self.panel)

    # descending
    random_order = self.panel.reindex(items=ritems)
    sorted_panel = random_order.sort_index(axis=0, ascending=False)
    assert_panel_equal(sorted_panel,
                       self.panel.reindex(items=self.panel.items[::-1]))

    random_order = self.panel.reindex(major=rmajor)
    sorted_panel = random_order.sort_index(axis=1)
    assert_panel_equal(sorted_panel, self.panel)

    random_order = self.panel.reindex(minor=rminor)
    sorted_panel = random_order.sort_index(axis=2)
    assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
    """fillna(): scalar values, backfill method, mixed dtypes, empty
    panels, argument validation, the unimplemented limit kwarg, and
    in-place ffill/bfill.
    """
    filled = self.panel.fillna(0)
    self.assertTrue(np.isfinite(filled.values).all())

    filled = self.panel.fillna(method='backfill')
    assert_frame_equal(filled['ItemA'],
                       self.panel['ItemA'].fillna(method='backfill'))

    # mixed dtypes: backfill must still work with an object item present
    panel = self.panel.copy()
    panel['str'] = 'foo'

    filled = panel.fillna(method='backfill')
    assert_frame_equal(filled['ItemA'],
                       panel['ItemA'].fillna(method='backfill'))

    # empty panel round-trips unchanged
    empty = self.panel.reindex(items=[])
    filled = empty.fillna(0)
    assert_panel_equal(filled, empty)

    # invalid argument combinations
    self.assertRaises(ValueError, self.panel.fillna)
    self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')

    self.assertRaises(TypeError, self.panel.fillna, [1, 2])
    self.assertRaises(TypeError, self.panel.fillna, (1, 2))

    # limit not implemented when only value is specified
    p = Panel(np.random.randn(3, 4, 5))
    p.iloc[0:2, 0:2, 0:2] = np.nan
    self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))

    # Test in place fillNA
    # Expected result
    expected = Panel([[[0, 1], [2, 1]], [[10, 11], [12, 11]]],
                     items=['a', 'b'], minor_axis=['x', 'y'],
                     dtype=np.float64)

    # method='ffill'
    p1 = Panel([[[0, 1], [2, np.nan]], [[10, 11], [12, np.nan]]],
               items=['a', 'b'], minor_axis=['x', 'y'],
               dtype=np.float64)
    p1.fillna(method='ffill', inplace=True)
    assert_panel_equal(p1, expected)

    # method='bfill'
    p2 = Panel([[[0, np.nan], [2, 1]], [[10, np.nan], [12, 11]]],
               items=['a', 'b'], minor_axis=['x', 'y'], dtype=np.float64)
    p2.fillna(method='bfill', inplace=True)
    assert_panel_equal(p2, expected)
def test_ffill_bfill(self):
    """ffill()/bfill() are shorthand for fillna(method=...)."""
    for method in ('ffill', 'bfill'):
        assert_panel_equal(getattr(self.panel, method)(),
                           self.panel.fillna(method=method))
def test_truncate_fillna_bug(self):
    """Regression: fillna after a no-op truncate must not raise."""
    # #1823
    result = self.panel.truncate(before=None, after=None, axis='items')

    # it works!
    result.fillna(value=0.0)
def test_swapaxes(self):
    """swapaxes() exchanges axis labels (by name or number); swapping an
    axis with itself returns an equal copy, not the same object.
    """
    result = self.panel.swapaxes('items', 'minor')
    self.assertIs(result.items, self.panel.minor_axis)

    result = self.panel.swapaxes('items', 'major')
    self.assertIs(result.items, self.panel.major_axis)

    result = self.panel.swapaxes('major', 'minor')
    self.assertIs(result.major_axis, self.panel.minor_axis)

    panel = self.panel.copy()
    result = panel.swapaxes('major', 'minor')
    panel.values[0, 0, 1] = np.nan
    expected = panel.swapaxes('major', 'minor')
    assert_panel_equal(result, expected)

    # this should also work: numeric axis arguments
    result = self.panel.swapaxes(0, 1)
    self.assertIs(result.items, self.panel.major_axis)

    # this works, but return a copy
    result = self.panel.swapaxes('items', 'items')
    assert_panel_equal(self.panel, result)
    self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
    """transpose() accepts positional names, keywords, mixed forms, and
    numeric axes; duplicate or missing axes must raise.
    """
    result = self.panel.transpose('minor', 'major', 'items')
    expected = self.panel.swapaxes('items', 'minor')
    assert_panel_equal(result, expected)

    # test kwargs
    result = self.panel.transpose(items='minor', major='major',
                                  minor='items')
    expected = self.panel.swapaxes('items', 'minor')
    assert_panel_equal(result, expected)

    # test mixture of args
    result = self.panel.transpose('minor', major='major', minor='items')
    expected = self.panel.swapaxes('items', 'minor')
    assert_panel_equal(result, expected)

    result = self.panel.transpose('minor', 'major', minor='items')
    expected = self.panel.swapaxes('items', 'minor')
    assert_panel_equal(result, expected)

    # duplicate axes
    with tm.assertRaisesRegexp(TypeError,
                               'not enough/duplicate arguments'):
        self.panel.transpose('minor', maj='major', minor='items')

    with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
        self.panel.transpose('minor', 'major', major='minor',
                             minor='items')

    result = self.panel.transpose(2, 1, 0)
    assert_panel_equal(result, expected)

    result = self.panel.transpose('minor', 'items', 'major')
    expected = self.panel.swapaxes('items', 'minor')
    expected = expected.swapaxes('major', 'minor')
    assert_panel_equal(result, expected)

    result = self.panel.transpose(2, 0, 1)
    assert_panel_equal(result, expected)

    # repeated numeric axis is also invalid
    self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
    """transpose(copy=True) detaches the result from the source's data."""
    panel = self.panel.copy()
    result = panel.transpose(2, 0, 1, copy=True)
    expected = panel.swapaxes('items', 'minor')
    expected = expected.swapaxes('major', 'minor')
    assert_panel_equal(result, expected)

    # mutating the source must not leak into the copied transpose
    panel.values[0, 1, 1] = np.nan
    self.assertTrue(notnull(result.values[1, 0, 1]))
def test_to_frame(self):
    """to_frame() filtering, round-tripping via to_panel (including an
    unsorted frame), index level names, and name preservation.
    """
    # filtered: default drops rows with any missing observation
    filtered = self.panel.to_frame()
    expected = self.panel.to_frame().dropna(how='any')
    assert_frame_equal(filtered, expected)

    # unfiltered round-trips back to the original panel
    unfiltered = self.panel.to_frame(filter_observations=False)
    assert_panel_equal(unfiltered.to_panel(), self.panel)

    # names: default MultiIndex level names
    self.assertEqual(unfiltered.index.names, ('major', 'minor'))

    # unsorted, round trip
    df = self.panel.to_frame(filter_observations=False)
    unsorted = df.take(np.random.permutation(len(df)))
    pan = unsorted.to_panel()
    assert_panel_equal(pan, self.panel)

    # preserve original index names
    df = DataFrame(np.random.randn(6, 2),
                   index=[['a', 'a', 'b', 'b', 'c', 'c'],
                          [0, 1, 0, 1, 0, 1]],
                   columns=['one', 'two'])
    df.index.names = ['foo', 'bar']
    df.columns.name = 'baz'

    rdf = df.to_panel().to_frame()
    self.assertEqual(rdf.index.names, df.index.names)
    self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
    """to_frame/to_panel round-trip with mixed dtypes (str, bool,
    categorical); categoricals come back as object (GH 8704).
    """
    panel = self.panel.fillna(0)
    panel['str'] = 'foo'
    panel['bool'] = panel['ItemA'] > 0

    lp = panel.to_frame()
    wp = lp.to_panel()
    self.assertEqual(wp['bool'].values.dtype, np.bool_)
    # Previously, this was mutating the underlying index and changing its
    # name
    assert_frame_equal(wp['bool'], panel['bool'], check_names=False)

    # GH 8704
    # with categorical
    df = panel.to_frame()
    df['category'] = df['str'].astype('category')

    # to_panel
    # TODO: this converts back to object
    p = df.to_panel()
    expected = panel.copy()
    expected['category'] = 'foo'
    assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
    """to_frame() with a MultiIndex major axis flattens to a 3-level
    index; NaN observations are dropped, and NaN index labels survive.
    """
    idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
        2, 'two')])
    df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
                   columns=['A', 'B', 'C'], index=idx)
    wp = Panel({'i1': df, 'i2': df})

    expected_idx = MultiIndex.from_tuples(
        [
            (1, 'one', 'A'), (1, 'one', 'B'),
            (1, 'one', 'C'), (1, 'two', 'A'),
            (1, 'two', 'B'), (1, 'two', 'C'),
            (2, 'one', 'A'), (2, 'one', 'B'),
            (2, 'one', 'C'), (2, 'two', 'A'),
            (2, 'two', 'B'), (2, 'two', 'C')
        ],
        names=[None, None, 'minor'])
    expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
                                 'c', 1, 4, 'd', 1],
                          'i2': [1, 'a', 1, 2, 'b',
                                 1, 3, 'c', 1, 4, 'd', 1]},
                         index=expected_idx)

    result = wp.to_frame()
    assert_frame_equal(result, expected)

    # a NaN observation drops the corresponding row from the frame
    wp.iloc[0, 0].iloc[0] = np.nan  # BUG on setting. GH #5773
    result = wp.to_frame()
    assert_frame_equal(result, expected[1:])

    # NaN as an index label is preserved in the flattened index
    idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
        np.nan, 'two')])
    df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
                   columns=['A', 'B', 'C'], index=idx)
    wp = Panel({'i1': df, 'i2': df})

    ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
                                     (1, 'two', 'C'),
                                     (1, 'one', 'A'),
                                     (1, 'one', 'B'),
                                     (1, 'one', 'C'),
                                     (2, 'one', 'A'),
                                     (2, 'one', 'B'),
                                     (2, 'one', 'C'),
                                     (np.nan, 'two', 'A'),
                                     (np.nan, 'two', 'B'),
                                     (np.nan, 'two', 'C')],
                                    names=[None, None, 'minor'])
    expected.index = ex_idx
    result = wp.to_frame()
    assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
    """to_frame() with MultiIndexes on both major and minor axes flattens
    to a 4-level index, row-major over the levels.
    """
    cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
                      labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
    idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
        2, 'two'), (3, 'three'), (4, 'four')])
    df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
                    ['a', 'b', 'w', 'x'],
                    ['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
                    [-5, -6, -7, -8]], columns=cols, index=idx)
    wp = Panel({'i1': df, 'i2': df})

    exp_idx = MultiIndex.from_tuples(
        [(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
         (1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
         (1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
         (1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
         (2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
         (2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
         (2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
         (2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
         (3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
         (3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
         (4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
         (4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
        names=[None, None, None, None])
    exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
               [13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
               ['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
               [-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
               [-7, -7], [-8, -8]]
    result = wp.to_frame()
    expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
    assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
    """to_frame() drops the row whose only value is NaN, even with a
    MultiIndex major axis.
    """
    idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
    df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
    wp = Panel({'i1': df, 'i2': df})
    result = wp.to_frame()
    exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
                                     names=[None, None, 'minor'])
    expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
    assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
    """to_panel() fills positions absent from the MultiIndex with NaN."""
    df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
                   index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
                          [0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])

    panel = df.to_panel()
    # group 1 has no entries for minor labels 0 and 1
    self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
    """to_panel() refuses a non-uniquely indexed frame."""
    # #2441
    df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
    idf = df.set_index(['a', 'b'])  # (0, 1) appears twice
    assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
    """Duplicate labels on each axis: positional access still picks the
    right slice, unique labels select singly, and a duplicated label
    selects all matching slices (GH 4960).
    """
    # GH 4960
    # duplicates in an index

    # items
    data = np.random.randn(5, 100, 5)
    no_dup_panel = Panel(data, items=list("ABCDE"))
    panel = Panel(data, items=list("AACDE"))

    expected = no_dup_panel['A']
    result = panel.iloc[0]
    assert_frame_equal(result, expected)

    expected = no_dup_panel['E']
    result = panel.loc['E']
    assert_frame_equal(result, expected)

    # duplicated label returns both matching items
    expected = no_dup_panel.loc[['A', 'B']]
    expected.items = ['A', 'A']
    result = panel.loc['A']
    assert_panel_equal(result, expected)

    # major
    data = np.random.randn(5, 5, 5)
    no_dup_panel = Panel(data, major_axis=list("ABCDE"))
    panel = Panel(data, major_axis=list("AACDE"))

    expected = no_dup_panel.loc[:, 'A']
    result = panel.iloc[:, 0]
    assert_frame_equal(result, expected)

    expected = no_dup_panel.loc[:, 'E']
    result = panel.loc[:, 'E']
    assert_frame_equal(result, expected)

    expected = no_dup_panel.loc[:, ['A', 'B']]
    expected.major_axis = ['A', 'A']
    result = panel.loc[:, 'A']
    assert_panel_equal(result, expected)

    # minor
    data = np.random.randn(5, 100, 5)
    no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
    panel = Panel(data, minor_axis=list("AACDE"))

    expected = no_dup_panel.loc[:, :, 'A']
    result = panel.iloc[:, :, 0]
    assert_frame_equal(result, expected)

    expected = no_dup_panel.loc[:, :, 'E']
    result = panel.loc[:, :, 'E']
    assert_frame_equal(result, expected)

    expected = no_dup_panel.loc[:, :, ['A', 'B']]
    expected.minor_axis = ['A', 'A']
    result = panel.loc[:, :, 'A']
    assert_panel_equal(result, expected)
def test_filter(self):
    """Placeholder: Panel.filter is not covered here yet."""
    pass
def test_compound(self):
    """compound() equals the compounded-return formula prod(1 + r) - 1."""
    compounded = self.panel.compound()

    assert_series_equal(compounded['ItemA'],
                        (1 + self.panel['ItemA']).product(0) - 1,
                        check_names=False)
def test_shift(self):
    """shift() along each axis, negative periods (GH 2164), and dtype
    preservation for mixed panels (GH 6959).
    """
    # major
    idx = self.panel.major_axis[0]
    idx_lag = self.panel.major_axis[1]
    shifted = self.panel.shift(1)
    assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))

    # minor
    idx = self.panel.minor_axis[0]
    idx_lag = self.panel.minor_axis[1]
    shifted = self.panel.shift(1, axis='minor')
    assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))

    # items
    idx = self.panel.items[0]
    idx_lag = self.panel.items[1]
    shifted = self.panel.shift(1, axis='items')
    assert_frame_equal(self.panel[idx], shifted[idx_lag])

    # negative numbers, #2164
    result = self.panel.shift(-1)
    expected = Panel(dict((i, f.shift(-1)[:-1])
                          for i, f in self.panel.iteritems()))
    assert_panel_equal(result, expected)

    # mixed dtypes #6959
    data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
    data = dict(data)
    mixed_panel = Panel.from_dict(data, orient='minor')
    shifted = mixed_panel.shift(1)
    assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
    """tshift() on Period- and Datetime-indexed panels: round trips,
    explicit freq arguments, inferred freq, and freqless input raising.
    """
    # PeriodIndex
    ps = tm.makePeriodPanel()
    shifted = ps.tshift(1)
    unshifted = shifted.tshift(-1)

    assert_panel_equal(unshifted, ps)

    shifted2 = ps.tshift(freq='B')
    assert_panel_equal(shifted, shifted2)

    shifted3 = ps.tshift(freq=BDay())
    assert_panel_equal(shifted, shifted3)

    # an incompatible freq must be rejected
    assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')

    # DatetimeIndex
    panel = _panel
    shifted = panel.tshift(1)
    unshifted = shifted.tshift(-1)

    assert_panel_equal(panel, unshifted)

    shifted2 = panel.tshift(freq=panel.major_axis.freq)
    assert_panel_equal(shifted, shifted2)

    # rebuilding without an explicit freq forces tshift to infer it
    inferred_ts = Panel(panel.values, items=panel.items,
                        major_axis=Index(np.asarray(panel.major_axis)),
                        minor_axis=panel.minor_axis)
    shifted = inferred_ts.tshift(1)
    unshifted = shifted.tshift(-1)
    assert_panel_equal(shifted, panel.tshift(1))
    assert_panel_equal(unshifted, inferred_ts)

    # an irregular subset has no inferable freq, so tshift must raise
    no_freq = panel.ix[:, [0, 5, 7], :]
    self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
    """pct_change() along each axis (name and number), with periods 1
    and 2, matching the per-frame equivalents.
    """
    df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
    df2 = df1 + 1
    df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
    wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})

    # major, 1 (the default axis)
    result = wp.pct_change()  # axis='major'
    expected = Panel({'i1': df1.pct_change(),
                      'i2': df2.pct_change(),
                      'i3': df3.pct_change()})
    assert_panel_equal(result, expected)
    result = wp.pct_change(axis=1)
    assert_panel_equal(result, expected)

    # major, 2
    result = wp.pct_change(periods=2)
    expected = Panel({'i1': df1.pct_change(2),
                      'i2': df2.pct_change(2),
                      'i3': df3.pct_change(2)})
    assert_panel_equal(result, expected)

    # minor, 1
    result = wp.pct_change(axis='minor')
    expected = Panel({'i1': df1.pct_change(axis=1),
                      'i2': df2.pct_change(axis=1),
                      'i3': df3.pct_change(axis=1)})
    assert_panel_equal(result, expected)
    result = wp.pct_change(axis=2)
    assert_panel_equal(result, expected)

    # minor, 2
    result = wp.pct_change(periods=2, axis='minor')
    expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
                      'i2': df2.pct_change(periods=2, axis=1),
                      'i3': df3.pct_change(periods=2, axis=1)})
    assert_panel_equal(result, expected)

    # items, 1: the first item has nothing to compare against, hence NaN
    result = wp.pct_change(axis='items')
    expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
                                       'c2': [np.nan, np.nan, np.nan]}),
                      'i2': DataFrame({'c1': [1, 0.5, .2],
                                       'c2': [1. / 3, 0.25, 1. / 6]}),
                      'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
                                       'c2': [.25, .2, 1. / 7]})})
    assert_panel_equal(result, expected)
    result = wp.pct_change(axis=0)
    assert_panel_equal(result, expected)

    # items, 2: only the third item has a 2-back reference
    result = wp.pct_change(periods=2, axis='items')
    expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
                                       'c2': [np.nan, np.nan, np.nan]}),
                      'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
                                       'c2': [np.nan, np.nan, np.nan]}),
                      'i3': DataFrame({'c1': [2, 1, .4],
                                       'c2': [2. / 3, .5, 1. / 3]})})
    assert_panel_equal(result, expected)
def test_round(self):
    """round() matches numpy.around element-wise on every value."""
    values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
               [-1566.213, 88.88], [-12, 94.5]],
              [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
               [272.212, -99.99], [23, -76.5]]]
    evalues = [[[float(np.around(i)) for i in j] for j in k]
               for k in values]
    p = Panel(values, items=['Item1', 'Item2'],
              major_axis=pd.date_range('1/1/2000', periods=5),
              minor_axis=['A', 'B'])
    expected = Panel(evalues, items=['Item1', 'Item2'],
                     major_axis=pd.date_range('1/1/2000', periods=5),
                     minor_axis=['A', 'B'])
    result = p.round()
    self.assert_panel_equal(expected, result)
def test_numpy_round(self):
    """np.round(panel) dispatches to Panel.round; the unsupported 'out'
    parameter must raise.
    """
    values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
               [-1566.213, 88.88], [-12, 94.5]],
              [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
               [272.212, -99.99], [23, -76.5]]]
    evalues = [[[float(np.around(i)) for i in j] for j in k]
               for k in values]
    p = Panel(values, items=['Item1', 'Item2'],
              major_axis=pd.date_range('1/1/2000', periods=5),
              minor_axis=['A', 'B'])
    expected = Panel(evalues, items=['Item1', 'Item2'],
                     major_axis=pd.date_range('1/1/2000', periods=5),
                     minor_axis=['A', 'B'])
    result = np.round(p)
    self.assert_panel_equal(expected, result)

    msg = "the 'out' parameter is not supported"
    tm.assertRaisesRegexp(ValueError, msg, np.round, p, out=p)
def test_multiindex_get(self):
    """Selecting the first level of a MultiIndex items axis returns a
    sub-panel whose items are the second-level labels, via [] and .ix.

    The original ended by rebuilding ``ind`` with a three-tuple index and
    never using it — dead code, removed here.
    """
    ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
                                 names=['first', 'second'])
    wp = Panel(np.random.random((4, 5, 5)),
               items=ind,
               major_axis=np.arange(5),
               minor_axis=np.arange(5))
    f1 = wp['a']
    f2 = wp.ix['a']
    assert_panel_equal(f1, f2)

    # items collapse to the remaining (second) level
    self.assertTrue((f1.items == [1, 2]).all())
    self.assertTrue((f2.items == [1, 2]).all())
def test_multiindex_blocks(self):
    """Partial and full tuple selection on a MultiIndex items axis that
    was assigned after construction (exercising the block path).
    """
    ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
                                 names=['first', 'second'])
    wp = Panel(self.panel._data)
    wp.items = ind

    # first-level selection collapses to the second level
    f1 = wp['a']
    self.assertTrue((f1.items == [1, 2]).all())

    # full-tuple selection yields a single item frame
    f1 = wp[('b', 1)]
    self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
    """repr() of an empty Panel must not raise."""
    repr(Panel())
def test_rename(self):
    """rename_axis() with a mapping dict and a callable; copy=False makes
    the result share data with the original.
    """
    mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}

    renamed = self.panel.rename_axis(mapper, axis=0)
    exp = Index(['foo', 'bar', 'baz'])
    self.assert_index_equal(renamed.items, exp)

    renamed = self.panel.rename_axis(str.lower, axis=2)
    exp = Index(['a', 'b', 'c', 'd'])
    self.assert_index_equal(renamed.minor_axis, exp)

    # don't copy: writes through the renamed view reach the original
    renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
    renamed_nocopy['foo'] = 3.
    self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
    """Items are reachable as attributes, including single-letter names
    that could shadow internals (GH 3440).
    """
    assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)

    # specific cases from #3440
    self.panel['a'] = self.panel['ItemA']
    assert_frame_equal(self.panel['a'], self.panel.a)
    self.panel['i'] = self.panel['ItemA']
    assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
with ensure_clean('__tmp__.' + ext) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
with ensure_clean('__tmp__.xlsx') as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.ix[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.ix[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.ix['b'] = np.nan
result = p.dropna(how='all')
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
self.assertRaises(ValueError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
assert_panel_equal(dropped, panel)
dropped = panel.drop(['Two', 'Three'], errors='ignore')
expected = Panel({"One": df})
assert_panel_equal(dropped, expected)
# Major
exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(0, 1, ['major_axis', 'major'], expected)
exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop([1], 1, ['major_axis', 'major'], expected)
# Minor
exp_df = df[['B']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
exp_df = df[['A']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop("B", 2, ['minor_axis', 'minor'], expected)
def test_update(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[3.6, 2., 3], [1.5, np.nan, 7], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_from_dict(self):
pan = Panel({'one': DataFrame([[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
'two': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
other = {'two': DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]])}
pan.update(other)
expected = Panel(
{'two': DataFrame([[3.6, 2., 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
'one': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
assert_panel_equal(pan, expected)
def test_update_nooverwrite(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, overwrite=False)
expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, 2., 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_filtered(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, filter_func=lambda x: x > 2)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, np.nan, 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_raise(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
self.assertRaises(Exception, pan.update, *(pan, ),
**{'raise_conflict': True})
def test_all_any(self):
self.assertTrue((self.panel.all(axis=0).values == nanall(
self.panel, axis=0)).all())
self.assertTrue((self.panel.all(axis=1).values == nanall(
self.panel, axis=1).T).all())
self.assertTrue((self.panel.all(axis=2).values == nanall(
self.panel, axis=2).T).all())
self.assertTrue((self.panel.any(axis=0).values == nanany(
self.panel, axis=0)).all())
self.assertTrue((self.panel.any(axis=1).values == nanany(
self.panel, axis=1).T).all())
self.assertTrue((self.panel.any(axis=2).values == nanany(
self.panel, axis=2).T).all())
def test_all_any_unhandled(self):
self.assertRaises(NotImplementedError, self.panel.all, bool_only=True)
self.assertRaises(NotImplementedError, self.panel.any, bool_only=True)
class TestLongPanel(tm.TestCase):
"""
LongPanel no longer exists, but...
"""
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
panel = tm.makePanel()
tm.add_nans(panel)
self.panel = panel.to_frame()
self.unfiltered_panel = panel.to_frame(filter_observations=False)
def test_ops_differently_indexed(self):
# trying to set non-identically indexed panel
wp = self.panel.to_panel()
wp2 = wp.reindex(major=wp.major_axis[:-1])
lp2 = wp2.to_frame()
result = self.panel + lp2
assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
# careful, mutation
self.panel['foo'] = lp2['ItemA']
assert_series_equal(self.panel['foo'].reindex(lp2.index), lp2['ItemA'],
check_names=False)
def test_ops_scalar(self):
result = self.panel.mul(2)
expected = DataFrame.__mul__(self.panel, 2)
assert_frame_equal(result, expected)
def test_combineFrame(self):
wp = self.panel.to_panel()
result = self.panel.add(wp['ItemA'].stack(), axis=0)
assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
wp = self.panel.to_panel()
result = self.panel.add(self.panel)
wide_result = result.to_panel()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
# one item
result = self.panel.add(self.panel.filter(['ItemA']))
def test_combine_scalar(self):
result = self.panel.mul(2)
expected = DataFrame(self.panel._data) * 2
assert_frame_equal(result, expected)
def test_combine_series(self):
s = self.panel['ItemA'][:10]
result = self.panel.add(s, axis=0)
expected = DataFrame.add(self.panel, s, axis=0)
assert_frame_equal(result, expected)
s = self.panel.ix[5]
result = self.panel + s
expected = DataFrame.add(self.panel, s, axis=1)
assert_frame_equal(result, expected)
def test_operators(self):
wp = self.panel.to_panel()
result = (self.panel + 1).to_panel()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_arith_flex_panel(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
self.panel = self.panel.to_panel()
for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
for op in ops:
alias = aliases.get(op, op)
f = getattr(operator, alias)
exp = f(self.panel, n)
result = getattr(self.panel, op)(n)
assert_panel_equal(result, exp, check_panel_type=True)
# rops
r_f = lambda x, y: f(y, x)
exp = r_f(self.panel, n)
result = getattr(self.panel, 'r' + op)(n)
assert_panel_equal(result, exp)
def test_sort(self):
def is_sorted(arr):
return (arr[1:] > arr[:-1]).any()
sorted_minor = self.panel.sortlevel(level=1)
self.assertTrue(is_sorted(sorted_minor.index.labels[1]))
sorted_major = sorted_minor.sortlevel(level=0)
self.assertTrue(is_sorted(sorted_major.index.labels[0]))
def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
def test_to_sparse(self):
if isinstance(self.panel, Panel):
msg = 'sparsifying is not supported'
tm.assertRaisesRegexp(NotImplementedError, msg,
self.panel.to_sparse)
def test_truncate(self):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(start, end)
# TODO trucate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(before=start).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(before=start)
# TODO trucate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(after=end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(after=end)
# TODO trucate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
# truncate on dates that aren't in there
wp = self.panel.to_panel()
new_index = wp.major_axis[::5]
wp2 = wp.reindex(major=new_index)
lp2 = wp2.to_frame()
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
assert_panel_equal(wp_trunc, lp_trunc.to_panel())
# throw proper exception
self.assertRaises(Exception, lp2.truncate, wp.major_axis[-2],
wp.major_axis[2])
def test_axis_dummies(self):
from pandas.core.reshape import make_axis_dummies
minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8)
self.assertEqual(len(minor_dummies.columns),
len(self.panel.index.levels[1]))
major_dummies = make_axis_dummies(self.panel, 'major').astype(np.uint8)
self.assertEqual(len(major_dummies.columns),
len(self.panel.index.levels[0]))
mapping = {'A': 'one', 'B': 'one', 'C': 'two', 'D': 'two'}
transformed = make_axis_dummies(self.panel, 'minor',
transform=mapping.get).astype(np.uint8)
self.assertEqual(len(transformed.columns), 2)
self.assert_index_equal(transformed.columns, Index(['one', 'two']))
# TODO: test correctness
def test_get_dummies(self):
from pandas.core.reshape import get_dummies, make_axis_dummies
self.panel['Label'] = self.panel.index.labels[1]
minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8)
dummies = get_dummies(self.panel['Label'])
self.assert_numpy_array_equal(dummies.values, minor_dummies.values)
def test_mean(self):
means = self.panel.mean(level='minor')
# test versus Panel version
wide_means = self.panel.to_panel().mean('major')
assert_frame_equal(means, wide_means)
def test_sum(self):
sums = self.panel.sum(level='minor')
# test versus Panel version
wide_sums = self.panel.to_panel().sum('major')
assert_frame_equal(sums, wide_sums)
def test_count(self):
index = self.panel.index
major_count = self.panel.count(level=0)['ItemA']
labels = index.labels[0]
for i, idx in enumerate(index.levels[0]):
self.assertEqual(major_count[i], (labels == i).sum())
minor_count = self.panel.count(level=1)['ItemA']
labels = index.labels[1]
for i, idx in enumerate(index.levels[1]):
self.assertEqual(minor_count[i], (labels == i).sum())
def test_join(self):
lp1 = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC'])
joined = lp1.join(lp2)
self.assertEqual(len(joined.columns), 3)
self.assertRaises(Exception, lp1.join,
self.panel.filter(['ItemB', 'ItemC']))
def test_pivot(self):
from pandas.core.reshape import _slow_pivot
one, two, three = (np.array([1, 2, 3, 4, 5]),
np.array(['a', 'b', 'c', 'd', 'e']),
np.array([1, 2, 3, 5, 4.]))
df = pivot(one, two, three)
self.assertEqual(df['a'][1], 1)
self.assertEqual(df['b'][2], 2)
self.assertEqual(df['c'][3], 3)
self.assertEqual(df['d'][4], 5)
self.assertEqual(df['e'][5], 4)
assert_frame_equal(df, _slow_pivot(one, two, three))
# weird overlap, TODO: test?
a, b, c = (np.array([1, 2, 3, 4, 4]),
np.array(['a', 'a', 'a', 'a', 'a']),
np.array([1., 2., 3., 4., 5.]))
self.assertRaises(Exception, pivot, a, b, c)
# corner case, empty
df = pivot(np.array([]), np.array([]), np.array([]))
def test_monotonic():
pos = np.array([1, 2, 3, 5])
def _monotonic(arr):
return not (arr[1:] < arr[:-1]).any()
assert _monotonic(pos)
neg = np.array([1, 2, 3, 4, 3])
assert not _monotonic(neg)
neg2 = np.array([5, 1, 2, 3, 4, 5])
assert not _monotonic(neg2)
def test_panel_index():
index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
expected = MultiIndex.from_arrays([np.tile([1, 2, 3, 4], 3),
np.repeat([1, 2, 3], 4)],
names=['time', 'panel'])
tm.assert_index_equal(index, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
rajul/mne-python | examples/decoding/plot_linear_model_patterns.py | 13 | 3098 | """
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Decoding, a.k.a MVPA or supervised machine learning applied to MEG and EEG
data in sensor space. Fit a linear classifier with the LinearModel object
providing topographical patterns which are more neurophysiologically
interpretable [1] than the classifier filters (weight vectors).
The patterns explain how the MEG and EEG data were generated from the
discriminant neural sources which are extracted by the filters.
Note patterns/filters in MEG data are more similar than EEG data
because the noise is less spatially correlated in MEG than EEG.
[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,
Blankertz, B., & Bießmann, F. (2014). On the interpretation of
weight vectors of linear models in multivariate neuroimaging.
NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <trachelr@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=4, baseline=None, preload=True)
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.pick_types(meg=True, eeg=False, copy=True)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
eeg_epochs = epochs.pick_types(meg=False, eeg=True, copy=True)
eeg_data = eeg_epochs.get_data().reshape(len(labels), -1)
###############################################################################
# Decoding in sensor space using a LogisticRegression classifier
clf = LogisticRegression()
sc = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = sc.fit_transform(meg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(meg_epochs.info, title='MEG Patterns')
model.plot_filters(meg_epochs.info, title='MEG Filters')
# fit the classifier on EEG data
X = sc.fit_transform(eeg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(eeg_epochs.info, title='EEG Patterns')
model.plot_filters(eeg_epochs.info, title='EEG Filters')
| bsd-3-clause |
nagyistoce/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_shareddense.py | 7 | 17280 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_shareddense.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_shareddense.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
l3f = layers.FlattenLayer(l3s)
l4a = layers.DenseLayer(l3f, n_outputs=512, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
j4 = layers.MultiRotMergeLayer(l4, num_views=4) # 2) # merge convolutional parts
l5a = layers.DenseLayer(j4, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l5 = layers.FeatureMaxPoolingLayer(l5a, pool_size=2, feature_dim=1, implementation='reshape')
l6a = layers.DenseLayer(l5, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l6a) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for testset, don't forget to cute off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cute off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
cpcloud/arrow | python/pyarrow/serialization.py | 2 | 7287 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict, defaultdict
import six
import sys
import numpy as np
from pyarrow.compat import builtin_pickle
from pyarrow.lib import (SerializationContext, _default_serialization_context,
py_buffer)
try:
import cloudpickle
except ImportError:
cloudpickle = builtin_pickle
# ----------------------------------------------------------------------
# Set up serialization for numpy with dtype object (primitive types are
# handled efficiently with Arrow's Tensor facilities, see
# python_to_arrow.cc)
def _serialize_numpy_array_list(obj):
if obj.dtype.str != '|O':
# Make the array c_contiguous if necessary so that we can call change
# the view.
if not obj.flags.c_contiguous:
obj = np.ascontiguousarray(obj)
return obj.view('uint8'), obj.dtype.str
else:
return obj.tolist(), obj.dtype.str
def _deserialize_numpy_array_list(data):
if data[1] != '|O':
assert data[0].dtype == np.uint8
return data[0].view(data[1])
else:
return np.array(data[0], dtype=np.dtype(data[1]))
def _pickle_to_buffer(x):
    """Pickle *x* with the highest available protocol and wrap the
    resulting bytes in a pyarrow buffer."""
    pickled = builtin_pickle.dumps(x, protocol=builtin_pickle.HIGHEST_PROTOCOL)
    return py_buffer(pickled)
def _load_pickle_from_buffer(data):
    """Unpickle an object from a buffer-like payload produced by
    ``_pickle_to_buffer``."""
    as_memoryview = memoryview(data)
    if six.PY2:
        # Python 2's pickle cannot read from a memoryview directly.
        return builtin_pickle.loads(as_memoryview.tobytes())
    else:
        return builtin_pickle.loads(as_memoryview)
# ----------------------------------------------------------------------
# pandas-specific serialization matters
def _register_custom_pandas_handlers(context):
    """Register fast (de)serializers for pandas Series/Index/DataFrame
    on *context*; silently does nothing if pandas is not installed."""
    # ARROW-1784, faster path for pandas-only visibility
    try:
        import pandas as pd
    except ImportError:
        return

    import pyarrow.pandas_compat as pdcompat

    # Shared message for the unsupported sparse pandas containers.
    sparse_type_error_msg = (
        '{0} serialization is not supported.\n'
        'Note that {0} is planned to be deprecated '
        'in pandas future releases.\n'
        'See https://github.com/pandas-dev/pandas/issues/19239 '
        'for more information.'
    )

    def _serialize_pandas_dataframe(obj):
        # SparseDataFrame has no dense serialized-dict representation.
        if isinstance(obj, pd.SparseDataFrame):
            raise NotImplementedError(
                sparse_type_error_msg.format('SparseDataFrame')
            )
        return pdcompat.dataframe_to_serialized_dict(obj)

    def _deserialize_pandas_dataframe(data):
        return pdcompat.serialized_dict_to_dataframe(data)

    def _serialize_pandas_series(obj):
        if isinstance(obj, pd.SparseSeries):
            raise NotImplementedError(
                sparse_type_error_msg.format('SparseSeries')
            )
        # A Series is serialized as a one-column DataFrame.
        return _serialize_pandas_dataframe(pd.DataFrame({obj.name: obj}))

    def _deserialize_pandas_series(data):
        deserialized = _deserialize_pandas_dataframe(data)
        return deserialized[deserialized.columns[0]]

    context.register_type(
        pd.Series, 'pd.Series',
        custom_serializer=_serialize_pandas_series,
        custom_deserializer=_deserialize_pandas_series)

    # Index is pickled wholesale rather than decomposed.
    context.register_type(
        pd.Index, 'pd.Index',
        custom_serializer=_pickle_to_buffer,
        custom_deserializer=_load_pickle_from_buffer)

    context.register_type(
        pd.DataFrame, 'pd.DataFrame',
        custom_serializer=_serialize_pandas_dataframe,
        custom_deserializer=_deserialize_pandas_dataframe)
def register_torch_serialization_handlers(serialization_context):
    """Register (de)serializers for PyTorch tensor types on
    *serialization_context*; a no-op when torch is not installed."""
    # ----------------------------------------------------------------------
    # Set up serialization for pytorch tensors
    try:
        import torch

        def _serialize_torch_tensor(obj):
            # detach() drops the autograd graph before the numpy view.
            return obj.detach().numpy()

        def _deserialize_torch_tensor(data):
            return torch.from_numpy(data)

        # Register every concrete CPU tensor class plus the base Tensor.
        for t in [torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor,
                  torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor, torch.Tensor]:
            serialization_context.register_type(
                t, "torch." + t.__name__,
                custom_serializer=_serialize_torch_tensor,
                custom_deserializer=_deserialize_torch_tensor)
    except ImportError:
        # no torch
        pass
def register_default_serialization_handlers(serialization_context):
    """Register the built-in (de)serializers — primitive types, ordered
    and default dicts, functions, types, numpy arrays, and the pandas
    handlers — on *serialization_context*."""
    # ----------------------------------------------------------------------
    # Set up serialization for primitive datatypes

    # TODO(pcm): This is currently a workaround until arrow supports
    # arbitrary precision integers. This is only called on long integers,
    # see the associated case in the append method in python_to_arrow.cc
    serialization_context.register_type(
        int, "int",
        custom_serializer=lambda obj: str(obj),
        custom_deserializer=lambda data: int(data))

    if (sys.version_info < (3, 0)):
        # Python 2 only: `long` is a separate type from `int`.
        serialization_context.register_type(
            long, "long",  # noqa: F821
            custom_serializer=lambda obj: str(obj),
            custom_deserializer=lambda data: long(data))  # noqa: F821

    def _serialize_ordered_dict(obj):
        # Keys and values are shipped as parallel lists to keep order.
        return list(obj.keys()), list(obj.values())

    def _deserialize_ordered_dict(data):
        return OrderedDict(zip(data[0], data[1]))

    serialization_context.register_type(
        OrderedDict, "OrderedDict",
        custom_serializer=_serialize_ordered_dict,
        custom_deserializer=_deserialize_ordered_dict)

    def _serialize_default_dict(obj):
        # The default_factory callable rides along as the third element.
        return list(obj.keys()), list(obj.values()), obj.default_factory

    def _deserialize_default_dict(data):
        return defaultdict(data[2], zip(data[0], data[1]))

    serialization_context.register_type(
        defaultdict, "defaultdict",
        custom_serializer=_serialize_default_dict,
        custom_deserializer=_deserialize_default_dict)

    # Functions and classes have no structural form; fall back to pickle.
    serialization_context.register_type(
        type(lambda: 0), "function",
        pickle=True)

    serialization_context.register_type(type, "type", pickle=True)

    serialization_context.register_type(
        np.ndarray, 'np.array',
        custom_serializer=_serialize_numpy_array_list,
        custom_deserializer=_deserialize_numpy_array_list)

    _register_custom_pandas_handlers(serialization_context)
def default_serialization_context():
    """Return a fresh SerializationContext pre-loaded with the default
    handlers."""
    context = SerializationContext()
    register_default_serialization_handlers(context)
    return context


# Populate the module-global context at import time.
register_default_serialization_handlers(_default_serialization_context)
| apache-2.0 |
enakai00/ml4se | scripts/02-square_error.py | 1 | 3206 | # -*- coding: utf-8 -*-
#
# 誤差関数(最小二乗法)による回帰分析
#
# 2015/04/22 ver1.0
#
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import Series, DataFrame
from numpy.random import normal
#------------#
# Parameters #
#------------#
N=10 # number of sample locations x
M=[0,1,3,9] # polynomial degrees to fit
# データセット {x_n,y_n} (n=1...N) を用意
def create_dataset(num):
dataset = DataFrame(columns=['x','y'])
for i in range(num):
x = float(i)/float(num-1)
y = np.sin(2*np.pi*x) + normal(scale=0.3)
dataset = dataset.append(Series([x,y], index=['x','y']),
ignore_index=True)
return dataset
# Root mean square error of model f over the dataset
def rms_error(dataset, f):
    """Return sqrt(mean((y - f(x))**2)) over the rows of *dataset*."""
    err = 0.0
    # Accumulate half squared residuals in row order, then undo the 1/2.
    for x, y in zip(dataset.x, dataset.y):
        err += 0.5 * (y - f(x)) ** 2
    return np.sqrt(2 * err / len(dataset))
# Solve the least-squares fit
def resolve(dataset, m):
    """Fit a degree-*m* polynomial to (dataset.x, dataset.y) via the
    normal equations.

    Returns (f, ws): *f* evaluates the fitted polynomial and *ws* are
    its coefficients, constant term first.
    """
    t = dataset.y
    # Design matrix: column i is x**i, i = 0..m.
    basis_cols = []
    for i in range(0, m + 1):
        col = dataset.x ** i
        col.name = "x**%d" % i
        basis_cols.append(col)
    phi = pd.concat(basis_cols, axis=1)
    # ws = (phi^T phi)^-1 phi^T t  (normal equations)
    gram_inv = np.linalg.inv(np.dot(phi.T, phi))
    ws = np.dot(np.dot(gram_inv, phi.T), t)

    def f(x):
        y = 0
        for i, w in enumerate(ws):
            y += w * (x ** i)
        return y

    return (f, ws)
# Main
if __name__ == '__main__':
    train_set = create_dataset(N)
    test_set = create_dataset(N)
    df_ws = DataFrame()

    # Fit and display the polynomial approximation for each degree in M
    fig = plt.figure()
    for c, m in enumerate(M):
        f, ws = resolve(train_set, m)
        df_ws = df_ws.append(Series(ws,name="M=%d" % m))

        subplot = fig.add_subplot(2,2,c+1)
        subplot.set_xlim(-0.05,1.05)
        subplot.set_ylim(-1.5,1.5)
        subplot.set_title("M=%d" % m)

        # Plot the training set
        subplot.scatter(train_set.x, train_set.y,
                        marker='o', color='blue', label=None)

        # Plot the true curve sin(2*pi*x)
        linex = np.linspace(0,1,101)
        liney = np.sin(2*np.pi*linex)
        subplot.plot(linex, liney, color='green', linestyle='--')

        # Plot the fitted polynomial curve with its training RMS error
        linex = np.linspace(0,1,101)
        liney = f(linex)
        label = "E(RMS)=%.2f" % rms_error(train_set, f)
        subplot.plot(linex, liney, color='red', label=label)
        subplot.legend(loc=1)

    # Print the coefficient table (Python 2 print statements)
    print "Table of the coefficients"
    print df_ws.transpose()
    fig.show()

    # Plot how training/test error evolves with the polynomial degree
    df = DataFrame(columns=['Training set','Test set'])
    for m in range(0,10): # polynomial degree
        f, ws = resolve(train_set, m)
        train_error = rms_error(train_set, f)
        test_error = rms_error(test_set, f)
        df = df.append(
            Series([train_error, test_error],
                   index=['Training set','Test set']),
            ignore_index=True)
    df.plot(title='RMS Error', style=['-','--'], grid=True, ylim=(0,0.9))
    plt.show()
| gpl-2.0 |
mattilyra/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target

n_components = 2
# IPCA consumes the data in mini-batches of 10 samples.
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)

# Full-batch PCA as the reference projection.
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)

colors = ['navy', 'turquoise', 'darkorange']

for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
    plt.figure(figsize=(8, 8))
    for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
        plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
                    color=color, lw=2, label=target_name)

    if "Incremental" in title:
        # Components can differ by a sign flip, so compare magnitudes.
        err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
        plt.title(title + " of iris dataset\nMean absolute unsigned error "
                  "%.6f" % err)
    else:
        plt.title(title + " of iris dataset")
    plt.legend(loc="best", shadow=False, scatterpoints=1)
    plt.axis([-4, 4, -1.5, 1.5])

plt.show()
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/viz/utils.py | 6 | 30185 | """Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import math
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose, set_config
# Default channel-trace color cycle: matplotlib single-letter colors
# followed by extra hex colors to extend the palette.
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
          '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
def _setup_vmin_vmax(data, vmin, vmax, norm=False):
"""Aux function to handle vmin and vmax parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
if norm:
vmin = 0.
else:
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
if norm:
vmin = 0.
else:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
return vmin, vmax
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
    """ Adjust subplot parameters to give specified padding.

    Note. For plotting please use this function instead of plt.tight_layout

    Parameters
    ----------
    pad : float
        padding between the figure edge and the edges of subplots, as a
        fraction of the font-size.
    h_pad : float
        Padding height between edges of adjacent subplots.
        Defaults to `pad_inches`.
    w_pad : float
        Padding width between edges of adjacent subplots.
        Defaults to `pad_inches`.
    fig : instance of Figure
        Figure to apply changes to. Defaults to the current figure.
    """
    import matplotlib.pyplot as plt
    fig = plt.gcf() if fig is None else fig

    # A draw is required before tight_layout can measure text extents.
    fig.canvas.draw()
    try:  # see https://github.com/matplotlib/matplotlib/issues/2654
        fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
    except Exception:
        # Fixed typo in the user-facing warning ("adjusment").
        warn('Matplotlib function \'tight_layout\' is not supported.'
             ' Skipping subplot adjustment.')
    else:
        # Persist the settings so later draws keep the layout.
        try:
            fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
        except Exception:
            pass
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
    """Return a colormap similar to that used by mne_analyze

    Parameters
    ----------
    limits : list (or array) of length 3 or 6
        Bounds for the colormap, which will be mirrored across zero if length
        3, or completely specified (and potentially asymmetric) if length 6.
    format : str
        Type of colormap to return. If 'matplotlib', will return a
        matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
        return an RGBA array of shape (256, 4).

    Returns
    -------
    cmap : instance of matplotlib.pyplot.colormap | array
        A teal->blue->gray->red->yellow colormap.

    Notes
    -----
    For this will return a colormap that will display correctly for data
    that are scaled by the plotting function to span [-fmax, fmax].

    Examples
    --------
    The following code will plot a STC using standard MNE limits:

        colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
        brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
        brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
    """
    # Ensure limits is an array
    limits = np.asarray(limits, dtype='float')

    if len(limits) != 3 and len(limits) != 6:
        raise ValueError('limits must have 3 or 6 elements')
    if len(limits) == 3 and any(limits < 0.):
        raise ValueError('if 3 elements, limits must all be non-negative')
    if any(np.diff(limits) <= 0):
        raise ValueError('limits must be monotonically increasing')
    if format == 'matplotlib':
        from matplotlib import colors
        # Normalize the breakpoints into [0, 1] for LinearSegmentedColormap;
        # a 3-element input is first mirrored to 6 points around zero.
        if len(limits) == 3:
            limits = (np.concatenate((-np.flipud(limits), limits)) +
                      limits[-1]) / (2 * limits[-1])
        else:
            limits = (limits - np.min(limits)) / np.max(limits -
                                                        np.min(limits))
        # Piecewise segments: teal -> blue -> gray -> red -> yellow.
        cdict = {'red': ((limits[0], 0.0, 0.0),
                         (limits[1], 0.0, 0.0),
                         (limits[2], 0.5, 0.5),
                         (limits[3], 0.5, 0.5),
                         (limits[4], 1.0, 1.0),
                         (limits[5], 1.0, 1.0)),
                 'green': ((limits[0], 1.0, 1.0),
                           (limits[1], 0.0, 0.0),
                           (limits[2], 0.5, 0.5),
                           (limits[3], 0.5, 0.5),
                           (limits[4], 0.0, 0.0),
                           (limits[5], 1.0, 1.0)),
                 'blue': ((limits[0], 1.0, 1.0),
                          (limits[1], 1.0, 1.0),
                          (limits[2], 0.5, 0.5),
                          (limits[3], 0.5, 0.5),
                          (limits[4], 0.0, 0.0),
                          (limits[5], 0.0, 0.0))}
        return colors.LinearSegmentedColormap('mne_analyze', cdict)
    elif format == 'mayavi':
        # Mayavi wants an explicit (256, 4) RGBA lookup table; build it by
        # interpolating the channel values at 7 breakpoints (incl. zero).
        if len(limits) == 3:
            limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
                limits[-1]
        else:
            limits = np.concatenate((limits[:3], [0], limits[3:]))
            limits /= np.max(np.abs(limits))
        r = np.array([0, 0, 0, 0, 1, 1, 1])
        g = np.array([1, 0, 0, 0, 0, 0, 1])
        b = np.array([1, 1, 1, 0, 0, 0, 0])
        a = np.array([1, 1, 0, 0, 0, 1, 1])
        xp = (np.arange(256) - 128) / 128.0
        colormap = np.r_[[np.interp(xp, limits, 255 * c)
                          for c in [r, g, b, a]]].T
        return colormap
    else:
        raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
    """Toggle options (projectors) dialog"""
    import matplotlib.pyplot as plt
    if len(params['projs']) > 0:
        if params['fig_proj'] is None:
            # No dialog open yet: create one with all boxes checked.
            _draw_proj_checkbox(event, params, draw_current_state=False)
        else:
            # turn off options dialog
            plt.close(params['fig_proj'])
            del params['proj_checks']
            params['fig_proj'] = None
def _toggle_proj(event, params):
    """Operation to perform when proj boxes clicked"""
    # read options if possible
    if 'proj_checks' in params:
        # Checkbox visibility encodes the on/off state of each projector.
        bools = [x[0].get_visible() for x in params['proj_checks'].lines]
        for bi, (b, p) in enumerate(zip(bools, params['projs'])):
            # see if they tried to deactivate an active one
            if not b and p['active']:
                # Already-applied projectors cannot be switched off.
                bools[bi] = True
    else:
        bools = [True] * len(params['projs'])

    compute_proj = False
    if 'proj_bools' not in params:
        compute_proj = True
    elif not np.array_equal(bools, params['proj_bools']):
        compute_proj = True

    # if projectors changed, update plots
    if compute_proj is True:
        params['plot_update_proj_callback'](params, bools)
def _get_help_text(params):
    """Aux function for customizing help dialogs text.

    Builds two parallel column strings for the help window: *text* holds
    the key/mouse action names and *text2* the matching descriptions.
    Entries are inserted at hard-coded indices so the two lists stay
    aligned; the set of entries depends on whether the caller is the
    raw browser, the epochs browser, and/or an ICA plot.
    """
    text, text2 = list(), list()

    # Common keyboard shortcuts (arrows, scaling, paging, misc).
    text.append(u'\u2190 : \n')
    text.append(u'\u2192 : \n')
    text.append(u'\u2193 : \n')
    text.append(u'\u2191 : \n')
    text.append(u'- : \n')
    text.append(u'+ or = : \n')
    text.append(u'Home : \n')
    text.append(u'End : \n')
    text.append(u'Page down : \n')
    text.append(u'Page up : \n')
    text.append(u'F11 : \n')
    text.append(u'? : \n')
    text.append(u'Esc : \n\n')
    text.append(u'Mouse controls\n')
    text.append(u'click on data :\n')

    text2.append('Navigate left\n')
    text2.append('Navigate right\n')
    text2.append('Scale down\n')
    text2.append('Scale up\n')
    text2.append('Toggle full screen mode\n')
    text2.append('Open help box\n')
    text2.append('Quit\n\n\n')

    # Raw-browser specific entries (further specialized for ICA).
    if 'raw' in params:
        text2.insert(4, 'Reduce the time shown per view\n')
        text2.insert(5, 'Increase the time shown per view\n')
        text.append(u'click elsewhere in the plot :\n')
        if 'ica' in params:
            text.append(u'click component name :\n')
            text2.insert(2, 'Navigate components down\n')
            text2.insert(3, 'Navigate components up\n')
            text2.insert(8, 'Reduce the number of components per view\n')
            text2.insert(9, 'Increase the number of components per view\n')
            text2.append('Mark bad channel\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Show topography for the component\n')
        else:
            text.append(u'click channel name :\n')
            text2.insert(2, 'Navigate channels down\n')
            text2.insert(3, 'Navigate channels up\n')
            text2.insert(8, 'Reduce the number of channels per view\n')
            text2.insert(9, 'Increase the number of channels per view\n')
            text2.append('Mark bad channel\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Mark bad channel\n')

    # Epochs-browser specific entries (again specialized for ICA).
    elif 'epochs' in params:
        text.append(u'right click :\n')
        text2.insert(4, 'Reduce the number of epochs per view\n')
        text2.insert(5, 'Increase the number of epochs per view\n')
        if 'ica' in params:
            text.append(u'click component name :\n')
            text2.insert(2, 'Navigate components down\n')
            text2.insert(3, 'Navigate components up\n')
            text2.insert(8, 'Reduce the number of components per view\n')
            text2.insert(9, 'Increase the number of components per view\n')
            text2.append('Mark component for exclusion\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Show topography for the component\n')
        else:
            text.append(u'click channel name :\n')
            text.append(u'right click channel name :\n')
            text2.insert(2, 'Navigate channels down\n')
            text2.insert(3, 'Navigate channels up\n')
            text2.insert(8, 'Reduce the number of channels per view\n')
            text2.insert(9, 'Increase the number of channels per view\n')
            text.insert(10, u'b : \n')
            text2.insert(10, 'Toggle butterfly plot on/off\n')
            text.insert(11, u'h : \n')
            text2.insert(11, 'Show histogram of peak-to-peak values\n')
            text2.append('Mark bad epoch\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Mark bad channel\n')
            text2.append('Plot ERP/ERF image\n')
            text.append(u'middle click :\n')
            text2.append('Show channel name (butterfly plot)\n')
        text.insert(11, u'o : \n')
        text2.insert(11, 'View settings (orig. view only)\n')

    return ''.join(text), ''.join(text2)
def _prepare_trellis(n_cells, max_col):
    """Create a subplot grid large enough for *n_cells* axes.

    At most *max_col* columns are used; surplus axes beyond *n_cells*
    are hidden. Returns (fig, axes) with *axes* flattened.
    """
    import matplotlib.pyplot as plt
    if n_cells == 1:
        nrow = ncol = 1
    elif n_cells <= max_col:
        nrow, ncol = 1, n_cells
    else:
        ncol = max_col
        nrow = int(math.ceil(n_cells / float(max_col)))

    fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
    axes = [axes] if ncol == nrow == 1 else axes.flatten()
    # Hide the grid cells that exceed the requested count.
    for surplus_ax in axes[n_cells:]:
        surplus_ax.set_visible(False)
    return fig, axes
def _draw_proj_checkbox(event, params, draw_current_state=True):
    """Toggle options (projectors) dialog.

    Opens a toolbar-less figure with one checkbox per SSP projector;
    already-applied projectors are drawn in red and cannot be unchecked.
    Stores the dialog and its widget in params['fig_proj'] /
    params['proj_checks'].
    """
    from matplotlib import widgets
    projs = params['projs']
    # turn on options dialog

    labels = [p['desc'] for p in projs]
    actives = ([p['active'] for p in projs] if draw_current_state else
               [True] * len(params['projs']))

    # Size the dialog to the longest description / number of projectors.
    width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
    height = len(projs) / 6.0 + 0.5
    fig_proj = figure_nobar(figsize=(width, height))
    fig_proj.canvas.set_window_title('SSP projection vectors')
    params['fig_proj'] = fig_proj  # necessary for proper toggling
    ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)

    proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
    # change already-applied projectors to red
    for ii, p in enumerate(projs):
        if p['active'] is True:
            for x in proj_checks.lines[ii]:
                x.set_color('r')
    # make minimal size
    # pass key presses from option dialog over

    proj_checks.on_clicked(partial(_toggle_proj, params=params))
    params['proj_checks'] = proj_checks

    # this should work for non-test cases
    try:
        fig_proj.canvas.draw()
        fig_proj.show()
    except Exception:
        pass
def _layout_figure(params):
    """Function for setting figure layout. Shared with raw and epoch plots.

    Recomputes the positions (in figure-relative units) of the main trace
    axes, the vertical/horizontal scrollbars, and the option/help buttons
    from the current figure size in pixels. Does nothing if the figure is
    too small for the scrollbars to fit.
    """
    size = params['fig'].get_size_inches() * params['fig'].dpi
    # Fixed pixel budgets for the scrollbars, gaps, and borders.
    scroll_width = 25
    hscroll_dist = 25
    vscroll_dist = 10
    l_border = 100
    r_border = 10
    t_border = 35
    b_border = 40

    # only bother trying to reset layout if it's reasonable to do so
    if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
        return

    # convert to relative units
    scroll_width_x = scroll_width / size[0]
    scroll_width_y = scroll_width / size[1]
    vscroll_dist /= size[0]
    hscroll_dist /= size[1]
    l_border /= size[0]
    r_border /= size[0]
    t_border /= size[1]
    b_border /= size[1]

    # main axis (traces)
    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
    ax_y = hscroll_dist + scroll_width_y + b_border
    ax_height = 1.0 - ax_y - t_border
    pos = [l_border, ax_y, ax_width, ax_height]
    params['ax'].set_position(pos)
    if 'ax2' in params:
        # ax2 (e.g. a twin axis) shares the main axes' position.
        # (Removed a redundant second set_position on params['ax'] here.)
        params['ax2'].set_position(pos)

    # vscroll (channels)
    pos = [ax_width + l_border + vscroll_dist, ax_y,
           scroll_width_x, ax_height]
    params['ax_vscroll'].set_position(pos)

    # hscroll (time)
    pos = [l_border, b_border, ax_width, scroll_width_y]
    params['ax_hscroll'].set_position(pos)
    if 'ax_button' in params:
        # options button
        pos = [l_border + ax_width + vscroll_dist, b_border,
               scroll_width_x, scroll_width_y]
        params['ax_button'].set_position(pos)
    if 'ax_help_button' in params:
        pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
               scroll_width_x * 2, scroll_width_y]
        params['ax_help_button'].set_position(pos)
    params['fig'].canvas.draw()
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
                 read_limit=np.inf, max_str=30, verbose=None):
    """Compare the contents of two fiff files using diff and show_fiff

    Parameters
    ----------
    fname_1 : str
        First file to compare.
    fname_2 : str
        Second file to compare.
    fname_out : str | None
        Filename to store the resulting diff. If None, a temporary
        file will be created.
    show : bool
        If True, show the resulting diff in a new tab in a web browser.
    indent : str
        How to indent the lines.
    read_limit : int
        Max number of bytes of data to read from a tag. Can be np.inf
        to always read all data (helps test read completion).
    max_str : int
        Max number of characters of string representation to print for
        each tag's data.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fname_out : str
        The filename used for storing the diff. Could be useful for
        when a temporary file is used.
    """
    file_1 = show_fiff(fname_1, output=list, indent=indent,
                       read_limit=read_limit, max_str=max_str)
    file_2 = show_fiff(fname_2, output=list, indent=indent,
                       read_limit=read_limit, max_str=max_str)
    diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
    if fname_out is not None:
        f = open(fname_out, 'w')
    else:
        # delete=False so the file survives for the browser to open it.
        f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
        fname_out = f.name
    with f as fid:
        fid.write(diff)
    if show is True:
        webbrowser.open_new_tab(fname_out)
    return fname_out
def figure_nobar(*args, **kwargs):
    """Make matplotlib figure with no toolbar.

    Temporarily sets rcParams['toolbar'] = 'none' while the figure is
    created (restoring the previous value afterwards) and disconnects
    the toolbar's key-press handlers from the new canvas. All arguments
    are forwarded to ``plt.figure``.
    """
    from matplotlib import rcParams, pyplot as plt
    old_val = rcParams['toolbar']
    try:
        rcParams['toolbar'] = 'none'
        fig = plt.figure(*args, **kwargs)
        # remove button press catchers (for toolbar)
        cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
        for key in cbs:
            fig.canvas.callbacks.disconnect(key)
    finally:
        # Always restore the user's toolbar setting. The previous
        # `except Exception as ex: raise ex` clause was redundant (the
        # finally already runs) and rewrote the traceback on Python 2.
        rcParams['toolbar'] = old_val
    return fig
def _helper_raw_resize(event, params):
    """Helper for resizing: persist the new figure size in the MNE config
    and recompute the axes layout."""
    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
    set_config('MNE_BROWSE_RAW_SIZE', size)
    _layout_figure(params)
def _plot_raw_onscroll(event, params, len_channels=None):
    """Interpret scroll events: move the channel viewport one page up or
    down and redraw if it actually moved."""
    if len_channels is None:
        len_channels = len(params['info']['ch_names'])
    orig_start = params['ch_start']
    if event.step < 0:
        # Scroll down: advance by one page, clamped to the last page.
        params['ch_start'] = min(params['ch_start'] + params['n_channels'],
                                 len_channels - params['n_channels'])
    else:  # event.key == 'up':
        params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0)
    if orig_start != params['ch_start']:
        _channels_changed(params, len_channels)
def _channels_changed(params, len_channels):
"""Helper function for dealing with the vertical shift of the viewport."""
if params['ch_start'] + params['n_channels'] > len_channels:
params['ch_start'] = len_channels - params['n_channels']
if params['ch_start'] < 0:
params['ch_start'] = 0
params['plot_fun']()
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
def _plot_raw_onkey(event, params):
    """Interpret key presses.

    Dispatches keyboard shortcuts for the raw browser: navigation
    (arrows), scaling (+/-), channels-per-view (page up/down), window
    duration (home/end), help (?), full screen (F11), and quit (esc).
    """
    import matplotlib.pyplot as plt
    if event.key == 'escape':
        plt.close(params['fig'])
    elif event.key == 'down':
        # One page of channels down; _channels_changed clamps the range.
        params['ch_start'] += params['n_channels']
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'up':
        params['ch_start'] -= params['n_channels']
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'right':
        # One window forward in time.
        value = params['t_start'] + params['duration']
        _plot_raw_time(value, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.key == 'left':
        value = params['t_start'] - params['duration']
        _plot_raw_time(value, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.key in ['+', '=']:
        params['scale_factor'] *= 1.1
        params['plot_fun']()
    elif event.key == '-':
        params['scale_factor'] /= 1.1
        params['plot_fun']()
    elif event.key == 'pageup':
        # Show one more channel: recompute trace offsets and the
        # scrollbar patch height before redrawing.
        n_channels = params['n_channels'] + 1
        offset = params['ax'].get_ylim()[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].set_yticks(params['offsets'])
        params['vsel_patch'].set_height(n_channels)
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'pagedown':
        n_channels = params['n_channels'] - 1
        if n_channels == 0:
            return
        offset = params['ax'].get_ylim()[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].set_yticks(params['offsets'])
        params['vsel_patch'].set_height(n_channels)
        if len(params['lines']) > n_channels:  # remove line from view
            params['lines'][n_channels].set_xdata([])
            params['lines'][n_channels].set_ydata([])
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'home':
        # Shrink the time window by one second (minimum > 0).
        duration = params['duration'] - 1.0
        if duration <= 0:
            return
        params['duration'] = duration
        params['hsel_patch'].set_width(params['duration'])
        params['update_fun']()
        params['plot_fun']()
    elif event.key == 'end':
        # Grow the time window by one second, capped at the recording end.
        duration = params['duration'] + 1.0
        if duration > params['raw'].times[-1]:
            duration = params['raw'].times[-1]
        params['duration'] = duration
        params['hsel_patch'].set_width(params['duration'])
        params['update_fun']()
        params['plot_fun']()
    elif event.key == '?':
        _onclick_help(event, params)
    elif event.key == 'f11':
        mng = plt.get_current_fig_manager()
        mng.full_screen_toggle()
def _mouse_click(event, params):
    """Vertical select callback.

    Routes left-button clicks: outside any axes -> channel-label click
    (small layouts only); on the vertical scrollbar -> recenter the
    channel viewport; on the horizontal scrollbar -> recenter the time
    window; on the trace axes -> mark/unmark bad channels.
    """
    if event.button != 1:
        return
    if event.inaxes is None:
        # Click outside axes: treat as a channel-label click when few
        # enough channels are shown for labels to be clickable.
        if params['n_channels'] > 100:
            return
        ax = params['ax']
        ylim = ax.get_ylim()
        # Convert the pixel position into data coordinates.
        pos = ax.transData.inverted().transform((event.x, event.y))
        if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
            return
        params['label_click_fun'](pos)
    # vertical scrollbar changed
    if event.inaxes == params['ax_vscroll']:
        # Center the viewport on the clicked channel index.
        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
        if params['ch_start'] != ch_start:
            params['ch_start'] = ch_start
            params['plot_fun']()
    # horizontal scrollbar changed
    elif event.inaxes == params['ax_hscroll']:
        # Center the time window on the clicked time.
        _plot_raw_time(event.xdata - params['duration'] / 2, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.inaxes == params['ax']:
        params['pick_bads_fun'](event)
def _select_bads(event, params, bads):
    """Helper for selecting bad channels onpick. Returns updated bads list.

    If the click lands within +/- 2 std of a trace's mean, that channel is
    toggled in/out of *bads* (and recolored); otherwise the click places
    the vertical time-marker line instead.
    """
    # trade-off, avoid selecting more than one channel when drifts are present
    # however for clean data don't click on peaks but on flat segments
    def f(x, y):
        # Apply op *y* (np.subtract / np.add) to the trace mean and 2*std.
        return y(np.mean(x), x.std() * 2)
    lines = event.inaxes.lines
    for line in lines:
        ydata = line.get_ydata()
        if not isinstance(ydata, list) and not np.isnan(ydata).any():
            ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
            if ymin <= event.ydata <= ymax:
                this_chan = vars(line)['ch_name']
                if this_chan in params['info']['ch_names']:
                    ch_idx = params['ch_start'] + lines.index(line)
                    if this_chan not in bads:
                        # Mark bad: recolor and push behind other traces.
                        bads.append(this_chan)
                        color = params['bad_color']
                        line.set_zorder(-1)
                    else:
                        # Unmark: remove every occurrence and restore color.
                        while this_chan in bads:
                            bads.remove(this_chan)
                        color = vars(line)['def_color']
                        line.set_zorder(0)
                    line.set_color(color)
                    params['ax_vscroll'].patches[ch_idx].set_color(color)
                    break
    else:
        # No trace was hit: drop a vertical marker at the clicked time.
        x = np.array([event.xdata] * 2)
        params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
        params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
        params['vertline_t'].set_text('%0.3f' % x[0])
    return bads
def _onclick_help(event, params):
    """Function for drawing help window: a toolbar-less figure with two
    text columns (shortcut keys and their descriptions)."""
    import matplotlib.pyplot as plt
    text, text2 = _get_help_text(params)

    width = 6
    height = 5

    fig_help = figure_nobar(figsize=(width, height), dpi=80)
    fig_help.canvas.set_window_title('Help')
    ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
    ax.set_title('Keyboard shortcuts')
    plt.axis('off')
    # Left column: the key names, right-aligned toward the divider.
    ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
    ax1.set_yticklabels(list())
    plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
             ha='right')
    plt.axis('off')

    # Right column: the matching descriptions.
    ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
    ax2.set_yticklabels(list())
    plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
    plt.axis('off')

    tight_layout(fig=fig_help)
    # this should work for non-test cases
    try:
        fig_help.canvas.draw()
        fig_help.show()
    except Exception:
        pass
class ClickableImage(object):

    """
    Display an image so you can click on it and store x/y positions.

    Takes as input an image array (can be any array that works with imshow,
    but will work best with images.  Displays the image and lets you
    click on it.  Stores the xy coordinates of each click, so now you can
    superimpose something on top of it.

    Upon clicking, the x/y coordinate of the cursor will be stored in
    self.coords, which is a list of (x, y) tuples.

    Parameters
    ----------
    imdata: ndarray
        The image that you wish to click on for 2-d points.
    **kwargs : dict
        Keyword arguments. Passed to ax.imshow.

    Notes
    -----
    .. versionadded:: 0.9.0
    """

    def __init__(self, imdata, **kwargs):
        """Display the image for clicking."""
        from matplotlib.pyplot import figure, show
        self.coords = []        # list of (x, y) click positions
        self.imdata = imdata    # the background image array
        self.fig = figure()
        self.ax = self.fig.add_subplot(111)
        # Image extents in pixel units; also the coordinate bounds.
        self.ymax = self.imdata.shape[0]
        self.xmax = self.imdata.shape[1]
        self.im = self.ax.imshow(imdata, aspect='auto',
                                 extent=(0, self.xmax, 0, self.ymax),
                                 picker=True, **kwargs)
        self.ax.axis('off')
        self.fig.canvas.mpl_connect('pick_event', self.onclick)
        show()

    def onclick(self, event):
        """Mouse click handler.

        Parameters
        ----------
        event: matplotlib event object
            The matplotlib object that we use to get x/y position.
        """
        mouseevent = event.mouseevent
        self.coords.append((mouseevent.xdata, mouseevent.ydata))

    def plot_clicks(self, **kwargs):
        """Plot the x/y positions stored in self.coords.

        Parameters
        ----------
        **kwargs : dict
            Arguments are passed to imshow in displaying the bg image.
        """
        from matplotlib.pyplot import subplots, show
        f, ax = subplots()
        ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
        xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
        xcoords, ycoords = zip(*self.coords)
        ax.scatter(xcoords, ycoords, c='r')
        # Number each click in the order it was recorded.
        ann_text = np.arange(len(self.coords)).astype(str)
        for txt, coord in zip(ann_text, self.coords):
            ax.annotate(txt, coord, fontsize=20, color='r')
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        show()

    def to_layout(self, **kwargs):
        """Turn coordinates into an MNE Layout object.

        Normalizes by the image you used to generate clicks

        Parameters
        ----------
        **kwargs : dict
            Arguments are passed to generate_2d_layout
        """
        from mne.channels.layout import generate_2d_layout
        coords = np.array(self.coords)
        lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
        return lt
def _fake_click(fig, ax, point, xform='ax', button=1):
"""Helper to fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
raise ValueError('unknown transform')
try:
fig.canvas.button_press_event(x, y, button, False, None)
except Exception: # for old MPL
fig.canvas.button_press_event(x, y, button, False)
def add_background_image(fig, im, set_ratios=None):
    """Add a background image to a plot.

    Draws the array ``im`` behind every existing axes of ``fig``. This is
    generally meant for topo plots, though it works for any figure.

    Note: the figure (and, with ``set_ratios``, its axes) is modified
    in place.

    Parameters
    ----------
    fig: plt.figure
        The figure you wish to add a bg image to.
    im: ndarray
        A numpy array that works with a call to plt.imshow(im). This
        will be plotted as the background of the figure.
    set_ratios: None | str
        Set the aspect ratio of any axes in fig to the value in
        set_ratios. Defaults to None, which does nothing to axes.

    Returns
    -------
    ax_im: instance of the created matplotlib axis object
        corresponding to the image you added.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    if set_ratios is not None:
        # force the requested aspect on all pre-existing axes
        for axes in fig.axes:
            axes.set_aspect(set_ratios)
    # a full-figure axes pushed behind everything else
    background_ax = fig.add_axes([0, 0, 1, 1])
    background_ax.imshow(im, aspect='auto')
    background_ax.set_zorder(-1)
    return background_ax
| bsd-3-clause |
Kebniss/TalkingData-Mobile-User-Demographics | src/features/make_training_set.py | 1 | 1578 | import os
import numpy as np
import pandas as pd
from os import path
from scipy import sparse, io
from scipy.sparse import csr_matrix, hstack
from dotenv import load_dotenv, find_dotenv
# Build the training feature matrices (sparse and dense) by joining the
# per-feature files written by earlier pipeline steps.
# The input/output directory is taken from the FEATURES_DIR environment
# variable (loaded from a .env file).
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)
FEATURES_DATA_DIR = os.environ.get("FEATURES_DIR")
# MAKE SPARSE FEATURES -------------------------------------------------------
# read the three sparse blocks (MatrixMarket format) ...
phone_s = io.mmread(path.join(FEATURES_DATA_DIR, 'sparse_brand_model_price_train'))
app_labels_s = io.mmread(path.join(FEATURES_DATA_DIR, 'sparse_cum_app_labels_train'))
distance_s = io.mmread(path.join(FEATURES_DATA_DIR, 'sparse_position_train'))
# ... concatenate them column-wise into one CSR matrix and write it out
train = hstack((phone_s, app_labels_s, distance_s), format='csr')
io.mmwrite(path.join(FEATURES_DATA_DIR, 'sparse_train_p_al_d'), train)
# MAKE DENSE FEATURES --------------------------------------------------------
# the dense blocks are indexed by 'trainrow'; device_id is dropped since it
# is not a feature
phone_d = pd.read_csv(path.join(FEATURES_DATA_DIR, 'dense_brand_model_price_train.csv')
, index_col='trainrow').drop(['device_id'], 1)
app_labels_d = pd.read_csv(path.join(FEATURES_DATA_DIR, 'dense_500SVD_cum_app_labels_train.csv'))
distance_d = pd.read_csv(path.join(FEATURES_DATA_DIR, 'dense_position_train.csv')
, index_col='trainrow').drop(['device_id'], 1)
# outer joins keep every trainrow present in any of the three blocks
train = (phone_d.join(app_labels_d, how='outer')
.join(distance_d, how='outer'))
# fill nan with average value of feature = less information value
for col in train.columns:
    train[col] = train[col].fillna(train[col].mean(0))
train.to_csv(path.join(FEATURES_DATA_DIR, 'dense_train_p_al_d.csv'), index=False)
| mit |
cbertinato/pandas | pandas/tests/series/test_combine_concat.py | 1 | 15498 | from datetime import datetime
import numpy as np
from numpy import nan
import pytest
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, date_range
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestSeriesCombine:
    """Tests for Series.append, combine, combine_first, update and the
    dtype behaviour of pd.concat on (empty) Series.

    NOTE(review): these tests pin the behaviour of a historical pandas
    version (Series.append, .to_sparse and .ftype are deprecated or
    removed in later releases).
    """
    def test_append(self, datetime_series, string_series, object_series):
        # appending two disjoint series must keep every original value
        appendedSeries = string_series.append(object_series)
        for idx, value in appendedSeries.items():
            if idx in string_series.index:
                assert value == string_series[idx]
            elif idx in object_series.index:
                assert value == object_series[idx]
            else:
                raise AssertionError("orphaned index!")
        msg = "Indexes have overlapping values:"
        with pytest.raises(ValueError, match=msg):
            datetime_series.append(datetime_series, verify_integrity=True)
    def test_append_many(self, datetime_series):
        # appending a list of pieces reassembles the original series
        pieces = [datetime_series[:5], datetime_series[5:10],
                  datetime_series[10:]]
        result = pieces[0].append(pieces[1:])
        assert_series_equal(result, datetime_series)
    def test_append_duplicates(self):
        # GH 13677
        s1 = pd.Series([1, 2, 3])
        s2 = pd.Series([4, 5, 6])
        exp = pd.Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2])
        tm.assert_series_equal(s1.append(s2), exp)
        tm.assert_series_equal(pd.concat([s1, s2]), exp)
        # the result must have RangeIndex
        exp = pd.Series([1, 2, 3, 4, 5, 6])
        tm.assert_series_equal(s1.append(s2, ignore_index=True),
                               exp, check_index_type=True)
        tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True),
                               exp, check_index_type=True)
        msg = 'Indexes have overlapping values:'
        with pytest.raises(ValueError, match=msg):
            s1.append(s2, verify_integrity=True)
        with pytest.raises(ValueError, match=msg):
            pd.concat([s1, s2], verify_integrity=True)
    def test_combine_scalar(self):
        # GH 21248
        # Note - combine() with another Series is tested elsewhere because
        # it is used when testing operators
        s = pd.Series([i * 10 for i in range(5)])
        result = s.combine(3, lambda x, y: x + y)
        expected = pd.Series([i * 10 + 3 for i in range(5)])
        tm.assert_series_equal(result, expected)
        result = s.combine(22, lambda x, y: min(x, y))
        expected = pd.Series([min(i * 10, 22) for i in range(5)])
        tm.assert_series_equal(result, expected)
    def test_combine_first(self):
        values = tm.makeIntIndex(20).values.astype(float)
        series = Series(values, index=tm.makeIntIndex(20))
        series_copy = series * 2
        series_copy[::2] = np.NaN
        # nothing used from the input
        combined = series.combine_first(series_copy)
        tm.assert_series_equal(combined, series)
        # Holes filled from input
        combined = series_copy.combine_first(series)
        assert np.isfinite(combined).all()
        tm.assert_series_equal(combined[::2], series[::2])
        tm.assert_series_equal(combined[1::2], series_copy[1::2])
        # mixed types
        index = tm.makeStringIndex(20)
        floats = Series(tm.randn(20), index=index)
        strings = Series(tm.makeStringIndex(10), index=index[::2])
        combined = strings.combine_first(floats)
        tm.assert_series_equal(strings, combined.loc[index[::2]])
        tm.assert_series_equal(floats[1::2].astype(object),
                               combined.loc[index[1::2]])
        # corner case
        s = Series([1., 2, 3], index=[0, 1, 2])
        result = s.combine_first(Series([], index=[]))
        s.index = s.index.astype('O')
        assert_series_equal(s, result)
    def test_update(self):
        # update() fills the caller's NaN holes from the other series
        s = Series([1.5, nan, 3., 4., nan])
        s2 = Series([nan, 3.5, nan, 5.])
        s.update(s2)
        expected = Series([1.5, 3.5, 3., 5., np.nan])
        assert_series_equal(s, expected)
        # GH 3217
        df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
        df['c'] = np.nan
        df['c'].update(Series(['foo'], index=[0]))
        expected = DataFrame([[1, np.nan, 'foo'], [3, 2., np.nan]],
                             columns=['a', 'b', 'c'])
        assert_frame_equal(df, expected)
    @pytest.mark.parametrize('other, dtype, expected', [
        # other is int
        ([61, 63], 'int32', pd.Series([10, 61, 12], dtype='int32')),
        ([61, 63], 'int64', pd.Series([10, 61, 12])),
        ([61, 63], float, pd.Series([10., 61., 12.])),
        ([61, 63], object, pd.Series([10, 61, 12], dtype=object)),
        # other is float, but can be cast to int
        ([61., 63.], 'int32', pd.Series([10, 61, 12], dtype='int32')),
        ([61., 63.], 'int64', pd.Series([10, 61, 12])),
        ([61., 63.], float, pd.Series([10., 61., 12.])),
        ([61., 63.], object, pd.Series([10, 61., 12], dtype=object)),
        # others is float, cannot be cast to int
        ([61.1, 63.1], 'int32', pd.Series([10., 61.1, 12.])),
        ([61.1, 63.1], 'int64', pd.Series([10., 61.1, 12.])),
        ([61.1, 63.1], float, pd.Series([10., 61.1, 12.])),
        ([61.1, 63.1], object, pd.Series([10, 61.1, 12], dtype=object)),
        # other is object, cannot be cast
        ([(61,), (63,)], 'int32', pd.Series([10, (61,), 12])),
        ([(61,), (63,)], 'int64', pd.Series([10, (61,), 12])),
        ([(61,), (63,)], float, pd.Series([10., (61,), 12.])),
        ([(61,), (63,)], object, pd.Series([10, (61,), 12]))
    ])
    def test_update_dtypes(self, other, dtype, expected):
        # update() may upcast the caller's dtype depending on 'other'
        s = Series([10, 11, 12], dtype=dtype)
        other = Series(other, index=[1, 3])
        s.update(other)
        assert_series_equal(s, expected)
    def test_concat_empty_series_dtypes_roundtrips(self):
        # round-tripping with self & like self
        dtypes = map(np.dtype, ['float64', 'int8', 'uint8', 'bool', 'm8[ns]',
                                'M8[ns]'])
        for dtype in dtypes:
            assert pd.concat([Series(dtype=dtype)]).dtype == dtype
            assert pd.concat([Series(dtype=dtype),
                              Series(dtype=dtype)]).dtype == dtype
        # helpers that model the expected dtype-kind promotion rules
        def int_result_type(dtype, dtype2):
            typs = {dtype.kind, dtype2.kind}
            if not len(typs - {'i', 'u', 'b'}) and (dtype.kind == 'i' or
                                                    dtype2.kind == 'i'):
                return 'i'
            elif not len(typs - {'u', 'b'}) and (dtype.kind == 'u' or
                                                 dtype2.kind == 'u'):
                return 'u'
            return None
        def float_result_type(dtype, dtype2):
            typs = {dtype.kind, dtype2.kind}
            if not len(typs - {'f', 'i', 'u'}) and (dtype.kind == 'f' or
                                                    dtype2.kind == 'f'):
                return 'f'
            return None
        def get_result_type(dtype, dtype2):
            result = float_result_type(dtype, dtype2)
            if result is not None:
                return result
            result = int_result_type(dtype, dtype2)
            if result is not None:
                return result
            return 'O'
        for dtype in dtypes:
            for dtype2 in dtypes:
                if dtype == dtype2:
                    continue
                expected = get_result_type(dtype, dtype2)
                result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)
                                    ]).dtype
                assert result.kind == expected
    def test_combine_first_dt_tz_values(self, tz_naive_fixture):
        # combine_first keeps the caller's values and name, filling only
        # the missing index labels from the other series
        ser1 = pd.Series(pd.DatetimeIndex(['20150101', '20150102', '20150103'],
                                          tz=tz_naive_fixture),
                         name='ser1')
        ser2 = pd.Series(pd.DatetimeIndex(['20160514', '20160515', '20160516'],
                                          tz=tz_naive_fixture),
                         index=[2, 3, 4], name='ser2')
        result = ser1.combine_first(ser2)
        exp_vals = pd.DatetimeIndex(['20150101', '20150102', '20150103',
                                     '20160515', '20160516'],
                                    tz=tz_naive_fixture)
        exp = pd.Series(exp_vals, name='ser1')
        assert_series_equal(exp, result)
    @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
    @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
    def test_concat_empty_series_dtypes(self):
        # booleans
        assert pd.concat([Series(dtype=np.bool_),
                          Series(dtype=np.int32)]).dtype == np.int32
        assert pd.concat([Series(dtype=np.bool_),
                          Series(dtype=np.float32)]).dtype == np.object_
        # datetime-like
        assert pd.concat([Series(dtype='m8[ns]'),
                          Series(dtype=np.bool)]).dtype == np.object_
        assert pd.concat([Series(dtype='m8[ns]'),
                          Series(dtype=np.int64)]).dtype == np.object_
        assert pd.concat([Series(dtype='M8[ns]'),
                          Series(dtype=np.bool)]).dtype == np.object_
        assert pd.concat([Series(dtype='M8[ns]'),
                          Series(dtype=np.int64)]).dtype == np.object_
        assert pd.concat([Series(dtype='M8[ns]'),
                          Series(dtype=np.bool_),
                          Series(dtype=np.int64)]).dtype == np.object_
        # categorical
        assert pd.concat([Series(dtype='category'),
                          Series(dtype='category')]).dtype == 'category'
        # GH 18515
        assert pd.concat([Series(np.array([]), dtype='category'),
                          Series(dtype='float64')]).dtype == 'float64'
        assert pd.concat([Series(dtype='category'),
                          Series(dtype='object')]).dtype == 'object'
        # sparse
        # TODO: move?
        result = pd.concat([Series(dtype='float64').to_sparse(),
                            Series(dtype='float64').to_sparse()])
        assert result.dtype == 'Sparse[float64]'
        # GH 26705 - Assert .ftype is deprecated
        with tm.assert_produces_warning(FutureWarning):
            assert result.ftype == 'float64:sparse'
        result = pd.concat([Series(dtype='float64').to_sparse(),
                            Series(dtype='float64')])
        # TODO: release-note: concat sparse dtype
        expected = pd.core.sparse.api.SparseDtype(np.float64)
        assert result.dtype == expected
        # GH 26705 - Assert .ftype is deprecated
        with tm.assert_produces_warning(FutureWarning):
            assert result.ftype == 'float64:sparse'
        result = pd.concat([Series(dtype='float64').to_sparse(),
                            Series(dtype='object')])
        # TODO: release-note: concat sparse dtype
        expected = pd.core.sparse.api.SparseDtype('object')
        assert result.dtype == expected
        # GH 26705 - Assert .ftype is deprecated
        with tm.assert_produces_warning(FutureWarning):
            assert result.ftype == 'object:sparse'
    def test_combine_first_dt64(self):
        # combine_first on datetime64 keeps dtype when both sides are
        # datetime-like, and falls back to object when mixing with strings
        from pandas.core.tools.datetimes import to_datetime
        s0 = to_datetime(Series(["2010", np.NaN]))
        s1 = to_datetime(Series([np.NaN, "2011"]))
        rs = s0.combine_first(s1)
        xp = to_datetime(Series(['2010', '2011']))
        assert_series_equal(rs, xp)
        s0 = to_datetime(Series(["2010", np.NaN]))
        s1 = Series([np.NaN, "2011"])
        rs = s0.combine_first(s1)
        xp = Series([datetime(2010, 1, 1), '2011'])
        assert_series_equal(rs, xp)
class TestTimeseries:
    """Tests for appending/concatenating datetime-indexed Series,
    DataFrames and DatetimeIndexes, including timezone-aware indexes
    with the pytz and dateutil timezone implementations.

    NOTE(review): relies on the deprecated Series/DataFrame.append API.
    """
    def test_append_concat(self):
        rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
        ts = Series(np.random.randn(len(rng)), rng)
        df = DataFrame(np.random.randn(len(rng), 4), index=rng)
        # appending to self duplicates the index (no dedup, no sort)
        result = ts.append(ts)
        result_df = df.append(df)
        ex_index = DatetimeIndex(np.tile(rng.values, 2))
        tm.assert_index_equal(result.index, ex_index)
        tm.assert_index_equal(result_df.index, ex_index)
        appended = rng.append(rng)
        tm.assert_index_equal(appended, ex_index)
        appended = rng.append([rng, rng])
        ex_index = DatetimeIndex(np.tile(rng.values, 3))
        tm.assert_index_equal(appended, ex_index)
        # different index names
        rng1 = rng.copy()
        rng2 = rng.copy()
        rng1.name = 'foo'
        rng2.name = 'bar'
        # matching names are kept; mismatched names collapse to None
        assert rng1.append(rng1).name == 'foo'
        assert rng1.append(rng2).name is None
    def test_append_concat_tz(self):
        # see gh-2938
        rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
                         tz='US/Eastern')
        rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
                          tz='US/Eastern')
        rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
                          tz='US/Eastern')
        ts = Series(np.random.randn(len(rng)), rng)
        df = DataFrame(np.random.randn(len(rng), 4), index=rng)
        ts2 = Series(np.random.randn(len(rng2)), rng2)
        df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
        # rng and rng2 are contiguous, so the result equals rng3
        result = ts.append(ts2)
        result_df = df.append(df2)
        tm.assert_index_equal(result.index, rng3)
        tm.assert_index_equal(result_df.index, rng3)
        appended = rng.append(rng2)
        tm.assert_index_equal(appended, rng3)
    def test_append_concat_tz_explicit_pytz(self):
        # see gh-2938
        from pytz import timezone as timezone
        rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
                         tz=timezone('US/Eastern'))
        rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
                          tz=timezone('US/Eastern'))
        rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
                          tz=timezone('US/Eastern'))
        ts = Series(np.random.randn(len(rng)), rng)
        df = DataFrame(np.random.randn(len(rng), 4), index=rng)
        ts2 = Series(np.random.randn(len(rng2)), rng2)
        df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
        result = ts.append(ts2)
        result_df = df.append(df2)
        tm.assert_index_equal(result.index, rng3)
        tm.assert_index_equal(result_df.index, rng3)
        appended = rng.append(rng2)
        tm.assert_index_equal(appended, rng3)
    def test_append_concat_tz_dateutil(self):
        # see gh-2938
        rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
                         tz='dateutil/US/Eastern')
        rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
                          tz='dateutil/US/Eastern')
        rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
                          tz='dateutil/US/Eastern')
        ts = Series(np.random.randn(len(rng)), rng)
        df = DataFrame(np.random.randn(len(rng), 4), index=rng)
        ts2 = Series(np.random.randn(len(rng2)), rng2)
        df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
        result = ts.append(ts2)
        result_df = df.append(df2)
        tm.assert_index_equal(result.index, rng3)
        tm.assert_index_equal(result_df.index, rng3)
        appended = rng.append(rng2)
        tm.assert_index_equal(appended, rng3)
| bsd-3-clause |
sertansenturk/tomato | src/tomato/joint/alignedpitchfilter.py | 1 | 10166 | # Copyright 2015 - 2018 Sertan Şentürk
#
# This file is part of tomato: https://github.com/sertansenturk/tomato/
#
# tomato is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License v3.0
# along with this program. If not, see http://www.gnu.org/licenses/
#
# If you are using this extractor please cite the following thesis:
#
# Şentürk, S. (2016). Computational analysis of audio recordings and music
# scores for the description and discovery of Ottoman-Turkish makam music.
# PhD thesis, Universitat Pompeu Fabra, Barcelona, Spain.
import copy
import matplotlib.pyplot as plt
import numpy as np
class AlignedPitchFilter:
    """Octave-correct an audio pitch track using the notes obtained from
    audio-score alignment, and compute the performed pitch of each
    aligned note.

    Fixes applied (review):
    - `_decompose_into_chunks` truth-tested multi-element numpy arrays
      (`if temp_pitch:`), which raises ValueError; the size is now
      checked explicitly.
    - `_get_pitch_trajectories` passed a generator to np.vstack
      (deprecated since NumPy 1.16, per the old TODO); a list is passed
      now.
    - `_notes_to_synth_pitch` allocated an int buffer, silently
      truncating the theoretical pitch frequencies to whole Hz; the
      buffer is float now.
    """
    def __init__(self):
        # notes may "leak" slightly over their aligned boundaries; let
        # the synthetic pitch extend this many seconds beyond them
        self.max_boundary_tol = 3  # seconds
    def filter(self, pitch, notes):
        """Octave-correct ``pitch`` towards the synthetic pitch implied
        by the aligned ``notes`` and compute each note's performed pitch.

        Parameters
        ----------
        pitch : ndarray, shape (n_samples, >=2)
            columns: time stamp (s), frequency (Hz), [...]
        notes : list of dict
            aligned notes with 'Interval', 'Label', 'TheoreticalPitch'
            and 'PerformedPitch' entries

        Returns
        -------
        (pitch_corrected, notes_corrected, synth_pitch) where
        synth_pitch is a list of [time, frequency] pairs.
        """
        # IMPORTANT: In the audio-score alignment step, the pitch value
        # of each note is computed from the theoretical pitch distance
        # from the tonic and de-normalized according to the tonic
        # frequency of the performance. The value is not computed from
        # THE PITCH TRAJECTORY OF THE NOTE; it is just the THEORETICAL
        # FREQUENCY OF THE NOTE SYMBOL ACCORDING TO THE TONIC FREQUENCY
        # The performed stable pitch of the note will be computed in the
        # aligned-note-models
        pitch_corrected = np.copy(pitch)
        notes_corrected = copy.deepcopy(notes)
        notes_corrected = self._remove_rests_and_skipped_notes(notes_corrected)
        # group the notes into sections; synth_pitch rows parallel the
        # pitch samples (same time stamps)
        synth_pitch = self._notes_to_synth_pitch(
            notes_corrected, pitch_corrected[:, 0])
        # octave correction: move each pitch sample to the octave
        # closest to the synthetic (theoretical) pitch
        for i, sp in enumerate(synth_pitch[:, 1]):
            pitch_corrected[i][1] = self._move_to_closest_octave(
                pitch_corrected[i][1], sp)
        self._get_pitch_trajectories(notes_corrected, pitch_corrected)
        return pitch_corrected, notes_corrected, synth_pitch.tolist()
    @staticmethod
    def _remove_rests_and_skipped_notes(notes_corrected):
        """Drop notes with zero-length intervals (skipped in alignment)
        and rests (no theoretical pitch value)."""
        # remove skipped notes
        notes_corrected = ([n for n in notes_corrected
                            if not n['Interval'][0] == n['Interval'][1]])
        # remove rests
        notes_corrected = [n for n in notes_corrected if
                           n['TheoreticalPitch']['Value']]
        return notes_corrected
    @staticmethod
    def _get_pitch_trajectories(notes_corrected, pitch_corrected):
        """Assign each note its performed pitch: the median of the pitch
        samples falling inside the note's aligned interval."""
        for nc in notes_corrected:
            # pass a list to np.vstack; stacking a generator is
            # deprecated as of NumPy 1.16 (resolves the old TODO)
            trajectory = np.vstack(
                [p[1] for p in pitch_corrected
                 if nc['Interval'][0] <= p[0] <= nc['Interval'][1]])
            nc['PerformedPitch']['Value'] = np.median(trajectory).tolist()
    def _notes_to_synth_pitch(self, notes, time_stamps):
        """Build a synthetic pitch track from the theoretical note
        frequencies, sampled at ``time_stamps``."""
        # float buffer: the previous int buffer truncated the
        # theoretical frequencies to whole Hz
        synth_pitch = np.zeros(len(time_stamps))
        for i, _ in enumerate(notes):
            prevlabel = ([] if i == 0 else
                         notes[i - 1]['Label'].split('--')[0])
            label = notes[i]['Label'].split('--')[0]
            nextlabel = ([] if i == len(notes) - 1 else
                         notes[i + 1]['Label'].split('--')[0])
            # let the synthetic pitch continue in the boundaries a little
            # bit more
            startidx = self._preinterpolate_synth(
                i, label, notes, prevlabel, time_stamps)
            self._postinterpolate_synth(i, label, nextlabel, notes, startidx,
                                        synth_pitch, time_stamps)
        # add time_stamps as the first column
        synth_pitch = np.transpose(np.vstack((time_stamps, synth_pitch)))
        return synth_pitch
    def _postinterpolate_synth(self, i, label, next_label, notes, start_idx,
                               synth_pitch, time_stamps):
        """Fill ``synth_pitch`` from ``start_idx`` up to the note's end
        index, extending past the boundary within max_boundary_tol."""
        if not next_label:
            # post interpolation end time on the last note
            endidx = self._find_closest_sample_idx(
                notes[i]['Interval'][1] + self.max_boundary_tol,
                time_stamps)
        elif not label == next_label:
            # post interpolation end time on a group end
            nextstartidx = self._find_closest_sample_idx(
                notes[i + 1]['Interval'][0], time_stamps)
            endidx = self._find_closest_sample_idx(
                notes[i]['Interval'][1] + self.max_boundary_tol,
                time_stamps[:nextstartidx])
        else:
            # post interpolation within a group: stop right before the
            # next note starts
            nextstartidx = self._find_closest_sample_idx(
                notes[i + 1]['Interval'][0], time_stamps)
            endidx = nextstartidx - 1
        synth_pitch[start_idx:endidx + 1] = \
            notes[i]['TheoreticalPitch']['Value']
    def _preinterpolate_synth(self, i, label, notes, prevlabel, time_stamps):
        """Return the sample index at which the note's synthetic pitch
        should start, extending backwards within max_boundary_tol."""
        # pre interpolation start time on the first note
        if not prevlabel:
            startidx = self._find_closest_sample_idx(
                notes[i]['Interval'][0] - self.max_boundary_tol,
                time_stamps)
        elif not label == prevlabel:
            # pre interpolation start time on a group start:
            # recalculate the end time of the previous note first
            tempstartidx = self._find_closest_sample_idx(
                notes[i]['Interval'][0], time_stamps)
            prevendidx = self._find_closest_sample_idx(
                notes[i - 1]['Interval'][1] + self.max_boundary_tol,
                time_stamps[:tempstartidx])
            startidx = prevendidx + self._find_closest_sample_idx(
                notes[i]['Interval'][0] - self.max_boundary_tol,
                time_stamps[prevendidx:]) + 1
        else:  # no pre interpolation
            startidx = self._find_closest_sample_idx(
                notes[i]['Interval'][0], time_stamps)
        return startidx
    @staticmethod
    def _find_closest_sample_idx(val, sample_vals):
        """Index of the sample in ``sample_vals`` closest to ``val``."""
        return np.argmin(abs(sample_vals - val))
    @classmethod
    def _move_to_closest_octave(cls, pp, sp):
        """Shift the performed pitch ``pp`` by whole octaves so that it
        lies in the octave closest to the synthetic pitch ``sp``.
        Zero/NaN inputs (unvoiced frames) are returned unchanged."""
        minpp = pp
        if not (pp in [0, np.nan] or sp in [0, np.nan]):
            cent_diff = cls._hz2cent(pp, sp)
            # candidate distances: as-is and one octave down
            octave_cands = [cent_diff, cent_diff - 1200]
            cand_dist = [abs(oc) for oc in octave_cands]
            closest_cent_diff = octave_cands[cand_dist.index(min(cand_dist))]
            minpp = cls._cent2hz(closest_cent_diff, sp)
        return minpp
    @staticmethod
    def _cent2hz(cent_val, ref_hz):
        """Convert a cent distance from ``ref_hz`` to Hz; None input
        (rest) yields None."""
        try:
            return ref_hz * 2 ** (cent_val / 1200.0)
        except TypeError:  # _NaN_; rest
            return None
    @staticmethod
    def _hz2cent(val, ref_hz):
        """Distance of ``val`` from ``ref_hz`` in cents."""
        return 1200.0 * np.log2(val / ref_hz)
    @staticmethod
    def _decompose_into_chunks(pitch, bottom_limit=0.7, upper_limit=1.3):
        """Split the pitch track into continuous chunks. A chunk ends
        whenever consecutive samples jump by more than the given ratio
        limits; zero-frequency (unvoiced) samples are skipped."""
        pitch_chunks = []
        temp_pitch = np.array([])
        # starts at the first sample
        for i in range(1, len(pitch) - 1):
            # ignore samples with 0 frequency values
            if pitch[i][1] == 0:
                pass
            # non-zero samples
            else:
                interval = float(pitch[i + 1][1]) / float(pitch[i][1])
                temp_pitch = (np.vstack((temp_pitch, pitch[i]))
                              if temp_pitch.size > 0 else pitch[i])
                if not bottom_limit < interval < upper_limit:
                    # check the size explicitly: truth-testing a
                    # multi-element array raises ValueError (bug fix)
                    if temp_pitch.size > 0:
                        pitch_chunks.append(temp_pitch)
                        temp_pitch = np.array([])
        if temp_pitch.size > 0:
            pitch_chunks.append(temp_pitch)
        return pitch_chunks
    @staticmethod
    def plot(pitch, pitch_corrected, notes_corrected):
        """Plot the original and octave-corrected pitch tracks together
        with the aligned notes (matplotlib figure, not shown/saved)."""
        # remove zeros for plotting
        pitch_plot = np.copy(pitch)
        pitch_plot[pitch_plot[:, 1] == 0, 1] = np.NAN
        pitch_corrected_plot = np.copy(pitch_corrected)
        pitch_corrected_plot[pitch_corrected_plot[:, 1] == 0, 1] = np.NAN
        _, ax = plt.subplots()
        # plot pitch tracks
        ax.plot(pitch_plot[:, 0], pitch_plot[:, 1], 'g', label='Pitch',
                linewidth=3, alpha=0.7)
        ax.plot(pitch_corrected_plot[:, 0], pitch_corrected_plot[:, 1], 'b',
                linewidth=1, label=u'Corrected Pitch')
        plt.xlabel('Time (sec)')
        plt.ylabel('Frequency (Hz)')
        plt.grid(True)
        # plot notes except the last one
        for note in notes_corrected[:-1]:
            ax.plot(note['Interval'], [note['PerformedPitch']['Value'],
                                       note['PerformedPitch']['Value']],
                    'r', alpha=0.4, linewidth=6)
        # plot last note separately so the legend gets a single entry
        dmy_note = notes_corrected[-1]
        ax.plot(dmy_note['Interval'], [dmy_note['PerformedPitch']['Value'],
                                       dmy_note['PerformedPitch']['Value']],
                'r', label=u'Aligned Notes', alpha=0.4, linewidth=6)
        # set y axis limits from the union of both tracks
        pitch_vals = np.hstack((pitch_plot[:, 1], pitch_corrected_plot[:, 1]))
        pitch_vals = pitch_vals[~np.isnan(pitch_vals)]
        min_y = np.min(pitch_vals)
        max_y = np.max(pitch_vals)
        range_y = max_y - min_y
        ax.set_ylim([min_y - range_y * 0.1, max_y + range_y * 0.1])
        # set x axis limits
        time_vals = np.hstack((pitch_plot[:, 0], pitch_corrected_plot[:, 0]))
        min_x = np.min(time_vals)
        max_x = np.max(time_vals)
        ax.set_xlim([min_x, max_x])
        # place legend
        ax.legend(loc='upper right')
| agpl-3.0 |
DJArmstrong/autovet | Features/old/Centroiding/scripts/old/detrend_centroid_external.py | 2 | 12683 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 25 14:57:36 2016
@author:
Maximilian N. Guenther
Battcock Centre for Experimental Astrophysics,
Cavendish Laboratory,
JJ Thomson Avenue
Cambridge CB3 0HE
Email: mg719@cam.ac.uk
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from astropy.stats import sigma_clip
import pandas as pd
import lightcurve_tools
#import get_scatter_color
#from scipy.optimize import lstsq
#from scipy.stats import sigmaclip
from scipy.optimize import least_squares
from index_transits import index_transits
def run(dic, dic_nb, flattening='constant'):
    '''
    Detrend the target's centroid curves with the help of reference
    (neighbour) stars:

    0) remove a general offset of the curves over the full time array
    1) flatten the curves of the target and all neighbours per night
    2) find the most correlated neighbours
    3) detrend the target by a weighted mean of the most correlated
       neighbours (weights fitted on the 1-siderial-day phase curves)
    4) remove remaining airmass trends from the 1-siderial-day phase curve

    Key suffixes written into `dic` / `dic_nb`:
    _f  : flattened (offset removed globally and per night)
    _ref: reference curve computed from neighbouring stars
    _fd : flattened and detrended target star
    _fda: flattened, detrended, and airmass-detrended (1 siderial day)

    Parameters
    ----------
    dic : dict
        target star data; must contain 'HJD', 'CENTDX', 'CENTDY' and the
        keys required by `breakpoints` and `index_transits`
    dic_nb : dict
        neighbour star data; 'OBJ_ID' plus 'CENTDX'/'CENTDY' arrays of
        shape (N_neighbours, N_exposures)
    flattening : str, optional
        kept for backward compatibility; only relevant for the disabled
        scipy-based flattening

    Returns
    -------
    (dic, dic_nb), updated in place.
    '''
    # TODO: only calculate correlation / fit out of transit
    #::: set parameters
    N_neighbours = len(dic_nb['OBJ_ID'])
    N_top = 5  # number of best-correlated neighbours used per axis
    # breakpoints are informational here (only the disabled scipy-based
    # flattening consumed ind_bp)
    ind_bp, exposures_per_night, obstime_per_night = breakpoints(dic)
    ind_tr, ind_tr_half, ind_tr_double, ind_out, ind_out_per_tr, tmid = index_transits(dic)
    #::: 0) global offsets, computed out of transit only
    dic['CENTDX_f'] = detrend_global(dic, dic['CENTDX'], ind_out)
    dic['CENTDY_f'] = detrend_global(dic, dic['CENTDY'], ind_out)
    dic_nb['CENTDX_f'] = np.zeros( dic_nb['CENTDX'].shape )
    dic_nb['CENTDY_f'] = np.zeros( dic_nb['CENTDY'].shape )
    for i in range(N_neighbours):
        dic_nb['CENTDX_f'][i,:] = detrend_global(dic, dic_nb['CENTDX'][i,:], ind_out)
        dic_nb['CENTDY_f'][i,:] = detrend_global(dic, dic_nb['CENTDY'][i,:], ind_out)
    #::: 1) per-night offsets
    dic['CENTDX_f'] = detrend_per_night(dic, dic['CENTDX_f'], ind_out)
    dic['CENTDY_f'] = detrend_per_night(dic, dic['CENTDY_f'], ind_out)
    for i in range(N_neighbours):
        dic_nb['CENTDX_f'][i,:] = detrend_per_night(dic, dic_nb['CENTDX_f'][i,:], ind_out)
        dic_nb['CENTDY_f'][i,:] = detrend_per_night(dic, dic_nb['CENTDY_f'][i,:], ind_out)
    #::: 2) correlate every neighbour with the target (per axis)
    dic_nb['corrcoeff_x'] = np.zeros(N_neighbours) * np.nan
    dic_nb['corrcoeff_y'] = np.zeros(N_neighbours) * np.nan
    a_x = pd.Series( dic['CENTDX_f'] )
    a_y = pd.Series( dic['CENTDY_f'] )
    for i in range(dic_nb['CENTDX'].shape[0]):
        b_x = pd.Series( dic_nb['CENTDX_f'][i] )
        b_y = pd.Series( dic_nb['CENTDY_f'][i] )
        dic_nb['corrcoeff_x'][i] = a_x.corr( b_x )
        dic_nb['corrcoeff_y'][i] = a_y.corr( b_y )
    # pick the N_top highest corrcoeffs per axis
    ind_x = np.argpartition(dic_nb['corrcoeff_x'], -N_top)[-N_top:]
    ind_y = np.argpartition(dic_nb['corrcoeff_y'], -N_top)[-N_top:]
    #::: 3) fit reference weights on the 1-siderial-day phase curves
    # TODO: remove hardcoding of dt and the siderial period
    dt = 0.01
    period = 0.9972695787 * 24. * 3600.  # 1 mean siderial day in seconds
    centdx = dic['CENTDX_f']
    centdy = dic['CENTDY_f']
    hjd_phase, centdx_phase, centdx_phase_err, _, _ = lightcurve_tools.phase_fold( dic['HJD'], centdx, period, dic['HJD'][0], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
    _, centdy_phase, centdy_phase_err, _, _ = lightcurve_tools.phase_fold( dic['HJD'], centdy, period, dic['HJD'][0], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
    xx_phase = np.zeros((N_top, len(hjd_phase)))
    for i, ind in enumerate(ind_x):
        xx = dic_nb['CENTDX_f'][ ind, : ]
        _, xx_phase[i], _, _, _ = lightcurve_tools.phase_fold( dic['HJD'], xx, period, dic['HJD'][0], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
    yy_phase = np.zeros((N_top, len(hjd_phase)))
    for i, ind in enumerate(ind_y):
        # BUGFIX: phase-fold the y centroids of the y-correlated
        # neighbours (previously the x centroids were used here, so the
        # y weights were fitted against the wrong axis)
        yy = dic_nb['CENTDY_f'][ ind, : ]
        _, yy_phase[i], _, _, _ = lightcurve_tools.phase_fold( dic['HJD'], yy, period, dic['HJD'][0], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
    def errfct( weights, centdxy_phase, xy_phase ):
        # residual between the target phase curve and the weighted
        # neighbour mean
        return centdxy_phase - np.average(xy_phase, axis=0, weights=weights)
    p0 = np.ones( N_top )  # initial guess: all neighbours weighted equally
    params_x = least_squares(errfct, p0[:], bounds=(np.zeros(N_top), np.ones(N_top)), args=(centdx_phase, xx_phase))
    weights_x = params_x.x  # '.x' holds the fitted parameters, i.e. the weights
    params_y = least_squares(errfct, p0[:], bounds=(np.zeros(N_top), np.ones(N_top)), args=(centdy_phase, yy_phase))
    weights_y = params_y.x
    #::: build the reference curves and subtract them from the target
    dic_nb['CENTDX_ref_mean'] = np.average(dic_nb['CENTDX_f'][ ind_x, : ], axis=0, weights=weights_x)
    dic_nb['CENTDY_ref_mean'] = np.average(dic_nb['CENTDY_f'][ ind_y, : ], axis=0, weights=weights_y)
    dic['CENTDX_fd'] = dic['CENTDX_f'] - dic_nb['CENTDX_ref_mean']
    dic['CENTDY_fd'] = dic['CENTDY_f'] - dic_nb['CENTDY_ref_mean']
    #::: flatten again per night
    dic['CENTDX_fd'] = detrend_per_night(dic, dic['CENTDX_fd'], ind_out)
    dic['CENTDY_fd'] = detrend_per_night(dic, dic['CENTDY_fd'], ind_out)
    #::: 4) remove remaining airmass trends (1-siderial-day phase curve)
    dic = fit_phasecurve_1_siderial_day( dic )
    #::: flatten again per night
    # BUGFIX: store the re-flattened airmass-detrended curves under the
    # _fda keys; previously they overwrote the _fd curves (the disabled
    # detrend_scipy lines show _fda was intended)
    dic['CENTDX_fda'] = detrend_per_night(dic, dic['CENTDX_fda'], ind_out)
    dic['CENTDY_fda'] = detrend_per_night(dic, dic['CENTDY_fda'], ind_out)
    return dic, dic_nb
def breakpoints(dic):
    '''
    Locate night boundaries in the observation stream.

    Returns the index of the last exposure of every night (preceded by
    index 0), together with the exposure count and the observed time
    span (last minus first HJD) of each night.
    '''
    ind_bp = [0]
    exposures_per_night = []
    obstime_per_night = []
    for night in dic['UNIQUE_NIGHT']:
        # all exposure indices belonging to this night
        sel = np.where(dic['NIGHT'] == night)[0]
        exposures_per_night.append(len(sel))
        obstime_per_night.append(dic['HJD'][sel[-1]] - dic['HJD'][sel[0]])
        ind_bp.append(sel[-1])
    return ind_bp, np.array(exposures_per_night), np.array(obstime_per_night)
def detrend_scipy(data, ind_bp, flattening):
    '''
    Use scipy to detrend each object and each night individually
    (separated by breakpoints ind_bp).

    1) flattening='constant': remove constant trend (offset)
    2) flattening='linear': remove linear trend (offset+slope)
    3) flattening='none': do not remove any trend

    Warning: method 2) can cause the transit signal to vanish, so it is
    (like 'none') passed through unchanged.

    Parameters
    ----------
    data : ndarray
        1-D curve; may contain NaNs, which are preserved in the output.
    ind_bp : sequence of int
        breakpoint indices handed to scipy.signal.detrend.
    flattening : str
        one of 'constant', 'linear', 'none'.

    Returns
    -------
    ndarray (new array) for 'constant'; otherwise ``data`` unchanged.
    '''
    if flattening == 'constant':
        ind_nan = np.isnan(data)
        # work on a filled copy: the previous implementation zeroed the
        # NaNs of the caller's array in place and never restored them
        filled = np.where(ind_nan, 0., data)
        data_detrended = signal.detrend(filled, type=flattening, bp=ind_bp)
        # NaNs are put back so gaps stay gaps
        data_detrended[ind_nan] = np.nan
        return data_detrended
    else:
        return data
def detrend_per_night(dic, data, ind_out):
    """Subtract, night by night, the sigma-clipped mean of the
    out-of-transit samples. Returns a new array; ``data`` is untouched.
    """
    # float copy of the input curve
    detrended = 1. * data
    for night in dic['UNIQUE_NIGHT']:
        # NOTE(review): the broad except deliberately mirrors the
        # original code, which worked around an astropy sigma_clip
        # TypeError ("ufunc 'bitwise_or' not supported for the input
        # types ..."); nights that fail are simply left un-shifted.
        try:
            # this night's exposures, restricted to out-of-transit ones
            night_idx = np.where(dic['NIGHT'] == night)[0]
            oot_idx = np.intersect1d(night_idx, ind_out)
            # sigma-clipped mean (astropy) as the nightly offset
            night_offset = np.mean(sigma_clip(data[oot_idx]))
            detrended[night_idx] -= night_offset
        except:
            pass
    return detrended
def detrend_global(dic, data, ind_out):
    """Remove a single global offset from *data*: the sigma-clipped mean
    of the out-of-transit points (indices ``ind_out``).

    ``dic`` is accepted for signature symmetry with detrend_per_night but
    is not used. The input array is left untouched; a copy is returned.
    """
    # float copy so the caller's array is preserved
    detrended = data * 1.
    # one global offset from the out-of-transit points (astropy sigma_clip)
    offset = np.mean(sigma_clip(data[ind_out]))
    detrended -= offset
    return detrended
def fit_phasecurve_1_siderial_day( dic, dt=0.01, period=0.9972695787*24.*3600. ):
    '''
    Phase-fold the data onto one mean siderial day, fit a quadratic to the
    folded CENTDX/CENTDY curves, and subtract the unwrapped fit from the
    per-night-detrended centroids (stored as CENTDX_fda / CENTDY_fda).

    1 mean siderial day =
    ( 23 + 56/60. + 4.0916/3600. ) / 24. = 0.9972695787 days
    '''
    #::: phasefold to one siderial day
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement and behaves identically.
    t0 = ( int( dic['HJD'][0]/24./3600. ) ) * 24.*3600.
    dic['HJD_PHASE_1sidday'], dic['COLOR_PHASE_1sidday'], dic['COLOR_PHASE_1sidday_ERR'], _, _ = lightcurve_tools.phase_fold( dic['HJD'], dic['COLOR'], period, t0, dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
    _, dic['AIRMASS_PHASE_1sidday'], dic['AIRMASS_PHASE_1sidday_ERR'], _, _ = lightcurve_tools.phase_fold( dic['HJD'], dic['AIRMASS'], period, t0, dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
    _, dic['SYSREM_FLUX3_PHASE_1sidday'], dic['SYSREM_FLUX3_PHASE_1sidday_ERR'], _, _ = lightcurve_tools.phase_fold(dic['HJD'], dic['SYSREM_FLUX3'] / np.nanmedian(dic['SYSREM_FLUX3']), period, t0, dt = dt, ferr_type='meansig', ferr_style='std', sigmaclip=True)
    _, dic['CENTDX_fd_PHASE_1sidday'], dic['CENTDX_fd_PHASE_1sidday_ERR'], _, _ = lightcurve_tools.phase_fold( dic['HJD'], dic['CENTDX_fd'], period, t0, dt = dt, ferr_type='meansig', ferr_style='std', sigmaclip=True)
    _, dic['CENTDY_fd_PHASE_1sidday'], dic['CENTDY_fd_PHASE_1sidday_ERR'], _, _ = lightcurve_tools.phase_fold( dic['HJD'], dic['CENTDY_fd'], period, t0, dt = dt, ferr_type='meansig', ferr_style='std', sigmaclip=True)
    #::: fit out the trend and save the resulting polyfunction in the dic
    polyfit_params = np.polyfit( dic['HJD_PHASE_1sidday'], dic['CENTDX_fd_PHASE_1sidday'], 2 )
    dic['polyfct_CENTDX'] = np.poly1d( polyfit_params )
    polyfit_params = np.polyfit( dic['HJD_PHASE_1sidday'], dic['CENTDY_fd_PHASE_1sidday'], 2 )
    dic['polyfct_CENTDY'] = np.poly1d( polyfit_params )
    #::: unwrap the phase-folding: evaluate the fitted polynomial at the
    #::: phase of every exposure and subtract it from the time series
    dx = ( (dic['HJD'] - t0) % (period) ) / period
    dic['poly_CENTDX'] = dic['polyfct_CENTDX'](dx)
    dic['poly_CENTDY'] = dic['polyfct_CENTDY'](dx)
    dic['CENTDX_fda'] = dic['CENTDX_fd'] - dic['poly_CENTDX']
    dic['CENTDY_fda'] = dic['CENTDY_fd'] - dic['poly_CENTDY']
    return dic
def run_example(dic, dic_nb):
    '''
    For test purposes: detrend the CENTDX series per night and plot
    the result. ``dic_nb`` is currently unused.
    '''
    #::: choose example data
    data = dic['CENTDX']
    time = dic['HJD']
    # BUG FIX: breakpoints() returns (ind_bp, exposures_per_night,
    # obstime_per_night); the old code passed the whole 3-tuple on as
    # the break points.
    ind_bp, _, _ = breakpoints(dic)
    # BUG FIX: detrend_scipy requires a `flattening` mode; the old call
    # omitted it and raised a TypeError. 'constant' removes only the
    # per-night offset and cannot erase a transit signal.
    data_detrended = detrend_scipy(data, ind_bp, 'constant')
    plot(data, data_detrended, time)
def plot(data, data_detrended, time):
    '''
    For test purposes: show the first N_exp exposures before (top panel)
    and after (bottom panel) detrending, colour-coded by the integer part
    of the observation time so that nights are distinguishable.
    '''
    fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 4))
    # BUG FIX: 1e4 is a float, and float slice indices (data[0:N_exp])
    # raise a TypeError on Python 3; use an int literal instead.
    N_exp = 10000
    ax = axes[0]
    ax.scatter( np.arange(N_exp), data[0:N_exp], c=time[0:N_exp].astype(int), rasterized=True, cmap='jet' )
    ax = axes[1]
    ax.scatter( np.arange(N_exp), data_detrended[0:N_exp], c=time[0:N_exp].astype(int), rasterized=True, cmap='jet' )
    ax.set( xlim=[0, N_exp] )
| gpl-3.0 |
nguyentu1602/statsmodels | statsmodels/sandbox/distributions/examples/ex_mvelliptical.py | 34 | 5169 | # -*- coding: utf-8 -*-
"""examples for multivariate normal and t distributions
Created on Fri Jun 03 16:00:26 2011
@author: josef
for comparison I used R mvtnorm version 0.9-96
"""
from __future__ import print_function
import numpy as np
import statsmodels.sandbox.distributions.mv_normal as mvd
from numpy.testing import assert_array_almost_equal
# covariance matrix and mean vector shared by all examples below
cov3 = np.array([[ 1. , 0.5 , 0.75],
                 [ 0.5 , 1.5 , 0.6 ],
                 [ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
#************** multivariate normal distribution ***************
mvn3 = mvd.MVNormal(mu, cov3)
#compare with random sample
x = mvn3.rvs(size=1000000)
# evaluation points for the cdf checks below
xli = [[2., 1., 1.5],
       [0., 2., 1.5],
       [1.5, 1., 2.5],
       [0., 1., 1.5]]
xliarr = np.asarray(xli).T[None,:, :]
#from R session
#pmvnorm(lower=-Inf,upper=(x[0,.]-mu)/sqrt(diag(cov3)),mean=rep(0,3),corr3)
r_cdf = [0.3222292, 0.3414643, 0.5450594, 0.3116296]
r_cdf_errors = [1.715116e-05, 1.590284e-05, 5.356471e-05, 3.567548e-05]
n_cdf = [mvn3.cdf(a) for a in xli]
# cdf should agree with R's mvtnorm::pmvnorm to ~4 decimals
assert_array_almost_equal(r_cdf, n_cdf, decimal=4)
print(n_cdf)
print('')
# Monte Carlo estimates of the same probabilities from the random sample
print((x<np.array(xli[0])).all(-1).mean(0))
print((x[...,None]<xliarr).all(1).mean(0))
print(mvn3.expect_mc(lambda x: (x<xli[0]).all(-1), size=100000))
print(mvn3.expect_mc(lambda x: (x[...,None]<xliarr).all(1), size=100000))
#other methods
# normalized(): zero mean, unit variances -> cov equals corr
mvn3n = mvn3.normalized()
assert_array_almost_equal(mvn3n.cov, mvn3n.corr, decimal=15)
assert_array_almost_equal(mvn3n.mean, np.zeros(3), decimal=15)
xn = mvn3.normalize(x)
xn_cov = np.cov(xn, rowvar=0)
assert_array_almost_equal(mvn3n.cov, xn_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xn.mean(0), decimal=2)
mvn3n2 = mvn3.normalized2()
assert_array_almost_equal(mvn3n.cov, mvn3n2.cov, decimal=2)
#mistake: "normalized2" standardizes - FIXED
#assert_array_almost_equal(np.eye(3), mvn3n2.cov, decimal=2)
xs = mvn3.standardize(x)
# NOTE(review): xs_cov is computed from xn, not xs — looks intentional
# given the "another mixup" comment below, but verify.
xs_cov = np.cov(xn, rowvar=0)
#another mixup xs is normalized
#assert_array_almost_equal(np.eye(3), xs_cov, decimal=2)
assert_array_almost_equal(mvn3.corr, xs_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xs.mean(0), decimal=2)
# marginal and conditional distributions
mv2m = mvn3.marginal(np.array([0,1]))
print(mv2m.mean)
print(mv2m.cov)
mv2c = mvn3.conditional(np.array([0,1]), [0])
print(mv2c.mean)
print(mv2c.cov)
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
print(mv2c.cov)
# cross-check the conditional mean against an OLS regression on the sample
import statsmodels.api as sm
mod = sm.OLS(x[:,0], sm.add_constant(x[:,1:], prepend=True))
res = mod.fit()
print(res.model.predict(np.array([1,0,0])))
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
mv2c = mvn3.conditional(np.array([0]), [1, 1])
print(res.model.predict(np.array([1,1,1])))
print(mv2c.mean)
#the following wrong input doesn't raise an exception but produces wrong numbers
#mv2c = mvn3.conditional(np.array([0]), [[1, 1],[2,2]])
#************** multivariate t distribution ***************
mvt3 = mvd.MVT(mu, cov3, 4)
xt = mvt3.rvs(size=100000)
assert_array_almost_equal(mvt3.cov, np.cov(xt, rowvar=0), decimal=1)
mvt3s = mvt3.standardized()
mvt3n = mvt3.normalized()
#the following should be equal or correct up to numerical precision of float
assert_array_almost_equal(mvt3.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(mvt3n.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(np.eye(3), mvt3s.sigma, decimal=15)
xts = mvt3.standardize(xt)
xts_cov = np.cov(xts, rowvar=0)
xtn = mvt3.normalize(xt)
xtn_cov = np.cov(xtn, rowvar=0)
xtn_corr = np.corrcoef(xtn, rowvar=0)
assert_array_almost_equal(mvt3n.mean, xtn.mean(0), decimal=2)
#the following might fail sometimes (random test), add seed in tests
assert_array_almost_equal(mvt3n.corr, xtn_corr, decimal=1)
#watch out cov is not the same as sigma for t distribution, what's right here?
#normalize by sigma or by cov ? now normalized by sigma
assert_array_almost_equal(mvt3n.cov, xtn_cov, decimal=1)
assert_array_almost_equal(mvt3s.cov, xts_cov, decimal=1)
# compare t cdf values against R's mvtnorm
a = [0.0, 1.0, 1.5]
mvt3_cdf0 = mvt3.cdf(a)
print(mvt3_cdf0)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.3026741)  # "error": 0.0004832187
print('R', 0.3026855)  # error 3.444375e-06 with smaller abseps
print('diff', mvt3_cdf0 - 0.3026855)
a = [0.0, 0.5, 1.0]
mvt3_cdf1 = mvt3.cdf(a)
print(mvt3_cdf1)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.1946621)  # "error": 0.0002524817)
print('R', 0.1946217)  # "error:"2.748699e-06 with smaller abseps)
print('diff', mvt3_cdf1 - 0.1946217)
assert_array_almost_equal(mvt3_cdf0, 0.3026855, decimal=5)
assert_array_almost_equal(mvt3_cdf1, 0.1946217, decimal=5)
# mixtures of multivariate distributions
import statsmodels.distributions.mixture_rvs as mix
mu2 = np.array([4, 2.0, 2.0])
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
md = mix.mv_mixture_rvs([0.4, 0.6], 5, [mvt3, mvt3n], 3)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
#rvs2 = rvs[:,:2]
# scatter plots of the pairwise marginals of the mixture sample
import matplotlib.pyplot as plt
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.plot(rvs[:,0], rvs[:,1], '.', alpha=0.25)
plt.title('1 versus 0')
fig.add_subplot(2, 2, 2)
plt.plot(rvs[:,0], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 0')
fig.add_subplot(2, 2, 3)
plt.plot(rvs[:,1], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 1')
#plt.show()
| bsd-3-clause |
bnaul/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 11 | 25871 | """
Todo: cross-check the F-value with stats model
"""
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_warns_message
from sklearn.utils import safe_mask
from sklearn.datasets import make_classification, make_regression
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
    # sklearn's f_oneway must reproduce scipy.stats.f_oneway exactly.
    rng = np.random.RandomState(0)
    sample_a = rng.randn(10, 3)
    sample_b = rng.randn(10, 3) + 1
    f_scipy, p_scipy = stats.f_oneway(sample_a, sample_b)
    f_skl, p_skl = f_oneway(sample_a, sample_b)
    assert np.allclose(f_scipy, f_skl)
    assert np.allclose(p_scipy, p_skl)
def test_f_oneway_ints():
    # Smoke test: f_oneway on integer input must not raise casting
    # errors with recent numpys and must match the float result.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    f_int, p_int = f_oneway(X, y)
    f_float, p_float = f_oneway(X.astype(float), y)
    assert_array_almost_equal(f_float, f_int, decimal=4)
    assert_array_almost_equal(p_float, p_int, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert (F > 0).all()
assert (pv > 0).all()
assert (pv < 1).all()
assert (pv[:5] < 0.05).all()
assert (pv[5:] > 1.e-4).all()
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert (F > 0).all()
assert (pv > 0).all()
assert (pv < 1).all()
assert (pv[:5] < 0.05).all()
assert (pv[5:] > 1.e-4).all()
# with centering, compare with sparse
F, pv = f_regression(X, y, center=True)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert (F > 0).all()
assert (pv > 0).all()
assert (pv < 1).all()
assert (pv[:5] < 0.05).all()
assert (pv[5:] > 1.e-4).all()
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert sparse.issparse(X_r2inv)
support_mask = safe_mask(X_r2inv, support)
assert X_r2inv.shape == X.shape
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert X_r2inv.getnnz() == X_r.getnnz()
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert X_selected.shape == (20, 0)
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    # The selected features must be exactly the highest-scoring ones:
    # sorted kept scores equal the top-k of all scores.
    scores = score_filter.scores_
    mask = score_filter.get_support()
    kept = np.sort(scores[mask])
    top = np.sort(scores)[-mask.sum():]
    assert_array_almost_equal(kept, top)
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
    # Percentiles outside [0, 100] must be rejected by both the
    # dedicated selector and the generic interface.
    X, y = make_regression(n_samples=10, n_features=20,
                           n_informative=2, shuffle=False, random_state=0)
    for bad_percentile in (-1, 101):
        with pytest.raises(ValueError):
            SelectPercentile(percentile=bad_percentile).fit(X, y)
        with pytest.raises(ValueError):
            GenericUnivariateSelect(mode='percentile',
                                    param=bad_percentile).fit(X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
assert np.sum(support[5:] == 1) < 3
def test_boundary_case_ch2():
# Test boundary case, and always aim to select 1 feature.
X = np.array([[10, 20], [20, 20], [20, 30]])
y = np.array([[1], [0], [0]])
scores, pvalues = chi2(X, y)
assert_array_almost_equal(scores, np.array([4., 0.71428571]))
assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
filter_fdr = SelectFdr(chi2, alpha=0.1)
filter_fdr.fit(X, y)
support_fdr = filter_fdr.get_support()
assert_array_equal(support_fdr, np.array([True, False]))
filter_kbest = SelectKBest(chi2, k=1)
filter_kbest.fit(X, y)
support_kbest = filter_kbest.get_support()
assert_array_equal(support_kbest, np.array([True, False]))
filter_percentile = SelectPercentile(chi2, percentile=50)
filter_percentile.fit(X, y)
support_percentile = filter_percentile.get_support()
assert_array_equal(support_percentile, np.array([True, False]))
filter_fpr = SelectFpr(chi2, alpha=0.1)
filter_fpr.fit(X, y)
support_fpr = filter_fpr.get_support()
assert_array_equal(support_fpr, np.array([True, False]))
filter_fwe = SelectFwe(chi2, alpha=0.1)
filter_fwe.fit(X, y)
support_fwe = filter_fwe.get_support()
assert_array_equal(support_fwe, np.array([True, False]))
@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1])
@pytest.mark.parametrize("n_informative", [1, 5, 10])
def test_select_fdr_regression(alpha, n_informative):
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(100)])
assert alpha >= false_discovery_rate
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert false_discovery_rate > alpha / 10
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
assert np.sum(support[5:] == 1) < 2
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert X1.shape[1] == 1
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert X2.shape[1] == 2
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert X1.shape[1] == 1
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert X2.shape[1] == 2
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert Xt.shape == (2, 2)
assert 9998 not in Xt
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert Xt.shape == (2, 2)
assert 9998 not in Xt
def test_scorefunc_multilabel():
# Test whether k-best and percentiles works with multilabels with chi2.
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
y = [[1, 1], [0, 1], [1, 0]]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert Xt.shape == (3, 2)
assert 0 not in Xt
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert Xt.shape == (3, 2)
assert 0 not in Xt
def test_tied_scores():
    # SelectKBest must sort stably when scores are tied: for any k the
    # kept columns are the last k of the three tied features.
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]
    for k in (1, 2, 3):
        selector = SelectKBest(chi2, k=k).fit(X_train, y_train)
        transformed = selector.transform([[0, 1, 2]])
        assert_array_equal(transformed[0], np.arange(3)[-k:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, k=2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
with pytest.raises(TypeError):
SelectFeatures(score_func=10).fit(X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
with pytest.raises(ValueError):
SelectKBest(k=-1).fit(X, y)
with pytest.raises(ValueError):
SelectKBest(k=4).fit(X, y)
with pytest.raises(ValueError):
GenericUnivariateSelect(mode='k_best', param=-1).fit(X, y)
with pytest.raises(ValueError):
GenericUnivariateSelect(mode='k_best', param=4).fit(X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# rejects all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert X_selected.shape == (40, 0)
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/tests/test_isotonic.py | 12 | 7545 | import numpy as np
import pickle
from sklearn.isotonic import check_increasing, isotonic_regression,\
IsotonicRegression
from sklearn.utils.testing import assert_raises, assert_array_equal,\
assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_warns_message, assert_no_warnings
def test_check_increasing_up():
    # A clearly increasing relationship: check_increasing must return
    # True and emit no warnings.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, 1.5, 2.77, 8.99, 8.99, 50]
    assert_true(assert_no_warnings(check_increasing, x, y))
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
    """check_increasing reports a decreasing trend without warning."""
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1.5, -2.77, -8.99, -8.99, -50]
    # A clearly decreasing relationship: False, and no CI warning.
    assert_false(assert_no_warnings(check_increasing, xs, ys))
def test_check_increasing_down_extreme():
    """A perfectly linear downward series is detected as decreasing."""
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1, -2, -3, -4, -5]
    # Negated identity relationship: False, and no CI warning.
    assert_false(assert_no_warnings(check_increasing, xs, ys))
def test_check_ci_warn():
    """A non-monotonic y must trigger the CI-interval UserWarning."""
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1, 2, -3, 4, -5]
    # Alternating signs make the confidence interval span zero, so the
    # function warns; the inferred direction here is decreasing (False).
    result = assert_warns_message(UserWarning, "interval",
                                  check_increasing, xs, ys)
    assert_false(result)
def test_isotonic_regression():
    # Known pool-adjacent-violators solution for this toy series.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    y_ = np.array([3, 6, 6, 8, 8, 8, 10])
    assert_array_equal(y_, isotonic_regression(y))
    x = np.arange(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    ir.fit(x, y)
    # fit(...).transform(...) must equal fit_transform, and predict is
    # an alias for transform.
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(ir.transform(x), ir.predict(x))
    # check that it is immune to permutation
    perm = np.random.permutation(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    assert_array_equal(ir.fit_transform(x[perm], y[perm]),
                       ir.fit_transform(x, y)[perm])
    assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
    # check it doesn't change y when all x are equal:
    ir = IsotonicRegression()
    assert_array_equal(ir.fit_transform(np.ones(len(x)), y), y)
def test_isotonic_regression_reversed():
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    # With increasing=False the fitted sequence must be non-increasing:
    # every consecutive difference is >= 0.
    y_ = IsotonicRegression(increasing=False).fit_transform(
        np.arange(len(y)), y)
    assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
    """increasing='auto' must detect a downward trend without warning."""
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    x = np.arange(len(y))
    model = IsotonicRegression(increasing='auto')
    fitted = assert_no_warnings(model.fit_transform, x, y)
    # The endpoints of the fit must reflect a non-increasing relationship.
    assert_false(fitted[0] < fitted[-1])
def test_isotonic_regression_auto_increasing():
    """increasing='auto' must detect an upward trend without warning."""
    y = np.array([5, 6.1, 6, 7, 10, 9, 10])
    x = np.arange(len(y))
    model = IsotonicRegression(increasing='auto')
    fitted = assert_no_warnings(model.fit_transform, x, y)
    # The endpoints of the fit must reflect an increasing relationship.
    assert_true(fitted[0] < fitted[-1])
def test_assert_raises_exceptions():
    # Invalid inputs to fit/transform must raise ValueError.
    ir = IsotonicRegression()
    rng = np.random.RandomState(42)
    # sample_weight shorter than x/y
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
    # x and y of different lengths
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
    # 2-D x is rejected at fit and at transform time
    assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
    assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
    """Omitting sample_weight must be equivalent to unit weights."""
    ir = IsotonicRegression()
    # random test data
    rng = np.random.RandomState(42)
    n = 100
    x = np.arange(n)
    y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
    # Fitting with explicit unit weights and with the default must agree.
    unit_weights = np.ones(n)
    assert_array_equal(ir.fit_transform(x, y, sample_weight=unit_weights),
                       ir.fit_transform(x, y))
def test_isotonic_min_max_boundaries():
    """y_min/y_max must clip the fitted values at both ends."""
    ir = IsotonicRegression(y_min=2, y_max=4)
    n = 6
    x = np.arange(n)
    y = np.arange(n)
    # The identity fit 0..5 is clipped into [2, 4].
    expected = [2, 2, 2, 3, 4, 4]
    assert_array_equal(np.round(ir.fit_transform(x, y)), expected)
def test_isotonic_sample_weight():
    """Weighted isotonic regression must pool blocks by sample weight."""
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    weights = [1, 2, 3, 4, 5, 6, 7]
    # The violating middle block pools to its weighted mean, 13.95.
    expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
    assert_array_equal(expected_y,
                       ir.fit_transform(x, y, sample_weight=weights))
def test_isotonic_regression_oob_raise():
    """out_of_bounds='raise' must reject queries outside the x range."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    ir.fit(x, y)
    # Both query points lie outside [min(x), max(x)].
    assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
    """out_of_bounds='clip' pins predictions to the training range."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    # Extremes of out-of-domain predictions match the in-domain extremes.
    outside = ir.predict([min(x) - 10, max(x) + 10])
    inside = ir.predict(x)
    assert_equal(max(outside), max(inside))
    assert_equal(min(outside), min(inside))
def test_isotonic_regression_oob_nan():
    """out_of_bounds='nan' maps out-of-domain queries to NaN."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
    ir.fit(x, y)
    # Two out-of-domain queries -> exactly two NaNs.
    preds = ir.predict([min(x) - 10, max(x) + 10])
    assert_equal(sum(np.isnan(preds)), 2)
def test_isotonic_regression_oob_bad():
    """An unknown out_of_bounds value must fail at fit time."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # "xyz" is not one of 'raise' | 'clip' | 'nan'.
    ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
    assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
    """Corrupting out_of_bounds after fit must fail at transform time."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    ir.fit(x, y)
    # The attribute is re-validated when transform is called.
    ir.out_of_bounds = "xyz"
    assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
    """A fitted model must survive a pickle round-trip unchanged."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    restored = pickle.loads(pickle.dumps(ir, pickle.HIGHEST_PROTOCOL))
    np.testing.assert_array_equal(ir.predict(x), restored.predict(x))
if __name__ == "__main__":
    # Legacy nose entry point: allows running this test module directly.
    import nose
    nose.run(argv=['', __file__])
| bsd-3-clause |
EPFL-LCSB/pytfa | pytfa/redgem/debugging.py | 1 | 1995 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: redgem
:platform: Unix, Windows
:synopsis: RedGEM Algorithm
.. moduleauthor:: pyTFA team
Debugging
"""
from cobra import Reaction
from pandas import Series
def make_sink(met, ub=100, lb=0):
    """Return a sink reaction ``sink_<met.id>`` draining metabolite *met*.

    Reuses the model's existing sink when one is already present (which
    happens when a model is debugged several times); otherwise a new
    one-metabolite drain reaction is built. Bounds are (re)set either way.
    """
    sink_id = 'sink_' + met.id
    try:
        # Reuse an existing sink if the model already has one.
        sink = met.model.reactions.get_by_id(sink_id)
    except KeyError:
        sink = Reaction(id=sink_id)
        sink.add_metabolites({met: -1})
    sink.lower_bound = lb
    sink.upper_bound = ub
    return sink
def add_BBB_sinks(model,biomass_rxn_id, ub=100, lb=0):
    """Add a sink reaction for every biomass building block (BBB).

    The BBBs are the reactants of the biomass reaction *biomass_rxn_id*.
    Sinks that already exist in *model* are reused (make_sink handles
    that); only the genuinely new ones are added.

    Returns the list of sink reactions as they live in the model.
    """
    bio_rxn = model.reactions.get_by_id(biomass_rxn_id)
    all_BBBs = bio_rxn.reactants
    all_sinks = list()
    for the_BBB in all_BBBs:
        new = make_sink(the_BBB, ub=ub, lb=lb)
        all_sinks.append(new)
    # Idiomatic membership test (was `not x.id in ...`); only add sinks
    # that are not yet part of the model.
    model.add_reactions([x for x in all_sinks if x.id not in model.reactions])
    # Return the model's own reaction objects, not the local copies.
    return [model.reactions.get_by_id(x.id) for x in all_sinks]
def check_BBB_production(model, biomass_rxn_id, verbose = False):
    """Maximise each biomass-building-block sink in turn.

    Returns a pandas Series mapping sink reaction id to its maximal flux,
    i.e. how much of each precursor the model can produce.
    """
    sinks = add_BBB_sinks(model, biomass_rxn_id, lb=0)
    production = dict()
    for sink in sinks:
        with model:
            # Temporarily make this sink the objective; the model context
            # manager reverts the change on exit.
            model.objective = sink.id
            production[sink.id] = model.slim_optimize()
    series = Series(production)
    if verbose:
        print(series)
    return series
def min_BBB_uptake(model,biomass_rxn_id, min_growth_value, verbose=False):
    # Minimise the total absolute uptake of biomass building blocks while
    # enforcing a minimum growth rate; returns a Series of sink fluxes.
    with model:
        # Sinks bounded to [-100, 0] act as uptakes only.
        all_sinks = add_BBB_sinks(model, biomass_rxn_id, ub = 0, lb = -100)
        # Uptake is negative
        # Min absolute uptake = Max uptake
        bio_rxn = model.reactions.get_by_id(biomass_rxn_id)
        bio_rxn.lower_bound = min_growth_value
        model.objective = sum( - 1* s.reverse_variable for s in all_sinks)
        model.objective_direction = 'max'
        model.optimize()
        ret = Series({r:r.flux for r in all_sinks})
        if verbose:
            print(ret)
return ret | apache-2.0 |
sgenoud/scikit-learn | sklearn/decomposition/nmf.py | 5 | 16879 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD
from __future__ import division
from ..base import BaseEstimator, TransformerMixin
from ..utils import atleast2d_or_csr, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot
import numpy as np
from scipy.optimize import nnls
import scipy.sparse as sp
import warnings
def _pos(x):
"""Positive part of a vector / matrix"""
return (x >= 0) * x
def _neg(x):
"""Negative part of a vector / matrix"""
neg_x = -x
neg_x *= x < 0
return neg_x
def norm(x):
    """Dot product-based Euclidean norm implementation
    See: http://fseoane.net/blog/2011/computing-the-vector-norm/
    """
    flat = x.ravel()
    # sqrt(x . x) over the flattened array equals the Frobenius norm.
    return np.sqrt(np.dot(flat, flat))
def _sparseness(x):
    """Hoyer's measure of sparsity for a vector (1 = sparsest)."""
    root_n = np.sqrt(len(x))
    # (sqrt(n) - L1/L2) / (sqrt(n) - 1), per Hoyer (JMLR 2004).
    l1_over_l2 = np.linalg.norm(x, 1) / norm(x)
    return (root_n - l1_over_l2) / (root_n - 1)
def check_non_negative(X, whom):
    """Raise ValueError if *X* (dense or sparse) has any negative entry.

    *whom* names the caller and is interpolated into the error message.
    """
    values = X.data if sp.issparse(X) else X
    if (values < 0).any():
        raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
                    random_state=None):
    """NNDSVD algorithm for NMF initialization.
    Computes a good initial guess for the non-negative
    rank k matrix approximation for X: X = WH
    Parameters
    ----------
    X: array, [n_samples, n_features]
        The data matrix to be decomposed.
    n_components:
        The number of components desired in the
        approximation.
    variant: None | 'a' | 'ar'
        The variant of the NNDSVD algorithm.
        Accepts None, 'a', 'ar'
        None: leaves the zero entries as zero
        'a': Fills the zero entries with the average of X
        'ar': Fills the zero entries with standard normal random variates.
        Default: None
    eps:
        Truncate all values less then this in output to zero.
    random_state: numpy.RandomState | int, optional
        The generator used to fill in the zeros, when using variant='ar'
        Default: numpy.random
    Returns
    -------
    (W, H):
        Initial guesses for solving X ~= WH such that
        the number of columns in W is n_components.
    Remarks
    -------
    This implements the algorithm described in
    C. Boutsidis, E. Gallopoulos: SVD based
    initialization: A head start for nonnegative
    matrix factorization - Pattern Recognition, 2008
    http://www.cs.rpi.edu/~boutsc/files/nndsvd.pdf
    """
    check_non_negative(X, "NMF initialization")
    if variant not in (None, 'a', 'ar'):
        raise ValueError("Invalid variant name")
    U, S, V = randomized_svd(X, n_components)
    W, H = np.zeros(U.shape), np.zeros(V.shape)
    # The leading singular triplet is non-negative
    # so it can be used as is for initialization.
    W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
    H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
    # NOTE(review): xrange implies this module targets Python 2.
    for j in xrange(1, n_components):
        x, y = U[:, j], V[j, :]
        # extract positive and negative parts of column vectors
        x_p, y_p = _pos(x), _pos(y)
        x_n, y_n = _neg(x), _neg(y)
        # and their norms
        x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
        x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
        # choose update: keep whichever sign pattern carries more energy
        if m_p > m_n:
            u = x_p / x_p_nrm
            v = y_p / y_p_nrm
            sigma = m_p
        else:
            u = x_n / x_n_nrm
            v = y_n / y_n_nrm
            sigma = m_n
        lbd = np.sqrt(S[j] * sigma)
        W[:, j] = lbd * u
        H[j, :] = lbd * v
    # Truncate tiny entries to exact zeros before optional filling.
    W[W < eps] = 0
    H[H < eps] = 0
    if variant == "a":
        avg = X.mean()
        W[W == 0] = avg
        H[H == 0] = avg
    elif variant == "ar":
        random_state = check_random_state(random_state)
        avg = X.mean()
        W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
        H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
    return W, H
def _nls_subproblem(V, W, H_init, tol, max_iter):
    """Non-negative least square solver
    Solves a non-negative least squares subproblem using the
    projected gradient descent algorithm.
    min || WH - V ||_2
    Parameters
    ----------
    V, W:
        Constant matrices
    H_init:
        Initial guess for the solution
    tol:
        Tolerance of the stopping condition.
    max_iter:
        Maximum number of iterations before
        timing out.
    Returns
    -------
    H:
        Solution to the non-negative least squares problem
    grad:
        The gradient.
    n_iter:
        The number of iterations done by the algorithm.
    """
    if (H_init < 0).any():
        raise ValueError("Negative values in H_init passed to NLS solver.")
    H = H_init
    # Precompute the two constant Gram products used by every iteration.
    WtV = safe_sparse_dot(W.T, V, dense_output=True)
    WtW = safe_sparse_dot(W.T, W, dense_output=True)
    # values justified in the paper
    alpha = 1
    beta = 0.1
    for n_iter in xrange(1, max_iter + 1):
        grad = np.dot(WtW, H) - WtV
        # Projected gradient: only components still free to move count
        # towards the stopping criterion.
        proj_gradient = norm(grad[np.logical_or(grad < 0, H > 0)])
        if proj_gradient < tol:
            break
        # Backtracking line search on the step size alpha (Lin 2007).
        for inner_iter in xrange(1, 20):
            Hn = H - alpha * grad
            # Hn = np.where(Hn > 0, Hn, 0)
            Hn = _pos(Hn)
            d = Hn - H
            gradd = np.sum(grad * d)
            dQd = np.sum(np.dot(WtW, d) * d)
            # magic numbers whoa
            suff_decr = 0.99 * gradd + 0.5 * dQd < 0
            if inner_iter == 1:
                decr_alpha = not suff_decr
                Hp = H
            if decr_alpha:
                # Shrink alpha until sufficient decrease is achieved.
                if suff_decr:
                    H = Hn
                    break
                else:
                    alpha *= beta
            elif not suff_decr or (Hp == Hn).all():
                # Growing alpha stopped helping; keep the previous point.
                H = Hp
                break
            else:
                alpha /= beta
                Hp = Hn
    if n_iter == max_iter:
        warnings.warn("Iteration limit reached in nls subproblem.")
    return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
    """Non-Negative matrix factorization by Projected Gradient (NMF)
    Parameters
    ----------
    X: {array-like, sparse matrix}, shape = [n_samples, n_features]
        Data the model will be fit to.
    n_components: int or None
        Number of components, if n_components is not set all components
        are kept
    init: 'nndsvd' | 'nndsvda' | 'nndsvdar' | int | RandomState
        Method used to initialize the procedure.
        Default: 'nndsvdar'
        Valid options::
            'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
                initialization (better for sparseness)
            'nndsvda': NNDSVD with zeros filled with the average of X
                (better when sparsity is not desired)
            'nndsvdar': NNDSVD with zeros filled with small random values
                (generally faster, less accurate alternative to NNDSVDa
                for when sparsity is not desired)
            int seed or RandomState: non-negative random matrices
    sparseness: 'data' | 'components' | None, default: None
        Where to enforce sparsity in the model.
    beta: double, default: 1
        Degree of sparseness, if sparseness is not None. Larger values mean
        more sparseness.
    eta: double, default: 0.1
        Degree of correctness to mantain, if sparsity is not None. Smaller
        values mean larger error.
    tol: double, default: 1e-4
        Tolerance value used in stopping conditions.
    max_iter: int, default: 200
        Number of iterations to compute.
    nls_max_iter: int, default: 2000
        Number of iterations in NLS subproblem.
    Attributes
    ----------
    `components_` : array, [n_components, n_features]
        Non-negative components of the data
    `reconstruction_err_` : number
        Frobenius norm of the matrix difference between the
        training data and the reconstructed data from the
        fit produced by the model. ``|| X - WH ||_2``
        Not computed for sparse input matrices because it is
        too expensive in terms of memory.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    >>> from sklearn.decomposition import ProjectedGradientNMF
    >>> model = ProjectedGradientNMF(n_components=2, init=0)
    >>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    ProjectedGradientNMF(beta=1, eta=0.1, init=0, max_iter=200, n_components=2,
            nls_max_iter=2000, sparseness=None, tol=0.0001)
    >>> model.components_
    array([[ 0.77032744,  0.11118662],
           [ 0.38526873,  0.38228063]])
    >>> model.reconstruction_err_ #doctest: +ELLIPSIS
    0.00746...
    >>> model = ProjectedGradientNMF(n_components=2, init=0,
    ...                              sparseness='components')
    >>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    ProjectedGradientNMF(beta=1, eta=0.1, init=0, max_iter=200, n_components=2,
            nls_max_iter=2000, sparseness='components', tol=0.0001)
    >>> model.components_
    array([[ 1.67481991,  0.29614922],
           [-0.        ,  0.4681982 ]])
    >>> model.reconstruction_err_ #doctest: +ELLIPSIS
    0.513...
    Notes
    -----
    This implements
    C.-J. Lin. Projected gradient methods
    for non-negative matrix factorization. Neural
    Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    P. Hoyer. Non-negative Matrix Factorization with
    Sparseness Constraints. Journal of Machine Learning
    Research 2004.
    NNDSVD is introduced in
    C. Boutsidis, E. Gallopoulos: SVD based
    initialization: A head start for nonnegative
    matrix factorization - Pattern Recognition, 2008
    http://www.cs.rpi.edu/~boutsc/files/nndsvd.pdf
    """
    def __init__(self, n_components=None, init="nndsvdar", sparseness=None,
                 beta=1, eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000):
        self.n_components = n_components
        self.init = init
        self.tol = tol
        # sparseness is validated eagerly; _update_W/_update_H rely on it
        # being one of exactly these three values.
        if sparseness not in (None, 'data', 'components'):
            raise ValueError(
                'Invalid sparseness parameter: got %r instead of one of %r' %
                (sparseness, (None, 'data', 'components')))
        self.sparseness = sparseness
        self.beta = beta
        self.eta = eta
        self.max_iter = max_iter
        self.nls_max_iter = nls_max_iter
    def _init(self, X):
        # Build the initial (W, H) pair according to self.init: one of the
        # NNDSVD variants, or non-negative random matrices from a seed.
        n_samples, n_features = X.shape
        if self.init == 'nndsvd':
            W, H = _initialize_nmf(X, self.n_components)
        elif self.init == 'nndsvda':
            W, H = _initialize_nmf(X, self.n_components, variant='a')
        elif self.init == 'nndsvdar':
            W, H = _initialize_nmf(X, self.n_components, variant='ar')
        else:
            try:
                rng = check_random_state(self.init)
                W = rng.randn(n_samples, self.n_components)
                # we do not write np.abs(W, out=W) to stay compatible with
                # numpy 1.5 and earlier where the 'out' keyword is not
                # supported as a kwarg on ufuncs
                np.abs(W, W)
                H = rng.randn(self.n_components, n_features)
                np.abs(H, H)
            except ValueError:
                raise ValueError(
                    'Invalid init parameter: got %r instead of one of %r' %
                    (self.init, (None, 'nndsvd', 'nndsvda', 'nndsvdar',
                                 int, np.random.RandomState)))
        return W, H
    def _update_W(self, X, H, W, tolW):
        # One NLS pass for W (solved on the transposed problem); the
        # sparseness penalty is appended as extra rows per Lin/Hoyer.
        n_samples, n_features = X.shape
        if self.sparseness is None:  # PEP 8: identity test for None
            W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
                                              self.nls_max_iter)
        elif self.sparseness == 'data':
            W, gradW, iterW = _nls_subproblem(
                np.r_[X.T, np.zeros((1, n_samples))],
                np.r_[H.T, np.sqrt(self.beta) *
                      np.ones((1, self.n_components))],
                W.T, tolW, self.nls_max_iter)
        elif self.sparseness == 'components':
            W, gradW, iterW = _nls_subproblem(
                np.r_[X.T, np.zeros((self.n_components, n_samples))],
                np.r_[H.T, np.sqrt(self.eta) *
                      np.eye(self.n_components)],
                W.T, tolW, self.nls_max_iter)
        return W, gradW, iterW
    def _update_H(self, X, H, W, tolH):
        # One NLS pass for H; mirrors _update_W with the roles of the
        # beta/eta penalties swapped between the two sparseness modes.
        n_samples, n_features = X.shape
        if self.sparseness is None:  # PEP 8: identity test for None
            H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
                                              self.nls_max_iter)
        elif self.sparseness == 'data':
            H, gradH, iterH = _nls_subproblem(
                np.r_[X, np.zeros((self.n_components, n_features))],
                np.r_[W, np.sqrt(self.eta) *
                      np.eye(self.n_components)],
                H, tolH, self.nls_max_iter)
        elif self.sparseness == 'components':
            H, gradH, iterH = _nls_subproblem(
                np.r_[X, np.zeros((1, n_features))],
                np.r_[W, np.sqrt(self.beta) *
                      np.ones((1, self.n_components))],
                H, tolH, self.nls_max_iter)
        return H, gradH, iterH
    def fit_transform(self, X, y=None):
        """Learn a NMF model for the data X and returns the transformed data.
        This is more efficient than calling fit followed by transform.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Data matrix to be decomposed
        Returns
        -------
        data: array, [n_samples, n_components]
            Transformed data
        """
        X = atleast2d_or_csr(X)
        check_non_negative(X, "NMF.fit")
        n_samples, n_features = X.shape
        # NOTE(review): mutating the constructor parameter in fit is a
        # sklearn anti-pattern; kept for backward compatibility.
        if not self.n_components:
            self.n_components = n_features
        W, H = self._init(X)
        gradW = (np.dot(W, np.dot(H, H.T))
                 - safe_sparse_dot(X, H.T, dense_output=True))
        gradH = (np.dot(np.dot(W.T, W), H)
                 - safe_sparse_dot(W.T, X, dense_output=True))
        init_grad = norm(np.r_[gradW, gradH.T])
        tolW = max(0.001, self.tol) * init_grad  # why max?
        tolH = tolW
        for n_iter in xrange(1, self.max_iter + 1):
            # stopping condition
            # as discussed in paper
            proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
                                   gradH[np.logical_or(gradH < 0, H > 0)]])
            if proj_norm < self.tol * init_grad:
                break
            # update W
            W, gradW, iterW = self._update_W(X, H, W, tolW)
            W = W.T
            gradW = gradW.T
            if iterW == 1:
                # Subproblem converged immediately: tighten its tolerance.
                tolW = 0.1 * tolW
            # update H
            H, gradH, iterH = self._update_H(X, H, W, tolH)
            if iterH == 1:
                tolH = 0.1 * tolH
        self.comp_sparseness_ = _sparseness(H.ravel())
        self.data_sparseness_ = _sparseness(W.ravel())
        if not sp.issparse(X):
            # Too memory-expensive for sparse input; see class docstring.
            self.reconstruction_err_ = norm(X - np.dot(W, H))
        self.components_ = H
        if n_iter == self.max_iter:
            warnings.warn("Iteration limit reached during fit")
        return W
    def fit(self, X, y=None, **params):
        """Learn a NMF model for the data X.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Data matrix to be decomposed
        Returns
        -------
        self
        """
        self.fit_transform(X, **params)
        return self
    def transform(self, X):
        """Transform the data X according to the fitted NMF model
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Data matrix to be transformed by the model
        Returns
        -------
        data: array, [n_samples, n_components]
            Transformed data
        """
        X = atleast2d_or_csr(X)
        H = np.zeros((X.shape[0], self.n_components))
        # Solve one non-negative least-squares problem per sample.
        for j in xrange(0, X.shape[0]):
            H[j, :], _ = nnls(self.components_.T, X[j, :])
        return H
class NMF(ProjectedGradientNMF):
    # Convenience alias: NMF is exactly ProjectedGradientNMF, sharing
    # its docstring verbatim.
    __doc__ = ProjectedGradientNMF.__doc__
| bsd-3-clause |
asoliveira/NumShip | scripts/plot/brl-ace-r-cg-plt.py | 1 | 2098 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Sensitivity-study plot: yaw acceleration (r-dot) during a turning
# circle ("Curva de Giro"), baseline run vs. three 'brl' perturbed runs.
# NOTE(review): the ur'...' literals below are Python-2-only syntax; this
# script will not parse under Python 3 -- confirm the target runtime.
# Is it dimensionless (adimensional)?
adi = False
# Should the figures be saved (True|False)?
save = True
# If saving, which file format is desired?
formato = 'jpg'
# If saving, in which directory should the figure go?
dircg = 'fig-sen'
# If saving, what is the file name?
nome = 'brl-acel-r-cg'
# Which title to put on the plots?
titulo = ''#'Curva de Giro'
# Plot colours: baseline and the three perturbed runs.
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
# Line styles.
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
# Acceleration time histories: baseline ('padrao') and runs 1.1-1.3 brl.
acehis = sp.genfromtxt('../entrada/padrao/CurvaGiro/acel.dat')
acehis2 = sp.genfromtxt('../entrada/brl/saida1.1/CurvaGiro/acel.dat')
acehis3 = sp.genfromtxt('../entrada/brl/saida1.2/CurvaGiro/acel.dat')
acehis4 = sp.genfromtxt('../entrada/brl/saida1.3/CurvaGiro/acel.dat')
# Axis limits: [t_min, t_max, rdot_min, rdot_max].
axl = [0, 1000, -0.005, 0.025]
# Plotting the turning circle.
# NOTE(review): the `adi` branch appears to swap the x/y labels relative
# to the else branch -- confirm which axis each label belongs to.
if adi:
    ylabel = r'$t\prime$'
    xacelabel = r'$\dot r\prime$'
else:
    ylabel = r'$\dot r \quad graus/s^2$'
    xacelabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
# Baseline (padrao): column 0 is time, column 6 is r-dot; the factor
# 180/pi converts rad/s^2 to degrees/s^2.
plt.plot(acehis[:, 0], acehis[:, 6] * (180 / sp.pi), color = pc,
    linestyle = ps, linewidth = 1, label=ur'padrão')
plt.plot(acehis2[:, 0], acehis2[:, 6] * (180 / sp.pi), color = r1c,
    linestyle= r1s, linewidth = 1, label=ur'1.1brl')
plt.plot(acehis3[:, 0], acehis3[:, 6] * (180 / sp.pi), color = r2c,
    linewidth = 1, linestyle = r2s, label=ur'1.2brl')
plt.plot(acehis4[:, 0], acehis4[:, 6] * (180 / sp.pi),color = r3c,
    linestyle = r3s, linewidth = 1, label=ur'1.3brl')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacelabel)
plt.axis(axl)
plt.grid(True)
if save:
    if not os.path.exists(dircg):
        os.makedirs(dircg)
    # Overwrite any previous figure with the same name.
    if os.path.exists(dircg + '/' + nome + '.' + formato):
        os.remove(dircg + '/' + nome + '.' + formato)
    plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
    plt.show()
| gpl-3.0 |
GuessWhoSamFoo/pandas | pandas/tests/scalar/period/test_period.py | 1 | 52625 | from datetime import date, datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT, period as libperiod
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
from pandas.compat import iteritems, text_type
from pandas.compat.numpy import np_datetime64_compat
import pandas as pd
from pandas import NaT, Period, Timedelta, Timestamp, offsets
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodConstruction(object):
    def test_construction(self):
        # Period construction must be equivalent across string spellings,
        # case of the freq alias, keyword forms and datetime inputs.
        i1 = Period('1/1/2005', freq='M')
        i2 = Period('Jan 2005')
        assert i1 == i2
        i1 = Period('2005', freq='A')
        i2 = Period('2005')
        i3 = Period('2005', freq='a')
        assert i1 == i2
        assert i1 == i3
        i4 = Period('2005', freq='M')
        i5 = Period('2005', freq='m')
        # Comparing Periods with different frequencies raises.
        pytest.raises(ValueError, i1.__ne__, i4)
        assert i4 == i5
        i1 = Period.now('Q')
        i2 = Period(datetime.now(), freq='Q')
        i3 = Period.now('q')
        assert i1 == i2
        assert i1 == i3
        i1 = Period('1982', freq='min')
        i2 = Period('1982', freq='MIN')
        assert i1 == i2
        # A (name, multiple) tuple is also accepted as freq.
        i2 = Period('1982', freq=('Min', 1))
        assert i1 == i2
        i1 = Period(year=2005, month=3, day=1, freq='D')
        i2 = Period('3/1/2005', freq='D')
        assert i1 == i2
        i3 = Period(year=2005, month=3, day=1, freq='d')
        assert i1 == i3
        # Sub-second strings infer millisecond ('L') resolution ...
        i1 = Period('2007-01-01 09:00:00.001')
        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
        assert i1 == expected
        expected = Period(np_datetime64_compat(
            '2007-01-01 09:00:00.001Z'), freq='L')
        assert i1 == expected
        # ... or microsecond ('U') resolution.
        i1 = Period('2007-01-01 09:00:00.00101')
        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
        assert i1 == expected
        expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
                          freq='U')
        assert i1 == expected
        # ordinal without freq, and an unknown freq alias, must raise.
        pytest.raises(ValueError, Period, ordinal=200701)
        pytest.raises(ValueError, Period, '2007-1-1', freq='X')
    def test_construction_bday(self):
        # Biz day construction, roll forward if non-weekday
        # 2012-03-10/11 are a weekend, so all three dates map to the
        # same business-day period.
        i1 = Period('3/10/12', freq='B')
        i2 = Period('3/10/12', freq='D')
        assert i1 == i2.asfreq('B')
        i2 = Period('3/11/12', freq='D')
        assert i1 == i2.asfreq('B')
        i2 = Period('3/12/12', freq='D')
        assert i1 == i2.asfreq('B')
        # Frequency alias is case-insensitive.
        i3 = Period('3/10/12', freq='b')
        assert i1 == i3
        i1 = Period(year=2012, month=3, day=10, freq='B')
        i2 = Period('3/12/12', freq='B')
        assert i1 == i2
    def test_construction_quarter(self):
        # Quarterly periods accept keyword form and several string
        # spellings ('2005Q1', '05Q1', '1Q2005', '1Q05', '4Q1984'),
        # all case-insensitively.
        i1 = Period(year=2005, quarter=1, freq='Q')
        i2 = Period('1/1/2005', freq='Q')
        assert i1 == i2
        i1 = Period(year=2005, quarter=3, freq='Q')
        i2 = Period('9/1/2005', freq='Q')
        assert i1 == i2
        i1 = Period('2005Q1')
        i2 = Period(year=2005, quarter=1, freq='Q')
        i3 = Period('2005q1')
        assert i1 == i2
        assert i1 == i3
        i1 = Period('05Q1')
        assert i1 == i2
        lower = Period('05q1')
        assert i1 == lower
        i1 = Period('1Q2005')
        assert i1 == i2
        lower = Period('1q2005')
        assert i1 == lower
        i1 = Period('1Q05')
        assert i1 == i2
        lower = Period('1q05')
        assert i1 == lower
        i1 = Period('4Q1984')
        assert i1.year == 1984
        lower = Period('4q1984')
        assert i1 == lower
    def test_construction_month(self):
        # Monthly periods can be built from a string, an int (YYYYMM),
        # date/datetime objects and numpy datetime64 values.
        expected = Period('2007-01', freq='M')
        i1 = Period('200701', freq='M')
        assert i1 == expected
        i1 = Period('200701', freq='M')
        assert i1 == expected
        i1 = Period(200701, freq='M')
        assert i1 == expected
        # ordinal counts months since the epoch, hence year 18695.
        i1 = Period(ordinal=200701, freq='M')
        assert i1.year == 18695
        i1 = Period(datetime(2007, 1, 1), freq='M')
        i2 = Period('200701', freq='M')
        assert i1 == i2
        i1 = Period(date(2007, 1, 1), freq='M')
        i2 = Period(datetime(2007, 1, 1), freq='M')
        i3 = Period(np.datetime64('2007-01-01'), freq='M')
        i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
        i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
        assert i1 == i2
        assert i1 == i3
        assert i1 == i4
        assert i1 == i5
    def test_period_constructor_offsets(self):
        # DateOffset objects passed as `freq` must behave exactly like
        # their string aliases, including multiplied offsets.
        assert (Period('1/1/2005', freq=offsets.MonthEnd()) ==
                Period('1/1/2005', freq='M'))
        assert (Period('2005', freq=offsets.YearEnd()) ==
                Period('2005', freq='A'))
        assert (Period('2005', freq=offsets.MonthEnd()) ==
                Period('2005', freq='M'))
        assert (Period('3/10/12', freq=offsets.BusinessDay()) ==
                Period('3/10/12', freq='B'))
        assert (Period('3/10/12', freq=offsets.Day()) ==
                Period('3/10/12', freq='D'))
        assert (Period(year=2005, quarter=1,
                       freq=offsets.QuarterEnd(startingMonth=12)) ==
                Period(year=2005, quarter=1, freq='Q'))
        assert (Period(year=2005, quarter=2,
                       freq=offsets.QuarterEnd(startingMonth=12)) ==
                Period(year=2005, quarter=2, freq='Q'))
        assert (Period(year=2005, month=3, day=1, freq=offsets.Day()) ==
                Period(year=2005, month=3, day=1, freq='D'))
        assert (Period(year=2012, month=3, day=10, freq=offsets.BDay()) ==
                Period(year=2012, month=3, day=10, freq='B'))
        expected = Period('2005-03-01', freq='3D')
        assert (Period(year=2005, month=3, day=1,
                       freq=offsets.Day(3)) == expected)
        assert Period(year=2005, month=3, day=1, freq='3D') == expected
        assert (Period(year=2012, month=3, day=10,
                       freq=offsets.BDay(3)) ==
                Period(year=2012, month=3, day=10, freq='3B'))
        assert (Period(200701, freq=offsets.MonthEnd()) ==
                Period(200701, freq='M'))
        i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
        i2 = Period(ordinal=200701, freq='M')
        assert i1 == i2
        # ordinal counts months since the epoch, hence year 18695.
        assert i1.year == 18695
        assert i2.year == 18695
        i1 = Period(datetime(2007, 1, 1), freq='M')
        i2 = Period('200701', freq='M')
        assert i1 == i2
        i1 = Period(date(2007, 1, 1), freq='M')
        i2 = Period(datetime(2007, 1, 1), freq='M')
        i3 = Period(np.datetime64('2007-01-01'), freq='M')
        i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
        i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
        assert i1 == i2
        assert i1 == i3
        assert i1 == i4
        assert i1 == i5
        # Sub-second resolution inference: millisecond ('L') ...
        i1 = Period('2007-01-01 09:00:00.001')
        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
        assert i1 == expected
        expected = Period(np_datetime64_compat(
            '2007-01-01 09:00:00.001Z'), freq='L')
        assert i1 == expected
        # ... and microsecond ('U').
        i1 = Period('2007-01-01 09:00:00.00101')
        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
        assert i1 == expected
        expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
                          freq='U')
        assert i1 == expected
        pytest.raises(ValueError, Period, ordinal=200701)
        pytest.raises(ValueError, Period, '2007-1-1', freq='X')
    def test_invalid_arguments(self):
        # Constructions that must raise: bare datetimes without freq,
        # non-integral ordinals, conflicting/incomplete keyword sets,
        # and unparseable date strings.
        with pytest.raises(ValueError):
            Period(datetime.now())
        with pytest.raises(ValueError):
            Period(datetime.now().date())
        with pytest.raises(ValueError):
            Period(1.6, freq='D')
        with pytest.raises(ValueError):
            Period(ordinal=1.6, freq='D')
        with pytest.raises(ValueError):
            Period(ordinal=2, value=1, freq='D')
        with pytest.raises(ValueError):
            Period(month=1)
        with pytest.raises(ValueError):
            Period('-2000', 'A')
        with pytest.raises(DateParseError):
            Period('0', 'A')
        with pytest.raises(DateParseError):
            Period('1/1/-2000', 'A')
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
assert Period(year=2007, month=1, freq='2M') == expected
assert Period(None) is NaT
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
assert result == exp
    def test_constructor_infer_freq(self):
        # When no freq is given it is inferred from the resolution of
        # the datetime string (day, hour, minute, second, milli, micro).
        p = Period('2007-01-01')
        assert p.freq == 'D'
        p = Period('2007-01-01 07')
        assert p.freq == 'H'
        p = Period('2007-01-01 07:10')
        assert p.freq == 'T'
        p = Period('2007-01-01 07:10:15')
        assert p.freq == 'S'
        p = Period('2007-01-01 07:10:15.123')
        assert p.freq == 'L'
        # Trailing zeros keep millisecond resolution ...
        p = Period('2007-01-01 07:10:15.123000')
        assert p.freq == 'L'
        # ... while a non-zero sixth digit promotes to microseconds.
        p = Period('2007-01-01 07:10:15.123400')
        assert p.freq == 'U'
    def test_multiples(self):
        # A multiplied frequency ('2A') shares the ordinal of the base
        # frequency but integer arithmetic steps by the multiple.
        result1 = Period('1989', freq='2A')
        result2 = Period('1989', freq='A')
        assert result1.ordinal == result2.ordinal
        assert result1.freqstr == '2A-DEC'
        assert result2.freqstr == 'A-DEC'
        assert result1.freq == offsets.YearEnd(2)
        assert result2.freq == offsets.YearEnd()
        # +/- 1 on a 2A period moves the ordinal by 2 years.
        assert (result1 + 1).ordinal == result1.ordinal + 2
        assert (1 + result1).ordinal == result1.ordinal + 2
        assert (result1 - 1).ordinal == result2.ordinal - 2
        assert (-1 + result1).ordinal == result2.ordinal - 2
    @pytest.mark.parametrize('month', MONTHS)
    def test_period_cons_quarterly(self, month):
        # bugs in scikits.timeseries
        # Round-trip Q-period -> end timestamp -> Q-period must be the
        # identity for every fiscal-year-end month.
        freq = 'Q-%s' % month
        exp = Period('1989Q3', freq=freq)
        assert '1989Q3' in str(exp)
        stamp = exp.to_timestamp('D', how='end')
        p = Period(stamp, freq=freq)
        assert p == exp
        # Same round-trip through a multiplied timestamp frequency.
        stamp = exp.to_timestamp('3D', how='end')
        p = Period(stamp, freq=freq)
        assert p == exp
    @pytest.mark.parametrize('month', MONTHS)
    def test_period_cons_annual(self, month):
        # bugs in scikits.timeseries
        # A timestamp 30 days past the annual period's end must fall into
        # the next period, for every fiscal-year-end month.
        freq = 'A-%s' % month
        exp = Period('1989', freq=freq)
        stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
        p = Period(stamp, freq=freq)
        assert p == exp + 1
        assert isinstance(p, Period)
    @pytest.mark.parametrize('day', DAYS)
    @pytest.mark.parametrize('num', range(10, 17))
    def test_period_cons_weekly(self, num, day):
        # Weekly construction straight from a date string must match
        # converting the daily period to that weekly frequency, for a
        # full week of dates and every week-end day anchor.
        daystr = '2011-02-%d' % num
        freq = 'W-%s' % day
        result = Period(daystr, freq=freq)
        expected = Period(daystr, freq='D').asfreq(freq)
        assert result == expected
        assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = Period('2011-01', freq='M')
res = Period._from_ordinal(p.ordinal, freq='M')
assert p == res
assert isinstance(res, Period)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
assert p is NaT
p = Period('nat', freq='W-SUN')
assert p is NaT
p = Period(iNaT, freq='D')
assert p is NaT
p = Period(iNaT, freq='3D')
assert p is NaT
p = Period(iNaT, freq='1D1H')
assert p is NaT
p = Period('NaT')
assert p is NaT
p = Period(iNaT)
assert p is NaT
def test_period_cons_mult(self):
    """Multiplied monthly frequencies: same anchor, stepped arithmetic,
    and rejection of non-positive spans."""
    p3m = Period('2011-01', freq='3M')
    p1m = Period('2011-01', freq='M')
    # Both anchor on the same ordinal; only the span differs.
    assert p3m.ordinal == p1m.ordinal
    assert p3m.freq == offsets.MonthEnd(3)
    assert p3m.freqstr == '3M'
    assert p1m.freq == offsets.MonthEnd()
    assert p1m.freqstr == 'M'
    # +/- one step on the 3M period moves by three plain months.
    for delta, shifted in ((1, p1m + 3), (-1, p1m - 3)):
        result = p3m + delta
        assert result.ordinal == shifted.ordinal
        assert result.freq == p3m.freq
        assert result.freqstr == '3M'
    # Non-positive spans are rejected outright.
    for bad in ('-3M', '0M'):
        msg = ('Frequency must be positive, because it'
               ' represents span: {}'.format(bad))
        with pytest.raises(ValueError, match=msg):
            Period('2011-01', freq=bad)
def test_period_cons_combined(self):
    """Combined day/intraday frequencies ('1D1H') collapse to a single
    tick offset (25 hours), independent of component order."""
    # Pairs built from both the string and the ordinal= constructor paths.
    p = [(Period('2011-01', freq='1D1H'),
          Period('2011-01', freq='1H1D'),
          Period('2011-01', freq='H')),
         (Period(ordinal=1, freq='1D1H'),
          Period(ordinal=1, freq='1H1D'),
          Period(ordinal=1, freq='H'))]
    for p1, p2, p3 in p:
        # All three share the same anchor ordinal; only the span differs.
        assert p1.ordinal == p3.ordinal
        assert p2.ordinal == p3.ordinal
        assert p1.freq == offsets.Hour(25)
        assert p1.freqstr == '25H'
        assert p2.freq == offsets.Hour(25)
        assert p2.freqstr == '25H'
        assert p3.freq == offsets.Hour()
        assert p3.freqstr == 'H'
        # Adding/subtracting one step moves by the full 25-hour span.
        result = p1 + 1
        assert result.ordinal == (p3 + 25).ordinal
        assert result.freq == p1.freq
        assert result.freqstr == '25H'
        result = p2 + 1
        assert result.ordinal == (p3 + 25).ordinal
        assert result.freq == p2.freq
        assert result.freqstr == '25H'
        result = p1 - 1
        assert result.ordinal == (p3 - 25).ordinal
        assert result.freq == p1.freq
        assert result.freqstr == '25H'
        result = p2 - 1
        assert result.ordinal == (p3 - 25).ordinal
        assert result.freq == p2.freq
        assert result.freqstr == '25H'
    # Negative combined spans are invalid, via both constructor paths.
    msg = ('Frequency must be positive, because it'
           ' represents span: -25H')
    with pytest.raises(ValueError, match=msg):
        Period('2011-01', freq='-1D1H')
    with pytest.raises(ValueError, match=msg):
        Period('2011-01', freq='-1H1D')
    with pytest.raises(ValueError, match=msg):
        Period(ordinal=1, freq='-1D1H')
    with pytest.raises(ValueError, match=msg):
        Period(ordinal=1, freq='-1H1D')
    # Zero-length combined spans are likewise rejected.
    msg = ('Frequency must be positive, because it'
           ' represents span: 0D')
    with pytest.raises(ValueError, match=msg):
        Period('2011-01', freq='0D0H')
    with pytest.raises(ValueError, match=msg):
        Period(ordinal=1, freq='0D0H')
    # You can only combine together day and intraday offsets
    msg = ('Invalid frequency: 1W1D')
    with pytest.raises(ValueError, match=msg):
        Period('2011-01', freq='1W1D')
    msg = ('Invalid frequency: 1D1W')
    with pytest.raises(ValueError, match=msg):
        Period('2011-01', freq='1D1W')
class TestPeriodMethods(object):
    """Tests for Period instance methods: pickling, hashing, timestamp
    conversion (including timezone handling) and rendering."""

    def test_round_trip(self):
        # A Period must survive a pickle round trip unchanged.
        p = Period('2000Q1')
        new_p = tm.round_trip_pickle(p)
        assert new_p == p

    def test_hash(self):
        # Hash identity tracks (date, freq): equal inputs hash equal,
        # differing date or frequency hashes differently.
        assert (hash(Period('2011-01', freq='M')) ==
                hash(Period('2011-01', freq='M')))

        assert (hash(Period('2011-01-01', freq='D')) !=
                hash(Period('2011-01', freq='M')))

        assert (hash(Period('2011-01', freq='3M')) !=
                hash(Period('2011-01', freq='2M')))

        assert (hash(Period('2011-01', freq='M')) !=
                hash(Period('2011-02', freq='M')))

    # --------------------------------------------------------------
    # to_timestamp

    @pytest.mark.parametrize('tzstr', ['Europe/Brussels',
                                       'Asia/Tokyo', 'US/Pacific'])
    def test_to_timestamp_tz_arg(self, tzstr):
        # tz= localizes the resulting Timestamp (pytz-backed zones here).
        p = Period('1/1/2005', freq='M').to_timestamp(tz=tzstr)
        exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)

        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz

        p = Period('1/1/2005', freq='3H').to_timestamp(tz=tzstr)
        exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)

        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz

        # how='end' via freq='A': lands on the last day of the year.
        p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=tzstr)
        exp = Timestamp('31/12/2005', tz='UTC').tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)

        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz

        p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=tzstr)
        exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)

        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz

    @pytest.mark.parametrize('tzstr', ['dateutil/Europe/Brussels',
                                       'dateutil/Asia/Tokyo',
                                       'dateutil/US/Pacific'])
    def test_to_timestamp_tz_arg_dateutil(self, tzstr):
        # Same as above, but with dateutil-backed timezone objects.
        tz = maybe_get_tz(tzstr)
        p = Period('1/1/2005', freq='M').to_timestamp(tz=tz)
        exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr)
        assert p == exp
        assert p.tz == dateutil_gettz(tzstr.split('/', 1)[1])
        assert p.tz == exp.tz

        p = Period('1/1/2005', freq='M').to_timestamp(freq='3H', tz=tz)
        exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr)
        assert p == exp
        assert p.tz == dateutil_gettz(tzstr.split('/', 1)[1])
        assert p.tz == exp.tz

    def test_to_timestamp_tz_arg_dateutil_from_string(self):
        # A 'dateutil/...' string resolves to a dateutil tzinfo.
        p = Period('1/1/2005',
                   freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
        assert p.tz == dateutil_gettz('Europe/Brussels')

    def test_to_timestamp_mult(self):
        # 'E' (end) is the last nanosecond before the next period starts.
        p = Period('2011-01', freq='M')
        assert p.to_timestamp(how='S') == Timestamp('2011-01-01')

        expected = Timestamp('2011-02-01') - Timedelta(1, 'ns')
        assert p.to_timestamp(how='E') == expected

        # A multiplied frequency spans the whole 3-month window.
        p = Period('2011-01', freq='3M')
        assert p.to_timestamp(how='S') == Timestamp('2011-01-01')

        expected = Timestamp('2011-04-01') - Timedelta(1, 'ns')
        assert p.to_timestamp(how='E') == expected

    def test_to_timestamp(self):
        p = Period('1982', freq='A')
        start_ts = p.to_timestamp(how='S')
        # 'how' aliases are case-insensitive.
        aliases = ['s', 'StarT', 'BEGIn']
        for a in aliases:
            assert start_ts == p.to_timestamp('D', how=a)
            # freq with mult should not affect to the result
            assert start_ts == p.to_timestamp('3D', how=a)

        end_ts = p.to_timestamp(how='E')
        aliases = ['e', 'end', 'FINIsH']
        for a in aliases:
            assert end_ts == p.to_timestamp('D', how=a)
            assert end_ts == p.to_timestamp('3D', how=a)

        from_lst = ['A', 'Q', 'M', 'W', 'B', 'D', 'H', 'Min', 'S']

        def _ex(p):
            # Last nanosecond before the following period begins.
            return Timestamp((p + p.freq).start_time.value - 1)

        # to_timestamp().to_period() must round-trip for every frequency.
        for i, fcode in enumerate(from_lst):
            p = Period('1982', freq=fcode)
            result = p.to_timestamp().to_period(fcode)
            assert result == p

            assert p.start_time == p.to_timestamp(how='S')

            assert p.end_time == _ex(p)

        # Frequency other than daily

        p = Period('1985', freq='A')

        result = p.to_timestamp('H', how='end')
        expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns')
        assert result == expected
        result = p.to_timestamp('3H', how='end')
        assert result == expected

        result = p.to_timestamp('T', how='end')
        expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns')
        assert result == expected
        result = p.to_timestamp('2T', how='end')
        assert result == expected

        result = p.to_timestamp(how='end')
        expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns')
        assert result == expected

        # 'start' gives the first instant regardless of target frequency.
        expected = datetime(1985, 1, 1)
        result = p.to_timestamp('H', how='start')
        assert result == expected
        result = p.to_timestamp('T', how='start')
        assert result == expected
        result = p.to_timestamp('S', how='start')
        assert result == expected
        result = p.to_timestamp('3H', how='start')
        assert result == expected
        result = p.to_timestamp('5S', how='start')
        assert result == expected

    # --------------------------------------------------------------
    # Rendering: __repr__, strftime, etc

    def test_repr(self):
        p = Period('Jan-2000')
        assert '2000-01' in repr(p)

        p = Period('2000-12-15')
        assert '2000-12-15' in repr(p)

    def test_repr_nat(self):
        # A NaT Period renders using the NaT repr.
        p = Period('nat', freq='M')
        assert repr(NaT) in repr(p)

    def test_millisecond_repr(self):
        p = Period('2000-01-01 12:15:02.123')

        assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"

    def test_microsecond_repr(self):
        p = Period('2000-01-01 12:15:02.123567')

        assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"

    def test_strftime(self):
        # GH#3363
        p = Period('2000-1-1 12:34:12', freq='S')
        res = p.strftime('%Y-%m-%d %H:%M:%S')
        assert res == '2000-01-01 12:34:12'
        assert isinstance(res, text_type)
class TestPeriodProperties(object):
    "Test properties such as year, month, weekday, etc...."

    @pytest.mark.parametrize('freq', ['A', 'M', 'D', 'H'])
    def test_is_leap_year(self, freq):
        # GH 13727
        p = Period('2000-01-01 00:00:00', freq=freq)
        assert p.is_leap_year
        assert isinstance(p.is_leap_year, bool)

        p = Period('1999-01-01 00:00:00', freq=freq)
        assert not p.is_leap_year

        p = Period('2004-01-01 00:00:00', freq=freq)
        assert p.is_leap_year

        # 2100 is divisible by 4 but not a leap year (century rule).
        p = Period('2100-01-01 00:00:00', freq=freq)
        assert not p.is_leap_year

    def test_quarterly_negative_ordinals(self):
        # Negative ordinals count backwards from the 1970 epoch.
        p = Period(ordinal=-1, freq='Q-DEC')
        assert p.year == 1969
        assert p.quarter == 4
        assert isinstance(p, Period)

        p = Period(ordinal=-2, freq='Q-DEC')
        assert p.year == 1969
        assert p.quarter == 3
        assert isinstance(p, Period)

        p = Period(ordinal=-2, freq='M')
        assert p.year == 1969
        assert p.month == 11
        assert isinstance(p, Period)

    def test_freq_str(self):
        # 'Min' is accepted as input but canonicalizes to 'T'.
        i1 = Period('1982', freq='Min')
        assert i1.freq == offsets.Minute()
        assert i1.freqstr == 'T'

    def test_period_deprecated_freq(self):
        # Long-form aliases ('MONTHLY', 'DAILY', ...) must raise;
        # the canonical short codes keep working.
        cases = {"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
                 "B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
                 "D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
                 "H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
                 "T": ["minute", "MINUTE", "MINUTELY", "minutely"],
                 "S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
                 "L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
                 "U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
                 "N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]}

        msg = INVALID_FREQ_ERR_MSG
        for exp, freqs in iteritems(cases):
            for freq in freqs:
                with pytest.raises(ValueError, match=msg):
                    Period('2016-03-01 09:00', freq=freq)

                with pytest.raises(ValueError, match=msg):
                    Period(ordinal=1, freq=freq)

            # check supported freq-aliases still works
            p1 = Period('2016-03-01 09:00', freq=exp)
            p2 = Period(ordinal=1, freq=exp)
            assert isinstance(p1, Period)
            assert isinstance(p2, Period)

    def test_start_time(self):
        freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
        xp = datetime(2012, 1, 1)
        for f in freq_lst:
            p = Period('2012', freq=f)
            assert p.start_time == xp
        # Business and weekly periods snap to their own anchor days.
        assert Period('2012', freq='B').start_time == datetime(2012, 1, 2)
        assert Period('2012', freq='W').start_time == datetime(2011, 12, 26)

    def test_end_time(self):
        p = Period('2012', freq='A')

        def _ex(*args):
            # End time is one nanosecond before the next period starts.
            return Timestamp(Timestamp(datetime(*args)).value - 1)

        xp = _ex(2013, 1, 1)
        assert xp == p.end_time

        p = Period('2012', freq='Q')
        xp = _ex(2012, 4, 1)
        assert xp == p.end_time

        p = Period('2012', freq='M')
        xp = _ex(2012, 2, 1)
        assert xp == p.end_time

        p = Period('2012', freq='D')
        xp = _ex(2012, 1, 2)
        assert xp == p.end_time

        p = Period('2012', freq='H')
        xp = _ex(2012, 1, 1, 1)
        assert xp == p.end_time

        p = Period('2012', freq='B')
        xp = _ex(2012, 1, 3)
        assert xp == p.end_time

        p = Period('2012', freq='W')
        xp = _ex(2012, 1, 2)
        assert xp == p.end_time

        # Test for GH 11738
        p = Period('2012', freq='15D')
        xp = _ex(2012, 1, 16)
        assert xp == p.end_time

        # Combined day/intraday frequencies span 25 hours either way.
        p = Period('2012', freq='1D1H')
        xp = _ex(2012, 1, 2, 1)
        assert xp == p.end_time

        p = Period('2012', freq='1H1D')
        xp = _ex(2012, 1, 2, 1)
        assert xp == p.end_time

    def test_anchor_week_end_time(self):
        def _ex(*args):
            return Timestamp(Timestamp(datetime(*args)).value - 1)

        # W-SAT anchored week containing 2013-01-01 ends Saturday Jan 5;
        # end_time is the last nanosecond of that day.
        p = Period('2013-1-1', 'W-SAT')
        xp = _ex(2013, 1, 6)
        assert p.end_time == xp

    def test_properties_annually(self):
        # Test properties on Periods with annually frequency.
        a_date = Period(freq='A', year=2007)
        assert a_date.year == 2007

    def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
        qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
        qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
        qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
        #
        for x in range(3):
            for qd in (qedec_date, qejan_date, qejun_date):
                assert (qd + x).qyear == 2007
                assert (qd + x).quarter == x + 1

    def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
        m_date = Period(freq='M', year=2007, month=1)
        for x in range(11):
            m_ival_x = m_date + x
            assert m_ival_x.year == 2007
            if 1 <= x + 1 <= 3:
                assert m_ival_x.quarter == 1
            elif 4 <= x + 1 <= 6:
                assert m_ival_x.quarter == 2
            elif 7 <= x + 1 <= 9:
                assert m_ival_x.quarter == 3
            elif 10 <= x + 1 <= 12:
                assert m_ival_x.quarter == 4
            assert m_ival_x.month == x + 1

    def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
        w_date = Period(freq='W', year=2007, month=1, day=7)
        #
        assert w_date.year == 2007
        assert w_date.quarter == 1
        assert w_date.month == 1
        assert w_date.week == 1
        assert (w_date - 1).week == 52
        assert w_date.days_in_month == 31
        assert Period(freq='W', year=2012,
                      month=2, day=1).days_in_month == 29

    def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency; the legacy
        # 'WK' alias must raise.
        w_date = Period(freq='W', year=2007, month=1, day=7)
        assert w_date.year == 2007
        assert w_date.quarter == 1
        assert w_date.month == 1
        assert w_date.week == 1
        assert (w_date - 1).week == 52
        assert w_date.days_in_month == 31

        exp = Period(freq='W', year=2012, month=2, day=1)
        assert exp.days_in_month == 29

        msg = INVALID_FREQ_ERR_MSG
        with pytest.raises(ValueError, match=msg):
            Period(freq='WK', year=2007, month=1, day=7)

    def test_properties_daily(self):
        # Test properties on Periods with daily frequency.
        b_date = Period(freq='B', year=2007, month=1, day=1)
        #
        assert b_date.year == 2007
        assert b_date.quarter == 1
        assert b_date.month == 1
        assert b_date.day == 1
        assert b_date.weekday == 0
        assert b_date.dayofyear == 1
        assert b_date.days_in_month == 31
        assert Period(freq='B', year=2012,
                      month=2, day=1).days_in_month == 29

        d_date = Period(freq='D', year=2007, month=1, day=1)

        assert d_date.year == 2007
        assert d_date.quarter == 1
        assert d_date.month == 1
        assert d_date.day == 1
        assert d_date.weekday == 0
        assert d_date.dayofyear == 1
        assert d_date.days_in_month == 31
        assert Period(freq='D', year=2012, month=2,
                      day=1).days_in_month == 29

    def test_properties_hourly(self):
        # Test properties on Periods with hourly frequency.
        h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0)
        h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0)

        for h_date in [h_date1, h_date2]:
            assert h_date.year == 2007
            assert h_date.quarter == 1
            assert h_date.month == 1
            assert h_date.day == 1
            assert h_date.weekday == 0
            assert h_date.dayofyear == 1
            assert h_date.hour == 0
            assert h_date.days_in_month == 31
            assert Period(freq='H', year=2012, month=2, day=1,
                          hour=0).days_in_month == 29

    def test_properties_minutely(self):
        # Test properties on Periods with minutely frequency.
        t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
                        minute=0)
        #
        assert t_date.quarter == 1
        assert t_date.month == 1
        assert t_date.day == 1
        assert t_date.weekday == 0
        assert t_date.dayofyear == 1
        assert t_date.hour == 0
        assert t_date.minute == 0
        assert t_date.days_in_month == 31
        # NOTE(review): this check uses freq='D' rather than 'Min';
        # days_in_month is the same either way — confirm intentional.
        assert Period(freq='D', year=2012, month=2, day=1, hour=0,
                      minute=0).days_in_month == 29

    def test_properties_secondly(self):
        # Test properties on Periods with secondly frequency.
        # NOTE(review): freq='Min' is used here (not 'S') even though a
        # second= kwarg is passed — confirm intentional.
        s_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
                        minute=0, second=0)
        #
        assert s_date.year == 2007
        assert s_date.quarter == 1
        assert s_date.month == 1
        assert s_date.day == 1
        assert s_date.weekday == 0
        assert s_date.dayofyear == 1
        assert s_date.hour == 0
        assert s_date.minute == 0
        assert s_date.second == 0
        assert s_date.days_in_month == 31
        assert Period(freq='Min', year=2012, month=2, day=1, hour=0,
                      minute=0, second=0).days_in_month == 29
class TestPeriodField(object):
    """Tests for the low-level period field accessor helpers."""

    def test_get_period_field_array_raises_on_out_of_range(self):
        # A field code of -1 is outside the valid range and must raise.
        with pytest.raises(ValueError):
            libperiod.get_period_field_arr(-1, np.empty(1), 0)
class TestComparisons(object):
    """Ordering/equality semantics between Periods.

    Ordering across different frequencies raises IncompatibleFrequency;
    ordering against non-Period objects raises TypeError, while ==/!=
    simply report inequality.
    """

    def setup_method(self, method):
        # Fixture Periods reused by all comparison tests below.
        self.january1 = Period('2000-01', 'M')
        self.january2 = Period('2000-01', 'M')
        self.february = Period('2000-02', 'M')
        self.march = Period('2000-03', 'M')
        self.day = Period('2012-01-01', 'D')

    def test_equal(self):
        assert self.january1 == self.january2

    def test_equal_Raises_Value(self):
        # Equality across mixed frequencies is rejected.
        with pytest.raises(period.IncompatibleFrequency):
            self.january1 == self.day

    def test_notEqual(self):
        assert self.january1 != 1
        assert self.january1 != self.february

    def test_greater(self):
        assert self.february > self.january1

    def test_greater_Raises_Value(self):
        with pytest.raises(period.IncompatibleFrequency):
            self.january1 > self.day

    def test_greater_Raises_Type(self):
        with pytest.raises(TypeError):
            self.january1 > 1

    def test_greaterEqual(self):
        assert self.january1 >= self.january2

    def test_greaterEqual_Raises_Value(self):
        with pytest.raises(period.IncompatibleFrequency):
            self.january1 >= self.day

        with pytest.raises(TypeError):
            print(self.january1 >= 1)

    def test_smallerEqual(self):
        assert self.january1 <= self.january2

    def test_smallerEqual_Raises_Value(self):
        with pytest.raises(period.IncompatibleFrequency):
            self.january1 <= self.day

    def test_smallerEqual_Raises_Type(self):
        with pytest.raises(TypeError):
            self.january1 <= 1

    def test_smaller(self):
        assert self.january1 < self.february

    def test_smaller_Raises_Value(self):
        with pytest.raises(period.IncompatibleFrequency):
            self.january1 < self.day

    def test_smaller_Raises_Type(self):
        with pytest.raises(TypeError):
            self.january1 < 1

    def test_sort(self):
        periods = [self.march, self.january1, self.february]
        correctPeriods = [self.january1, self.february, self.march]
        assert sorted(periods) == correctPeriods

    def test_period_nat_comp(self):
        p_nat = Period('NaT', freq='D')
        p = Period('2011-01-01', freq='D')

        nat = Timestamp('NaT')
        t = Timestamp('2011-01-01')

        # confirm Period('NaT') work identical with Timestamp('NaT')
        for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat), (nat, t),
                            (t, nat), (nat, nat)]:
            # NaT is unordered and unequal to everything, itself included.
            assert not left < right
            assert not left > right
            assert not left == right
            assert left != right
            assert not left <= right
            assert not left >= right
class TestArithmetic(object):
    """Arithmetic between Periods and integers, timedeltas, DateOffsets.

    Offsets are only accepted when they amount to an exact multiple of
    the Period's own frequency; anything else raises
    IncompatibleFrequency (or TypeError for reversed numpy timedelta
    operands, where numpy's ufunc machinery raises first).
    """

    def test_sub_delta(self):
        # Same-frequency subtraction yields a multiple of the freq.
        left, right = Period('2011', freq='A'), Period('2007', freq='A')
        result = left - right
        assert result == 4 * right.freq

        with pytest.raises(period.IncompatibleFrequency):
            left - Period('2007-01', freq='M')

    def test_add_integer(self):
        # Integers shift by whole periods, commutatively.
        per1 = Period(freq='D', year=2008, month=1, day=1)
        per2 = Period(freq='D', year=2008, month=1, day=2)
        assert per1 + 1 == per2
        assert 1 + per1 == per2

    def test_add_sub_nat(self):
        # GH#13071: NaT propagates through +/- in both operand orders.
        p = Period('2011-01', freq='M')
        assert p + NaT is NaT
        assert NaT + p is NaT
        assert p - NaT is NaT
        assert NaT - p is NaT

        p = Period('NaT', freq='M')
        assert p + NaT is NaT
        assert NaT + p is NaT
        assert p - NaT is NaT
        assert NaT - p is NaT

    def test_add_invalid(self):
        # GH#4731: strings and Period+Period addition are unsupported.
        per1 = Period(freq='D', year=2008, month=1, day=1)
        per2 = Period(freq='D', year=2008, month=1, day=2)

        msg = r"unsupported operand type\(s\)"
        with pytest.raises(TypeError, match=msg):
            per1 + "str"
        with pytest.raises(TypeError, match=msg):
            "str" + per1
        with pytest.raises(TypeError, match=msg):
            per1 + per2

    # Wrappers used to exercise scalar, Series and Index boxing below.
    boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]
    ids = ['identity', 'Series', 'Index']

    @pytest.mark.parametrize('lbox', boxes, ids=ids)
    @pytest.mark.parametrize('rbox', boxes, ids=ids)
    def test_add_timestamp_raises(self, rbox, lbox):
        # GH#17983: Timestamp+Period (and Period+Period) must raise for
        # every boxing combination.
        ts = Timestamp('2017')
        per = Period('2017', freq='M')

        # We may get a different message depending on which class raises
        # the error.
        msg = (r"cannot add|unsupported operand|"
               r"can only operate on a|incompatible type|"
               r"ufunc add cannot use operands")
        with pytest.raises(TypeError, match=msg):
            lbox(ts) + rbox(per)

        with pytest.raises(TypeError, match=msg):
            lbox(per) + rbox(ts)

        with pytest.raises(TypeError, match=msg):
            lbox(per) + rbox(per)

    def test_sub(self):
        per1 = Period('2011-01-01', freq='D')
        per2 = Period('2011-01-15', freq='D')

        off = per1.freq
        assert per1 - per2 == -14 * off
        assert per2 - per1 == 14 * off

        msg = r"Input has different freq=M from Period\(freq=D\)"
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            per1 - Period('2011-02', freq='M')

    @pytest.mark.parametrize('n', [1, 2, 3, 4])
    def test_sub_n_gt_1_ticks(self, tick_classes, n):
        # GH 23878: subtraction with an n>1 tick freq matches the result
        # computed at the base (n=1) frequency.
        p1 = pd.Period('19910905', freq=tick_classes(n))
        p2 = pd.Period('19920406', freq=tick_classes(n))

        expected = (pd.Period(str(p2), freq=p2.freq.base)
                    - pd.Period(str(p1), freq=p1.freq.base))

        assert (p2 - p1) == expected

    @pytest.mark.parametrize('normalize', [True, False])
    @pytest.mark.parametrize('n', [1, 2, 3, 4])
    @pytest.mark.parametrize('offset, kwd_name', [
        (pd.offsets.YearEnd, 'month'),
        (pd.offsets.QuarterEnd, 'startingMonth'),
        (pd.offsets.MonthEnd, None),
        (pd.offsets.Week, 'weekday')
    ])
    def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
        # GH 23878: same as above, for anchored (non-tick) offsets.
        kwds = {kwd_name: 3} if kwd_name is not None else {}
        p1_d = '19910905'
        p2_d = '19920406'
        p1 = pd.Period(p1_d, freq=offset(n, normalize, **kwds))
        p2 = pd.Period(p2_d, freq=offset(n, normalize, **kwds))

        expected = (pd.Period(p2_d, freq=p2.freq.base)
                    - pd.Period(p1_d, freq=p1.freq.base))

        assert (p2 - p1) == expected

    def test_add_offset(self):
        # freq is DateOffset
        for freq in ['A', '2A', '3A']:
            p = Period('2011', freq=freq)
            exp = Period('2013', freq=freq)
            assert p + offsets.YearEnd(2) == exp
            assert offsets.YearEnd(2) + p == exp

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    # reversed np.timedelta64 + Period goes through
                    # numpy's ufunc machinery and raises TypeError.
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(period.IncompatibleFrequency):
                        o + p

        for freq in ['M', '2M', '3M']:
            p = Period('2011-03', freq=freq)
            exp = Period('2011-05', freq=freq)
            assert p + offsets.MonthEnd(2) == exp
            assert offsets.MonthEnd(2) + p == exp

            exp = Period('2012-03', freq=freq)
            assert p + offsets.MonthEnd(12) == exp
            assert offsets.MonthEnd(12) + p == exp

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(period.IncompatibleFrequency):
                        o + p

        # freq is Tick
        for freq in ['D', '2D', '3D']:
            p = Period('2011-04-01', freq=freq)

            exp = Period('2011-04-06', freq=freq)
            assert p + offsets.Day(5) == exp
            assert offsets.Day(5) + p == exp

            exp = Period('2011-04-02', freq=freq)
            assert p + offsets.Hour(24) == exp
            assert offsets.Hour(24) + p == exp

            exp = Period('2011-04-03', freq=freq)
            assert p + np.timedelta64(2, 'D') == exp
            with pytest.raises(TypeError):
                np.timedelta64(2, 'D') + p

            exp = Period('2011-04-02', freq=freq)
            assert p + np.timedelta64(3600 * 24, 's') == exp
            with pytest.raises(TypeError):
                np.timedelta64(3600 * 24, 's') + p

            exp = Period('2011-03-30', freq=freq)
            assert p + timedelta(-2) == exp
            assert timedelta(-2) + p == exp

            exp = Period('2011-04-03', freq=freq)
            assert p + timedelta(hours=48) == exp
            assert timedelta(hours=48) + p == exp

            # Sub-daily remainders are not representable at daily freq.
            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(4, 'h'),
                      timedelta(hours=23)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(period.IncompatibleFrequency):
                        o + p

        for freq in ['H', '2H', '3H']:
            p = Period('2011-04-01 09:00', freq=freq)

            exp = Period('2011-04-03 09:00', freq=freq)
            assert p + offsets.Day(2) == exp
            assert offsets.Day(2) + p == exp

            exp = Period('2011-04-01 12:00', freq=freq)
            assert p + offsets.Hour(3) == exp
            assert offsets.Hour(3) + p == exp

            exp = Period('2011-04-01 12:00', freq=freq)
            assert p + np.timedelta64(3, 'h') == exp
            with pytest.raises(TypeError):
                np.timedelta64(3, 'h') + p

            exp = Period('2011-04-01 10:00', freq=freq)
            assert p + np.timedelta64(3600, 's') == exp
            with pytest.raises(TypeError):
                np.timedelta64(3600, 's') + p

            exp = Period('2011-04-01 11:00', freq=freq)
            assert p + timedelta(minutes=120) == exp
            assert timedelta(minutes=120) + p == exp

            exp = Period('2011-04-05 12:00', freq=freq)
            assert p + timedelta(days=4, minutes=180) == exp
            assert timedelta(days=4, minutes=180) + p == exp

            # Sub-hourly remainders are not representable at hourly freq.
            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(3200, 's'),
                      timedelta(hours=23, minutes=30)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(period.IncompatibleFrequency):
                        o + p

    def test_add_offset_nat(self):
        # NaT absorbs every offset addition instead of raising.
        # freq is DateOffset
        for freq in ['A', '2A', '3A']:
            p = Period('NaT', freq=freq)
            for o in [offsets.YearEnd(2)]:
                assert p + o is NaT
                assert o + p is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                assert p + o is NaT
                if isinstance(o, np.timedelta64):
                    # reversed numpy operand still raises TypeError.
                    with pytest.raises(TypeError):
                        o + p
                else:
                    assert o + p is NaT

        for freq in ['M', '2M', '3M']:
            p = Period('NaT', freq=freq)
            for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
                assert p + o is NaT
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    assert o + p is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                assert p + o is NaT
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    assert o + p is NaT

        # freq is Tick
        for freq in ['D', '2D', '3D']:
            p = Period('NaT', freq=freq)
            for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
                      np.timedelta64(3600 * 24, 's'), timedelta(-2),
                      timedelta(hours=48)]:
                assert p + o is NaT
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    assert o + p is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(4, 'h'),
                      timedelta(hours=23)]:
                assert p + o is NaT
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    assert o + p is NaT

        for freq in ['H', '2H', '3H']:
            p = Period('NaT', freq=freq)
            for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
                      np.timedelta64(3600, 's'), timedelta(minutes=120),
                      timedelta(days=4, minutes=180)]:
                assert p + o is NaT
                # NOTE(review): unlike the sibling loops, the reversed
                # numpy-timedelta case is skipped rather than asserted
                # to raise here — confirm intentional.
                if not isinstance(o, np.timedelta64):
                    assert o + p is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(3200, 's'),
                      timedelta(hours=23, minutes=30)]:
                assert p + o is NaT
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    assert o + p is NaT

    def test_sub_offset(self):
        # freq is DateOffset
        for freq in ['A', '2A', '3A']:
            p = Period('2011', freq=freq)
            assert p - offsets.YearEnd(2) == Period('2009', freq=freq)

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p - o

        for freq in ['M', '2M', '3M']:
            p = Period('2011-03', freq=freq)
            assert p - offsets.MonthEnd(2) == Period('2011-01', freq=freq)
            assert p - offsets.MonthEnd(12) == Period('2010-03', freq=freq)

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p - o

        # freq is Tick
        for freq in ['D', '2D', '3D']:
            p = Period('2011-04-01', freq=freq)
            assert p - offsets.Day(5) == Period('2011-03-27', freq=freq)
            assert p - offsets.Hour(24) == Period('2011-03-31', freq=freq)
            assert p - np.timedelta64(2, 'D') == Period(
                '2011-03-30', freq=freq)
            assert p - np.timedelta64(3600 * 24, 's') == Period(
                '2011-03-31', freq=freq)
            assert p - timedelta(-2) == Period('2011-04-03', freq=freq)
            assert p - timedelta(hours=48) == Period('2011-03-30', freq=freq)

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(4, 'h'),
                      timedelta(hours=23)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p - o

        for freq in ['H', '2H', '3H']:
            p = Period('2011-04-01 09:00', freq=freq)
            assert p - offsets.Day(2) == Period('2011-03-30 09:00', freq=freq)
            assert p - offsets.Hour(3) == Period('2011-04-01 06:00', freq=freq)
            assert p - np.timedelta64(3, 'h') == Period(
                '2011-04-01 06:00', freq=freq)
            assert p - np.timedelta64(3600, 's') == Period(
                '2011-04-01 08:00', freq=freq)
            assert p - timedelta(minutes=120) == Period(
                '2011-04-01 07:00', freq=freq)
            assert p - timedelta(days=4, minutes=180) == Period(
                '2011-03-28 06:00', freq=freq)

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(3200, 's'),
                      timedelta(hours=23, minutes=30)]:
                with pytest.raises(period.IncompatibleFrequency):
                    p - o

    def test_sub_offset_nat(self):
        # NaT absorbs every offset subtraction instead of raising.
        # freq is DateOffset
        for freq in ['A', '2A', '3A']:
            p = Period('NaT', freq=freq)
            for o in [offsets.YearEnd(2)]:
                assert p - o is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                assert p - o is NaT

        for freq in ['M', '2M', '3M']:
            p = Period('NaT', freq=freq)
            for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
                assert p - o is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(365, 'D'),
                      timedelta(365)]:
                assert p - o is NaT

        # freq is Tick
        for freq in ['D', '2D', '3D']:
            p = Period('NaT', freq=freq)
            for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
                      np.timedelta64(3600 * 24, 's'), timedelta(-2),
                      timedelta(hours=48)]:
                assert p - o is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(4, 'h'),
                      timedelta(hours=23)]:
                assert p - o is NaT

        for freq in ['H', '2H', '3H']:
            p = Period('NaT', freq=freq)
            for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
                      np.timedelta64(3600, 's'), timedelta(minutes=120),
                      timedelta(days=4, minutes=180)]:
                assert p - o is NaT

            for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
                      offsets.Minute(), np.timedelta64(3200, 's'),
                      timedelta(hours=23, minutes=30)]:
                assert p - o is NaT

    @pytest.mark.parametrize('freq', ['M', '2M', '3M'])
    def test_nat_ops(self, freq):
        # Integer and Period arithmetic on NaT always yields NaT.
        p = Period('NaT', freq=freq)
        assert p + 1 is NaT
        assert 1 + p is NaT
        assert p - 1 is NaT
        assert p - Period('2011-01', freq=freq) is NaT
        assert Period('2011-01', freq=freq) - p is NaT

    def test_period_ops_offset(self):
        p = Period('2011-04-01', freq='D')
        result = p + offsets.Day()
        exp = Period('2011-04-02', freq='D')
        assert result == exp

        result = p - offsets.Day(2)
        exp = Period('2011-03-30', freq='D')
        assert result == exp

        msg = r"Input cannot be converted to Period\(freq=D\)"
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            p + offsets.Hour(2)

        with pytest.raises(period.IncompatibleFrequency, match=msg):
            p - offsets.Hour(2)
def test_period_immutable():
    # see gh-17116: Period attributes must be read-only.
    per = Period('2014Q1')
    for attr, value in (('ordinal', 14), ('freq', 2 * per.freq)):
        with pytest.raises(AttributeError):
            setattr(per, attr, value)
# TODO: This doesn't fail on all systems; track down which
@pytest.mark.xfail(reason="Parses as Jan 1, 0007 on some systems",
                   strict=False)
def test_small_year_parsing():
    """Years below 1000 should parse with the literal year, not be
    reinterpreted (platform strptime behavior varies)."""
    per1 = Period('0001-01-07', 'D')
    assert (per1.year, per1.day) == (1, 7)
| bsd-3-clause |
phobson/bokeh | examples/models/anscombe.py | 1 | 2996 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import gridplot
from bokeh.models.glyphs import Circle, Line
from bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Range1d
from bokeh.resources import INLINE
# Anscombe's quartet: four x/y datasets (I-IV) with nearly identical summary
# statistics but very different distributions. Each row is one observation;
# columns alternate x/y for the four datasets.
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
                       ['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
# One shared data source with a column per dataset coordinate, so each
# subplot can pick its pair by column name.
circles_source = ColumnDataSource(
    data = dict(
        xi = quartet['Ix'],
        yi = quartet['Iy'],
        xii = quartet['IIx'],
        yii = quartet['IIy'],
        xiii = quartet['IIIx'],
        yiii = quartet['IIIy'],
        xiv = quartet['IVx'],
        yiv = quartet['IVy'],
    )
)
# The same least-squares fit line (y = 3 + 0.5x) is shared by all four plots.
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
# Shared axis ranges so all four subplots are directly comparable.
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
    """Build one 400x400 Anscombe subplot.

    Draws the shared regression line plus the scatter points taken from
    ``circles_source`` columns *xname*/*yname*, using the shared ranges.
    """
    fig = Plot(x_range=xdr, y_range=ydr, plot_width=400, plot_height=400,
               border_fill_color='white', background_fill_color='#e9e0db')
    fig.title.text = title
    x_axis = LinearAxis(axis_line_color=None)
    fig.add_layout(x_axis, 'below')
    y_axis = LinearAxis(axis_line_color=None)
    fig.add_layout(y_axis, 'left')
    fig.add_layout(Grid(dimension=0, ticker=x_axis.ticker))
    fig.add_layout(Grid(dimension=1, ticker=y_axis.ticker))
    fig.add_glyph(lines_source,
                  Line(x='x', y='y', line_color="#666699", line_width=2))
    fig.add_glyph(circles_source,
                  Circle(x=xname, y=yname, size=12,
                         fill_color="#cc6633", line_color="#cc6633",
                         fill_alpha=0.5))
    return fig
# Build the four subplots and arrange them in a 2x2 grid without a toolbar.
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = gridplot([[I, II], [III, IV]], toolbar_location=None)
doc = Document()
doc.add_root(grid)
if __name__ == "__main__":
    # Render the document to a standalone HTML file and open it in a browser.
    filename = "anscombe.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
    print("Wrote %s" % filename)
    view(filename)
| bsd-3-clause |
tum-pbs/PhiFlow | phi/vis/_widgets/_widgets_gui.py | 1 | 13128 | import asyncio
import sys
import time
import traceback
import warnings
from contextlib import contextmanager
from math import log10
import ipywidgets as widgets
from IPython import get_ipython
from IPython.display import display
from ipywidgets import HBox, VBox
from ipywidgets.widgets.interaction import show_inline_matplotlib_plots
from matplotlib import pyplot as plt
from phi.math._shape import parse_dim_order
from phi.field import SampledField
from .._matplotlib._matplotlib_plots import plot
from .._vis_base import Gui, VisModel, display_name, GuiInterrupt, select_channel, value_range, is_log_control, Action
class WidgetsGui(Gui):
    """Jupyter-notebook GUI for PhiFlow visualization built on ipywidgets.

    Renders plots with matplotlib inside an ``Output`` widget and drives the
    app's step loop with Play/Pause/Step/Break buttons. While the loop runs,
    it temporarily replaces the IPython kernel's ``execute_request`` handler
    so the notebook stays responsive; cell executions queued during the loop
    are replayed after the loop finishes.
    """
    def __init__(self):
        Gui.__init__(self, asynchronous=False)
        self.shell = get_ipython()
        self.kernel = self.shell.kernel
        self.loop_parent = None  # set when loop is created
        # Status
        self.waiting_steps = 0  # None = play freely; int = remaining buffered steps
        self._interrupted = False  # set by the Break button
        self.fields = None
        self.field = None  # currently displayed field name (or 'Scalars')
        self._in_loop = None  # None = never looped, True = looping, False = finished
        self._last_plot_update_time = None
        # Components will be created during show()
        self.figure_display = None
        self.status = None
        self.field_select = None
        self.vector_select = None
        self.dim_sliders = {}
        self._graphs_enabled = False
    def setup(self, app: VisModel):
        """Bind to `app`: register step/loop callbacks and install a
        traceback filter that silences GuiInterrupt (raised on Break)."""
        Gui.setup(self, app)
        self.fields = self.app.field_names
        self.field = self.fields[0]
        app.pre_step.append(self.pre_step)
        app.post_step.append(self.post_step)
        app.progress_available.append(self.on_loop_start)
        app.progress_unavailable.append(self.on_loop_exit)
        def custom_traceback(exc_tuple=None, filename=None, tb_offset=None, exception_only=False, **kwargs):
            # Suppress tracebacks caused by the user pressing 'Break'.
            etype, value, tb = sys.exc_info()
            if etype == GuiInterrupt:
                return
            else:
                normal_traceback(exc_tuple, filename, tb_offset, exception_only, **kwargs)
        normal_traceback = self.shell.showtraceback
        self.shell.showtraceback = custom_traceback
    def show(self, caller_is_main: bool) -> bool:
        """Build all widgets, draw the initial figure and display the UI.

        NOTE(review): declared to return bool but falls off the end and
        returns None -- confirm against the Gui base-class contract.
        """
        self.figure_display = widgets.Output()
        # Icons: https://en.wikipedia.org/wiki/Media_control_symbols️ ⏮ ⏭ ⏺ ⏏
        self.play_button = widgets.Button(description="▶ Play")
        self.play_button.on_click(self.play)
        self.pause_button = widgets.Button(description="▌▌ Pause")
        self.pause_button.on_click(self.pause)
        self.step_button = widgets.Button(description="Step")
        self.step_button.on_click(self.step)
        self.interrupt_button = widgets.Button(description="⏏ Break")
        self.interrupt_button.on_click(self.interrupt)
        self.progression_buttons = [self.play_button, self.pause_button, self.step_button, self.interrupt_button]
        for button in self.progression_buttons:
            # Hidden until a loop starts (see on_loop_start).
            button.layout.visibility = 'hidden'
        self.status = widgets.Label(value=self._get_status())
        self.field_select = widgets.Dropdown(options=[*self.fields, 'Scalars'], value=self.fields[0], description='Display:')
        self.field_select.layout.visibility = 'visible' if len(self.app.field_names) > 1 else 'hidden'
        self.field_select.observe(lambda change: self.show_field(change['new']) if change['type'] == 'change' and change['name'] == 'value' else None)
        dim_sliders = []
        # One slider per selectable dimension configured via config['select'].
        for sel_dim in parse_dim_order(self.config.get('select', [])):
            slider = widgets.IntSlider(value=0, min=0, max=0, description=sel_dim, continuous_update=False)
            self.dim_sliders[sel_dim] = slider
            dim_sliders.append(slider)
            slider.observe(lambda e: None if IGNORE_EVENTS else self.update_widgets(), 'value')
        self.vector_select = widgets.ToggleButtons(
            options=['🡡', 'x', 'y', 'z', '⬤'],
            value='🡡',
            disabled=False,
            button_style='', # 'success', 'info', 'warning', 'danger' or ''
            tooltips=['Vectors as arrows', 'x component as heatmap', 'y component as heatmap', 'z component as heatmap', 'vector length as heatmap'],
            # icons=['check'] * 3
        )
        self.vector_select.style.button_width = '30px'
        self.vector_select.observe(lambda e: None if IGNORE_EVENTS else self.update_widgets(), 'value')
        control_components = []
        # Create one input widget per app control, matched to its value type.
        for control in self.app.controls:
            val_min, val_max = value_range(control)
            if control.control_type == int:
                control_component = widgets.IntSlider(control.initial, min=val_min, max=val_max, step=1, description=display_name(control.name))
            elif control.control_type == float:
                if is_log_control(control):
                    val_min, val_max = log10(val_min), log10(val_max)
                    control_component = widgets.FloatLogSlider(control.initial, base=10, min=val_min, max=val_max, description=display_name(control.name))
                else:
                    control_component = widgets.FloatSlider(control.initial, min=val_min, max=val_max, description=display_name(control.name))
            elif control.control_type == bool:
                control_component = widgets.Checkbox(control.initial, description=display_name(control.name))
            elif control.control_type == str:
                control_component = widgets.Text(value=control.initial, placeholder=control.initial, description=display_name(control.name))
            else:
                raise ValueError(f'Illegal control type: {control.control_type}')
            control_component.observe(lambda e, c=control: None if IGNORE_EVENTS else self.app.set_control_value(c.name, e['new']), 'value')
            control_components.append(control_component)
        action_buttons = []
        for action in self.app.actions:
            button = widgets.Button(description=display_name(action.name))
            button.on_click(lambda e, act=action: self.run_action(act))
            action_buttons.append(button)
        layout = VBox([
            HBox(self.progression_buttons + action_buttons),
            self.status,
            HBox(dim_sliders),
            VBox(control_components),
            HBox([self.field_select, self.vector_select], layout=widgets.Layout(height='35px')),
            self.figure_display
        ])
        # Show initial value and display UI
        self.update_widgets()
        display(layout)
    def _get_status(self):
        """Compose the status-bar text depending on the loop state."""
        if self._in_loop is None:  # no loop yet
            return self.app.message or ""
        elif self._in_loop is True:
            return f"Frame {self.app.steps}. {self.app.message or ''}"
        else:
            return f"Finished {self.app.steps} steps. {self.app.message or ''}"
    def _should_progress(self):
        """True when playing freely (None) or buffered steps remain."""
        return self.waiting_steps is None or self.waiting_steps > 0
    def show_field(self, field: str):
        """Switch the displayed field and refresh the UI."""
        self.field = field
        self.update_widgets()
    def update_widgets(self, plot=True, scroll_to_last=False):
        """Refresh the status text, widget visibility and optionally the plot.

        Args:
            plot: If True, redraw the figure.
            scroll_to_last: Move sliders of growing dims to the newest entry
                without triggering their change observers.
        """
        self.status.value = self._get_status()
        if not self._graphs_enabled and self.app.curve_names:
            # Curves became available; expose the 'Scalars' option.
            self._graphs_enabled = True
            self.field_select.layout.visibility = 'visible'
        if self.field == 'Scalars':
            self.vector_select.layout.visibility = 'hidden'
            for sel_dim, slider in self.dim_sliders.items():
                slider.layout.visibility = 'hidden'
        else:
            shape = self.app.get_field_shape(self.field)
            self.vector_select.layout.visibility = 'visible' if 'vector' in shape else 'hidden'
            for sel_dim, slider in self.dim_sliders.items():
                if sel_dim in shape:
                    slider.layout.visibility = 'visible'
                    slider.max = shape.get_size(sel_dim) - 1
                    if scroll_to_last and sel_dim in self.app.growing_dims:
                        # Suppress the observer so this does not re-plot.
                        with ignore_events():
                            slider.value = shape.get_size(sel_dim) - 1
                else:
                    slider.layout.visibility = 'hidden'
        # Figure
        if plot:
            if 'style' in self.config:
                with plt.style.context(self.config['style']):
                    self._plot(self.field, self.figure_display)
            else:
                self._plot(self.field, self.figure_display)
    def _plot(self, field_name: str, output: widgets.Output):
        """Draw the selected field (or the scalar curves) into `output`.

        Errors are written to the output widget instead of raising, so a
        failed plot never breaks the GUI loop.
        """
        dim_selection = {name: slider.value for name, slider in self.dim_sliders.items()}
        self.figure_display.clear_output()
        with output:
            try:
                if field_name == 'Scalars':
                    plt.figure(figsize=(12, 5))
                    for name in self.app.curve_names:
                        plt.plot(*self.app.get_curve(name), label=display_name(name))
                    plt.legend()
                    plt.tight_layout()
                    show_inline_matplotlib_plots()
                else:
                    value = self.app.get_field(field_name, dim_selection)
                    if isinstance(value, SampledField):
                        try:
                            value = select_channel(value, {'🡡': None, '⬤': 'abs'}.get(self.vector_select.value, self.vector_select.value))
                            plot(value, **self.config.get('plt_args', {}))
                            show_inline_matplotlib_plots()
                        except ValueError as err:
                            self.figure_display.append_stdout(f"{err}")
                    else:
                        # Non-field values are printed as text.
                        self.figure_display.append_stdout(f"{field_name} = {value}")
            except Exception:
                self.figure_display.append_stdout(traceback.format_exc())
        self._last_plot_update_time = time.time()
    def play(self, _):
        """Button callback: run steps without limit."""
        self.waiting_steps = None
    def auto_play(self):
        """Programmatic equivalent of pressing Play."""
        self.waiting_steps = None
    def pause(self, _):
        """Button callback: stop after the current step."""
        self.waiting_steps = 0
        self.update_widgets()
    def step(self, _):
        """Button callback: queue one additional step (no-op while playing)."""
        if self.waiting_steps is None:
            return
        else:
            self.waiting_steps += 1
    def run_action(self, action: Action):
        """Button callback: execute a user-defined app action and refresh."""
        self.app.run_action(action.name)
        self.update_widgets()
    def interrupt(self, _):
        """Button callback: request loop termination via GuiInterrupt."""
        self._interrupted = True
    def on_loop_start(self, _):
        """Called when the app loop begins: show the loop controls and take
        over the kernel's execute_request handler so cells executed during
        the loop are queued and replayed in on_loop_exit()."""
        self._in_loop = True
        self._interrupted = False
        for button in self.progression_buttons:
            button.layout.visibility = 'visible'
        self.loop_parent = (self.kernel._parent_ident, self.kernel._parent_header)
        self.kernel.shell_handlers["execute_request"] = lambda *e: self.events.append(e)
        self.events = []
        self.update_widgets()
    def pre_step(self, app):
        """Block before each step until stepping is allowed; honor Break."""
        self._process_kernel_events()
        while not self._should_progress():
            time.sleep(.1)
            self._process_kernel_events()
            if self._interrupted:
                raise GuiInterrupt()
        if self._interrupted:
            raise GuiInterrupt()
        if self.waiting_steps is not None:
            self.waiting_steps -= 1
        return  # runs loop iteration, then calls post_step
    def _process_kernel_events(self, n=10):
        """Let the kernel handle up to `n` pending messages (button clicks etc.)."""
        for _ in range(n):
            self.kernel.set_parent(*self.loop_parent)  # ensure stdout still happens in the same cell
            self.kernel.do_one_iteration()
            self.kernel.set_parent(*self.loop_parent)
    def post_step(self, _):
        """Refresh the UI after each step, throttling redraws while playing."""
        if self._should_progress():  # maybe skip update
            update_interval = self.config.get('update_interval')
            if update_interval is None:
                # Colab rendering is slower, so update less frequently there.
                update_interval = 2.5 if 'google.colab' in sys.modules else 1.2
            elapsed = time.time() - self._last_plot_update_time
            self.update_widgets(plot=elapsed >= update_interval, scroll_to_last=True)
        else:
            self.update_widgets(scroll_to_last=True)
    def on_loop_exit(self, _):
        """Restore the kernel handler and replay cell executions queued while
        the loop was running."""
        self._in_loop = False
        for button in self.progression_buttons:
            button.layout.visibility = 'hidden'
        self.update_widgets()
        self._process_kernel_events()
        self.kernel.shell_handlers["execute_request"] = self.kernel.execute_request
        loop = asyncio.get_event_loop()
        if loop.is_running():
            loop.call_soon(lambda: _replay_events(self.shell, self.events))
        else:
            warnings.warn("Automatic execution of scheduled cells only works with asyncio based ipython")
def _replay_events(shell, events):
    """Re-execute cell requests that were queued while the GUI held the kernel.

    Flushes stdout/stderr first, then replays each queued execute_request.
    Replay stops as soon as the kernel reports an abort.
    """
    for out_stream in (sys.stdout, sys.stderr):
        out_stream.flush()
    kernel = shell.kernel
    for stream, ident, parent in events:
        kernel.set_parent(ident, parent)
        if kernel.aborted:  # not available for Colab notebooks
            return  # kernel._send_abort_reply(stream, parent, ident)
        kernel.execute_request(stream, ident, parent)
# Stack of tokens; non-empty means widget-change callbacks should be ignored.
IGNORE_EVENTS = []
@contextmanager
def ignore_events():
    """Context manager that suppresses widget-change callbacks while active.

    Pushes a unique token onto the module-level IGNORE_EVENTS stack on entry
    and removes it on exit, even if the body raises.
    """
    token = object()
    IGNORE_EVENTS.append(token)
    try:
        yield
    finally:
        IGNORE_EVENTS.remove(token)
| mit |
pratapvardhan/scikit-learn | sklearn/datasets/lfw.py | 31 | 19544 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
    """Scale face pixel values back to the 0-1 range for plotting.

    Useful after a normalization step has shifted the intensity range
    away from [0, 1].

    Parameters
    ----------
    face : ndarray
        Image (or any numeric array) to rescale.

    Returns
    -------
    scaled : ndarray
        Copy of ``face`` linearly rescaled so its minimum maps to 0.0 and
        its maximum to 1.0. A constant image maps to all zeros instead of
        dividing by zero (which would produce NaNs).
    """
    scaled = face - face.min()
    max_val = scaled.max()
    if max_val > 0:  # guard against constant images (division by zero)
        scaled /= max_val
    return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Parameters
    ----------
    data_home : str, optional
        Cache folder for the datasets; defaults to the scikit-learn data
        home (see ``get_data_home``).
    funneled : bool, optional, default True
        Whether to use the funneled (aligned) variant of the dataset.
    download_if_missing : bool, optional, default True
        If False, raise an IOError instead of downloading missing files.

    Returns
    -------
    lfw_home : str
        Path of the LFW cache directory.
    data_folder_path : str
        Path of the folder containing the extracted JPEG files.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")
    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME
    if not exists(lfw_home):
        makedirs(lfw_home)
    # Fetch the small metadata files (pair index lists) first.
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)
    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warning("Downloading LFW data (~200MB): %s",
                               archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # Bug fix: report the archive that is actually missing,
                # not the last metadata file from the loop above.
                raise IOError("%s is missing" % archive_path)
        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        remove(archive_path)  # free disk space once extracted
    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Parameters
    ----------
    file_paths : list of str
        Paths of the jpeg files to decode.
    slice_ : tuple of slice or None
        2D (height, width) slice to extract from each 250x250 image;
        the full image is used when None.
    color : bool
        If False, average the RGB channels into a single gray channel.
    resize : float or None
        Optional rescaling ratio applied after slicing.

    Returns
    -------
    faces : ndarray of shape (n_faces, h, w) or (n_faces, h, w, 3)
        float32 images with pixel values scaled to [0.0, 1.0].
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")
    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)
    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)
        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # Bug fix: compare with `==` instead of identity (`is`), which is
        # unreliable for int objects and a SyntaxWarning on Python 3.8+.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)
        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)
        faces[i, ...] = face
    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.

    Returns the decoded face images, integer targets, and the array of
    unique person names that the targets index into.
    """
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # Folder names use underscores; display names use spaces.
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)
    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)
    faces = _load_imgs(file_paths, slice_, color, resize)
    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person : int, optional, default 0
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the
        shape of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change
        the shape of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)
    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.

    Returns the image pairs, the binary targets (1 = same person,
    0 = different persons) and the two target names.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # Bug fix: use the builtin `int` dtype -- the `np.int` alias (always
    # identical to builtin int) is deprecated and was removed in NumPy 1.24.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # Same person: name, index1, index2 (1-based in the file).
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # Different persons: name1, index1, name2, index2.
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # `name` is bytes on Python 3; decode it before joining.
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)
    pairs = _load_imgs(file_paths, slice_, color, resize)
    # Reshape the flat (2 * n_pairs, ...) stack into (n_pairs, 2, ...).
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape
    return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
            "be removed in 0.19. "
            # Bug fix: the implicit string concatenation was missing a space,
            # rendering the message as "...0.19.Use fetch_lfw_people...".
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people(download_if_missing=False)

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.

    In the official `README.txt`_ this task is described as the
    "Restricted" task. Because it is unclear how to implement the
    "Unrestricted" variant correctly, it is left unsupported for now.

      .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
        Each row corresponds to 2 ravel'd face images of original size 62 x 47
        pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
        will change the shape of the output.

    pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
        ``subset``.
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_``,
        ``resize`` or ``subset`` parameters will change the shape of the
        output.

    target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated to each pair of images. The two label values being
        different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)
    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])
    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
            "be removed in 0.19. "
            # Bug fix: the implicit string concatenation was missing a space,
            # rendering the message as "...0.19.Use fetch_lfw_pairs...".
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs(download_if_missing=False)

    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/computation/tests/test_eval.py | 1 | 70198 | #!/usr/bin/env python
# flake8: noqa
import warnings
import operator
from itertools import product
from distutils.version import LooseVersion
import nose
from nose.tools import assert_raises
from numpy.random import randn, rand, randint
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing.decorators import slow
import pandas as pd
from pandas.core import common as com
from pandas import DataFrame, Series, Panel, date_range
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.computation import pytables
from pandas.computation.engines import _engines, NumExprClobberingError
from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor
from pandas.computation.ops import (_binary_ops_dict,
_special_case_arith_ops_syms,
_arith_ops_syms, _bool_ops_syms,
_unary_math_ops, _binary_math_ops)
import pandas.computation.expr as expr
import pandas.util.testing as tm
import pandas.lib as lib
from pandas.util.testing import (assert_frame_equal, randbool,
assertRaisesRegexp, assert_numpy_array_equal,
assert_produces_warning, assert_series_equal)
from pandas.compat import PY3, u, reduce
# Boolean binary ops are not supported between a Series and a DataFrame.
_series_frame_incompatible = _bool_ops_syms
# Membership operators that only make sense for iterables, not scalars.
_scalar_skip = 'in', 'not in'
def engine_has_neg_frac(engine):
    """Return True if `engine` special-cases negative bases with fractional powers."""
    return _engines[engine].has_neg_frac
def _eval_single_bin(lhs, cmp1, rhs, engine):
    """Apply the binary operator named `cmp1` to lhs/rhs in pure Python.

    For engines that special-case negative bases raised to fractional
    powers (e.g. numexpr), the corresponding ValueError is translated to
    NaN so the expected value matches the engine's output.
    """
    c = _binary_ops_dict[cmp1]
    if engine_has_neg_frac(engine):
        try:
            return c(lhs, rhs)
        except ValueError as e:
            if str(e).startswith('negative number cannot be raised to a fractional power'):
                return np.nan
            raise
    return c(lhs, rhs)
def _series_and_2d_ndarray(lhs, rhs):
return ((isinstance(lhs, Series) and
isinstance(rhs, np.ndarray) and rhs.ndim > 1)
or (isinstance(rhs, Series) and
isinstance(lhs, np.ndarray) and lhs.ndim > 1))
def _series_and_frame(lhs, rhs):
return ((isinstance(lhs, Series) and isinstance(rhs, DataFrame))
or (isinstance(rhs, Series) and isinstance(lhs, DataFrame)))
def _bool_and_frame(lhs, rhs):
return isinstance(lhs, bool) and isinstance(rhs, pd.core.generic.NDFrame)
def _is_py3_complex_incompat(result, expected):
    """Return True when a complex expected value evaluated to NaN under Python 3."""
    return (PY3 and isinstance(expected, (complex, np.complexfloating)) and
            np.isnan(result))
# Arithmetic ops that need no special-casing (excludes the special-case syms).
_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms)
class TestEvalNumexprPandas(tm.TestCase):
    """Exhaustive pd.eval tests for engine='numexpr', parser='pandas'.

    Subclasses override ``engine``/``parser`` (and a handful of check_*
    hooks) so the same test matrix exercises every engine/parser combo.
    """

    @classmethod
    def setUpClass(cls):
        super(TestEvalNumexprPandas, cls).setUpClass()
        tm.skip_if_no_ne()
        import numexpr as ne
        cls.ne = ne
        cls.engine = 'numexpr'
        cls.parser = 'pandas'

    @classmethod
    def tearDownClass(cls):
        super(TestEvalNumexprPandas, cls).tearDownClass()
        del cls.engine, cls.parser
        if hasattr(cls, 'ne'):
            del cls.ne

    def setup_data(self):
        # Mixed NaN / dense frames and series plus scalars for both sides.
        nan_df1 = DataFrame(rand(10, 5))
        nan_df1[nan_df1 > 0.5] = np.nan
        nan_df2 = DataFrame(rand(10, 5))
        nan_df2[nan_df2 > 0.5] = np.nan
        self.pandas_lhses = (DataFrame(randn(10, 5)), Series(randn(5)),
                             Series([1, 2, np.nan, np.nan, 5]), nan_df1)
        self.pandas_rhses = (DataFrame(randn(10, 5)), Series(randn(5)),
                             Series([1, 2, np.nan, np.nan, 5]), nan_df2)
        self.scalar_lhses = randn(),
        self.scalar_rhses = randn(),
        self.lhses = self.pandas_lhses + self.scalar_lhses
        self.rhses = self.pandas_rhses + self.scalar_rhses

    def setup_ops(self):
        self.cmp_ops = expr._cmp_ops_syms
        self.cmp2_ops = self.cmp_ops[::-1]
        self.bin_ops = expr._bool_ops_syms
        self.special_case_ops = _special_case_arith_ops_syms
        self.arith_ops = _good_arith_ops
        self.unary_ops = '-', '~', 'not '

    def setUp(self):
        self.setup_ops()
        self.setup_data()
        # BUG FIX: materialize to a list.  On Python 3 ``filter`` returns a
        # one-shot iterator; check_single_invert_op and
        # check_compound_invert_op iterate ``self.current_engines`` once per
        # parameter combination, so with a bare filter object the
        # cross-engine checks would silently stop after the first
        # combination of each test.
        self.current_engines = [eng for eng in _engines if eng != self.engine]

    def tearDown(self):
        del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses
        del self.pandas_rhses, self.pandas_lhses, self.current_engines

    @slow
    def test_complex_cmp_ops(self):
        cmp_ops = ('!=', '==', '<=', '>=', '<', '>')
        cmp2_ops = ('>', '<')
        for lhs, cmp1, rhs, binop, cmp2 in product(self.lhses, cmp_ops,
                                                   self.rhses, self.bin_ops,
                                                   cmp2_ops):
            self.check_complex_cmp_op(lhs, cmp1, rhs, binop, cmp2)

    def test_simple_cmp_ops(self):
        bool_lhses = (DataFrame(randbool(size=(10, 5))),
                      Series(randbool((5,))), randbool())
        bool_rhses = (DataFrame(randbool(size=(10, 5))),
                      Series(randbool((5,))), randbool())
        for lhs, rhs, cmp_op in product(bool_lhses, bool_rhses, self.cmp_ops):
            self.check_simple_cmp_op(lhs, cmp_op, rhs)

    @slow
    def test_binary_arith_ops(self):
        for lhs, op, rhs in product(self.lhses, self.arith_ops, self.rhses):
            self.check_binary_arith_op(lhs, op, rhs)

    def test_modulus(self):
        for lhs, rhs in product(self.lhses, self.rhses):
            self.check_modulus(lhs, '%', rhs)

    def test_floor_division(self):
        for lhs, rhs in product(self.lhses, self.rhses):
            self.check_floor_division(lhs, '//', rhs)

    def test_pow(self):
        tm._skip_if_windows()
        # odd failure on win32 platform, so skip
        for lhs, rhs in product(self.lhses, self.rhses):
            self.check_pow(lhs, '**', rhs)

    @slow
    def test_single_invert_op(self):
        for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):
            self.check_single_invert_op(lhs, op, rhs)

    @slow
    def test_compound_invert_op(self):
        for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):
            self.check_compound_invert_op(lhs, op, rhs)

    @slow
    def test_chained_cmp_op(self):
        mids = self.lhses
        cmp_ops = '<', '>'
        for lhs, cmp1, mid, cmp2, rhs in product(self.lhses, cmp_ops,
                                                 mids, cmp_ops, self.rhses):
            self.check_chained_cmp_op(lhs, cmp1, mid, cmp2, rhs)

    def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
        """Check ``(lhs cmp1 rhs) binop (lhs cmp2 rhs)`` against plain Python."""
        skip_these = _scalar_skip
        ex = '(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)'.format(cmp1=cmp1,
                                                                binop=binop,
                                                                cmp2=cmp2)
        scalar_with_in_notin = (lib.isscalar(rhs) and (cmp1 in skip_these or
                                cmp2 in skip_these))
        if scalar_with_in_notin:
            # 'in'/'not in' with a scalar rhs must raise TypeError
            with tm.assertRaises(TypeError):
                pd.eval(ex, engine=self.engine, parser=self.parser)
            self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,
                              parser=self.parser, local_dict={'lhs': lhs,
                                                              'rhs': rhs})
        else:
            lhs_new = _eval_single_bin(lhs, cmp1, rhs, self.engine)
            rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine)
            if (isinstance(lhs_new, Series) and isinstance(rhs_new, DataFrame)
                    and binop in _series_frame_incompatible):
                pass
                # TODO: the code below should be added back when left and right
                # hand side bool ops are fixed.
                # try:
                # self.assertRaises(Exception, pd.eval, ex,
                # local_dict={'lhs': lhs, 'rhs': rhs},
                # engine=self.engine, parser=self.parser)
                # except AssertionError:
                # raise
            else:
                expected = _eval_single_bin(
                    lhs_new, binop, rhs_new, self.engine)
                result = pd.eval(ex, engine=self.engine, parser=self.parser)
                tm.assert_numpy_array_equal(result, expected)

    def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
        """``a < b < c`` must equal the explicit '&' of the two comparisons."""
        skip_these = _scalar_skip

        def check_operands(left, right, cmp_op):
            return _eval_single_bin(left, cmp_op, right, self.engine)

        lhs_new = check_operands(lhs, mid, cmp1)
        rhs_new = check_operands(mid, rhs, cmp2)
        if lhs_new is not None and rhs_new is not None:
            ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2)
            ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp1, cmp2)
            ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp1, cmp2)
            expected = _eval_single_bin(lhs_new, '&', rhs_new, self.engine)
            for ex in (ex1, ex2, ex3):
                result = pd.eval(ex, engine=self.engine,
                                 parser=self.parser)
                tm.assert_numpy_array_equal(result, expected)

    def check_simple_cmp_op(self, lhs, cmp1, rhs):
        ex = 'lhs {0} rhs'.format(cmp1)
        if cmp1 in ('in', 'not in') and not com.is_list_like(rhs):
            self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,
                              parser=self.parser, local_dict={'lhs': lhs,
                                                              'rhs': rhs})
        else:
            expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
            result = pd.eval(ex, engine=self.engine, parser=self.parser)
            tm.assert_numpy_array_equal(result, expected)

    def check_binary_arith_op(self, lhs, arith1, rhs):
        ex = 'lhs {0} rhs'.format(arith1)
        result = pd.eval(ex, engine=self.engine, parser=self.parser)
        expected = _eval_single_bin(lhs, arith1, rhs, self.engine)
        tm.assert_numpy_array_equal(result, expected)
        # repeat the operator to also exercise alignment behavior
        ex = 'lhs {0} rhs {0} rhs'.format(arith1)
        result = pd.eval(ex, engine=self.engine, parser=self.parser)
        nlhs = _eval_single_bin(lhs, arith1, rhs,
                                self.engine)
        self.check_alignment(result, nlhs, rhs, arith1)

    def check_alignment(self, result, nlhs, ghs, op):
        try:
            nlhs, ghs = nlhs.align(ghs)
        except (ValueError, TypeError, AttributeError):
            # ValueError: series frame or frame series align
            # TypeError, AttributeError: series or frame with scalar align
            pass
        else:
            expected = self.ne.evaluate('nlhs {0} ghs'.format(op))
            tm.assert_numpy_array_equal(result, expected)

    # modulus, pow, and floor division require special casing

    def check_modulus(self, lhs, arith1, rhs):
        ex = 'lhs {0} rhs'.format(arith1)
        result = pd.eval(ex, engine=self.engine, parser=self.parser)
        expected = lhs % rhs
        assert_allclose(result, expected)
        expected = self.ne.evaluate('expected {0} rhs'.format(arith1))
        assert_allclose(result, expected)

    def check_floor_division(self, lhs, arith1, rhs):
        ex = 'lhs {0} rhs'.format(arith1)
        if self.engine == 'python':
            res = pd.eval(ex, engine=self.engine, parser=self.parser)
            expected = lhs // rhs
            tm.assert_numpy_array_equal(res, expected)
        else:
            # numexpr does not support floor division
            self.assertRaises(TypeError, pd.eval, ex, local_dict={'lhs': lhs,
                                                                  'rhs': rhs},
                              engine=self.engine, parser=self.parser)

    def get_expected_pow_result(self, lhs, rhs):
        try:
            expected = _eval_single_bin(lhs, '**', rhs, self.engine)
        except ValueError as e:
            if str(e).startswith('negative number cannot be raised to a fractional power'):
                if self.engine == 'python':
                    raise nose.SkipTest(str(e))
                else:
                    expected = np.nan
            else:
                raise
        return expected

    def check_pow(self, lhs, arith1, rhs):
        ex = 'lhs {0} rhs'.format(arith1)
        expected = self.get_expected_pow_result(lhs, rhs)
        result = pd.eval(ex, engine=self.engine, parser=self.parser)
        if (lib.isscalar(lhs) and lib.isscalar(rhs) and
                _is_py3_complex_incompat(result, expected)):
            # complex expected vs NaN result is a known py3 incompatibility
            self.assertRaises(AssertionError, tm.assert_numpy_array_equal,
                              result, expected)
        else:
            assert_allclose(result, expected)
            ex = '(lhs {0} rhs) {0} rhs'.format(arith1)
            result = pd.eval(ex, engine=self.engine, parser=self.parser)
            expected = self.get_expected_pow_result(
                self.get_expected_pow_result(lhs, rhs), rhs)
            assert_allclose(result, expected)

    def check_single_invert_op(self, lhs, cmp1, rhs):
        # simple
        for el in (lhs, rhs):
            try:
                elb = el.astype(bool)
            except AttributeError:
                elb = np.array([bool(el)])
            expected = ~elb
            result = pd.eval('~elb', engine=self.engine, parser=self.parser)
            tm.assert_numpy_array_equal(expected, result)
            # every other engine must agree
            for engine in self.current_engines:
                tm.skip_if_no_ne(engine)
                tm.assert_numpy_array_equal(result, pd.eval('~elb', engine=engine,
                                                            parser=self.parser))

    def check_compound_invert_op(self, lhs, cmp1, rhs):
        skip_these = 'in', 'not in'
        ex = '~(lhs {0} rhs)'.format(cmp1)
        if lib.isscalar(rhs) and cmp1 in skip_these:
            self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,
                              parser=self.parser, local_dict={'lhs': lhs,
                                                              'rhs': rhs})
        else:
            # compound
            if lib.isscalar(lhs) and lib.isscalar(rhs):
                lhs, rhs = map(lambda x: np.array([x]), (lhs, rhs))
            expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
            if lib.isscalar(expected):
                expected = not expected
            else:
                expected = ~expected
            result = pd.eval(ex, engine=self.engine, parser=self.parser)
            tm.assert_numpy_array_equal(expected, result)
            # make sure the other engines work the same as this one
            for engine in self.current_engines:
                tm.skip_if_no_ne(engine)
                ev = pd.eval(ex, engine=self.engine, parser=self.parser)
                tm.assert_numpy_array_equal(ev, result)

    def ex(self, op, var_name='lhs'):
        """Build a unary expression string, e.g. ex('~') -> '~lhs'."""
        return '{0}{1}'.format(op, var_name)

    def test_frame_invert(self):
        expr = self.ex('~')
        # ~ ##
        # frame
        # float always raises
        lhs = DataFrame(randn(5, 2))
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        # int raises on numexpr
        lhs = DataFrame(randint(5, size=(5, 2)))
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = ~lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_frame_equal(expect, result)
        # bool always works
        lhs = DataFrame(rand(5, 2) > 0.5)
        expect = ~lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_frame_equal(expect, result)
        # object raises
        lhs = DataFrame({'b': ['a', 1, 2.0], 'c': rand(3) > 0.5})
        if self.engine == 'numexpr':
            with tm.assertRaises(ValueError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)

    def test_series_invert(self):
        # ~ ####
        expr = self.ex('~')
        # series
        # float raises
        lhs = Series(randn(5))
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        # int raises on numexpr
        lhs = Series(randint(5, size=5))
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = ~lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_series_equal(expect, result)
        # bool
        lhs = Series(rand(5) > 0.5)
        expect = ~lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_series_equal(expect, result)
        # object raises on both engines (differently)
        lhs = Series(['a', 1, 2.0])
        if self.engine == 'numexpr':
            with tm.assertRaises(ValueError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)

    def test_frame_negate(self):
        expr = self.ex('-')
        # float
        lhs = DataFrame(randn(5, 2))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_frame_equal(expect, result)
        # int
        lhs = DataFrame(randint(5, size=(5, 2)))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_frame_equal(expect, result)
        # bool doesn't work with numexpr but works elsewhere
        lhs = DataFrame(rand(5, 2) > 0.5)
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = -lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_frame_equal(expect, result)

    def test_series_negate(self):
        expr = self.ex('-')
        # float
        lhs = Series(randn(5))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_series_equal(expect, result)
        # int
        lhs = Series(randint(5, size=5))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_series_equal(expect, result)
        # bool doesn't work with numexpr but works elsewhere
        lhs = Series(rand(5) > 0.5)
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = -lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_series_equal(expect, result)

    def test_frame_pos(self):
        expr = self.ex('+')
        # float
        lhs = DataFrame(randn(5, 2))
        if self.engine == 'python':
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_frame_equal(expect, result)
        # int
        lhs = DataFrame(randint(5, size=(5, 2)))
        if self.engine == 'python':
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_frame_equal(expect, result)
        # bool doesn't work with numexpr but works elsewhere
        lhs = DataFrame(rand(5, 2) > 0.5)
        if self.engine == 'python':
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_frame_equal(expect, result)

    def test_series_pos(self):
        expr = self.ex('+')
        # float
        lhs = Series(randn(5))
        if self.engine == 'python':
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_series_equal(expect, result)
        # int
        lhs = Series(randint(5, size=5))
        if self.engine == 'python':
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_series_equal(expect, result)
        # bool doesn't work with numexpr but works elsewhere
        lhs = Series(rand(5) > 0.5)
        if self.engine == 'python':
            with tm.assertRaises(TypeError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_series_equal(expect, result)

    def test_scalar_unary(self):
        with tm.assertRaises(TypeError):
            pd.eval('~1.0', engine=self.engine, parser=self.parser)
        self.assertEqual(
            pd.eval('-1.0', parser=self.parser, engine=self.engine), -1.0)
        self.assertEqual(
            pd.eval('+1.0', parser=self.parser, engine=self.engine), +1.0)
        self.assertEqual(
            pd.eval('~1', parser=self.parser, engine=self.engine), ~1)
        self.assertEqual(
            pd.eval('-1', parser=self.parser, engine=self.engine), -1)
        self.assertEqual(
            pd.eval('+1', parser=self.parser, engine=self.engine), +1)
        self.assertEqual(
            pd.eval('~True', parser=self.parser, engine=self.engine), ~True)
        self.assertEqual(
            pd.eval('~False', parser=self.parser, engine=self.engine), ~False)
        self.assertEqual(
            pd.eval('-True', parser=self.parser, engine=self.engine), -True)
        self.assertEqual(
            pd.eval('-False', parser=self.parser, engine=self.engine), -False)
        self.assertEqual(
            pd.eval('+True', parser=self.parser, engine=self.engine), +True)
        self.assertEqual(
            pd.eval('+False', parser=self.parser, engine=self.engine), +False)

    def test_unary_in_array(self):
        # GH 11235
        assert_numpy_array_equal(
            pd.eval('[-True, True, ~True, +True,'
                    '-False, False, ~False, +False,'
                    '-37, 37, ~37, +37]'),
            np.array([-True, True, ~True, +True,
                      -False, False, ~False, +False,
                      -37, 37, ~37, +37]))

    def test_disallow_scalar_bool_ops(self):
        exprs = '1 or 2', '1 and 2'
        exprs += 'a and b', 'a or b'
        exprs += '1 or 2 and (3 + 2) > 3',
        exprs += '2 * x > 2 or 1 and 2',
        exprs += '2 * df > 3 and 1 or a',
        x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2))
        for ex in exprs:
            with tm.assertRaises(NotImplementedError):
                pd.eval(ex, engine=self.engine, parser=self.parser)

    def test_identical(self):
        # GH 10546
        x = 1
        result = pd.eval('x', engine=self.engine, parser=self.parser)
        self.assertEqual(result, 1)
        self.assertTrue(lib.isscalar(result))
        x = 1.5
        result = pd.eval('x', engine=self.engine, parser=self.parser)
        self.assertEqual(result, 1.5)
        self.assertTrue(lib.isscalar(result))
        x = False
        result = pd.eval('x', engine=self.engine, parser=self.parser)
        self.assertEqual(result, False)
        self.assertTrue(lib.isscalar(result))
        x = np.array([1])
        result = pd.eval('x', engine=self.engine, parser=self.parser)
        tm.assert_numpy_array_equal(result, np.array([1]))
        self.assertEqual(result.shape, (1, ))
        x = np.array([1.5])
        result = pd.eval('x', engine=self.engine, parser=self.parser)
        tm.assert_numpy_array_equal(result, np.array([1.5]))
        self.assertEqual(result.shape, (1, ))
        x = np.array([False])
        result = pd.eval('x', engine=self.engine, parser=self.parser)
        tm.assert_numpy_array_equal(result, np.array([False]))
        self.assertEqual(result.shape, (1, ))

    def test_line_continuation(self):
        # GH 11149
        exp = """1 + 2 * \
        5 - 1 + 2 """
        result = pd.eval(exp, engine=self.engine, parser=self.parser)
        self.assertEqual(result, 12)
class TestEvalNumexprPython(TestEvalNumexprPandas):
    """Same numexpr engine, but the 'python' parser: no 'in'/'not in',
    no 'and'/'or', and chained comparisons are rejected."""

    @classmethod
    def setUpClass(cls):
        super(TestEvalNumexprPython, cls).setUpClass()
        tm.skip_if_no_ne()
        import numexpr as ne
        cls.ne = ne
        cls.engine = 'numexpr'
        cls.parser = 'python'

    def setup_ops(self):
        membership_ops = ('in', 'not in')
        self.cmp_ops = [op for op in expr._cmp_ops_syms
                        if op not in membership_ops]
        self.cmp2_ops = self.cmp_ops[::-1]
        self.bin_ops = [op for op in expr._bool_ops_syms
                        if op not in ('and', 'or')]
        self.special_case_ops = _special_case_arith_ops_syms
        self.arith_ops = _good_arith_ops
        self.unary_ops = '+', '-', '~'

    def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
        # The python parser disallows chained comparisons outright.
        ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2)
        with tm.assertRaises(NotImplementedError):
            pd.eval(ex1, engine=self.engine, parser=self.parser)
class TestEvalPythonPython(TestEvalNumexprPython):
    """Pure-python engine with the python parser; modulus and alignment are
    checked against the builtin operators instead of numexpr."""

    @classmethod
    def setUpClass(cls):
        super(TestEvalPythonPython, cls).setUpClass()
        cls.engine = 'python'
        cls.parser = 'python'

    def check_modulus(self, lhs, arith1, rhs):
        expression = 'lhs {0} rhs'.format(arith1)
        result = pd.eval(expression, engine=self.engine, parser=self.parser)
        direct = lhs % rhs
        assert_allclose(result, direct)
        # fold the operator once more and compare against plain Python
        folded = _eval_single_bin(direct, arith1, rhs, self.engine)
        assert_allclose(result, folded)

    def check_alignment(self, result, nlhs, ghs, op):
        try:
            nlhs, ghs = nlhs.align(ghs)
        except (ValueError, TypeError, AttributeError):
            # ValueError: series frame or frame series align
            # TypeError, AttributeError: series or frame with scalar align
            pass
        else:
            # builtin eval here, not numexpr: this is the python engine
            expected = eval('nlhs {0} ghs'.format(op))
            tm.assert_numpy_array_equal(result, expected)
class TestEvalPythonPandas(TestEvalPythonPython):
    """Python engine with the pandas parser: chained comparisons are legal
    again, so delegate that check back to the pandas-parser base class."""

    @classmethod
    def setUpClass(cls):
        super(TestEvalPythonPandas, cls).setUpClass()
        cls.engine = 'python'
        cls.parser = 'pandas'

    def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
        # Bypass the NotImplementedError override inherited from the
        # python-parser class and run the full check instead.
        TestEvalNumexprPandas.check_chained_cmp_op(self, lhs, cmp1, mid,
                                                   cmp2, rhs)
# Data generator passed to makeCustomDataframe: a random scalar regardless
# of the (row, col) position arguments it receives.
f = lambda *args, **kwargs: np.random.randn()
# Every (engine, parser) combination the generator-style tests iterate over.
ENGINES_PARSERS = list(product(_engines, expr._parsers))
#-------------------------------------
# basic and complex alignment
def _is_datetime(x):
return issubclass(x.dtype.type, np.datetime64)
def should_warn(*args):
    """True when no index in *args* is monotonic and an odd number of them
    are datetime-typed.

    Mirrors the condition under which pd.eval's alignment emits a
    RuntimeWarning for unsortable mixed datetime/non-datetime indexes.
    """
    none_monotonic = not any(x.is_monotonic for x in args)
    odd_datetime_count = reduce(operator.xor, (_is_datetime(x) for x in args))
    return none_monotonic and odd_datetime_count
class TestAlignment(object):
    # Nose-style generator tests: each test_* yields (check_fn, engine,
    # parser) tuples so every engine/parser combination is exercised.
    # Index-type codes for makeCustomDataframe: 'i' int, 'u' uint,
    # 'dt' datetime, 's' string ('p' period is disabled).
    index_types = 'i', 'u', 'dt'
    lhs_index_types = index_types + ('s',)  # 'p'
    def check_align_nested_unary_op(self, engine, parser):
        # unary op inside a binary expression must still align correctly
        tm.skip_if_no_ne(engine)
        s = 'df * ~2'
        df = mkdf(5, 3, data_gen_f=f)
        res = pd.eval(s, engine=engine, parser=parser)
        assert_frame_equal(res, df * ~2)
    def test_align_nested_unary_op(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_align_nested_unary_op, engine, parser
    def check_basic_frame_alignment(self, engine, parser):
        tm.skip_if_no_ne(engine)
        args = product(self.lhs_index_types, self.index_types,
                       self.index_types)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            for lr_idx_type, rr_idx_type, c_idx_type in args:
                # frames of different lengths force row alignment
                df = mkdf(10, 10, data_gen_f=f, r_idx_type=lr_idx_type,
                          c_idx_type=c_idx_type)
                df2 = mkdf(20, 10, data_gen_f=f, r_idx_type=rr_idx_type,
                           c_idx_type=c_idx_type)
                # only warns if not monotonic and not sortable
                if should_warn(df.index, df2.index):
                    with tm.assert_produces_warning(RuntimeWarning):
                        res = pd.eval('df + df2', engine=engine, parser=parser)
                else:
                    res = pd.eval('df + df2', engine=engine, parser=parser)
                assert_frame_equal(res, df + df2)
    def test_basic_frame_alignment(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_basic_frame_alignment, engine, parser
    def check_frame_comparison(self, engine, parser):
        # comparisons against a scalar and an identically-indexed frame
        tm.skip_if_no_ne(engine)
        args = product(self.lhs_index_types, repeat=2)
        for r_idx_type, c_idx_type in args:
            df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,
                      c_idx_type=c_idx_type)
            res = pd.eval('df < 2', engine=engine, parser=parser)
            assert_frame_equal(res, df < 2)
            df3 = DataFrame(randn(*df.shape), index=df.index,
                            columns=df.columns)
            res = pd.eval('df < df3', engine=engine, parser=parser)
            assert_frame_equal(res, df < df3)
    def test_frame_comparison(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_frame_comparison, engine, parser
    def check_medium_complex_frame_alignment(self, engine, parser):
        # three-frame chained addition with mismatched row counts
        tm.skip_if_no_ne(engine)
        args = product(self.lhs_index_types, self.index_types,
                       self.index_types, self.index_types)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            for r1, c1, r2, c2 in args:
                df = mkdf(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)
                df2 = mkdf(4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
                df3 = mkdf(5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
                if should_warn(df.index, df2.index, df3.index):
                    with tm.assert_produces_warning(RuntimeWarning):
                        res = pd.eval('df + df2 + df3', engine=engine,
                                      parser=parser)
                else:
                    res = pd.eval('df + df2 + df3',
                                  engine=engine, parser=parser)
                assert_frame_equal(res, df + df2 + df3)
    @slow
    def test_medium_complex_frame_alignment(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_medium_complex_frame_alignment, engine, parser
    def check_basic_frame_series_alignment(self, engine, parser):
        tm.skip_if_no_ne(engine)
        def testit(r_idx_type, c_idx_type, index_name):
            df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,
                      c_idx_type=c_idx_type)
            index = getattr(df, index_name)
            s = Series(np.random.randn(5), index[:5])
            if should_warn(df.index, s.index):
                with tm.assert_produces_warning(RuntimeWarning):
                    res = pd.eval('df + s', engine=engine, parser=parser)
            else:
                res = pd.eval('df + s', engine=engine, parser=parser)
            # NOTE(review): for datetime indexes numexpr is expected to
            # match df.add(s); other index types use the plain operator
            if r_idx_type == 'dt' or c_idx_type == 'dt':
                expected = df.add(s) if engine == 'numexpr' else df + s
            else:
                expected = df + s
            assert_frame_equal(res, expected)
        args = product(self.lhs_index_types, self.index_types,
                       ('index', 'columns'))
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            for r_idx_type, c_idx_type, index_name in args:
                testit(r_idx_type, c_idx_type, index_name)
    def test_basic_frame_series_alignment(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_basic_frame_series_alignment, engine, parser
    def check_basic_series_frame_alignment(self, engine, parser):
        tm.skip_if_no_ne(engine)
        def testit(r_idx_type, c_idx_type, index_name):
            df = mkdf(10, 7, data_gen_f=f, r_idx_type=r_idx_type,
                      c_idx_type=c_idx_type)
            index = getattr(df, index_name)
            s = Series(np.random.randn(5), index[:5])
            if should_warn(s.index, df.index):
                with tm.assert_produces_warning(RuntimeWarning):
                    res = pd.eval('s + df', engine=engine, parser=parser)
            else:
                res = pd.eval('s + df', engine=engine, parser=parser)
            if r_idx_type == 'dt' or c_idx_type == 'dt':
                expected = df.add(s) if engine == 'numexpr' else s + df
            else:
                expected = s + df
            assert_frame_equal(res, expected)
        # only test dt with dt, otherwise weird joins result
        args = product(['i', 'u', 's'], ['i', 'u', 's'], ('index', 'columns'))
        with warnings.catch_warnings(record=True):
            for r_idx_type, c_idx_type, index_name in args:
                testit(r_idx_type, c_idx_type, index_name)
        # dt with dt
        args = product(['dt'], ['dt'], ('index', 'columns'))
        with warnings.catch_warnings(record=True):
            for r_idx_type, c_idx_type, index_name in args:
                testit(r_idx_type, c_idx_type, index_name)
    def test_basic_series_frame_alignment(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_basic_series_frame_alignment, engine, parser
    def check_series_frame_commutativity(self, engine, parser):
        # 's op df' and 'df op s' should agree for commutative ops (+, *)
        tm.skip_if_no_ne(engine)
        args = product(self.lhs_index_types, self.index_types, ('+', '*'),
                       ('index', 'columns'))
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            for r_idx_type, c_idx_type, op, index_name in args:
                df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,
                          c_idx_type=c_idx_type)
                index = getattr(df, index_name)
                s = Series(np.random.randn(5), index[:5])
                lhs = 's {0} df'.format(op)
                rhs = 'df {0} s'.format(op)
                if should_warn(df.index, s.index):
                    with tm.assert_produces_warning(RuntimeWarning):
                        a = pd.eval(lhs, engine=engine, parser=parser)
                    with tm.assert_produces_warning(RuntimeWarning):
                        b = pd.eval(rhs, engine=engine, parser=parser)
                else:
                    a = pd.eval(lhs, engine=engine, parser=parser)
                    b = pd.eval(rhs, engine=engine, parser=parser)
                # commutativity is only asserted for the numexpr engine on
                # non-datetime indexes
                if r_idx_type != 'dt' and c_idx_type != 'dt':
                    if engine == 'numexpr':
                        assert_frame_equal(a, b)
    def test_series_frame_commutativity(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_series_frame_commutativity, engine, parser
    def check_complex_series_frame_alignment(self, engine, parser):
        tm.skip_if_no_ne(engine)
        import random
        args = product(self.lhs_index_types, self.index_types,
                       self.index_types, self.index_types)
        n = 3
        m1 = 5
        m2 = 2 * m1
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            for r1, r2, c1, c2 in args:
                # pick a random frame/axis to borrow the series index from
                index_name = random.choice(['index', 'columns'])
                obj_name = random.choice(['df', 'df2'])
                df = mkdf(m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)
                df2 = mkdf(m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
                index = getattr(locals().get(obj_name), index_name)
                s = Series(np.random.randn(n), index[:n])
                if r2 == 'dt' or c2 == 'dt':
                    if engine == 'numexpr':
                        expected2 = df2.add(s)
                    else:
                        expected2 = df2 + s
                else:
                    expected2 = df2 + s
                if r1 == 'dt' or c1 == 'dt':
                    if engine == 'numexpr':
                        expected = expected2.add(df)
                    else:
                        expected = expected2 + df
                else:
                    expected = expected2 + df
                if should_warn(df2.index, s.index, df.index):
                    with tm.assert_produces_warning(RuntimeWarning):
                        res = pd.eval('df2 + s + df', engine=engine,
                                      parser=parser)
                else:
                    res = pd.eval('df2 + s + df', engine=engine, parser=parser)
                tm.assert_equal(res.shape, expected.shape)
                assert_frame_equal(res, expected)
    @slow
    def test_complex_series_frame_alignment(self):
        for engine, parser in ENGINES_PARSERS:
            yield self.check_complex_series_frame_alignment, engine, parser
    def check_performance_warning_for_poor_alignment(self, engine, parser):
        # numexpr should emit a PerformanceWarning when operand sizes differ
        # by more than an order of magnitude; the python engine never warns
        tm.skip_if_no_ne(engine)
        df = DataFrame(randn(1000, 10))
        s = Series(randn(10000))
        if engine == 'numexpr':
            seen = pd.core.common.PerformanceWarning
        else:
            seen = False
        with assert_produces_warning(seen):
            pd.eval('df + s', engine=engine, parser=parser)
        s = Series(randn(1000))
        with assert_produces_warning(False):
            pd.eval('df + s', engine=engine, parser=parser)
        df = DataFrame(randn(10, 10000))
        s = Series(randn(10000))
        with assert_produces_warning(False):
            pd.eval('df + s', engine=engine, parser=parser)
        df = DataFrame(randn(10, 10))
        s = Series(randn(10000))
        is_python_engine = engine == 'python'
        if not is_python_engine:
            wrn = pd.core.common.PerformanceWarning
        else:
            wrn = False
        with assert_produces_warning(wrn) as w:
            pd.eval('df + s', engine=engine, parser=parser)
            if not is_python_engine:
                # exactly one warning with the documented message format
                tm.assert_equal(len(w), 1)
                msg = str(w[0].message)
                expected = ("Alignment difference on axis {0} is larger"
                            " than an order of magnitude on term {1!r}, "
                            "by more than {2:.4g}; performance may suffer"
                            "".format(1, 'df', np.log10(s.size - df.shape[1])))
                tm.assert_equal(msg, expected)
    def test_performance_warning_for_poor_alignment(self):
        for engine, parser in ENGINES_PARSERS:
            yield (self.check_performance_warning_for_poor_alignment, engine,
                   parser)
#------------------------------------
# slightly more complex ops
class TestOperationsNumExprPandas(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestOperationsNumExprPandas, cls).setUpClass()
tm.skip_if_no_ne()
cls.engine = 'numexpr'
cls.parser = 'pandas'
cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
@classmethod
def tearDownClass(cls):
super(TestOperationsNumExprPandas, cls).tearDownClass()
del cls.engine, cls.parser
def eval(self, *args, **kwargs):
kwargs['engine'] = self.engine
kwargs['parser'] = self.parser
kwargs['level'] = kwargs.pop('level', 0) + 1
return pd.eval(*args, **kwargs)
def test_simple_arith_ops(self):
ops = self.arith_ops
for op in filter(lambda x: x != '//', ops):
ex = '1 {0} 1'.format(op)
ex2 = 'x {0} 1'.format(op)
ex3 = '1 {0} (x + 1)'.format(op)
if op in ('in', 'not in'):
self.assertRaises(TypeError, pd.eval, ex,
engine=self.engine, parser=self.parser)
else:
expec = _eval_single_bin(1, op, 1, self.engine)
x = self.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_equal(x, expec)
expec = _eval_single_bin(x, op, 1, self.engine)
y = self.eval(ex2, local_dict={'x': x}, engine=self.engine,
parser=self.parser)
tm.assert_equal(y, expec)
expec = _eval_single_bin(1, op, x + 1, self.engine)
y = self.eval(ex3, local_dict={'x': x},
engine=self.engine, parser=self.parser)
tm.assert_equal(y, expec)
def test_simple_bool_ops(self):
for op, lhs, rhs in product(expr._bool_ops_syms, (True, False),
(True, False)):
ex = '{0} {1} {2}'.format(lhs, op, rhs)
res = self.eval(ex)
exp = eval(ex)
self.assertEqual(res, exp)
def test_bool_ops_with_constants(self):
for op, lhs, rhs in product(expr._bool_ops_syms, ('True', 'False'),
('True', 'False')):
ex = '{0} {1} {2}'.format(lhs, op, rhs)
res = self.eval(ex)
exp = eval(ex)
self.assertEqual(res, exp)
def test_panel_fails(self):
x = Panel(randn(3, 4, 5))
y = Series(randn(10))
assert_raises(NotImplementedError, self.eval, 'x + y',
local_dict={'x': x, 'y': y})
def test_4d_ndarray_fails(self):
x = randn(3, 4, 5, 6)
y = Series(randn(10))
assert_raises(NotImplementedError, self.eval, 'x + y',
local_dict={'x': x, 'y': y})
def test_constant(self):
x = self.eval('1')
tm.assert_equal(x, 1)
def test_single_variable(self):
df = DataFrame(randn(10, 2))
df2 = self.eval('df', local_dict={'df': df})
assert_frame_equal(df, df2)
def test_truediv(self):
s = np.array([1])
ex = 's / 1'
d = {'s': s}
if PY3:
res = self.eval(ex, truediv=False)
tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval(ex, truediv=True)
tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('1 / 2', truediv=False)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=False)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
else:
res = self.eval(ex, truediv=False)
tm.assert_numpy_array_equal(res, np.array([1]))
res = self.eval(ex, truediv=True)
tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('1 / 2', truediv=False)
expec = 0
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=False)
expec = 0
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
def test_failing_subscript_with_name_error(self):
    """An undefined name inside a subscript expression raises NameError."""
    df = DataFrame(np.random.randn(5, 3))  # referenced by the expression
    with tm.assertRaises(NameError):
        self.eval('df[x > 2] > 2')
def test_lhs_expression_subscript(self):
    """Subscripting a parenthesised sub-expression works like plain pandas."""
    df = DataFrame(np.random.randn(5, 3))
    expected = (df + 1)[df > 2]
    result = self.eval('(df + 1)[df > 2]', local_dict={'df': df})
    assert_frame_equal(result, expected)
def test_attr_expression(self):
    """Attribute-style column access (df.a) works inside expressions."""
    df = DataFrame(np.random.randn(5, 3), columns=list('abc'))
    cases = [
        ('df.a < df.b', df.a < df.b),
        ('df.a + df.b + df.c', df.a + df.b + df.c),
        ('df.a + df.b + df.c[df.b < 0]', df.a + df.b + df.c[df.b < 0]),
    ]
    for expression, expected in cases:
        assert_series_equal(expected, self.eval(expression,
                                                local_dict={'df': df}))
def test_assignment_fails(self):
    """Assigning one whole frame to another is rejected with ValueError."""
    df = DataFrame(np.random.randn(5, 3), columns=list('abc'))
    df2 = DataFrame(np.random.randn(5, 3))
    with tm.assertRaises(ValueError):
        self.eval('df = df2', local_dict={'df': df, 'df2': df2})
def test_assignment_column(self):
    """DataFrame.eval assignment: invalid targets raise SyntaxError, valid
    single assignments mutate the frame in place, and an explicit ``target``
    can be supplied to pd.eval."""
    tm.skip_if_no_ne('numexpr')
    df = DataFrame(np.random.randn(5, 2), columns=list('ab'))
    orig_df = df.copy()

    # multiple assignees
    self.assertRaises(SyntaxError, df.eval, 'd c = a + b')

    # invalid assignees
    self.assertRaises(SyntaxError, df.eval, 'd,c = a + b')
    self.assertRaises(
        SyntaxError, df.eval, 'Timestamp("20131001") = a + b')

    # single assignment - existing variable
    expected = orig_df.copy()
    expected['a'] = expected['a'] + expected['b']
    df = orig_df.copy()
    df.eval('a = a + b', inplace=True)
    assert_frame_equal(df, expected)

    # single assignment - new variable
    expected = orig_df.copy()
    expected['c'] = expected['a'] + expected['b']
    df = orig_df.copy()
    df.eval('c = a + b', inplace=True)
    assert_frame_equal(df, expected)

    # with a local name overlap: the column name wins over the local ``a``
    def f():
        df = orig_df.copy()
        a = 1  # noqa
        df.eval('a = 1 + b', inplace=True)
        return df

    df = f()
    expected = orig_df.copy()
    expected['a'] = 1 + expected['b']
    assert_frame_equal(df, expected)

    df = orig_df.copy()

    def f():
        a = 1  # noqa
        old_a = df.a.copy()
        df.eval('a = a + b', inplace=True)
        result = old_a + df.b
        assert_series_equal(result, df.a, check_names=False)
        self.assertTrue(result.name is None)

    f()

    # multiple assignment (chained targets) is rejected
    df = orig_df.copy()
    df.eval('c = a + b', inplace=True)
    self.assertRaises(SyntaxError, df.eval, 'c = a = b')

    # explicit targets
    df = orig_df.copy()
    self.eval('c = df.a + df.b', local_dict={'df': df},
              target=df, inplace=True)
    expected = orig_df.copy()
    expected['c'] = expected['a'] + expected['b']
    assert_frame_equal(df, expected)
def test_column_in(self):
    """Column membership against a literal list works element-wise (GH 11235)."""
    df = DataFrame({'a': [11], 'b': [-32]})
    result = df.eval('a in [11, -32]')
    assert_series_equal(result, Series([True]))
def assignment_not_inplace(self):
    # GH 9297
    # NOTE(review): this method lacks the 'test_' prefix, so test collectors
    # never run it -- it is effectively disabled. Also note the comparison
    # below: ``df`` is not modified (inplace=False) yet ``expected`` gains a
    # 'c' column, so the assert looks like it should compare ``actual``
    # against ``expected`` instead -- confirm before enabling.
    tm.skip_if_no_ne('numexpr')
    df = DataFrame(np.random.randn(5, 2), columns=list('ab'))

    actual = df.eval('c = a + b', inplace=False)
    self.assertIsNotNone(actual)

    expected = df.copy()
    expected['c'] = expected['a'] + expected['b']
    assert_frame_equal(df, expected)

    # default for inplace will change
    with tm.assert_produces_warnings(FutureWarning):
        df.eval('c = a + b')

    # but don't warn without assignment
    with tm.assert_produces_warnings(None):
        df.eval('a + b')
def test_multi_line_expression(self):
    """Multi-line eval: each line is an assignment, later lines may use the
    columns created by earlier lines; inplace=True returns None (GH 11149)."""
    # GH 11149
    tm.skip_if_no_ne('numexpr')
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    expected = df.copy()

    expected['c'] = expected['a'] + expected['b']
    expected['d'] = expected['c'] + expected['b']
    ans = df.eval("""
    c = a + b
    d = c + b""", inplace=True)
    assert_frame_equal(expected, df)
    self.assertIsNone(ans)

    expected['a'] = expected['a'] - 1
    expected['e'] = expected['a'] + 2
    ans = df.eval("""
    a = a - 1
    e = a + 2""", inplace=True)
    assert_frame_equal(expected, df)
    self.assertIsNone(ans)

    # multi-line not valid if not all assignments
    with tm.assertRaises(ValueError):
        df.eval("""
        a = b + 2
        b - 2""", inplace=False)
def test_multi_line_expression_not_inplace(self):
    """Multi-line eval with inplace=False returns a new frame carrying the
    assigned columns; the result is rebound to ``df`` each time (GH 11149)."""
    # GH 11149
    tm.skip_if_no_ne('numexpr')
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    expected = df.copy()

    expected['c'] = expected['a'] + expected['b']
    expected['d'] = expected['c'] + expected['b']
    df = df.eval("""
    c = a + b
    d = c + b""", inplace=False)
    assert_frame_equal(expected, df)

    expected['a'] = expected['a'] - 1
    expected['e'] = expected['a'] + 2
    df = df.eval("""
    a = a - 1
    e = a + 2""", inplace=False)
    assert_frame_equal(expected, df)
def test_assignment_in_query(self):
    """query() rejects assignment syntax and leaves the frame untouched (GH 8664)."""
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    snapshot = df.copy()
    with tm.assertRaises(ValueError):
        df.query('a = 1')
    assert_frame_equal(df, snapshot)
def query_inplace(self):
    # GH 11149
    # NOTE(review): missing the 'test_' prefix, so this method is never
    # collected by the test runner -- effectively disabled.
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    expected = df.copy()
    expected = expected[expected['a'] == 2]
    df.query('a == 2', inplace=True)
    assert_frame_equal(expected, df)
def test_basic_period_index_boolean_expression(self):
    """Boolean comparison works on a frame with a PeriodIndex column axis."""
    df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')
    expected = df < 2
    via_eval = self.eval('df < 2', local_dict={'df': df})
    direct = df < 2
    assert_frame_equal(via_eval, expected)
    assert_frame_equal(direct, expected)
def test_basic_period_index_subscript_expression(self):
    """Subscripting works on a frame with a PeriodIndex column axis."""
    df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')
    expected = df[df < 2 + 3]
    result = self.eval('df[df < 2 + 3]', local_dict={'df': df})
    assert_frame_equal(result, expected)
def test_nested_period_index_subscript_expression(self):
    """Nested subscripts plus arithmetic work with a PeriodIndex axis."""
    df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')
    expected = df[df[df < 2] < 2] + df * 2
    result = self.eval('df[df[df < 2] < 2] + df * 2', local_dict={'df': df})
    assert_frame_equal(result, expected)
def test_date_boolean(self):
    """A datetime column compares against a bare date literal in the expression."""
    df = DataFrame(randn(5, 3))
    df['dates1'] = date_range('1/1/2012', periods=5)
    result = self.eval('df.dates1 < 20130101', local_dict={'df': df},
                       engine=self.engine, parser=self.parser)
    expected = df.dates1 < '20130101'
    assert_series_equal(result, expected, check_names=False)
def test_simple_in_ops(self):
    """'in' / 'not in' evaluate with the pandas parser and raise
    NotImplementedError with the python parser."""
    supported_cases = [
        ('1 in [1, 2]', True),
        ('2 in (1, 2)', True),
        ('3 in (1, 2)', False),
        ('3 not in (1, 2)', True),
        ('[3] not in (1, 2)', True),
        ('[3] in ([3], 2)', True),
        ('[[3]] in [[[3]], 2]', True),
        ('(3,) in [(3,), 2]', True),
        ('(3,) not in [(3,), 2]', False),
        ('[(3,)] in [[(3,)], 2]', True),
    ]
    if self.parser != 'python':
        for expression, expected in supported_cases:
            result = pd.eval(expression, engine=self.engine,
                             parser=self.parser)
            self.assertEqual(bool(result), expected)
    else:
        rejected = ['1 in [1, 2]',
                    '2 in (1, 2)',
                    '3 in (1, 2)',
                    '3 not in (1, 2)',
                    '[(3,)] in (1, 2, [(3,)])',
                    '[3] not in (1, 2, [[3]])']
        for expression in rejected:
            with tm.assertRaises(NotImplementedError):
                pd.eval(expression, engine=self.engine, parser=self.parser)
class TestOperationsNumExprPython(TestOperationsNumExprPandas):
    """Operation tests re-run with the 'python' parser: 'in'/'not in' and the
    word boolean operators ('and'/'or'/'not') are unsupported there."""

    @classmethod
    def setUpClass(cls):
        super(TestOperationsNumExprPython, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'python'
        tm.skip_if_no_ne(cls.engine)
        cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
        # BUG FIX: wrap in list() -- on Python 3 filter() returns a one-shot
        # iterator, which would be exhausted after the first test that
        # iterates cls.arith_ops; on Python 2 this is a no-op.
        cls.arith_ops = list(filter(lambda x: x not in ('in', 'not in'),
                                    cls.arith_ops))

    def test_check_many_exprs(self):
        """A long chain of a single repeated name still evaluates correctly."""
        a = 1  # noqa: resolved by pd.eval from the local frame
        expr = ' * '.join('a' * 33)
        expected = 1
        res = pd.eval(expr, engine=self.engine, parser=self.parser)
        tm.assert_equal(res, expected)

    def test_fails_and(self):
        """'and' between frames is rejected by the python parser."""
        df = DataFrame(np.random.randn(5, 3))
        self.assertRaises(NotImplementedError, pd.eval, 'df > 2 and df > 3',
                          local_dict={'df': df}, parser=self.parser,
                          engine=self.engine)

    def test_fails_or(self):
        """'or' between frames is rejected by the python parser."""
        df = DataFrame(np.random.randn(5, 3))
        self.assertRaises(NotImplementedError, pd.eval, 'df > 2 or df > 3',
                          local_dict={'df': df}, parser=self.parser,
                          engine=self.engine)

    def test_fails_not(self):
        """'not' on a frame is rejected by the python parser."""
        df = DataFrame(np.random.randn(5, 3))
        self.assertRaises(NotImplementedError, pd.eval, 'not df > 2',
                          local_dict={'df': df}, parser=self.parser,
                          engine=self.engine)

    def test_fails_ampersand(self):
        """'&' mixing subscripted comparisons is rejected."""
        df = DataFrame(np.random.randn(5, 3))  # resolved from local frame
        ex = '(df + 2)[df > 1] > 0 & (df > 0)'
        with tm.assertRaises(NotImplementedError):
            pd.eval(ex, parser=self.parser, engine=self.engine)

    def test_fails_pipe(self):
        """'|' mixing subscripted comparisons is rejected."""
        df = DataFrame(np.random.randn(5, 3))  # resolved from local frame
        ex = '(df + 2)[df > 1] > 0 | (df > 0)'
        with tm.assertRaises(NotImplementedError):
            pd.eval(ex, parser=self.parser, engine=self.engine)

    def test_bool_ops_with_constants(self):
        """'and'/'or' on constants raise; other boolean ops match eval()."""
        for op, lhs, rhs in product(expr._bool_ops_syms, ('True', 'False'),
                                    ('True', 'False')):
            ex = '{0} {1} {2}'.format(lhs, op, rhs)
            if op in ('and', 'or'):
                with tm.assertRaises(NotImplementedError):
                    self.eval(ex)
            else:
                res = self.eval(ex)
                exp = eval(ex)
                self.assertEqual(res, exp)

    def test_simple_bool_ops(self):
        """'and'/'or' on scalar locals raise; other boolean ops match eval()."""
        for op, lhs, rhs in product(expr._bool_ops_syms, (True, False),
                                    (True, False)):
            ex = 'lhs {0} rhs'.format(op)
            if op in ('and', 'or'):
                with tm.assertRaises(NotImplementedError):
                    pd.eval(ex, engine=self.engine, parser=self.parser)
            else:
                res = pd.eval(ex, engine=self.engine, parser=self.parser)
                exp = eval(ex)
                self.assertEqual(res, exp)
class TestOperationsPythonPython(TestOperationsNumExprPython):
    """Same 'python'-parser operation tests, run with the pure-python engine."""

    @classmethod
    def setUpClass(cls):
        super(TestOperationsPythonPython, cls).setUpClass()
        cls.engine = cls.parser = 'python'
        cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
        # BUG FIX: list() keeps a reusable sequence on Python 3, where
        # filter() returns a one-shot iterator.
        cls.arith_ops = list(filter(lambda x: x not in ('in', 'not in'),
                                    cls.arith_ops))
class TestOperationsPythonPandas(TestOperationsNumExprPandas):
    """Operation tests with the pandas parser and the pure-python engine.
    'in'/'not in' remain supported here, so arith_ops is not filtered."""

    @classmethod
    def setUpClass(cls):
        super(TestOperationsPythonPandas, cls).setUpClass()
        cls.engine = 'python'
        cls.parser = 'pandas'
        cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
class TestMathPythonPython(tm.TestCase):
    """Tests for the math functions (sin, arctan2, ...) usable inside eval
    expressions, run with engine='python' and parser='pandas'."""

    @classmethod
    def setUpClass(cls):
        super(TestMathPythonPython, cls).setUpClass()
        tm.skip_if_no_ne()
        cls.engine = 'python'
        cls.parser = 'pandas'
        cls.unary_fns = _unary_math_ops
        cls.binary_fns = _binary_math_ops

    @classmethod
    def tearDownClass(cls):
        del cls.engine, cls.parser

    def eval(self, *args, **kwargs):
        """pd.eval bound to this class's engine/parser. ``level`` is bumped
        so names resolve from the caller's frame, not this wrapper's."""
        kwargs['engine'] = self.engine
        kwargs['parser'] = self.parser
        kwargs['level'] = kwargs.pop('level', 0) + 1
        return pd.eval(*args, **kwargs)

    def test_unary_functions(self):
        """Each supported unary function matches its numpy counterpart."""
        df = DataFrame({'a': np.random.randn(10)})
        a = df.a  # resolved from the local frame by self.eval

        for fn in self.unary_fns:
            expr = "{0}(a)".format(fn)
            got = self.eval(expr)
            expect = getattr(np, fn)(a)
            tm.assert_series_equal(got, expect, check_names=False)

    def test_binary_functions(self):
        """Each supported binary function matches its numpy counterpart."""
        df = DataFrame({'a': np.random.randn(10),
                        'b': np.random.randn(10)})
        a = df.a  # resolved from the local frame by self.eval
        b = df.b

        for fn in self.binary_fns:
            expr = "{0}(a, b)".format(fn)
            got = self.eval(expr)
            expect = getattr(np, fn)(a, b)
            np.testing.assert_allclose(got, expect)

    def test_df_use_case(self):
        """Nested math calls work inside a DataFrame.eval assignment."""
        df = DataFrame({'a': np.random.randn(10),
                        'b': np.random.randn(10)})
        df.eval("e = arctan2(sin(a), b)",
                engine=self.engine,
                parser=self.parser, inplace=True)
        got = df.e
        expect = np.arctan2(np.sin(df.a), df.b)
        tm.assert_series_equal(got, expect, check_names=False)

    def test_df_arithmetic_subexpression(self):
        """Math functions accept arithmetic sub-expressions as arguments."""
        df = DataFrame({'a': np.random.randn(10),
                        'b': np.random.randn(10)})
        df.eval("e = sin(a + b)",
                engine=self.engine,
                parser=self.parser, inplace=True)
        got = df.e
        expect = np.sin(df.a + df.b)
        tm.assert_series_equal(got, expect, check_names=False)

    def check_result_type(self, dtype, expect_dtype):
        """Helper: sin() over a column of ``dtype`` yields ``expect_dtype``."""
        df = DataFrame({'a': np.random.randn(10).astype(dtype)})
        self.assertEqual(df.a.dtype, dtype)
        df.eval("b = sin(a)",
                engine=self.engine,
                parser=self.parser, inplace=True)
        got = df.b
        expect = np.sin(df.a)
        self.assertEqual(expect.dtype, got.dtype)
        self.assertEqual(expect_dtype, got.dtype)
        tm.assert_series_equal(got, expect, check_names=False)

    def test_result_types(self):
        # integer inputs are promoted to float64; float widths are preserved
        self.check_result_type(np.int32, np.float64)
        self.check_result_type(np.int64, np.float64)
        self.check_result_type(np.float32, np.float32)
        self.check_result_type(np.float64, np.float64)

    def test_result_types2(self):
        # xref https://github.com/pydata/pandas/issues/12293
        raise nose.SkipTest("unreliable tests on complex128")

        # Did not test complex64 because DataFrame is converting it to
        # complex128. Due to https://github.com/pydata/pandas/issues/10952
        self.check_result_type(np.complex128, np.complex128)

    def test_undefined_func(self):
        """An unknown function name inside eval raises ValueError."""
        df = DataFrame({'a': np.random.randn(10)})
        with tm.assertRaisesRegexp(ValueError,
                                   "\"mysin\" is not a supported function"):
            df.eval("mysin(a)",
                    engine=self.engine,
                    parser=self.parser)

    def test_keyword_arg(self):
        """Keyword arguments to math functions are rejected with TypeError."""
        df = DataFrame({'a': np.random.randn(10)})
        with tm.assertRaisesRegexp(TypeError,
                                   "Function \"sin\" does not support "
                                   "keyword arguments"):
            df.eval("sin(x=a)",
                    engine=self.engine,
                    parser=self.parser)
class TestMathPythonPandas(TestMathPythonPython):
    """Math-function tests with engine='python', parser='pandas'.

    NOTE(review): these are the same settings as the parent class, so this
    subclass currently duplicates rather than varies the configuration.
    """

    @classmethod
    def setUpClass(cls):
        super(TestMathPythonPandas, cls).setUpClass()
        cls.engine = 'python'
        cls.parser = 'pandas'
class TestMathNumExprPandas(TestMathPythonPython):
    """Math-function tests re-run with engine='numexpr', parser='pandas'."""

    @classmethod
    def setUpClass(cls):
        super(TestMathNumExprPandas, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'pandas'
class TestMathNumExprPython(TestMathPythonPython):
    """Math-function tests re-run with engine='numexpr', parser='python'."""

    @classmethod
    def setUpClass(cls):
        super(TestMathNumExprPython, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'python'
# Module-level array used by TestScope to exercise global-name resolution.
_var_s = randn(10)


class TestScope(object):
    """Nose generator tests: pd.eval resolves names from enclosing scopes
    without creating or mutating locals/globals."""

    def check_global_scope(self, e, engine, parser):
        tm.skip_if_no_ne(engine)
        tm.assert_numpy_array_equal(_var_s * 2, pd.eval(e, engine=engine,
                                                        parser=parser))

    def test_global_scope(self):
        e = '_var_s * 2'
        for engine, parser in product(_engines, expr._parsers):
            yield self.check_global_scope, e, engine, parser

    def check_no_new_locals(self, engine, parser):
        tm.skip_if_no_ne(engine)
        x = 1  # looked up by pd.eval through the supplied local dict
        lcls = locals().copy()
        pd.eval('x + 1', local_dict=lcls, engine=engine, parser=parser)
        lcls2 = locals().copy()
        lcls2.pop('lcls')
        # the snapshots must match: eval added nothing to the local scope
        tm.assert_equal(lcls, lcls2)

    def test_no_new_locals(self):
        for engine, parser in product(_engines, expr._parsers):
            yield self.check_no_new_locals, engine, parser

    def check_no_new_globals(self, engine, parser):
        tm.skip_if_no_ne(engine)
        x = 1  # looked up by pd.eval through frame inspection
        gbls = globals().copy()
        pd.eval('x + 1', engine=engine, parser=parser)
        gbls2 = globals().copy()
        tm.assert_equal(gbls, gbls2)

    def test_no_new_globals(self):
        for engine, parser in product(_engines, expr._parsers):
            yield self.check_no_new_globals, engine, parser
def test_invalid_engine():
    """An unrecognised engine name raises KeyError."""
    tm.skip_if_no_ne()
    with tm.assertRaisesRegexp(KeyError, 'Invalid engine \'asdf\' passed'):
        pd.eval('x + y', local_dict={'x': 1, 'y': 2}, engine='asdf')


def test_invalid_parser():
    """An unrecognised parser name raises KeyError."""
    tm.skip_if_no_ne()
    with tm.assertRaisesRegexp(KeyError, 'Invalid parser \'asdf\' passed'):
        pd.eval('x + y', local_dict={'x': 1, 'y': 2}, parser='asdf')
# Visitor class per parser flavour, used by the disallowed-node checks below.
_parsers = {'python': PythonExprVisitor, 'pytables': pytables.ExprVisitor,
            'pandas': PandasExprVisitor}


def check_disallowed_nodes(engine, parser):
    """Every handler named in ``unsupported_nodes`` raises NotImplementedError."""
    tm.skip_if_no_ne(engine)
    visitor_cls = _parsers[parser]
    instance = visitor_cls('x + 1', engine, parser)
    for handler_name in visitor_cls.unsupported_nodes:
        with tm.assertRaises(NotImplementedError):
            getattr(instance, handler_name)()


def test_disallowed_nodes():
    for engine, visitor in product(_parsers, repeat=2):
        yield check_disallowed_nodes, engine, visitor
def check_syntax_error_exprs(engine, parser):
    """An incomplete expression raises SyntaxError."""
    tm.skip_if_no_ne(engine)
    assert_raises(SyntaxError, pd.eval, 's +', engine=engine, parser=parser)


def test_syntax_error_exprs():
    for engine, parser in ENGINES_PARSERS:
        yield check_syntax_error_exprs, engine, parser


def check_name_error_exprs(engine, parser):
    """Undefined names in an expression raise NameError."""
    tm.skip_if_no_ne(engine)
    with tm.assertRaises(NameError):
        pd.eval('s + t', engine=engine, parser=parser)


def test_name_error_exprs():
    for engine, parser in ENGINES_PARSERS:
        yield check_name_error_exprs, engine, parser
def check_invalid_local_variable_reference(engine, parser):
    """'@' local-variable syntax is only valid in DataFrame.query/eval, never
    in top-level pd.eval; the error message differs by parser."""
    tm.skip_if_no_ne(engine)
    a, b = 1, 2  # referenced through the '@' expressions below
    exprs = 'a + @b', '@a + b', '@a + @b'

    # BUG FIX: the original loop ignored its loop variable and passed the
    # whole ``exprs`` tuple to pd.eval, so each individual expression was
    # never actually checked.
    for ex in exprs:
        if parser != 'pandas':
            with tm.assertRaisesRegexp(SyntaxError, "The '@' prefix is only"):
                pd.eval(ex, engine=engine, parser=parser)
        else:
            with tm.assertRaisesRegexp(SyntaxError, "The '@' prefix is not"):
                pd.eval(ex, engine=engine, parser=parser)


def test_invalid_local_variable_reference():
    for engine, parser in ENGINES_PARSERS:
        yield check_invalid_local_variable_reference, engine, parser
def check_numexpr_builtin_raises(engine, parser):
    """Names clashing with numexpr builtins are refused by the numexpr
    engine but evaluate normally with the python engine."""
    tm.skip_if_no_ne(engine)
    sin, dotted_line = 1, 2
    if engine != 'numexpr':
        result = pd.eval('sin + dotted_line', engine=engine, parser=parser)
        tm.assert_equal(result, sin + dotted_line)
    else:
        with tm.assertRaisesRegexp(NumExprClobberingError,
                                   'Variables in expression .+'):
            pd.eval('sin + dotted_line', engine=engine, parser=parser)


def test_numexpr_builtin_raises():
    for engine, parser in ENGINES_PARSERS:
        yield check_numexpr_builtin_raises, engine, parser


def check_bad_resolver_raises(engine, parser):
    """Resolvers that do not implement the mapping protocol raise TypeError."""
    tm.skip_if_no_ne(engine)
    bad_resolvers = 42, 3.0
    with tm.assertRaisesRegexp(TypeError, 'Resolver of type .+'):
        pd.eval('1 + 2', resolvers=bad_resolvers, engine=engine,
                parser=parser)


def test_bad_resolver_raises():
    for engine, parser in ENGINES_PARSERS:
        yield check_bad_resolver_raises, engine, parser
def check_more_than_one_expression_raises(engine, parser):
    """Semicolon-separated statements are rejected with SyntaxError."""
    tm.skip_if_no_ne(engine)
    with tm.assertRaisesRegexp(SyntaxError,
                               'only a single expression is allowed'):
        pd.eval('1 + 1; 2 + 2', engine=engine, parser=parser)


def test_more_than_one_expression_raises():
    for engine, parser in ENGINES_PARSERS:
        yield check_more_than_one_expression_raises, engine, parser


def check_bool_ops_fails_on_scalars(gen, lhs, cmp, rhs, engine, parser):
    """Scalar operands combined with 'and'/'or' (or '&' between scalar
    comparisons) raise: boolean ops require array operands."""
    tm.skip_if_no_ne(engine)
    mid = gen[type(lhs)]()  # noqa: resolved from the local frame by pd.eval
    templates = ('lhs {0} mid {1} rhs',
                 'lhs {0} mid and mid {1} rhs',
                 '(lhs {0} mid) & (mid {1} rhs)')
    for template in templates:
        with tm.assertRaises(NotImplementedError):
            pd.eval(template.format(cmp, cmp), engine=engine, parser=parser)


def test_bool_ops_fails_on_scalars():
    _bool_ops_syms = 'and', 'or'
    dtypes = int, float
    gen = {int: lambda: np.random.randint(10), float: np.random.randn}
    for engine, parser, dtype1, cmp, dtype2 in product(_engines, expr._parsers,
                                                       dtypes, _bool_ops_syms,
                                                       dtypes):
        yield (check_bool_ops_fails_on_scalars, gen, gen[dtype1](), cmp,
               gen[dtype2](), engine, parser)
def check_inf(engine, parser):
    """The literal name 'inf' evaluates to numpy infinity."""
    tm.skip_if_no_ne(engine)
    result = pd.eval('inf + 1', engine=engine, parser=parser)
    tm.assert_equal(result, np.inf)


def test_inf():
    for engine, parser in ENGINES_PARSERS:
        yield check_inf, engine, parser


def check_negate_lt_eq_le(engine, parser):
    """'~(...)' negation always works in query(); word 'not' only with the
    pandas parser."""
    tm.skip_if_no_ne(engine)
    df = pd.DataFrame([[0, 10], [1, 20]], columns=['cat', 'count'])
    expected = df[~(df.cat > 0)]

    tilde_result = df.query('~(cat > 0)', engine=engine, parser=parser)
    tm.assert_frame_equal(tilde_result, expected)

    if parser == 'python':
        with tm.assertRaises(NotImplementedError):
            df.query('not (cat > 0)', engine=engine, parser=parser)
    else:
        not_result = df.query('not (cat > 0)', engine=engine, parser=parser)
        tm.assert_frame_equal(not_result, expected)


def test_negate_lt_eq_le():
    for engine, parser in product(_engines, expr._parsers):
        yield check_negate_lt_eq_le, engine, parser
if __name__ == '__main__':
    # Run this module's tests under nose; drop into pdb on error or failure.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
liuwenf/moose | python/mooseutils/VectorPostprocessorReader.py | 8 | 7773 | import os
import glob
import pandas
import bisect
from MooseDataFrame import MooseDataFrame
import message
class VectorPostprocessorReader(object):
    """
    A Reader for MOOSE VectorPostprocessor data.

    Args:
        pattern[str]: A pattern of files (for use with glob) for loading.

    MOOSE outputs VectorPostprocessor data in separate files for each timestep, using the timestep as a prefix. For
    example: file_000.csv, file_001.csv, etc.

    Therefore, a pattern acceptable for use with the python glob package must be supplied. For the above files,
    "file_*.csv" should be supplied.

    This object manages the loading and unloading of data and should always be in a valid state, regardless of the
    existence of a file. It will also append new data and remove old/deleted data on subsequent calls to "update()".
    """

    #: Status flags for loading/reloading/removing csv files (see "_modified").
    NO_CHANGE = 0
    NEW_DATA = 1
    OLD_DATA = 2

    def __init__(self, pattern, run_start_time=None):
        # NOTE(review): ``run_start_time`` is accepted but never used -- it is
        # passed to MooseDataFrame as the literal None and the related lines
        # below are commented out. Confirm whether it should be wired through.
        self.filename = pattern
        # Companion "<base>_time.csv" file mapping timestep -> simulation time.
        self._timedata = MooseDataFrame(self.filename.replace('*', 'time'), run_start_time=None, index='timestep')
        self._modified_times = dict()  # filename -> last observed mtime
        #self._run_start_time = run_start_time
        self.data = pandas.Panel()     # time index -> per-timestep DataFrame
        self.update()
        self._minimum_modified = 0.0  #self._run_start_time if self._run_start_time else 0.0

    def __call__(self, keys, time=None, exact=False, **kwargs):
        """
        Operator() returns the latest time or the desired time.

        Args:
            keys[str|list]: The key(s) to return.
            time[float]: The time at which the data should be returned.
            exact[bool]: When the time supplied is not an exact match, if 'exact=False' is provided the nearest time
                         less than the provided time is returned, when false an empty DataFrame is returned.
        """
        # Return the latest time
        if time == None:
            return self.data.iloc[-1][keys]

        # Return the specified time
        elif time in self.data.keys().values:
            return self.data[time][keys]

        # Time not found and 'exact=True'
        elif exact:
            return pandas.DataFrame()

        # Time not found and 'exact=False': fall back to the nearest earlier time
        else:
            times = self.data.keys()
            n = len(times)
            idx = bisect.bisect_right(times, time) - 1
            if idx < 0:
                idx = 0
            # NOTE(review): idx can never exceed n after bisect_right - 1,
            # so this branch appears unreachable -- confirm.
            elif idx > n:
                idx = -1
            return self.data.iloc[idx][keys]

    def __getitem__(self, key):
        """
        Column based access to VectorPostprocessor data.

        Args:
            key[str]: A VectorPostprocessor name.

        Returns:
            pandas.DataFrame containing the data for all available times (column).
        """
        if self.data.empty:
            return pandas.DataFrame()
        else:
            return self.data.minor_xs(key)

    def __nonzero__(self):
        """
        Allows this object to be used in boolean cases.

        Example:
            data = VectorPostprocessorReader('files_*.csv')
            if not data:
                print 'No data found!'
        """
        # Python 2 truth-value hook (Python 3 would look for __bool__).
        return not self.data.empty

    def __contains__(self, variable):
        """
        Returns true if the variable exists in the data structure.
        """
        return variable in self.variables()

    def times(self):
        """
        Returns the list of available time indices contained in the data.
        """
        return list(self.data.keys().values)

    def clear(self):
        """
        Remove all data.
        """
        self.data = pandas.Panel()
        self._modified_times = dict()
        self._minimum_modified = 0.0  # self._run_start_time if self._run_start_time else 0.0

    def variables(self):
        """
        Return a list of postprocessor variable names listed in the reader.
        """
        # Panel axes: [times, rows, columns]; axis 2 holds the variable names.
        return self.data.axes[2]

    def update(self):
        """
        Update data by adding/removing files.
        """
        # Return code (1 = something changed)
        retcode = 0

        # Update the time data file
        self._timedata.update()

        # The current filenames, time index, and modified status
        filenames, indices, modified = self._filenames()

        # Clear the data if empty
        if not filenames:
            self.clear()
            return 1

        # Loop through the filenames
        for fname, index, mod in zip(filenames, indices, modified):
            if mod == VectorPostprocessorReader.NEW_DATA:
                # Best effort: partially written/empty CSVs are skipped with
                # a warning rather than aborting the whole update.
                try:
                    df = pandas.read_csv(fname)
                except:
                    message.mooseWarning('The file {} failed to load, it is likely empty.'.format(fname))
                    continue

                # Prepend a row-index column so callers can plot against it.
                df.insert(0, 'index (Peacock)', pandas.Series(df.index, index=df.index))
                if self.data.empty:
                    self.data = pandas.Panel({index:df})
                else:
                    self.data[index] = df
                retcode = 1

            elif (mod == VectorPostprocessorReader.OLD_DATA) and (index in self.data.keys()):
                self.data.pop(index)
                retcode = 1

        # Remove missing files
        for key in self.data.keys():
            if key not in indices:
                self.data.pop(key)
                retcode = 1

        return retcode

    def repr(self):
        """
        Return components for building script.

        Returns:
            (output, imports) The necessary script and include statements to re-create data load.
        """
        # NOTE(review): named ``repr``, not ``__repr__`` -- called explicitly
        # by script-generation code rather than by Python itself.
        imports = ['import mooseutils']
        output = ['\n# Read VectorPostprocessor Data']
        output += ['data = mooseutils.VectorPostprocessorReader({})'.format(repr(self.filename))]
        return output, imports

    def _filenames(self):
        """
        Returns the available filenames, time index, and modified status. (protected)
        """
        # The list of files from the supplied pattern
        filenames = sorted(glob.glob(self.filename))

        # Remove the "_time.csv" from the list, if it exists
        try:
            filenames.remove(self._timedata.filename)
        except:
            pass

        # Update the minimum modified time; files older than the first file's
        # mtime are treated as stale by _modified().
        if len(filenames) > 0:
            self._minimum_modified = os.path.getmtime(filenames[0])
        else:
            self._minimum_modified = 0

        # Determine the time index and modified status
        indices, modified = [], []
        for fname in filenames:
            indices.append(self._time(fname))
            modified.append(self._modified(fname))

        return filenames, indices, modified

    def _modified(self, filename):
        """
        Determine the modified status of a filename. (protected)
        """
        modified = os.path.getmtime(filename)
        if modified < self._minimum_modified:
            # Stale file: forget its recorded mtime and flag it for removal.
            self._modified_times.pop(filename, None)
            return VectorPostprocessorReader.OLD_DATA

        elif (filename not in self._modified_times) or (modified > self._modified_times[filename]):
            self._modified_times[filename] = os.path.getmtime(filename)
            return VectorPostprocessorReader.NEW_DATA

        return VectorPostprocessorReader.NO_CHANGE

    def _time(self, filename):
        """
        Determine the time index. (protected)
        """
        # Timestep is encoded between the last '_' and the '.csv' suffix.
        idx = filename.rfind('_') + 1
        tstep = int(filename[idx:-4])
        if not self._timedata:
            return tstep
        else:
            # Map timestep -> simulation time; fall back to the raw timestep
            # if the time file does not contain it.
            try:
                return self._timedata['time'].loc[tstep]
            except Exception:
                return tstep
| lgpl-2.1 |
ebachelet/pyLIMA | pyLIMA/microlsimulator.py | 1 | 13901 | import numpy as np
import astropy
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_sun, get_moon
from astropy.time import Time
import matplotlib.pyplot as plt
from pyLIMA import microlmodels
from pyLIMA import microltoolbox
from pyLIMA import telescopes
from pyLIMA import event
from pyLIMA import microlmagnification
RED_NOISE = 'Yes'
SOURCE_MAGNITUDE = [14, 22]
BLEND_LIMITS = [0, 1]
EXPOSURE_TIME = 50 #seconds
def moon_illumination(sun, moon):
    """Illuminated fraction of the Moon's disk (0 = new, 1 = full).

    :param astropy sun: the sun ephemeris (provides .separation and .distance)
    :param astropy moon: the moon ephemeris

    :return: the moon illuminated fraction(s)
    :rtype: array_like
    """
    elongation = sun.separation(moon).rad
    phase_angle = np.arctan2(sun.distance * np.sin(elongation),
                             moon.distance - sun.distance * np.cos(elongation))
    return (1 + np.cos(phase_angle)) / 2.0
def poisson_noise(flux):
    """Photon (Poisson) noise estimate: sigma = sqrt(flux).

    :param array_like flux: the observed flux

    :return: the 1-sigma Poisson uncertainty on each flux value
    :rtype: array_like
    """
    return flux ** 0.5
def noisy_observations(flux, error_flux):
    """Draw a Poisson realisation of the expected flux.

    :param array_like flux: the expected (noise-free) flux
    :param array_like error_flux: the error on the observed flux; kept for
        interface compatibility, not used by the Poisson draw itself

    :return: the simulated noisy flux
    :rtype: array_like
    """
    try:
        flux_observed = np.random.poisson(flux)
    except ValueError:
        # np.random.poisson rejects negative or otherwise invalid
        # expectation values; fall back to the noise-free flux instead of
        # aborting the whole simulation. (Was a bare except; narrowed so
        # real errors such as KeyboardInterrupt are no longer swallowed.)
        flux_observed = flux
    return flux_observed
def time_simulation(time_start, time_end, sampling, bad_weather_percentage):
    """Build the JD observation epochs night by night, randomly dropping
    whole nights lost to bad weather.

    :param float time_start: the start of observations in JD
    :param float time_end: the end of observations in JD
    :param float sampling: hours between consecutive exposures
    :param float bad_weather_percentage: fraction of nights rejected

    :return: the simulated observation epochs (JD)
    :rtype: array_like
    """
    number_of_days = int(time_end - time_start)
    step_in_days = sampling / 24.0
    # Slightly fewer exposures than the theoretical total -- more realistic.
    exposures_per_night = int(np.floor(1.0 / step_in_days))

    epochs = []
    night_start = time_start
    for _ in range(number_of_days):
        # One weather draw per night; the night is kept if it beats the
        # bad-weather threshold.
        if np.random.uniform(0, 1) > bad_weather_percentage:
            night_stop = night_start + 1
            epochs += np.linspace(night_start + step_in_days, night_stop,
                                  exposures_per_night).tolist()
        night_start += 1

    return np.array(epochs)
def red_noise(time):
""" Simulate red moise as a sum of 10 low amplitudes/period sinusoidals.
:param array_like time: the time in JD where you simulate red noise
:return: a numpy array which represents the red noise
:rtype: array_like
"""
red_noise_amplitude = np.random.random_sample(10) * 0.5 / 100
red_noise_period = np.random.random_sample(10)
red_noise_phase = np.random.random_sample(10) * 2 * np.pi
red_noise = 0
for j in range(10):
red_noise += np.sin(2 * np.pi * time / red_noise_period[j] + red_noise_phase[j]) * red_noise_amplitude[j]
return red_noise
def simulate_a_microlensing_event(name='Microlensing pyLIMA simulation', ra=270, dec=-30):
    """Create a bare pyLIMA event with the given name and sky coordinates.
    More details in the event module.

    :param str name: the name of the event. Default is 'Microlensing pyLIMA simulation'
    :param float ra: the right ascension in degrees. Default is 270.
    :param float dec: the declination in degrees. Default is -30.

    :return: a event object
    :rtype: object
    """
    simulated_event = event.Event()
    simulated_event.name = name
    simulated_event.ra = ra
    simulated_event.dec = dec

    return simulated_event
def simulate_a_telescope(name, event, time_start, time_end, sampling, location, filter, uniform_sampling=False,
                         altitude=0, longitude=0, latitude=0, spacecraft_name=None, bad_weather_percentage=0.0,
                         minimum_alt=20, moon_windows_avoidance=20, maximum_moon_illumination=100.0):
    """ Simulate a telescope. More details in the telescopes module. The observations simulation are made for the
    full time windows, then limitation are applied :

        - Sun has to be below horizon : Sun < -18
        - Moon has to be more than the moon_windows_avoidance distance from the target
        - Observations altitude of the target have to be bigger than minimum_alt

    :param str name: the name of the telescope.
    :param object event: the microlensing event you look at
    :param float time_start: the start of observations in JD
    :param float time_end: the end of observations in JD
    :param float sampling: the hour sampling.
    :param str location: the location of the telescope.
    :param str filter: the filter used for observations
    :param boolean uniform_sampling: set it to True if you want no bad weather, no moon avoidance etc....
    :param float altitude: the altitude in meters if the telescope
    :param float longitude: the longitude in degree of the telescope location
    :param float latitude: the latitude in degree of the telescope location
    :param str spacecraft_name: the name of your satellite according to JPL horizons
    :param float bad_weather_percentage: the percentage of bad nights
    :param float minimum_alt: the minimum altitude ini degrees that your telescope can go to.
    :param float moon_windows_avoidance: the minimum distance in degrees accepted between the target and the Moon
    :param float maximum_moon_illumination: the maximum Moon brightness you allow in percentage

    :return: a telescope object
    :rtype: object
    """
    # fake lightcurve
    # NOTE(review): '&' between booleans works here but 'and' is conventional.
    if (uniform_sampling == False) & (location != 'Space'):
        # Ground-based, realistic cadence: build nightly time stamps, then
        # cut every epoch failing the visibility constraints below.
        earth_location = EarthLocation(lon=longitude * astropy.units.deg,
                                       lat=latitude * astropy.units.deg,
                                       height=altitude * astropy.units.m)

        target = SkyCoord(event.ra, event.dec, unit='deg')

        minimum_sampling = min(4.0, sampling)
        ratio_sampling = np.round(sampling / minimum_sampling)  # NOTE(review): computed but never used

        time_of_observations = time_simulation(time_start, time_end, minimum_sampling,
                                               bad_weather_percentage)

        time_convertion = Time(time_of_observations, format='jd').isot

        telescope_altaz = target.transform_to(AltAz(obstime=time_convertion, location=earth_location))
        altazframe = AltAz(obstime=time_convertion, location=earth_location)
        Sun = get_sun(Time(time_of_observations, format='jd')).transform_to(altazframe)
        Moon = get_moon(Time(time_of_observations, format='jd')).transform_to(altazframe)
        Moon_illumination = moon_illumination(Sun, Moon)
        Moon_separation = target.separation(Moon)

        # Keep epochs where the target is high enough, the Sun is in
        # astronomical night (< -18 deg), and the Moon is both far enough
        # away and dim enough.
        observing_windows = np.where((telescope_altaz.alt > minimum_alt * astropy.units.deg)
                                     & (Sun.alt < -18 * astropy.units.deg)
                                     & (Moon_separation > moon_windows_avoidance * astropy.units.deg)
                                     & (Moon_illumination < maximum_moon_illumination)
                                     )[0]

        time_of_observations = time_of_observations[observing_windows]

    else:
        # Space telescope or idealised sampling: strictly uniform epochs.
        time_of_observations = np.arange(time_start, time_end, sampling / (24.0))

    # Placeholder flux/error values (42); presumably overwritten by the
    # simulation downstream -- confirm against callers.
    lightcurveflux = np.ones((len(time_of_observations), 3)) * 42
    lightcurveflux[:, 0] = time_of_observations

    telescope = telescopes.Telescope(name=name, camera_filter=filter, light_curve_flux=lightcurveflux,
                                     location=location, spacecraft_name=spacecraft_name)

    return telescope
def simulate_a_microlensing_model(event, model='PSPL', args=(), parallax=['None', 0.0], xallarap=['None'],
                                  orbital_motion=['None', 0.0], source_spots='None'):
    """Build a microlensing model attached to the given event.

    :param object event: the microlensing event observed (see the event module)
    :param str model: name of the microlensing model, default 'PSPL'
        (see the microlmodels module)
    :param tuple args: extra positional arguments forwarded to the factory
    :param array_like parallax: parallax effect specification; default is none
    :param array_like xallarap: xallarap effect specification; default is none
    :param array_like orbital_motion: orbital motion specification; default is none
    :param str source_spots: source-spot specification; default is none
    :return: a microlmodel object with its parameters defined
    :rtype: object
    """
    microlensing_model = microlmodels.create_model(model, event, args, parallax, xallarap,
                                                   orbital_motion, source_spots)
    microlensing_model.define_model_parameters()
    return microlensing_model
def simulate_microlensing_model_parameters(model):
    """Draw a random set of parameters for the given model.

    Each parameter is drawn uniformly inside the boundaries reported by the
    model, except 'to', which is drawn inside the time span common to all
    telescopes so the event peak falls within the observations.

    :param object model: the microlensing model (see microlmodels)
    :return: a list of simulated parameter values
    :rtype: list
    """
    drawn = []
    standard_keys = list(model.pyLIMA_standards_dictionnary.keys())
    for key in standard_keys[:len(model.parameters_boundaries)]:
        if key == 'to':
            # Latest start / earliest end over all telescopes.
            earliest = max([min(tel.lightcurve_flux[:, 0]) for tel in model.event.telescopes])
            latest = min([max(tel.lightcurve_flux[:, 0]) for tel in model.event.telescopes])
            drawn.append(np.random.uniform(earliest, latest))
        else:
            bounds = model.parameters_boundaries[model.pyLIMA_standards_dictionnary[key]]
            drawn.append(np.random.uniform(bounds[0], bounds[1]))
    if model.model_type == 'FSPL':
        # Keep the impact parameter small and comparable to the source size.
        if np.abs(drawn[1]) > 0.1:
            drawn[1] /= 100
        if np.abs(drawn[1] / drawn[3]) > 10:
            drawn[1] = np.abs(drawn[1]) * np.random.uniform(0, drawn[3])
    if model.model_type == 'DSPL':
        # Keep the time separation between the two sources reasonable.
        if np.abs(drawn[2]) > 100:
            drawn[2] = np.random.uniform(10, 15)
    return drawn
def simulate_fluxes_parameters(list_of_telescopes):
    """Draw random flux parameters (source flux, blending ratio) per telescope.

    For each telescope a source magnitude is drawn uniformly inside
    SOURCE_MAGNITUDE, converted to flux, and paired with a blending ratio
    drawn uniformly inside BLEND_LIMITS.

    :param list list_of_telescopes: a list of telescope objects
    :return: flat list [flux_source_0, blend_0, flux_source_1, blend_1, ...]
    :rtype: list
    """
    flux_parameters = []
    for _telescope in list_of_telescopes:
        source_magnitude = np.random.uniform(SOURCE_MAGNITUDE[0], SOURCE_MAGNITUDE[1])
        source_flux = microltoolbox.magnitude_to_flux(source_magnitude)
        blend_ratio = np.random.uniform(BLEND_LIMITS[0], BLEND_LIMITS[1])
        flux_parameters.extend([source_flux, blend_ratio])
    return flux_parameters
def simulate_lightcurve_flux(model, pyLIMA_parameters, red_noise_apply='Yes'):
    """Simulate noisy observed fluxes for every telescope of the model's event.

    The telescopes' light curves are updated in place (columns 1 and 2 of
    lightcurve_flux, plus lightcurve_magnitude).

    :param object model: the microlensing model (see microlmodels)
    :param object pyLIMA_parameters: parameters used to compute the model flux
    :param str red_noise_apply: 'Yes' to add correlated (red) noise
    """
    for tel in model.event.telescopes:
        model_flux = model.compute_the_microlensing_model(tel, pyLIMA_parameters)[0]
        if not np.min(model_flux > 0):
            # Some model fluxes are non-positive: tighten the VBB integration
            # tolerances, recompute, then restore the defaults.
            microlmagnification.VBB.Tol = 0.0005
            microlmagnification.VBB.RelTol = 0.0005
            model_flux = model.compute_the_microlensing_model(tel, pyLIMA_parameters)[0]
            microlmagnification.VBB.Tol = 0.001
            microlmagnification.VBB.RelTol = 0.001
        flux_error = poisson_noise(model_flux)
        observed_flux = noisy_observations(model_flux * EXPOSURE_TIME, flux_error)
        if red_noise_apply == 'Yes':
            red = red_noise(tel.lightcurve_flux[:, 0])
            redded_flux = (1 - np.log(10) / 2.5 * red) * observed_flux
        else:
            redded_flux = observed_flux
        error_on_redded_flux = poisson_noise(redded_flux)
        # Convert back from total counts to count rate.
        redded_flux = redded_flux / EXPOSURE_TIME
        error_on_redded_flux = error_on_redded_flux / EXPOSURE_TIME
        tel.lightcurve_flux[:, 1] = redded_flux
        tel.lightcurve_flux[:, 2] = error_on_redded_flux
        tel.lightcurve_magnitude = tel.lightcurve_in_magnitude()
| gpl-3.0 |
LabMagUBO/StoneX | concepts/energy_landscape/energy_landscape.py | 1 | 7356 | #!/opt/local/bin/ipython-3.4
# -*- coding: utf-8 -*-
import sys
import numpy as np
import numpy.ma as ma
import scipy.ndimage as nd
from scipy import optimize
from matplotlib import pyplot as pl
# Fonction
def new_plot():
    """Create a new figure with a single equal-aspect axes; return both."""
    figure = pl.figure()
    axes = figure.add_subplot(111, aspect='equal')
    return figure, axes
def draw(Z, axe):
    """Draw the 2D array `Z` as an image with labelled contours on `axe`.

    Bug fix: the original body ignored both arguments and instead drew the
    module-level `E` on the module-level `ax`, so the function only worked
    by accident on those globals.

    :param Z: 2D array of values to display.
    :param axe: matplotlib axes to draw on.
    """
    axe.imshow(Z, interpolation='nearest')
    contours = axe.contour(Z, 10, colors='black', linewidth=.5)
    axe.clabel(contours, inline=1, fontsize=10)
def export(name, fig):
    """Save the figure `fig` to the file `name`, then close the plot."""
    fig.savefig(name)
    pl.close()
# Definition of the energy functional.
# X is a vector: X[0] = alpha, X[1] = theta, both normalised to [0, 1).

X_shift = np.array([0, 0])

# Physical parameters: applied field, exchange coupling,
# antiferromagnet anisotropy, temperature.
H = -2
J = 1
Kaf = 2
T = 0.15

energy = lambda X: -H * np.cos(2*np.pi * (X[1] - X_shift[1])) +\
    np.sin(2*np.pi * (X[1] - X_shift[1]))**2 +\
    Kaf * np.sin(2*np.pi * (X[0] - X_shift[0]))**2 - J * np.cos(2*np.pi * (X[1] - X[0]))

# Grid discretisation [alpha, theta].
nb = np.array([2**8, 2**8])
theta = np.linspace(0, 1, nb[1])
alpha = np.linspace(0, 1, nb[0])

# Starting point [alpha, theta] and its grid indices.
X_ini = np.array([0, 0])
N_ini = np.floor(X_ini * nb) % nb
print("Position i, j initiale : {}".format(N_ini))

# Energy evaluated on the whole grid; rows index alpha, columns theta.
Theta, Alpha = np.meshgrid(theta, alpha)
E = energy([Alpha, Theta])
# Search for the equilibrium point.
def search_eq(N_ini, E):
    """Discrete steepest-descent search on the periodic energy grid `E`.

    Starting from grid indices `N_ini`, repeatedly move to the lowest-energy
    cell of the 3x3 neighbourhood (with periodic wrap-around). The starting
    cell itself is always masked out. The walk stops when the energy no
    longer decreases, or when an equal-energy two-cycle is detected.

    Bug fix: the index path was stored as floats (``np.zeros((1, 2))`` and
    ``np.floor`` results) and used directly as array indices, which modern
    NumPy rejects. The path is now kept as integers throughout.

    :param N_ini: initial grid indices [i, j].
    :param E: 2D array of energies.
    :return: integer array of visited grid indices; the last row is the
        equilibrium point.
    """
    found = False
    k = 0
    nb = np.shape(E)
    print("Shape", nb)
    print("Boucle while")
    N_eq = np.zeros((1, 2), dtype=int)
    N_eq[0] = np.asarray(N_ini, dtype=int)
    while not found:
        k = k + 1
        print("Boucle n", k)
        # Mask everything except the 3x3 neighbourhood of the current cell.
        E_ma = np.ma.array(E)
        E_ma.mask = True
        for i in np.arange(-1, 2) + N_eq[k - 1][0]:
            for j in np.arange(-1, 2) + N_eq[k - 1][1]:
                E_ma.mask[int(i) % nb[0], int(j) % nb[1]] = False
        # Never step back onto the starting cell.
        E_ma[int(N_eq[0][0]), int(N_eq[0][1])] = ma.masked
        print("indice du minimum", E_ma.argmin())
        flat_min = int(E_ma.argmin())
        N_eq = np.append(N_eq, [[flat_min // nb[1], flat_min % nb[1]]], axis=0)
        print("mouvement ({0},{1})".format(int(N_eq[k][0] - N_eq[k - 1][0]),
                                           int(N_eq[k][1] - N_eq[k - 1][1])))
        print("Ini", N_eq[k - 1], "eq", N_eq[k])
        E_ini = E[N_eq[k - 1][0], N_eq[k - 1][1]]
        E_eq = E[N_eq[k][0], N_eq[k][1]]
        print("Énergie", E_ini, E_eq)
        if E_eq < E_ini:
            # Energy decreased: keep walking.
            print("better")
        elif (E_eq == E_ini):
            print("egualité")
            # Equal energy: stop only if we oscillate between two cells.
            if k < 2:
                continue
            elif (N_eq[k] == N_eq[k - 2]).all():
                print("already here")
                print(E_ma)
                return N_eq
        else:
            # Energy increased: the previous cell was the local minimum.
            print("I stay")
            print(E_eq - E_ini)
            print(E_ma)
            return N_eq
        if k == 1000:
            print("pas de convergence ", k)
            break
    return N_eq
# Run the discrete descent from the initial grid point and convert the
# visited indices back to normalised [0, 1) coordinates.
N_eq = search_eq(N_ini, E)
#print("N_eq", N_eq)
X_eq = N_eq / nb
#print(X_eq)
#print("énergie")
#for i, val in enumerate(X_eq):
#    print(energy(val))
#pl.rcParams['image.interpolation'] = 'none'
#pl.rcParams['image.resample'] = False

# Plot the full energy landscape with the search path overlaid:
# red = start, green = intermediate steps, magenta = final equilibrium.
fig = pl.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.set_xlabel('X[1], theta')
ax.set_ylabel('X[0], alpha')
#ax.contourf(Theta, Alpha, E, cmap=pl.cm.gray, resample=False)
cax = ax.imshow(E,
                interpolation = 'nearest',
                #origin='upper',
                extent=(0,1,1,0)
                )
cbar = fig.colorbar(cax)
C = ax.contour(E, 30,
               colors='black',
               linewidth=.5,
               extent=(0,1,0,1)
               )
ax.clabel(C, inline=1, fontsize=10)
pl.grid()
ax.plot(X_ini[1] % 1, X_ini[0] % 1, 'ro', label="Ini.")
ax.plot(X_eq[1:-1, 1] % 1, X_eq[1:-1, 0] % 1, 'go', label="Recherche")
ax.plot(X_eq[-1, 1] % 1, X_eq[-1, 0] % 1, 'mo', label="Eq.")
ax.legend()
export('landscape.pdf', fig)

# Keep only the final equilibrium point; drop the intermediate steps.
X_eq = X_eq[-1]
print("équilibre final", X_eq)
## Flood the landscape: keep only the region thermally accessible within an
## energy window T above the equilibrium energy.
level = T + energy(X_eq)
mask = E <= level
E_ma = np.ma.array(E, mask=np.logical_not(mask))

fig = pl.figure()
ax = fig.add_subplot(111, aspect='equal')
cax = ax.imshow(E_ma, interpolation = 'nearest', origin='upper', extent=(0,1,1,0))
cbar = fig.colorbar(cax)
#cbar.ax.set_yticklabels(['< -1', '0', '> 1'])# vertically oriented colorbar
#ax.imshow(mask, cmap=pl.cm.gray, alpha=0.7, interpolation = 'nearest')
C = ax.contour(E_ma, 10, colors='black', linewidth=.5, extent=(0,1,0,1))
ax.clabel(C, inline=1, fontsize=10)
#ax.imshow(E_ma, interpolation = 'nearest')
#C = ax.contour(E_ma, 10, colors='black', linewidth=.5)
#ax.clabel(C, inline=1, fontsize=10)
ax.plot(X_eq[1] % 1, X_eq[0] % 1, 'mo', label="Eq.")
pl.savefig('landscape_flooded.pdf')
pl.close()
### Label the connected flooded regions (scipy.ndimage connected components).
z_lab, z_num = nd.measurements.label(mask)
print("Nombre de zones : {}".format(z_num))
print(z_lab)
fig = pl.figure()
ax = fig.add_subplot(111, aspect='equal')
cax = ax.imshow(z_lab, interpolation = 'nearest', origin='upper', extent=(0,1,1,0))
cbar = fig.colorbar(cax)
pl.savefig('zones.pdf')
pl.close()
#### Merge zones that touch across the periodic boundaries.
# The landscape is periodic in both alpha and theta, so a region split by an
# array edge is really one zone. Scan each boundary, collect the pairs of
# labels that face each other, then relabel the higher label of each pair
# with the lower one.
#
# Bug fix: the original used np.sort(pairs, axis=0), which sorts the two
# columns *independently* and can re-pair unrelated labels (e.g. pairs
# (1, 5), (2, 3) become (1, 3), (2, 5)), merging the wrong zones.
# sorted() keeps each (min, max) pair intact.

# Scan the theta boundary (first vs last column of every row).
zones_equiv = set()
for i in np.arange(nb[0]):
    left = z_lab[i, 0]
    right = z_lab[i, -1]
    # Both sides labelled (non-zero) and different: they must be merged.
    if left and right and (left != right):
        zones_equiv.add((min(left, right), max(left, right)))
# Apply the merges in increasing label order, pair by pair.
for low, high in sorted(zones_equiv):
    z_lab[z_lab == high] = low

# Scan the alpha boundary (first vs last row of every column).
zones_equiv = set()
for i in np.arange(nb[1]):
    left = z_lab[0, i]
    right = z_lab[-1, i]
    if left and right and (left != right):
        zones_equiv.add((min(left, right), max(left, right)))
for low, high in sorted(zones_equiv):
    z_lab[z_lab == high] = low
# Compress the remaining label values to consecutive integers 0..n-1.
labels = np.unique(z_lab)
z_lab = np.searchsorted(labels, z_lab)

fig = pl.figure()
ax = fig.add_subplot(111, aspect='equal')
cax = ax.imshow(z_lab, interpolation = 'nearest', origin='upper', extent=(0,1,1,0))
cbar = fig.colorbar(cax)
pl.savefig('zones_period.pdf')
pl.close()

## Mask everything outside the zone containing the equilibrium point.
N_eq = X_eq * nb
# NOTE(review): N_eq holds floats here; modern NumPy requires integer
# indices — this lookup likely needs int() casts. Verify.
lab = z_lab[N_eq[0], N_eq[1]]
E_ma = ma.array(E, mask=z_lab != lab)
fig = pl.figure()
ax = fig.add_subplot(111, aspect='equal')
cax = ax.imshow(E_ma, interpolation = 'nearest', origin='upper', extent=(0,1,1,0))
cbar = fig.colorbar(cax)
pl.savefig('zone_accessible.pdf')
pl.close()
#M = ma.sum((np.cos(theta)*E_ma.T).T) / ma.mean(E_ma)
#print(M)
| gpl-3.0 |
CodeMonkeyJan/hyperspy | hyperspy/_signals/signal1d.py | 1 | 56343 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
import scipy.interpolate
import scipy as sp
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
try:
from statsmodels.nonparametric.smoothers_lowess import lowess
statsmodels_installed = True
except:
statsmodels_installed = False
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval
from hyperspy.models.model1d import Model1D
from hyperspy.misc.utils import stack, signal_range_from_roi
from hyperspy.defaults_parser import preferences
from hyperspy.external.progressbar import progressbar
from hyperspy._signals.lazy import lazyerror
from hyperspy.signal_tools import (
Signal1DCalibration,
SmoothingSavitzkyGolay,
SmoothingLowess,
SmoothingTV,
ButterworthFilter)
from hyperspy.ui_registry import get_gui, DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea
from hyperspy import components1d
from hyperspy._signals.lazy import LazySignal
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
                      medfilt_radius=5, maxpeakn=30000, peakgroup=10,
                      subchannel=True,):
    """Find peaks along a 1D line.

    Function to locate the positive peaks in a noisy x-y data set.
    Detects peaks by looking for downward zero-crossings in the first
    derivative that exceed 'slope_thresh'.
    Returns an array containing position, height, and width of each peak.
    Sorted by position.
    'slope_thresh' and 'amp_thresh', control sensitivity: higher values will
    neglect smaller features.

    Parameters
    ----------
    y : array
        1D input array, e.g. a spectrum
    x : array (optional)
        1D array describing the calibration of y (must have same shape as y)
    slope_thresh : float (optional)
        1st derivative threshold to count the peak
        default is set to 0.5
        higher values will neglect smaller features.
    amp_thresh : float (optional)
        intensity threshold above which
        default is set to 10% of max(y)
        higher values will neglect smaller features.
    medfilt_radius : int (optional)
        median filter window to apply to smooth the data
        (see scipy.signal.medfilt)
        if 0, no filter will be applied.
        default is set to 5
    peakgroup : int (optional)
        number of points around the "top part" of the peak
        default is set to 10
    maxpeakn : int (optional)
        number of maximum detectable peaks
        default is set to 30000
    subchannel : bool (optional)
        default is set to True

    Returns
    -------
    P : structured array of shape (npeaks) and fields: position, width, height
        contains position, height, and width of each peak

    Examples
    --------
    >>> x = np.arange(0,50,0.01)
    >>> y = np.cos(x)
    >>> peaks = find_peaks_ohaver(y, x, 0, 0)

    Notes
    -----
    Original code from T. C. O'Haver, 1995.
    Version 2  Last revised Oct 27, 2006 Converted to Python by
    Michael Sarahan, Feb 2011.
    Revised to handle edges better.  MCS, Mar 2011
    """
    if x is None:
        x = np.arange(len(y), dtype=np.int64)
    if not amp_thresh:
        amp_thresh = 0.1 * y.max()
    peakgroup = np.round(peakgroup)
    if medfilt_radius:
        d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))
    else:
        d = np.gradient(y)
    n = np.round(peakgroup / 2 + 1)
    # np.float was removed from NumPy; the builtin float is the documented
    # replacement and produces the same float64 fields.
    peak_dt = np.dtype([('position', float),
                        ('height', float),
                        ('width', float)])
    P = np.array([], dtype=peak_dt)
    peak = 0
    for j in range(len(y) - 4):
        if np.sign(d[j]) > np.sign(d[j + 1]):  # Detects zero-crossing
            if np.sign(d[j + 1]) == 0:
                continue
            # if slope of derivative is larger than slope_thresh
            if d[j] - d[j + 1] > slope_thresh:
                # if height of peak is larger than amp_thresh
                if y[j] > amp_thresh:
                    # the next section is very slow, and actually messes
                    # things up for images (discrete pixels),
                    # so by default, don't do subchannel precision in the
                    # 1D peakfind step.
                    if subchannel:
                        xx = np.zeros(peakgroup)
                        yy = np.zeros(peakgroup)
                        s = 0
                        for k in range(peakgroup):
                            groupindex = int(j + k - n + 1)
                            if groupindex < 1:
                                xx = xx[1:]
                                yy = yy[1:]
                                s += 1
                                continue
                            elif groupindex > y.shape[0] - 1:
                                xx = xx[:groupindex - 1]
                                yy = yy[:groupindex - 1]
                                break
                            xx[k - s] = x[groupindex]
                            yy[k - s] = y[groupindex]
                        avg = np.average(xx)
                        stdev = np.std(xx)
                        xxf = (xx - avg) / stdev
                        # Fit parabola to log10 of sub-group with
                        # centering and scaling
                        yynz = yy != 0
                        coef = np.polyfit(
                            xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
                        c1 = coef[2]
                        c2 = coef[1]
                        c3 = coef[0]
                        with np.errstate(invalid='ignore'):
                            width = np.linalg.norm(stdev * 2.35703 /
                                                   (np.sqrt(2) * np.sqrt(-1 *
                                                                         c3)))
                        # if the peak is too narrow for least-squares
                        # technique to work  well, just use the max value
                        # of y in the sub-group of points near peak.
                        if peakgroup < 7:
                            height = np.max(yy)
                            position = xx[np.argmin(np.abs(yy - height))]
                        else:
                            position = - ((stdev * c2 / (2 * c3)) - avg)
                            height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
                    # Fill results array P. One row for each peak
                    # detected, containing the
                    # peak position (x-value) and peak height (y-value).
                    else:
                        position = x[j]
                        height = y[j]
                        # no way to know peak width without
                        # the above measurements.
                        width = 0
                    if (not np.isnan(position) and 0 < position < x[-1]):
                        P = np.hstack((P,
                                       np.array([(position, height, width)],
                                                dtype=peak_dt)))
                        peak += 1
    # return only the part of the array that contains peaks
    # (not the whole maxpeakn x 3 array)
    if len(P) > maxpeakn:
        minh = np.sort(P['height'])[-maxpeakn]
        P = P[P['height'] >= minh]
    # Sorts the values as a function of position
    P.sort(0)
    return P
def interpolate1D(number_of_interpolation_points, data):
    """Linearly resample `data` onto a grid `ip` times denser.

    The output has ``len(data) * ip - (ip - 1)`` samples spanning the same
    interval as the input (endpoints preserved).
    """
    ip = number_of_interpolation_points
    n_channels = len(data)
    coarse_axis = np.linspace(0, 100, n_channels)
    fine_axis = np.linspace(0, 100, n_channels * ip - (ip - 1))
    return scipy.interpolate.interp1d(coarse_axis, data)(fine_axis)
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get('mask', None)
ref = kwargs.get('ref', None)
interpolate = kwargs.get('interpolate', True)
ip = kwargs.get('ip', 5)
data_slice = kwargs.get('data_slice', slice(None))
if bool(mask):
return np.nan
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
def _shift1D(data, **kwargs):
shift = kwargs.get('shift', 0.)
original_axis = kwargs.get('original_axis', None)
fill_value = kwargs.get('fill_value', np.nan)
kind = kwargs.get('kind', 'linear')
offset = kwargs.get('offset', 0.)
scale = kwargs.get('scale', 1.)
size = kwargs.get('size', 2)
if np.isnan(shift):
return data
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(original_axis,
data,
bounds_error=False,
fill_value=fill_value,
kind=kind)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
class Signal1D(BaseSignal, CommonSignal1D):
    """General 1D signal class (signal dimension equal to one).

    Base class for spectrum-like data: any dataset whose signal axis is
    one-dimensional, with an arbitrary number of navigation dimensions.
    """
    _signal_dimension = 1

    def __init__(self, *args, **kwargs):
        """Create a Signal1D, forcing the signal dimension to one."""
        super().__init__(*args, **kwargs)
        # Guarantee the invariant of this class even when the incoming
        # axes manager was configured differently.
        if self.axes_manager.signal_dimension != 1:
            self.axes_manager.set_signal_dimension(1)
    def _spikes_diagnosis(self, signal_mask=None,
                          navigation_mask=None):
        """Plots a histogram to help in choosing the threshold for
        spikes removal.

        Parameters
        ----------
        signal_mask: boolean array
            Restricts the operation to the signal locations not marked
            as True (masked)
        navigation_mask: boolean array
            Restricts the operation to the navigation locations not
            marked as True (masked).

        See also
        --------
        spikes_removal_tool

        """
        self._check_signal_dimension_equals_one()
        dc = self.data
        if signal_mask is not None:
            dc = dc[..., ~signal_mask]
        if navigation_mask is not None:
            dc = dc[~navigation_mask, :]
        # Spikes show up as large first-differences along the signal axis.
        der = np.abs(np.diff(dc, 1, -1))
        n = ((~navigation_mask).sum() if navigation_mask else
             self.axes_manager.navigation_size)

        # arbitrary cutoff for number of spectra necessary before histogram
        # data is compressed by finding maxima of each spectrum
        tmp = BaseSignal(der) if n < 2000 else BaseSignal(
            np.ravel(der.max(-1)))

        # get histogram signal using smart binning and plot
        tmph = tmp.get_histogram()
        tmph.plot()

        # Customize plot appearance
        plt.gca().set_title('')
        plt.gca().fill_between(tmph.axes_manager[0].axis,
                               tmph.data,
                               facecolor='#fddbc7',
                               interpolate=True,
                               color='none')
        ax = tmph._plot.signal_plot.ax
        axl = tmph._plot.signal_plot.ax_lines[0]
        axl.set_line_properties(color='#b2182b')
        plt.xlabel('Derivative magnitude')
        plt.ylabel('Log(Counts)')
        # Log scale makes the (rare, large) spike population visible.
        ax.set_yscale('log')
        ax.set_ylim(10 ** -1, plt.ylim()[1])
        ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
        plt.draw()
def spikes_removal_tool(self, signal_mask=None,
navigation_mask=None, display=True, toolkit=None):
"""Graphical interface to remove spikes from EELS spectra.
Parameters
----------
signal_mask: boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask: boolean array
Restricts the operation to the navigation locations not
marked as True (masked)
See also
--------
_spikes_diagnosis,
"""
self._check_signal_dimension_equals_one()
sr = SpikesRemoval(self,
navigation_mask=navigation_mask,
signal_mask=signal_mask)
return sr.gui(display=display, toolkit=toolkit)
spikes_removal_tool.__doc__ =\
"""Graphical interface to remove spikes from EELS spectra.
Parameters
----------
signal_mask: boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask: boolean array
Restricts the operation to the navigation locations not
marked as True (masked)
%s
%s
See also
--------
_spikes_diagnosis,
""" % (DISPLAY_DT, TOOLKIT_DT)
def create_model(self, dictionary=None):
"""Create a model for the current data.
Returns
-------
model : `Model1D` instance.
"""
model = Model1D(self, dictionary=dictionary)
return model
    def shift1D(self,
                shift_array,
                interpolation_method='linear',
                crop=True,
                expand=False,
                fill_value=np.nan,
                parallel=None,
                show_progressbar=None):
        """Shift the data in place over the signal axis by the amount specified
        by an array.

        Parameters
        ----------
        shift_array : numpy array
            An array containing the shifting amount (in axis units). It must
            have `axes_manager._navigation_shape_in_array` shape.
        interpolation_method : str or int
            Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
            integer specifying the order of the spline interpolator to
            use.
        crop : bool
            If True automatically crop the signal axis at both ends if
            needed.
        expand : bool
            If True, the data will be expanded to fit all data after alignment.
            Overrides `crop`.
        fill_value : float
            If crop is False fill the data outside of the original
            interval with the given value where needed.
        parallel : {None, bool}
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.

        Raises
        ------
        SignalDimensionError if the signal dimension is not 1.
        """
        if not np.any(shift_array):
            # Nothing to do: the shift array is filled with zeros.
            return
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]

        # Figure out min/max shifts, and translate to shifts in index as well
        minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
        if minimum < 0:
            ihigh = 1 + axis.value2index(
                axis.high_value + minimum,
                rounding=math.floor)
        else:
            ihigh = axis.high_index + 1
        if maximum > 0:
            ilow = axis.value2index(axis.offset + maximum,
                                    rounding=math.ceil)
        else:
            ilow = axis.low_index
        if expand:
            # Pad the data so no values are lost after shifting; the lazy
            # (dask) branch builds fill-value chunks compatible with the
            # existing chunking instead of using np.pad.
            if self._lazy:
                ind = axis.index_in_array
                pre_shape = list(self.data.shape)
                post_shape = list(self.data.shape)
                pre_chunks = list(self.data.chunks)
                post_chunks = list(self.data.chunks)

                pre_shape[ind] = axis.high_index - ihigh + 1
                post_shape[ind] = ilow - axis.low_index
                for chunks, shape in zip((pre_chunks, post_chunks),
                                         (pre_shape, post_shape)):
                    maxsize = min(np.max(chunks[ind]), shape[ind])
                    num = np.ceil(shape[ind] / maxsize)
                    chunks[ind] = tuple(len(ar) for ar in
                                        np.array_split(np.arange(shape[ind]),
                                                       num))
                pre_array = da.full(tuple(pre_shape),
                                    fill_value,
                                    chunks=tuple(pre_chunks))
                post_array = da.full(tuple(post_shape),
                                     fill_value,
                                     chunks=tuple(post_chunks))
                self.data = da.concatenate((pre_array, self.data, post_array),
                                           axis=ind)
            else:
                padding = []
                for i in range(self.data.ndim):
                    if i == axis.index_in_array:
                        padding.append((axis.high_index - ihigh + 1,
                                        ilow - axis.low_index))
                    else:
                        padding.append((0, 0))
                self.data = np.pad(self.data, padding, mode='constant',
                                   constant_values=(fill_value,))
            # Keep the axis calibration consistent with the padded data.
            axis.offset += minimum
            axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
        # Apply the per-navigation-position shift via interpolation.
        self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),
                          original_axis=axis.axis,
                          fill_value=fill_value,
                          kind=interpolation_method,
                          offset=axis.offset,
                          scale=axis.scale,
                          size=axis.size,
                          show_progressbar=show_progressbar,
                          parallel=parallel,
                          ragged=False)

        if crop and not expand:
            # Remove the regions that no longer contain valid data.
            self.crop(axis.index_in_axes_manager,
                      ilow,
                      ihigh)

        self.events.data_changed.trigger(obj=self)
    def interpolate_in_between(self, start, end, delta=3, parallel=None,
                               show_progressbar=None, **kwargs):
        """Replace the data in a given range by interpolation.

        The operation is performed in place.

        Parameters
        ----------
        start, end : {int | float}
            The limits of the interval. If int they are taken as the
            axis index. If float they are taken as the axis value.
        delta : {int | float}
            The windows around the (start, end) to use for interpolation.
            If float it is interpreted in axis units and converted to an
            index count.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.
        parallel: {None, bool}

        All extra keyword arguments are passed to
        scipy.interpolate.interp1d. See the function documentation
        for details.

        Raises
        ------
        SignalDimensionError if the signal dimension is not 1.
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]
        i1 = axis._get_index(start)
        i2 = axis._get_index(end)
        if isinstance(delta, float):
            # Convert an axis-units window into a number of channels.
            delta = int(delta / axis.scale)
        # Support windows [i0, i1) and [i2, i3) on either side of the gap,
        # clipped to the axis extent.
        i0 = int(np.clip(i1 - delta, 0, np.inf))
        i3 = int(np.clip(i2 + delta, 0, axis.size))

        def interpolating_function(dat):
            # Fit on the flanking windows, then fill the gap [i1, i2).
            dat_int = sp.interpolate.interp1d(
                list(range(i0, i1)) + list(range(i2, i3)),
                dat[i0:i1].tolist() + dat[i2:i3].tolist(),
                **kwargs)
            dat[i1:i2] = dat_int(list(range(i1, i2)))
            return dat
        self._map_iterate(interpolating_function, ragged=False,
                          parallel=parallel, show_progressbar=show_progressbar)
        self.events.data_changed.trigger(obj=self)
def _check_navigation_mask(self, mask):
if mask is not None:
if not isinstance(mask, BaseSignal):
raise ValueError("mask must be a BaseSignal instance.")
elif mask.axes_manager.signal_dimension not in (0, 1):
raise ValueError("mask must be a BaseSignal "
"with signal_dimension equal to 1")
elif (mask.axes_manager.navigation_dimension !=
self.axes_manager.navigation_dimension):
raise ValueError("mask must be a BaseSignal with the same "
"navigation_dimension as the current signal.")
def estimate_shift1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
parallel=None,
show_progressbar=None):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient to
select the feature of interest providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : {int | float | None}
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as eference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : BaseSignal of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
parallel : {None, bool}
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Returns
-------
An array with the result of the estimation in the axis units.
Raises
------
SignalDimensionError if the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (('mask', mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
mask=None,
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
return shift_array
def align1D(self,
            start=None,
            end=None,
            reference_indices=None,
            max_shift=None,
            interpolate=True,
            number_of_interpolation_points=5,
            interpolation_method='linear',
            crop=True,
            expand=False,
            fill_value=np.nan,
            also_align=None,
            mask=None,
            show_progressbar=None):
    """Align the data in place along the signal axis.

    The shifts are estimated by cross-correlation against the spectrum
    at ``reference_indices`` (see :meth:`estimate_shift1D`) and then
    applied with ``shift1D`` to this signal and to every signal in
    ``also_align``. To decrease memory usage and computation time and
    to improve accuracy it is convenient to restrict the comparison to
    a feature of interest via ``start`` and ``end``.

    Parameters
    ----------
    start, end : {int | float | None}
        The limits of the interval used for the estimation. If int they
        are taken as the axis index. If float they are taken as the
        axis value.
    reference_indices : tuple of ints or None
        Navigation coordinates of the reference spectrum. If None the
        spectrum at the current coordinates is used.
    max_shift : int
        "Saturation limit" for the estimated shift.
    interpolate : bool
        If True, interpolation is used to provide sub-pixel accuracy.
    number_of_interpolation_points : int
        Number of interpolation points. Warning: making this number
        too big can saturate the memory.
    interpolation_method : str or int
        Kind of interpolation forwarded to ``shift1D`` ('linear',
        'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or an
        integer spline order.
    crop : bool
        If True automatically crop the signal axis at both ends if
        needed.
    expand : bool
        If True, the data will be expanded to fit all data after
        alignment. Overrides ``crop``.
    fill_value : float
        If crop is False fill the data outside of the original
        interval with the given value where needed.
    also_align : list of signals, None
        BaseSignal instances with exactly the same dimensions as this
        one that will be aligned using the same shift map.
    mask : BaseSignal of bool data type.
        It must have signal_dimension = 0 and navigation_shape equal to
        the current signal. Where mask is True the shift is not
        computed and set to nan.
    show_progressbar : None or bool
        If True, display a progress bar. If None the default is set in
        `preferences`.

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.

    See also
    --------
    estimate_shift1D
    """
    self._check_signal_dimension_equals_one()
    # Normalise also_align and build the full list of signals to shift.
    targets = [self] + (list(also_align) if also_align is not None else [])
    if self._lazy:
        _logger.warning('In order to properly expand, the lazy '
                        'reference signal will be read twice (once to '
                        'estimate shifts, and second time to shift '
                        'appropriatelly), which might take a long time. '
                        'Use expand=False to only pass through the data '
                        'once.')
    shifts = self.estimate_shift1D(
        start=start,
        end=end,
        reference_indices=reference_indices,
        max_shift=max_shift,
        interpolate=interpolate,
        number_of_interpolation_points=number_of_interpolation_points,
        mask=mask,
        show_progressbar=show_progressbar)
    for target in targets:
        target.shift1D(shift_array=shifts,
                       interpolation_method=interpolation_method,
                       crop=crop,
                       fill_value=fill_value,
                       expand=expand,
                       show_progressbar=show_progressbar)
def integrate_in_range(self, signal_range='interactive',
                       display=True, toolkit=None):
    """Sum the spectrum over an energy range, giving the integrated
    area.

    The range can be selected either through a GUI or from the command
    line.

    Parameters
    ----------
    signal_range : {a tuple of this form (l, r), "interactive"}
        l and r are the left and right limits of the range. They can be
        numbers or None, where None indicates the extremes of the
        interval. If l and r are floats the `signal_range` will be in
        axis units (for example eV). If l and r are integers the
        `signal_range` will be in index units. When `signal_range` is
        "interactive" (default) the range is selected using a GUI.

    Returns
    -------
    integrated_spectrum : BaseSignal subclass

    See Also
    --------
    integrate_simpson

    Examples
    --------
    Using the GUI

    >>> s = hs.signals.Signal1D(range(1000))
    >>> s.integrate_in_range() #doctest: +SKIP

    Using the CLI

    >>> s_int = s.integrate_in_range(signal_range=(560,None))

    Selecting a range in axis units (floats)

    >>> s_int = s.integrate_in_range(signal_range=(560.,590.))

    Selecting a range by index (integers)

    >>> s_int = s.integrate_in_range(signal_range=(100,120))
    """
    from hyperspy.misc.utils import deprecation_warning
    msg = (
        "The `Signal1D.integrate_in_range` method is deprecated and will "
        "be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
        "instead.")
    deprecation_warning(msg)

    signal_range = signal_range_from_roi(signal_range)
    if signal_range != 'interactive':
        # Non-interactive path: integrate directly over the given range.
        return self._integrate_in_range_commandline(signal_range)
    # Interactive path: work on a copy so the GUI tool can modify it.
    working_copy = self.deepcopy()
    IntegrateArea(working_copy, signal_range).gui(display=display,
                                                  toolkit=toolkit)
    return working_copy
def _integrate_in_range_commandline(self, signal_range):
    """Integrate over ``signal_range`` without any GUI interaction."""
    # Resolve a possible ROI into plain (left, right) limits.
    left, right = signal_range_from_roi(signal_range)[:2]
    return self.isig[left:right].integrate1D(-1)
def calibrate(self, display=True, toolkit=None):
    self._check_signal_dimension_equals_one()
    return Signal1DCalibration(self).gui(display=display, toolkit=toolkit)

calibrate.__doc__ = \
    """
    Calibrate the spectral dimension using a gui.

    It displays a window where the new calibration can be set by:

    * Setting the offset, units and scale directly
    * Selecting a range by dragging the mouse on the spectrum figure
      and setting the new values for the given range limits

    Parameters
    ----------
    %s
    %s

    Notes
    -----
    For this method to work the output_dimension must be 1. Set the
    view accordingly.

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    """ % (DISPLAY_DT, TOOLKIT_DT)
def smooth_savitzky_golay(self,
                          polynomial_order=None,
                          window_length=None,
                          differential_order=0,
                          parallel=None, display=True, toolkit=None):
    self._check_signal_dimension_equals_one()
    if polynomial_order is None or window_length is None:
        # Interactive mode: let the user pick the missing parameter(s),
        # pre-filling whatever was supplied.
        smoother = SmoothingSavitzkyGolay(self)
        smoother.differential_order = differential_order
        if polynomial_order is not None:
            smoother.polynomial_order = polynomial_order
        if window_length is not None:
            smoother.window_length = window_length
        return smoother.gui(display=display, toolkit=toolkit)
    # Both parameters given: apply the filter directly, in place.
    signal_axis = self.axes_manager.signal_axes[0]
    self.map(savgol_filter, window_length=window_length,
             polyorder=polynomial_order, deriv=differential_order,
             delta=signal_axis.scale, ragged=False, parallel=parallel)

smooth_savitzky_golay.__doc__ = \
    """
    Apply a Savitzky-Golay filter to the data in place.

    If `polynomial_order` or `window_length` or `differential_order` are
    None the method is run in interactive mode.

    Parameters
    ----------
    window_length : int
        The length of the filter window (i.e. the number of coefficients).
        `window_length` must be a positive odd integer.
    polynomial_order : int
        The order of the polynomial used to fit the samples.
        `polyorder` must be less than `window_length`.
    differential_order: int, optional
        The order of the derivative to compute. This must be a
        nonnegative integer. The default is 0, which means to filter
        the data without differentiating.
    parallel : {bool, None}
        Perform the operation in a threaded manner (parallely).
    %s
    %s

    Notes
    -----
    More information about the filter in `scipy.signal.savgol_filter`.
    """ % (DISPLAY_DT, TOOLKIT_DT)
def smooth_lowess(self,
                  smoothing_parameter=None,
                  number_of_iterations=None,
                  show_progressbar=None,
                  parallel=None, display=True, toolkit=None):
    if not statsmodels_installed:
        raise ImportError("statsmodels is not installed. This package is "
                          "required for this feature.")
    self._check_signal_dimension_equals_one()
    if smoothing_parameter is not None and number_of_iterations is not None:
        # Both parameters supplied: smooth directly, in place.
        self.map(lowess,
                 exog=self.axes_manager[-1].axis,
                 frac=smoothing_parameter,
                 it=number_of_iterations,
                 is_sorted=True,
                 return_sorted=False,
                 show_progressbar=show_progressbar,
                 ragged=False,
                 parallel=parallel)
    else:
        # Interactive mode, pre-filling any parameter that was supplied.
        smoother = SmoothingLowess(self)
        if smoothing_parameter is not None:
            smoother.smoothing_parameter = smoothing_parameter
        if number_of_iterations is not None:
            smoother.number_of_iterations = number_of_iterations
        return smoother.gui(display=display, toolkit=toolkit)

smooth_lowess.__doc__ = \
    """
    Lowess data smoothing in place.

    If `smoothing_parameter` or `number_of_iterations` are None the method
    is run in interactive mode.

    Parameters
    ----------
    smoothing_parameter: float or None
        Between 0 and 1. The fraction of the data used
        when estimating each y-value.
    number_of_iterations: int or None
        The number of residual-based reweightings
        to perform.
    show_progressbar : None or bool
        If True, display a progress bar. If None the default is set in
        `preferences`.
    parallel : {Bool, None, int}
        Perform the operation parallel
    %s
    %s

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    ImportError if statsmodels is not installed.

    Notes
    -----
    This method uses the lowess algorithm from statsmodels. statsmodels
    is required for this method.
    """ % (DISPLAY_DT, TOOLKIT_DT)
def smooth_tv(self, smoothing_parameter=None, show_progressbar=None,
              parallel=None, display=True, toolkit=None):
    self._check_signal_dimension_equals_one()
    if smoothing_parameter is not None:
        # Weight supplied: denoise directly, in place.
        self.map(_tv_denoise_1d, weight=smoothing_parameter,
                 ragged=False,
                 show_progressbar=show_progressbar,
                 parallel=parallel)
    else:
        # No weight: fall back to the interactive GUI tool.
        return SmoothingTV(self).gui(display=display, toolkit=toolkit)

smooth_tv.__doc__ = \
    """
    Total variation data smoothing in place.

    Parameters
    ----------
    smoothing_parameter: float or None
        Denoising weight relative to L2 minimization. If None the method
        is run in interactive mode.
    show_progressbar : None or bool
        If True, display a progress bar. If None the default is set in
        `preferences`.
    parallel : {Bool, None, int}
        Perform the operation parallely
    %s
    %s

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    """ % (DISPLAY_DT, TOOLKIT_DT)
def filter_butterworth(self,
                       cutoff_frequency_ratio=None,
                       type='low',
                       order=2, display=True, toolkit=None):
    self._check_signal_dimension_equals_one()
    smoother = ButterworthFilter(self)
    if cutoff_frequency_ratio is None:
        # No cutoff given: configure interactively through the GUI.
        return smoother.gui(display=display, toolkit=toolkit)
    # Cutoff given: configure and apply directly, in place.
    smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
    smoother.type = type
    smoother.order = order
    smoother.apply()

filter_butterworth.__doc__ = \
    """
    Butterworth filter in place.

    Parameters
    ----------
    %s
    %s

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    """ % (DISPLAY_DT, TOOLKIT_DT)
def _remove_background_cli(
        self, signal_range, background_estimator, fast=True,
        show_progressbar=None):
    """Fit ``background_estimator`` over ``signal_range`` and return
    ``self`` minus the modelled background (non-interactive path)."""
    from hyperspy.models.model1d import Model1D
    # Resolve a possible ROI into plain (left, right) limits.
    left, right = signal_range_from_roi(signal_range)[:2]
    model = Model1D(self)
    model.append(background_estimator)
    # Fast, closed-form parameter estimation over the range.
    background_estimator.estimate_parameters(
        self,
        left,
        right,
        only_current=False)
    if not fast:
        # Refine the estimate with a non-linear least-squares fit.
        model.set_signal_range(left, right)
        model.multifit(show_progressbar=show_progressbar)
    return self - model.as_signal(show_progressbar=show_progressbar)
def remove_background(
        self,
        signal_range='interactive',
        background_type='PowerLaw',
        polynomial_order=2,
        fast=True,
        show_progressbar=None, display=True, toolkit=None):
    signal_range = signal_range_from_roi(signal_range)
    self._check_signal_dimension_equals_one()
    if signal_range == 'interactive':
        # Interactive path: the GUI tool operates on this signal.
        return BackgroundRemoval(self).gui(display=display, toolkit=toolkit)
    # Dispatch table mapping the type name to an estimator factory.
    estimator_factories = {
        'PowerLaw': components1d.PowerLaw,
        'Gaussian': components1d.Gaussian,
        'Offset': components1d.Offset,
        'Polynomial': lambda: components1d.Polynomial(polynomial_order),
    }
    if background_type not in estimator_factories:
        raise ValueError(
            "Background type: " +
            background_type +
            " not recognized")
    background_estimator = estimator_factories[background_type]()
    return self._remove_background_cli(
        signal_range=signal_range,
        background_estimator=background_estimator,
        fast=fast,
        show_progressbar=show_progressbar)

remove_background.__doc__ = \
    """
    Remove the background, either in place using a gui or returned as a new
    spectrum using the command line.

    Parameters
    ----------
    signal_range : tuple, optional
        If this argument is not specified, the signal range has to be
        selected using a GUI. And the original spectrum will be replaced.
        If tuple is given, the a spectrum will be returned.
    background_type : string
        The type of component which should be used to fit the background.
        Possible components: PowerLaw, Gaussian, Offset, Polynomial
        If Polynomial is used, the polynomial order can be specified
    polynomial_order : int, default 2
        Specify the polynomial order if a Polynomial background is used.
    fast : bool
        If True, perform an approximative estimation of the parameters.
        If False, the signal is fitted using non-linear least squares
        afterwards. This is slower compared to the estimation but
        possibly more accurate.
    show_progressbar : None or bool
        If True, display a progress bar. If None the default is set in
        `preferences`.
    %s
    %s

    Examples
    --------
    Using gui, replaces spectrum s

    >>> s = hs.signals.Signal1D(range(1000))
    >>> s.remove_background() #doctest: +SKIP

    Using command line, returns a spectrum

    >>> s1 = s.remove_background(signal_range=(400,450), background_type='PowerLaw')

    Using a full model to fit the background

    >>> s1 = s.remove_background(signal_range=(400,450), fast=False)

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    """ % (DISPLAY_DT, TOOLKIT_DT)
@interactive_range_selector
def crop_signal1D(self, left_value=None, right_value=None,):
    """Crop in place the spectral dimension.

    Parameters
    ----------
    left_value, right_value : {int | float | None}
        If int the values are taken as indices. If float they are
        converted to indices using the spectral axis calibration.
        If left_value is None crops from the beginning of the axis.
        If right_value is None crops up to the end of the axis. If
        both are None the interactive cropping interface is activated
        enabling cropping the spectrum using a span selector in the
        signal plot.

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    """
    self._check_signal_dimension_equals_one()
    try:
        # NOTE(review): signal_range_from_roi presumably returns a
        # (left, right) pair when given a span ROI — TODO confirm. For
        # plain scalar/None input the unpacking raises TypeError and the
        # values are used as-is.
        left_value, right_value = signal_range_from_roi(left_value)
    except TypeError:
        # It was not a ROI, we carry on
        pass
    self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
              start=left_value, end=right_value)
def gaussian_filter(self, FWHM):
    """Apply a Gaussian filter in the spectral dimension, in place.

    Parameters
    ----------
    FWHM : float
        The Full Width at Half Maximum of the Gaussian, expressed in
        spectral-axis units.

    Raises
    ------
    ValueError if FWHM is equal or less than zero.
    SignalDimensionError if the signal dimension is not 1.
    """
    self._check_signal_dimension_equals_one()
    if FWHM <= 0:
        raise ValueError(
            "FWHM must be greater than zero")
    scale = self.axes_manager.signal_axes[0].scale
    # Convert the FWHM from axis units to pixels, then to the standard
    # deviation expected by scipy: FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.35482*sigma.
    fwhm_px = FWHM * (1 / scale)
    self.map(gaussian_filter1d, sigma=fwhm_px / 2.35482, ragged=False)
def hanning_taper(self, side='both', channels=None, offset=0):
    """Apply a hanning taper to the data in place.

    Parameters
    ----------
    side : {'left', 'right', 'both'}
        Which end(s) of the signal axis to taper.
    channels : {None, int}
        The number of channels to taper. If None, 2% of the total
        number of channels (but at least 20) are tapered.
    offset : int
        Number of edge channels that are zeroed before the taper
        starts.

    Returns
    -------
    channels : int
        The number of channels that were tapered.

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    """
    # TODO: generalize it
    self._check_signal_dimension_equals_one()
    if channels is None:
        # Default: 2% of the axis length, clamped to a minimum of 20.
        channels = int(round(len(self()) * 0.02))
        if channels < 20:
            channels = 20
    dc = self._data_aligned_with_axes
    if self._lazy and offset != 0:
        # Lazy data cannot be zeroed in place; pre-build a dask block of
        # zeros (matching the navigation chunking) to concatenate below.
        shp = dc.shape
        if len(shp) == 1:
            nav_shape = ()
            nav_chunks = ()
        else:
            nav_shape = shp[:-1]
            nav_chunks = dc.chunks[:-1]
        zeros = da.zeros(nav_shape + (offset,),
                         chunks=nav_chunks + ((offset,),))

    if side == 'left' or side == 'both':
        if self._lazy:
            # Rebuild the lazy array: [zeros] + tapered head + untouched rest.
            tapered = dc[..., offset:channels + offset]
            tapered *= np.hanning(2 * channels)[:channels]
            therest = dc[..., channels + offset:]
            thelist = [] if offset == 0 else [zeros]
            thelist.extend([tapered, therest])
            dc = da.concatenate(thelist, axis=-1)
        else:
            # In place: multiply by the rising half of a Hanning window,
            # then zero the offset region at the very edge.
            dc[..., offset:channels + offset] *= (
                np.hanning(2 * channels)[:channels])
            dc[..., :offset] *= 0.
    if side == 'right' or side == 'both':
        # Right limit of the taper slice; None means "to the end".
        rl = None if offset == 0 else -offset
        if self._lazy:
            therest = dc[..., :-channels - offset]
            tapered = dc[..., -channels - offset:rl]
            tapered *= np.hanning(2 * channels)[-channels:]
            thelist = [therest, tapered]
            if offset != 0:
                thelist.append(zeros)
            dc = da.concatenate(thelist, axis=-1)
        else:
            # Falling half of the Hanning window on the tail.
            dc[..., -channels - offset:rl] *= (
                np.hanning(2 * channels)[-channels:])
            if offset != 0:
                dc[..., -offset:] *= 0.
    if self._lazy:
        # The lazy branches produced a new array; rebind it to the signal.
        self.data = dc
    self.events.data_changed.trigger(obj=self)
    return channels
def find_peaks1D_ohaver(self, xdim=None, slope_thresh=0, amp_thresh=None,
                        subchannel=True, medfilt_radius=5, maxpeakn=30000,
                        peakgroup=10, parallel=None):
    """Find peaks along a 1D line (peaks in spectrum/spectra).

    Function to locate the positive peaks in a noisy x-y data set.
    Detects peaks by looking for downward zero-crossings in the
    first derivative that exceed 'slope_thresh'.
    Returns an array containing position, height, and width of each
    peak.
    'slope_thresh' and 'amp_thresh' control sensitivity: higher
    values will neglect smaller features.
    'peakgroup' is the number of points around the top of the peak
    to search around.

    Parameters
    ----------
    xdim : None (optional)
        Unused by this method; kept for backward compatibility of the
        signature.
    slope_thresh : float (optional)
        1st derivative threshold to count the peak;
        default is 0.
        Higher values will neglect smaller features.
    amp_thresh : float (optional)
        Intensity threshold above which peaks are counted.
        NOTE(review): presumably defaults to a fraction of max(y) inside
        `find_peaks_ohaver` when None — confirm against that function.
    subchannel : bool (optional)
        Whether to use sub-channel precision; default is True.
    medfilt_radius : int (optional)
        Median filter window to apply to smooth the data
        (see scipy.signal.medfilt); if 0, no filter will be applied.
        Default is 5.
    maxpeakn : int (optional)
        Maximum number of detectable peaks; default is 30000.
    peakgroup : int (optional)
        Number of points around the "top part" of the peak;
        default is 10.
    parallel : {None, bool}
        Perform the operation in a threaded (parallel) manner.

    Returns
    -------
    peaks : structured array of shape _navigation_shape_in_array in which
        each cell contains an array that contains as many structured arrays
        as peaks where found at that location and which fields: position,
        height, width, contains position, height, and width of each peak.

    Raises
    ------
    SignalDimensionError if the signal dimension is not 1.
    """
    # TODO: add scipy.signal.find_peaks_cwt
    self._check_signal_dimension_equals_one()
    axis = self.axes_manager.signal_axes[0].axis
    # Delegate the per-spectrum work to find_peaks_ohaver; ragged=True
    # because the number of peaks varies per navigation position.
    peaks = self.map(find_peaks_ohaver,
                     x=axis,
                     slope_thresh=slope_thresh,
                     amp_thresh=amp_thresh,
                     medfilt_radius=medfilt_radius,
                     maxpeakn=maxpeakn,
                     peakgroup=peakgroup,
                     subchannel=subchannel,
                     ragged=True,
                     parallel=parallel,
                     inplace=False)
    return peaks.data
def estimate_peak_width(self,
                        factor=0.5,
                        window=None,
                        return_interval=False,
                        parallel=None,
                        show_progressbar=None):
    """Estimate the width of the highest intensity peak
    of the spectra at a given fraction of its maximum.

    It can be used with asymmetric peaks. For accurate results any
    background must be previously subtracted.
    The estimation is performed by interpolation using cubic splines.

    Parameters
    ----------
    factor : 0 < float < 1
        The default, 0.5, estimates the FWHM.
    window : None, float
        The size of the window centred at the peak maximum
        used to perform the estimation.
        The window size must be chosen with care: if it is narrower
        than the width of the peak at some positions or if it is
        so wide that it includes other more intense peaks this
        method cannot compute the width and a NaN is stored instead.
    return_interval: bool
        If True, returns 2 extra signals with the positions of the
        desired height fraction at the left and right of the
        peak.
    parallel : {None, bool}
        Perform the operation in a threaded (parallel) manner.
    show_progressbar : None or bool
        If True, display a progress bar. If None the default is set in
        `preferences`.

    Returns
    -------
    width or [width, left, right], depending on the value of
    `return_interval`.

    Raises
    ------
    ValueError if factor is not strictly between 0 and 1.
    """
    if show_progressbar is None:
        show_progressbar = preferences.General.show_progressbar
    self._check_signal_dimension_equals_one()
    if not 0 < factor < 1:
        raise ValueError("factor must be between 0 and 1.")
    axis = self.axes_manager.signal_axes[0]
    # x = axis.axis
    maxval = self.axes_manager.navigation_size
    # Suppress the progress bar for signals with no navigation axes.
    show_progressbar = show_progressbar and maxval > 0

    def estimating_function(spectrum,
                            window=None,
                            factor=0.5,
                            axis=None):
        # Per-spectrum worker: returns the two axis positions where the
        # spectrum crosses factor * max, or (nan, nan) when the spline
        # does not have exactly two roots (peak not resolvable).
        x = axis.axis
        if window is not None:
            # Restrict the fit to a window centred on the maximum.
            vmax = axis.index2value(spectrum.argmax())
            slices = axis._get_array_slices(
                slice(vmax - window * 0.5, vmax + window * 0.5))
            spectrum = spectrum[slices]
            x = x[slices]
        # The roots of (spectrum - factor*max) are the crossing points.
        spline = scipy.interpolate.UnivariateSpline(
            x,
            spectrum - factor * spectrum.max(),
            s=0)
        roots = spline.roots()
        if len(roots) == 2:
            return np.array(roots)
        else:
            return np.full((2,), np.nan)

    both = self._map_iterate(estimating_function,
                             window=window,
                             factor=factor,
                             axis=axis,
                             ragged=False,
                             inplace=False,
                             parallel=parallel,
                             show_progressbar=show_progressbar)
    # `both` holds (left, right) pairs; split the transposed signal into
    # two separate signals.
    left, right = both.T.split()
    width = right - left
    if factor == 0.5:
        width.metadata.General.title = (
            self.metadata.General.title + " FWHM")
        left.metadata.General.title = (
            self.metadata.General.title + " FWHM left position")
        right.metadata.General.title = (
            self.metadata.General.title + " FWHM right position")
    else:
        width.metadata.General.title = (
            self.metadata.General.title +
            " full-width at %.1f maximum" % factor)
        left.metadata.General.title = (
            self.metadata.General.title +
            " full-width at %.1f maximum left position" % factor)
        right.metadata.General.title = (
            self.metadata.General.title +
            " full-width at %.1f maximum right position" % factor)
    # The results are scalar per navigation position.
    for signal in (left, width, right):
        signal.axes_manager.set_signal_dimension(0)
        signal.set_signal_type("")
    if return_interval is True:
        return [width, left, right]
    else:
        return width
class LazySignal1D(LazySignal, Signal1D):
    """Lazy (out-of-core) variant of the general 1D signal class."""
    # Flag checked by signal methods (e.g. `if self._lazy:`) to select the
    # dask-based code paths.
    _lazy = True

    def __init__(self, *args, **kwargs):
        # Build the signal as usual, then force the signal dimension to 1.
        super().__init__(*args, **kwargs)
        self.axes_manager.set_signal_dimension(1)
| gpl-3.0 |
rmetzger/flink | flink-python/pyflink/table/table_environment.py | 2 | 89253 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import sys
import tempfile
import warnings
from typing import Union, List, Tuple, Iterable
from py4j.java_gateway import get_java_class, get_method
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table.sources import TableSource
from pyflink.common.typeinfo import TypeInformation
from pyflink.datastream.data_stream import DataStream
from pyflink.common import JobExecutionResult
from pyflink.dataset import ExecutionEnvironment
from pyflink.java_gateway import get_gateway
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.table import Table, EnvironmentSettings, Expression, ExplainDetail, \
Module, ModuleEntry, TableSink
from pyflink.table.catalog import Catalog
from pyflink.table.descriptors import StreamTableDescriptor, BatchTableDescriptor, \
ConnectorDescriptor, ConnectTableDescriptor
from pyflink.table.serializers import ArrowSerializer
from pyflink.table.statement_set import StatementSet
from pyflink.table.table_config import TableConfig
from pyflink.table.table_result import TableResult
from pyflink.table.types import _to_java_type, _create_type_verifier, RowType, DataType, \
_infer_schema_from_data, _create_converter, from_arrow_type, RowField, create_arrow_schema, \
_to_java_data_type
from pyflink.table.udf import UserDefinedFunctionWrapper, AggregateFunction, udaf, \
UserDefinedAggregateFunctionWrapper, udtaf, TableAggregateFunction
from pyflink.table.utils import to_expression_jarray
from pyflink.util import java_utils
from pyflink.util.java_utils import get_j_env_configuration, is_local_deployment, load_java_class, \
to_j_explain_detail_arr, to_jarray
# Public API of this module, as exposed by `from ... import *`.
__all__ = [
    'BatchTableEnvironment',
    'StreamTableEnvironment',
    'TableEnvironment'
]
class TableEnvironment(object):
"""
A table environment is the base class, entry point, and central context for creating Table
and SQL API programs.
It is unified for bounded and unbounded data processing.
A table environment is responsible for:
- Connecting to external systems.
- Registering and retrieving :class:`~pyflink.table.Table` and other meta objects from a
catalog.
- Executing SQL statements.
- Offering further configuration options.
The path in methods such as :func:`create_temporary_view`
should be a proper SQL identifier. The syntax is following
[[catalog-name.]database-name.]object-name, where the catalog name and database are optional.
For path resolution see :func:`use_catalog` and :func:`use_database`. All keywords or other
special characters need to be escaped.
Example: `cat.1`.`db`.`Table` resolves to an object named 'Table' (table is a reserved
keyword, thus must be escaped) in a catalog named 'cat.1' and database named 'db'.
.. note::
This environment is meant for pure table programs. If you would like to convert from or to
other Flink APIs, it might be necessary to use one of the available language-specific table
environments in the corresponding bridging modules.
"""
def __init__(self, j_tenv, serializer=None):
    """Wrap a Java TableEnvironment gateway object.

    :param j_tenv: The py4j Java TableEnvironment this Python environment
                   delegates to.
    :param serializer: Serializer used for transferring Python data; if
                       None a fresh :class:`PickleSerializer` is created.
                       (Using ``None`` as the default instead of a
                       ``PickleSerializer()`` instance avoids the
                       mutable-default-argument pitfall of sharing one
                       serializer across every environment; callers that
                       passed a serializer explicitly are unaffected.)
    """
    self._j_tenv = j_tenv
    self._is_blink_planner = TableEnvironment._judge_blink_planner(j_tenv)
    self._serializer = serializer if serializer is not None else PickleSerializer()
    # When running in MiniCluster, launch the Python UDF worker using the
    # Python executable specified by sys.executable if users have not
    # specified it explicitly via the configuration 'python.executable'.
    self._set_python_executable_for_local_executor()
@staticmethod
def create(environment_settings: EnvironmentSettings) -> 'TableEnvironment':
    """Instantiate the entry point and central context for creating Table
    and SQL API programs.

    :param environment_settings: The environment settings used to
                                 instantiate the
                                 :class:`~pyflink.table.TableEnvironment`.
    :return: The new :class:`~pyflink.table.TableEnvironment`.
    """
    jvm = get_gateway().jvm
    j_tenv = jvm.TableEnvironment.create(
        environment_settings._j_environment_settings)
    return TableEnvironment(j_tenv)
@staticmethod
def _judge_blink_planner(j_tenv):
    """Return whether the Java environment uses the blink planner.

    The check is done via ``dir`` rather than ``hasattr`` because py4j
    objects resolve attribute access dynamically.
    """
    if "getPlanner" not in dir(j_tenv):
        # Environments without a getPlanner accessor are not blink-based.
        return False
    planner_class = j_tenv.getPlanner().getClass()
    blink_base_class = get_java_class(
        get_gateway().jvm.org.apache.flink.table.planner.delegation.PlannerBase)
    return blink_base_class.isAssignableFrom(planner_class)
def from_table_source(self, table_source: 'TableSource') -> 'Table':
    """Create a table from a table source.

    .. deprecated:: 1.11

    Example:
    ::

        >>> csv_table_source = CsvTableSource(
        ...     csv_file_path, ['a', 'b'], [DataTypes.STRING(), DataTypes.BIGINT()])
        >>> table_env.from_table_source(csv_table_source)

    :param table_source: The table source used as table.
    :return: The result table.
    """
    warnings.warn("Deprecated in 1.11.", DeprecationWarning)
    j_table = self._j_tenv.fromTableSource(table_source._j_table_source)
    return Table(j_table, self)
def register_catalog(self, catalog_name: str, catalog: Catalog):
    """Register a :class:`~pyflink.table.catalog.Catalog` under a unique
    name, making all of its tables accessible.

    :param catalog_name: The name under which the catalog is registered.
    :param catalog: The catalog to register.
    """
    self._j_tenv.registerCatalog(catalog_name, catalog._j_catalog)
def get_catalog(self, catalog_name: str) -> Catalog:
    """Look up a registered :class:`~pyflink.table.catalog.Catalog` by
    name.

    :param catalog_name: The name of the catalog to look up.
    :return: The requested catalog, or None if no catalog is registered
             under the given name.
    """
    j_optional_catalog = self._j_tenv.getCatalog(catalog_name)
    if not j_optional_catalog.isPresent():
        return None
    return Catalog(j_optional_catalog.get())
def load_module(self, module_name: str, module: Module):
    """Load a :class:`~pyflink.table.Module` under a unique name. Modules
    are kept in the order in which they were loaded.

    A ValidationException is thrown when a module with the same name
    already exists.

    :param module_name: Name of the :class:`~pyflink.table.Module`.
    :param module: The module instance.

    .. versionadded:: 1.12.0
    """
    self._j_tenv.loadModule(module_name, module._j_module)
def unload_module(self, module_name: str):
    """Unload the :class:`~pyflink.table.Module` registered under the
    given name.

    A ValidationException is thrown when no module with the given name
    exists.

    :param module_name: Name of the :class:`~pyflink.table.Module`.

    .. versionadded:: 1.12.0
    """
    self._j_tenv.unloadModule(module_name)
def use_modules(self, *module_names: str):
    """Enable the :class:`~pyflink.table.Module` s with the given names.

    A ValidationException is thrown when a name is duplicated or no
    module with a given name exists.

    :param module_names: Names of the modules to be used.

    .. versionadded:: 1.13.0
    """
    j_names = to_jarray(get_gateway().jvm.String, module_names)
    self._j_tenv.useModules(j_names)
def create_java_temporary_system_function(self, name: str, function_class_name: str):
    """Register a Java user-defined function class as a temporary system
    function.

    In contrast to :func:`create_java_temporary_function`, system
    functions are identified by a global name that is independent of the
    current catalog and current database, so this method can extend the
    set of built-in system functions like TRIM, ABS, etc.

    Temporary functions can shadow permanent ones: while a temporary
    system function with the same name exists, the permanent one is
    inaccessible in the current session; dropping the temporary system
    function makes it available again.

    Example:
    ::

        >>> table_env.create_java_temporary_system_function("func",
        ...     "java.user.defined.function.class.name")

    :param name: The name under which the function will be registered
                 globally.
    :param function_class_name: The fully qualified Java class name of
                                the function implementation. The class
                                must have a public no-argument
                                constructor and be loadable from the
                                current Java classloader.

    .. versionadded:: 1.12.0
    """
    class_loader = get_gateway().jvm.Thread.currentThread() \
        .getContextClassLoader()
    j_function_class = class_loader.loadClass(function_class_name)
    self._j_tenv.createTemporarySystemFunction(name, j_function_class)
def create_temporary_system_function(self, name: str,
                                     function: Union[UserDefinedFunctionWrapper,
                                                     AggregateFunction]):
    """
    Registers a python user defined function class as a temporary system function.

    Compared to :func:`create_temporary_function`, system functions are identified by a
    global name that is independent of the current catalog and current database, so this
    method can extend the set of built-in functions like TRIM, ABS, etc.

    Temporary functions can shadow permanent ones. If a permanent function under a given
    name exists, it becomes inaccessible in the current session until the corresponding
    temporary system function is dropped again.

    Example:
    ::

        >>> table_env.create_temporary_system_function(
        ...     "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))

        >>> @udf(result_type=DataTypes.BIGINT())
        ... def add(i, j):
        ...     return i + j
        >>> table_env.create_temporary_system_function("add", add)

        >>> class SubtractOne(ScalarFunction):
        ...     def eval(self, i):
        ...         return i - 1
        >>> table_env.create_temporary_system_function(
        ...     "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))

    :param name: The name under which the function will be registered globally.
    :param function: The function class containing the implementation. The function must
                     have a public no-argument constructor and can be founded in current
                     Java classloader.

    .. versionadded:: 1.12.0
    """
    # Plain AggregateFunction instances are wrapped first so that a Java-side UDF
    # object can be extracted uniformly.
    wrapped = self._wrap_aggregate_function_if_needed(function)
    self._j_tenv.createTemporarySystemFunction(
        name, wrapped._java_user_defined_function())
def drop_temporary_system_function(self, name: str) -> bool:
    """
    Drops a temporary system function registered under the given name.

    If a permanent function with the given name exists, it will be used from now on
    for any queries that reference this name.

    :param name: The name under which the function has been registered globally.
    :return: true if a function existed under the given name and was removed.

    .. versionadded:: 1.12.0
    """
    # The Java call reports whether anything was actually dropped.
    return self._j_tenv.dropTemporarySystemFunction(name)
def create_java_function(self, path: str, function_class_name: str,
                         ignore_if_exists: bool = None):
    """
    Registers a java user defined function class as a catalog function in the given path.

    Compared to system functions with a globally defined name, catalog functions are
    always (implicitly or explicitly) identified by a catalog and database.

    There must not be another function (temporary or permanent) registered under the
    same path.

    Example:
    ::

        >>> table_env.create_java_function("func", "java.user.defined.function.class.name")

    :param path: The path under which the function will be registered.
                 See also the :class:`~pyflink.table.TableEnvironment` class description
                 for the format of the path.
    :param function_class_name: The java full qualified class name of the function class
                                containing the implementation. The function must have a
                                public no-argument constructor and can be founded in
                                current Java classloader.
    :param ignore_if_exists: If a function exists under the given path and this flag is
                             set, no operation is executed. An exception is thrown
                             otherwise.

    .. versionadded:: 1.12.0
    """
    loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
    j_class = loader.loadClass(function_class_name)
    # The Java API has a 2-arg and a 3-arg overload; only pass the flag when the
    # caller supplied one.
    if ignore_if_exists is None:
        self._j_tenv.createFunction(path, j_class)
    else:
        self._j_tenv.createFunction(path, j_class, ignore_if_exists)
def drop_function(self, path: str) -> bool:
    """
    Drops a catalog function registered in the given path.

    :param path: The path under which the function has been registered.
                 See also the :class:`~pyflink.table.TableEnvironment` class description
                 for the format of the path.
    :return: true if a function existed in the given path and was removed.

    .. versionadded:: 1.12.0
    """
    # Pure delegation; the Java side reports whether a function was removed.
    return self._j_tenv.dropFunction(path)
def create_java_temporary_function(self, path: str, function_class_name: str):
    """
    Registers a java user defined function class as a temporary catalog function.

    Compared to :func:`create_java_temporary_system_function` with a globally defined
    name, catalog functions are always (implicitly or explicitly) identified by a
    catalog and database.

    Temporary functions can shadow permanent ones. If a permanent function under a given
    name exists, it becomes inaccessible in the current session until the corresponding
    temporary function is dropped again.

    Example:
    ::

        >>> table_env.create_java_temporary_function("func",
        ...     "java.user.defined.function.class.name")

    :param path: The path under which the function will be registered.
                 See also the :class:`~pyflink.table.TableEnvironment` class description
                 for the format of the path.
    :param function_class_name: The java full qualified class name of the function class
                                containing the implementation. The function must have a
                                public no-argument constructor and can be founded in
                                current Java classloader.

    .. versionadded:: 1.12.0
    """
    # Resolve the class through the JVM context class loader and register the class
    # object itself.
    class_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
    self._j_tenv.createTemporaryFunction(
        path, class_loader.loadClass(function_class_name))
def create_temporary_function(self, path: str, function: Union[UserDefinedFunctionWrapper,
                                                               AggregateFunction]):
    """
    Registers a python user defined function class as a temporary catalog function.

    Compared to :func:`create_temporary_system_function` with a globally defined name,
    catalog functions are always (implicitly or explicitly) identified by a catalog and
    database.

    Temporary functions can shadow permanent ones. If a permanent function under a given
    name exists, it becomes inaccessible in the current session until the corresponding
    temporary function is dropped again.

    Example:
    ::

        >>> table_env.create_temporary_function(
        ...     "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))

        >>> @udf(result_type=DataTypes.BIGINT())
        ... def add(i, j):
        ...     return i + j
        >>> table_env.create_temporary_function("add", add)

        >>> class SubtractOne(ScalarFunction):
        ...     def eval(self, i):
        ...         return i - 1
        >>> table_env.create_temporary_function(
        ...     "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))

    :param path: The path under which the function will be registered.
                 See also the :class:`~pyflink.table.TableEnvironment` class description
                 for the format of the path.
    :param function: The function class containing the implementation. The function must
                     have a public no-argument constructor and can be founded in current
                     Java classloader.

    .. versionadded:: 1.12.0
    """
    # Wrap plain AggregateFunction instances so the Java UDF object can be extracted
    # uniformly, then register it under the catalog path.
    wrapped = self._wrap_aggregate_function_if_needed(function)
    self._j_tenv.createTemporaryFunction(path, wrapped._java_user_defined_function())
def drop_temporary_function(self, path: str) -> bool:
    """
    Drops a temporary catalog function registered in the given path.

    If a permanent function with the given path exists, it will be used from now on for
    any queries that reference this path.

    :param path: The path under which the function has been registered.
                 See also the :class:`~pyflink.table.TableEnvironment` class description
                 for the format of the path.
    :return: true if a function existed in the given path and was removed.

    .. versionadded:: 1.12.0
    """
    return self._j_tenv.dropTemporaryFunction(path)
def register_table(self, name: str, table: Table):
    """
    Registers a :class:`~pyflink.table.Table` under a unique name in the
    TableEnvironment's catalog so it can be referenced in SQL queries.

    Example:
    ::

        >>> tab = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
        >>> table_env.register_table("source", tab)

    :param name: The name under which the table will be registered.
    :param table: The table to register.

    .. note:: Deprecated in 1.10. Use :func:`create_temporary_view` instead.
    """
    warnings.warn("Deprecated in 1.10. Use create_temporary_view instead.", DeprecationWarning)
    # Hand the wrapped Java table to the Java TableEnvironment.
    j_table = table._j_table
    self._j_tenv.registerTable(name, j_table)
def register_table_source(self, name: str, table_source: TableSource):
    """
    Registers an external :class:`~pyflink.table.TableSource` in this
    :class:`~pyflink.table.TableEnvironment`'s catalog. Registered tables can be
    referenced in SQL queries.

    Example:
    ::

        >>> table_env.register_table_source("source",
        ...                                 CsvTableSource("./1.csv",
        ...                                                ["a", "b"],
        ...                                                [DataTypes.INT(),
        ...                                                 DataTypes.STRING()]))

    :param name: The name under which the table source is registered.
    :param table_source: The table source to register.

    .. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
    """
    # Fix: the warning previously recommended `connect` (itself deprecated in 1.11),
    # contradicting the docstring; point users at execute_sql instead.
    warnings.warn("Deprecated in 1.10. Use execute_sql instead.", DeprecationWarning)
    self._j_tenv.registerTableSourceInternal(name, table_source._j_table_source)
def register_table_sink(self, name: str, table_sink: TableSink):
    """
    Registers an external :class:`~pyflink.table.TableSink` with given field names and
    types in this :class:`~pyflink.table.TableEnvironment`'s catalog. Registered sink
    tables can be referenced in SQL DML statements.

    Example:
    ::

        >>> table_env.register_table_sink("sink",
        ...                               CsvTableSink(["a", "b"],
        ...                                            [DataTypes.INT(),
        ...                                             DataTypes.STRING()],
        ...                                            "./2.csv"))

    :param name: The name under which the table sink is registered.
    :param table_sink: The table sink to register.

    .. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
    """
    # Fix: the warning previously recommended `connect` (itself deprecated in 1.11),
    # contradicting the docstring; point users at execute_sql instead.
    warnings.warn("Deprecated in 1.10. Use execute_sql instead.", DeprecationWarning)
    self._j_tenv.registerTableSinkInternal(name, table_sink._j_table_sink)
def scan(self, *table_path: str) -> Table:
    """
    Scans a registered table and returns the resulting :class:`~pyflink.table.Table`.

    A table to scan must be registered in the TableEnvironment. It can be either
    directly registered or be an external member of a
    :class:`~pyflink.table.catalog.Catalog`. See the documentation of
    :func:`~pyflink.table.TableEnvironment.use_database` or
    :func:`~pyflink.table.TableEnvironment.use_catalog` for the rules on the path
    resolution.

    Examples:

    Scanning a directly registered table
    ::

        >>> tab = table_env.scan("tableName")

    Scanning a table from a registered catalog
    ::

        >>> tab = table_env.scan("catalogName", "dbName", "tableName")

    :param table_path: The path of the table to scan.
    :throws: Exception if no table is found using the given table path.
    :return: The resulting table.

    .. note:: Deprecated in 1.10. Use :func:`from_path` instead.
    """
    warnings.warn("Deprecated in 1.10. Use from_path instead.", DeprecationWarning)
    # Convert the Python varargs path into a Java String[] and wrap the resulting
    # Java table.
    j_paths = java_utils.to_jarray(get_gateway().jvm.String, table_path)
    return Table(self._j_tenv.scan(j_paths), self)
def from_path(self, path: str) -> Table:
    """
    Reads a registered table and returns the resulting :class:`~pyflink.table.Table`.

    A table to scan must be registered in the :class:`~pyflink.table.TableEnvironment`.
    See the documentation of :func:`use_database` or :func:`use_catalog` for the rules
    on the path resolution.

    Examples:

    Reading a table from default catalog and database.
    ::

        >>> tab = table_env.from_path("tableName")

    Reading a table from a registered catalog.
    ::

        >>> tab = table_env.from_path("catalogName.dbName.tableName")

    Reading a table from a registered catalog with escaping. (`Table` is a reserved
    keyword). Dots in e.g. a database name also must be escaped.
    ::

        >>> tab = table_env.from_path("catalogName.`db.Name`.`Table`")

    :param path: The path of a table API object to scan.
    :return: Either a table or virtual table (=view).

    .. seealso:: :func:`use_catalog`
    .. seealso:: :func:`use_database`
    .. versionadded:: 1.10.0
    """
    # "from" is a Python keyword, so the Java method has to be fetched reflectively.
    j_from = get_method(self._j_tenv, "from")
    return Table(j_from(path), self)
def insert_into(self, target_path: str, table: Table):
    """
    Instructs to write the content of a :class:`~pyflink.table.Table` API object into
    a table.

    See the documentation of :func:`use_database` or :func:`use_catalog` for the rules
    on the path resolution.

    Example:
    ::

        >>> tab = table_env.scan("tableName")
        >>> table_env.insert_into("sink", tab)

    :param target_path: The path of the registered :class:`~pyflink.table.TableSink` to
                        which the :class:`~pyflink.table.Table` is written.
    :param table: The Table to write to the sink.

    .. versionchanged:: 1.10.0
        The signature is changed, e.g. the parameter *table_path_continued* was removed
        and the parameter *target_path* is moved before the parameter *table*.

    .. note:: Deprecated in 1.11. Use :func:`execute_insert` for single sink,
              use :func:`create_statement_set` for multiple sinks.
    """
    warnings.warn("Deprecated in 1.11. Use Table#execute_insert for single sink,"
                  "use create_statement_set for multiple sinks.", DeprecationWarning)
    # Unwrap the Java table before delegating.
    j_table = table._j_table
    self._j_tenv.insertInto(target_path, j_table)
def list_catalogs(self) -> List[str]:
    """
    Gets the names of all catalogs registered in this environment.

    :return: List of catalog names.
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listCatalogs())
def list_modules(self) -> List[str]:
    """
    Gets the names of all modules used in this environment.

    :return: List of module names.

    .. versionadded:: 1.10.0
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listModules())
def list_full_modules(self) -> List[ModuleEntry]:
    """
    Gets the names and statuses of all modules loaded in this environment.

    :return: List of module names and use statuses.

    .. versionadded:: 1.13.0
    """
    # Wrap each Java ModuleEntry (name + used flag) in its Python counterpart.
    return [ModuleEntry(j_entry.name(), j_entry.used())
            for j_entry in self._j_tenv.listFullModules()]
def list_databases(self) -> List[str]:
    """
    Gets the names of all databases in the current catalog.

    :return: List of database names in the current catalog.
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listDatabases())
def list_tables(self) -> List[str]:
    """
    Gets the names of all tables and views in the current database of the current
    catalog. It returns both temporary and permanent tables and views.

    :return: List of table and view names in the current database of the current
             catalog.
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listTables())
def list_views(self) -> List[str]:
    """
    Gets the names of all views in the current database of the current catalog.
    It returns both temporary and permanent views.

    :return: List of view names in the current database of the current catalog.

    .. versionadded:: 1.11.0
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listViews())
def list_user_defined_functions(self) -> List[str]:
    """
    Gets the names of all user defined functions registered in this environment.

    :return: List of the names of all user defined functions registered in this
             environment.
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listUserDefinedFunctions())
def list_functions(self) -> List[str]:
    """
    Gets the names of all functions in this environment.

    :return: List of the names of all functions in this environment.

    .. versionadded:: 1.10.0
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listFunctions())
def list_temporary_tables(self) -> List[str]:
    """
    Gets the names of all temporary tables and views available in the current
    namespace (the current database of the current catalog).

    :return: A list of the names of all registered temporary tables and views in the
             current database of the current catalog.

    .. seealso:: :func:`list_tables`
    .. versionadded:: 1.10.0
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listTemporaryTables())
def list_temporary_views(self) -> List[str]:
    """
    Gets the names of all temporary views available in the current namespace (the
    current database of the current catalog).

    :return: A list of the names of all registered temporary views in the current
             database of the current catalog.

    .. seealso:: :func:`list_tables`
    .. versionadded:: 1.10.0
    """
    # Materialize the Java array as a Python list.
    return list(self._j_tenv.listTemporaryViews())
def drop_temporary_table(self, table_path: str) -> bool:
    """
    Drops a temporary table registered in the given path.

    If a permanent table with a given path exists, it will be used from now on for any
    queries that reference this path.

    :param table_path: The path of the registered temporary table.
    :return: True if a table existed in the given path and was removed.

    .. versionadded:: 1.10.0
    """
    # The Java call reports whether anything was actually dropped.
    return self._j_tenv.dropTemporaryTable(table_path)
def drop_temporary_view(self, view_path: str) -> bool:
    """
    Drops a temporary view registered in the given path.

    If a permanent table or view with a given path exists, it will be used from now on
    for any queries that reference this path.

    :param view_path: The path of the registered temporary view.
    :return: True if a view existed in the given path and was removed.

    .. versionadded:: 1.10.0
    """
    # The Java call reports whether anything was actually dropped.
    return self._j_tenv.dropTemporaryView(view_path)
def explain(self, table: Table = None, extended: bool = False) -> str:
    """
    Returns the AST of the specified Table API and SQL queries and the execution plan
    to compute the result of the given :class:`~pyflink.table.Table` or multi-sinks
    plan.

    :param table: The table to be explained. If table is None, explain for multi-sinks
                  plan, else for given table.
    :param extended: If the plan should contain additional properties, e.g. estimated
                     cost, traits.
    :return: The explanation string (AST and execution plan).

    .. note:: Deprecated in 1.11. Use :class:`Table`#:func:`explain` instead.
    """
    warnings.warn("Deprecated in 1.11. Use Table#explain instead.", DeprecationWarning)
    # With a table, explain that table's plan; otherwise explain the multi-sinks plan.
    if table is not None:
        return self._j_tenv.explain(table._j_table, extended)
    return self._j_tenv.explain(extended)
def explain_sql(self, stmt: str, *extra_details: ExplainDetail) -> str:
    """
    Returns the AST of the specified statement and the execution plan.

    :param stmt: The statement for which the AST and execution plan will be returned.
    :param extra_details: The extra explain details which the explain result should
                          include, e.g. estimated cost, changelog mode for streaming.
    :return: The explanation string (AST and execution plan).

    .. versionadded:: 1.11.0
    """
    # Convert the Python explain details to a Java array before delegating.
    return self._j_tenv.explainSql(stmt, to_j_explain_detail_arr(extra_details))
def sql_query(self, query: str) -> Table:
    """
    Evaluates a SQL query on registered tables and retrieves the result as a
    :class:`~pyflink.table.Table`.

    All tables referenced by the query must be registered in the TableEnvironment.

    A :class:`~pyflink.table.Table` is automatically registered when its
    :func:`~Table.__str__` method is called, for example when it is embedded into a
    String. Hence, SQL queries can directly reference a :class:`~pyflink.table.Table`
    as follows:
    ::

        >>> table = ...
        # the table is not registered to the table environment
        >>> table_env.sql_query("SELECT * FROM %s" % table)

    :param query: The sql query string.
    :return: The result table.
    """
    # Delegate to the Java planner and wrap the resulting Java table.
    return Table(self._j_tenv.sqlQuery(query), self)
def execute_sql(self, stmt: str) -> TableResult:
    """
    Execute the given single statement, and return the execution result.

    The statement can be DDL/DML/DQL/SHOW/DESCRIBE/EXPLAIN/USE. For DML and DQL, this
    method returns TableResult once the job has been submitted. For DDL and DCL
    statements, TableResult is returned once the operation has finished.

    :param stmt: The SQL statement to execute.
    :return: content for DQL/SHOW/DESCRIBE/EXPLAIN,
             the affected row count for `DML` (-1 means unknown),
             or a string message ("OK") for other statements.

    .. versionadded:: 1.11.0
    """
    # Run the pre-execution hook (e.g. job configuration) before submitting.
    self._before_execute()
    j_result = self._j_tenv.executeSql(stmt)
    return TableResult(j_result)
def create_statement_set(self) -> StatementSet:
    """
    Create a StatementSet instance which accepts DML statements or Tables. The planner
    can optimize all added statements and Tables together and then submit them as one
    job.

    :return: A statement_set instance.

    .. versionadded:: 1.11.0
    """
    # Wrap the Java StatementSet together with this environment.
    return StatementSet(self._j_tenv.createStatementSet(), self)
def sql_update(self, stmt: str):
    """
    Evaluates a SQL statement such as INSERT, UPDATE or DELETE or a DDL statement.

    .. note::
        Currently only SQL INSERT statements and CREATE TABLE statements are supported.

    All tables referenced by the query must be registered in the TableEnvironment.
    A :class:`~pyflink.table.Table` is automatically registered when its
    :func:`~Table.__str__` method is called, for example when it is embedded into a
    String. Hence, SQL queries can directly reference a
    :class:`~pyflink.table.Table`:
    ::

        # register the table sink into which the result is inserted.
        >>> table_env.register_table_sink("sink_table", table_sink)
        >>> source_table = ...
        # source_table is not registered to the table environment
        >>> table_env.sql_update("INSERT INTO sink_table SELECT * FROM %s" % source_table)

    A DDL statement can also be executed to create/drop a table, e.g. a
    ``create table tbl1(...) with ('connector.type' = 'filesystem', ...)`` statement
    creates a table in the current catalog. Source and sink DDL plus an INSERT query
    can be chained and then submitted with ``table_env.execute("MyJob")``.

    :param stmt: The SQL statement to evaluate.

    .. note:: Deprecated in 1.11. Use :func:`execute_sql` for single statement,
              use :func:`create_statement_set` for multiple DML statements.
    """
    warnings.warn("Deprecated in 1.11. Use execute_sql for single statement, "
                  "use create_statement_set for multiple DML statements.", DeprecationWarning)
    # Delegate to the Java TableEnvironment.
    self._j_tenv.sqlUpdate(stmt)
def get_current_catalog(self) -> str:
    """
    Gets the current default catalog name of the current session.

    :return: The current default catalog name that is used for the path resolution.

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
    """
    # Pure delegation to the Java TableEnvironment.
    return self._j_tenv.getCurrentCatalog()
def use_catalog(self, catalog_name: str):
    """
    Sets the current catalog to the given value. It also sets the default database to
    the catalog's default one. See also :func:`~TableEnvironment.use_database`.

    This is used during the resolution of object paths. Both the catalog and database
    are optional when referencing catalog objects such as tables, views etc. The
    algorithm looks for requested objects in following paths in that order:

    * ``[current-catalog].[current-database].[requested-path]``
    * ``[current-catalog].[requested-path]``
    * ``[requested-path]``

    Example:

    Given structure with default catalog set to ``default_catalog`` and default
    database set to ``default_database``, a path ``tab1`` resolves to
    ``default_catalog.default_database.tab1``, ``db1.tab1`` to
    ``default_catalog.db1.tab1``, and ``cat1.db1.tab1`` to itself.

    :param catalog_name: The name of the catalog to set as the current default catalog.
    :throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if a catalog
             with given name could not be set as the default one.

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
    """
    # Pure delegation to the Java TableEnvironment.
    self._j_tenv.useCatalog(catalog_name)
def get_current_database(self) -> str:
    """
    Gets the current default database name of the running session.

    :return: The name of the current database of the current catalog.

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
    """
    # Pure delegation to the Java TableEnvironment.
    return self._j_tenv.getCurrentDatabase()
def use_database(self, database_name: str):
    """
    Sets the current default database. It has to exist in the current catalog. That
    path will be used as the default one when looking for unqualified object names.

    This is used during the resolution of object paths. Both the catalog and database
    are optional when referencing catalog objects such as tables, views etc. The
    algorithm looks for requested objects in following paths in that order:

    * ``[current-catalog].[current-database].[requested-path]``
    * ``[current-catalog].[requested-path]``
    * ``[requested-path]``

    Example:

    Given structure with default catalog set to ``default_catalog`` and default
    database set to ``default_database``, a path ``tab1`` resolves to
    ``default_catalog.default_database.tab1``, ``db1.tab1`` to
    ``default_catalog.db1.tab1``, and ``cat1.db1.tab1`` to itself.

    :param database_name: The name of the database to set as the current database.
    :throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if the given
             catalog and database could not be set as the default ones.

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
    """
    # Pure delegation to the Java TableEnvironment.
    self._j_tenv.useDatabase(database_name)
def get_config(self) -> TableConfig:
    """
    Returns the table config to define the runtime behavior of the Table API.

    The wrapper is created lazily on first access and cached on the instance.

    :return: Current table config.
    """
    # EAFP: reuse the cached wrapper if it exists, otherwise build and cache it.
    try:
        return self.table_config
    except AttributeError:
        config = TableConfig()
        config._j_table_config = self._j_tenv.getConfig()
        self.table_config = config
        return config
def connect(self, connector_descriptor: ConnectorDescriptor) -> ConnectTableDescriptor:
    """
    Creates a temporary table from a descriptor.

    Descriptors allow for declaring the communication to external systems in an
    implementation-agnostic way. The classpath is scanned for suitable table factories
    that match the desired configuration.

    The following example shows how to read from a connector using a JSON format and
    registering a temporary table as "MyTable":
    ::

        >>> table_env \\
        ...     .connect(ExternalSystemXYZ()
        ...              .version("0.11")) \\
        ...     .with_format(Json()
        ...                  .json_schema("{...}")
        ...                  .fail_on_missing_field(False)) \\
        ...     .with_schema(Schema()
        ...                  .field("user-name", "VARCHAR")
        ...                  .from_origin_field("u_name")
        ...                  .field("count", "DECIMAL")) \\
        ...     .create_temporary_table("MyTable")

    :param connector_descriptor: Connector descriptor describing the external system.
    :return: A :class:`~pyflink.table.descriptors.ConnectTableDescriptor` used to build
             the temporary table.

    .. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
    """
    warnings.warn("Deprecated in 1.11. Use execute_sql instead.", DeprecationWarning)
    # Unwrap the descriptor, delegate, and wrap the returned Java descriptor.
    j_descriptor = self._j_tenv.connect(connector_descriptor._j_connector_descriptor)
    return StreamTableDescriptor(j_descriptor)
def register_java_function(self, name: str, function_class_name: str):
    """
    Registers a java user defined function under a unique name. Replaces already
    existing user-defined functions under this name. The acceptable function type
    contains **ScalarFunction**, **TableFunction** and **AggregateFunction**.

    Example:
    ::

        >>> table_env.register_java_function("func1", "java.user.defined.function.class.name")

    :param name: The name under which the function is registered.
    :param function_class_name: The java full qualified class name of the function to
                                register. The function must have a public no-argument
                                constructor and can be founded in current Java
                                classloader.

    .. note:: Deprecated in 1.12. Use :func:`create_java_temporary_system_function` instead.
    """
    warnings.warn("Deprecated in 1.12. Use :func:`create_java_temporary_system_function` "
                  "instead.", DeprecationWarning)
    gateway = get_gateway()
    # Unlike the create_*_function methods, this instantiates the class eagerly
    # (newInstance()) and registers the instance, not the class object.
    java_function = gateway.jvm.Thread.currentThread().getContextClassLoader()\
        .loadClass(function_class_name).newInstance()
    # this is a temporary solution and will be unified later when we use the new type
    # system(DataType) to replace the old type system(TypeInformation).
    # NOTE(review): this planner check uses `not isinstance(self, StreamTableEnvironment)`
    # while register_function tests `isinstance(self, BatchTableEnvironment)` —
    # confirm the asymmetry between the two methods is intended.
    if (self._is_blink_planner and not isinstance(self, StreamTableEnvironment)) or \
            self.__class__ == TableEnvironment:
        # Table and aggregate functions need dedicated registration paths on the
        # blink batch planner / base environment.
        if self._is_table_function(java_function):
            self._register_table_function(name, java_function)
        elif self._is_aggregate_function(java_function):
            self._register_aggregate_function(name, java_function)
        else:
            self._j_tenv.registerFunction(name, java_function)
    else:
        self._j_tenv.registerFunction(name, java_function)
def register_function(self, name: str, function: UserDefinedFunctionWrapper):
    """
    Registers a python user-defined function under a unique name. Replaces already
    existing user-defined function under this name.

    Example:
    ::

        >>> table_env.register_function(
        ...     "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))

        >>> @udf(result_type=DataTypes.BIGINT())
        ... def add(i, j):
        ...     return i + j
        >>> table_env.register_function("add", add)

        >>> class SubtractOne(ScalarFunction):
        ...     def eval(self, i):
        ...         return i - 1
        >>> table_env.register_function(
        ...     "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))

    :param name: The name under which the function is registered.
    :param function: The python user-defined function to register.

    .. versionadded:: 1.10.0
    .. note:: Deprecated in 1.12. Use :func:`create_temporary_system_function` instead.
    """
    warnings.warn("Deprecated in 1.12. Use :func:`create_temporary_system_function` "
                  "instead.", DeprecationWarning)
    # Plain AggregateFunction instances are wrapped so a Java UDF object can be
    # extracted uniformly.
    function = self._wrap_aggregate_function_if_needed(function)
    java_function = function._java_user_defined_function()
    # this is a temporary solution and will be unified later when we use the new type
    # system(DataType) to replace the old type system(TypeInformation).
    # NOTE(review): this planner check uses `isinstance(self, BatchTableEnvironment)`
    # while register_java_function tests `not isinstance(self, StreamTableEnvironment)` —
    # confirm the asymmetry between the two methods is intended.
    if (self._is_blink_planner and isinstance(self, BatchTableEnvironment)) or \
            self.__class__ == TableEnvironment:
        # Table and aggregate functions need dedicated registration paths on the
        # blink batch planner / base environment.
        if self._is_table_function(java_function):
            self._register_table_function(name, java_function)
        elif self._is_aggregate_function(java_function):
            self._register_aggregate_function(name, java_function)
        else:
            self._j_tenv.registerFunction(name, java_function)
    else:
        self._j_tenv.registerFunction(name, java_function)
def create_temporary_view(self, view_path: str, table: Table):
    """
    Registers a :class:`~pyflink.table.Table` API object as a temporary view similar
    to SQL temporary views.

    Temporary objects can shadow permanent ones. If a permanent object in a given path
    exists, it will be inaccessible in the current session. To make the permanent
    object available again you can drop the corresponding temporary object.

    :param view_path: The path under which the view will be registered. See also the
                      :class:`~pyflink.table.TableEnvironment` class description for
                      the format of the path.
    :param table: The view to register.

    .. versionadded:: 1.10.0
    """
    # Hand the wrapped Java table to the Java TableEnvironment.
    j_table = table._j_table
    self._j_tenv.createTemporaryView(view_path, j_table)
def add_python_file(self, file_path: str):
    """
    Adds a python dependency which could be python files, python packages or local
    directories. They will be added to the PYTHONPATH of the python UDF worker.
    Please make sure that these dependencies can be imported.

    :param file_path: The path of the python dependency.

    .. versionadded:: 1.10.0
    """
    jvm = get_gateway().jvm
    key = jvm.PythonOptions.PYTHON_FILES.key()
    configuration = self.get_config().get_configuration()
    existing = configuration.get_string(key, None)
    # Prepend the new path to any previously configured python files; the delimiter
    # is defined on the Java side.
    if existing is None:
        merged = file_path
    else:
        merged = jvm.PythonDependencyUtils.FILE_DELIMITER.join([file_path, existing])
    configuration.set_string(key, merged)
def set_python_requirements(self,
requirements_file_path: str,
requirements_cache_dir: str = None):
"""
Specifies a requirements.txt file which defines the third-party dependencies.
These dependencies will be installed to a temporary directory and added to the
PYTHONPATH of the python UDF worker.
For the dependencies which could not be accessed in the cluster, a directory which contains
the installation packages of these dependencies could be specified using the parameter
"requirements_cached_dir". It will be uploaded to the cluster to support offline
installation.
Example:
::
# commands executed in shell
$ echo numpy==1.16.5 > requirements.txt
$ pip download -d cached_dir -r requirements.txt --no-binary :all:
# python code
>>> table_env.set_python_requirements("requirements.txt", "cached_dir")
.. note::
Please make sure the installation packages matches the platform of the cluster
and the python version used. These packages will be installed using pip,
so also make sure the version of Pip (version >= 7.1.0) and the version of
SetupTools (version >= 37.0.0).
:param requirements_file_path: The path of "requirements.txt" file.
:param requirements_cache_dir: The path of the local directory which contains the
installation packages.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
python_requirements = requirements_file_path
if requirements_cache_dir is not None:
python_requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[python_requirements, requirements_cache_dir])
self.get_config().get_configuration().set_string(
jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), python_requirements)
def add_python_archive(self, archive_path: str, target_dir: str = None):
"""
Adds a python archive file. The file will be extracted to the working directory of
python UDF worker.
If the parameter "target_dir" is specified, the archive file will be extracted to a
directory named ${target_dir}. Otherwise, the archive file will be extracted to a
directory with the same name of the archive file.
If python UDF depends on a specific python version which does not exist in the cluster,
this method can be used to upload the virtual environment.
Note that the path of the python interpreter contained in the uploaded environment
should be specified via the method :func:`pyflink.table.TableConfig.set_python_executable`.
The files uploaded via this method are also accessible in UDFs via relative path.
Example:
::
# command executed in shell
# assert the relative path of python interpreter is py_env/bin/python
$ zip -r py_env.zip py_env
# python code
>>> table_env.add_python_archive("py_env.zip")
>>> table_env.get_config().set_python_executable("py_env.zip/py_env/bin/python")
# or
>>> table_env.add_python_archive("py_env.zip", "myenv")
>>> table_env.get_config().set_python_executable("myenv/py_env/bin/python")
# the files contained in the archive file can be accessed in UDF
>>> def my_udf():
... with open("myenv/py_env/data/data.txt") as f:
... ...
.. note::
Please make sure the uploaded python environment matches the platform that the cluster
is running on and that the python version must be 3.5 or higher.
.. note::
Currently only zip-format is supported. i.e. zip, jar, whl, egg, etc.
The other archive formats such as tar, tar.gz, 7z, rar, etc are not supported.
:param archive_path: The archive file path.
:param target_dir: Optional, the target dir name that the archive file extracted to.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
if target_dir is not None:
archive_path = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[archive_path, target_dir])
python_archives = self.get_config().get_configuration().get_string(
jvm.PythonOptions.PYTHON_ARCHIVES.key(), None)
if python_archives is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
[python_archives, archive_path])
else:
python_files = archive_path
self.get_config().get_configuration().set_string(
jvm.PythonOptions.PYTHON_ARCHIVES.key(), python_files)
def execute(self, job_name: str) -> JobExecutionResult:
"""
Triggers the program execution. The environment will execute all parts of
the program.
The program execution will be logged and displayed with the provided name.
.. note::
It is highly advised to set all parameters in the :class:`~pyflink.table.TableConfig`
on the very beginning of the program. It is undefined what configurations values will
be used for the execution if queries are mixed with config changes. It depends on
the characteristic of the particular parameter. For some of them the value from the
point in time of query construction (e.g. the current catalog) will be used. On the
other hand some values might be evaluated according to the state from the time when
this method is called (e.g. timezone).
:param job_name: Desired name of the job.
:return: The result of the job execution, containing elapsed time and accumulators.
.. note:: Deprecated in 1.11. Use :func:`execute_sql` for single sink,
use :func:`create_statement_set` for multiple sinks.
"""
warnings.warn("Deprecated in 1.11. Use execute_sql for single sink, "
"use create_statement_set for multiple sinks.", DeprecationWarning)
self._before_execute()
return JobExecutionResult(self._j_tenv.execute(job_name))
    def from_elements(self, elements: Iterable, schema: Union[DataType, List[str]] = None,
                      verify_schema: bool = True) -> Table:
        """
        Creates a table from a collection of elements.

        The elements types must be acceptable atomic types or acceptable composite
        types. All elements must be of the same type.

        If the elements types are composite types, the composite types must be strictly
        equal, and its subtypes must also be acceptable types. e.g. if the elements are
        tuples, the length of the tuples must be equal, the element types of the tuples
        must be equal in order.

        The built-in acceptable atomic element types contains:

        **int**, **long**, **str**, **unicode**, **bool**,
        **float**, **bytearray**, **datetime.date**, **datetime.time**,
        **datetime.datetime**, **datetime.timedelta**, **decimal.Decimal**

        The built-in acceptable composite element types contains:

        **list**, **tuple**, **dict**, **array**, :class:`~pyflink.table.Row`

        If the element type is a composite type, it will be unboxed.
        e.g. table_env.from_elements([(1, 'Hi'), (2, 'Hello')]) will return a table like:

        +----+-------+
        | _1 | _2    |
        +====+=======+
        | 1  | Hi    |
        +----+-------+
        | 2  | Hello |
        +----+-------+

        "_1" and "_2" are generated field names.

        Example:
        ::

            # use the second parameter to specify custom field names
            >>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
            # use the second parameter to specify custom table schema
            >>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
            ...                         DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
            ...                                        DataTypes.FIELD("b", DataTypes.STRING())]))
            # use the third parameter to switch whether to verify the elements against the schema
            >>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
            ...                         DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
            ...                                        DataTypes.FIELD("b", DataTypes.STRING())]),
            ...                         False)
            # create Table from expressions
            >>> table_env.from_elements([row(1, 'abc', 2.0), row(2, 'def', 3.0)],
            ...                         DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
            ...                                        DataTypes.FIELD("b", DataTypes.STRING()),
            ...                                        DataTypes.FIELD("c", DataTypes.FLOAT())]))

        :param elements: The elements to create a table from.
        :param schema: The schema of the table.
        :param verify_schema: Whether to verify the elements against the schema.
        :return: The result table.
        """
        # Build a per-element verifier depending on the kind of schema supplied:
        # a RowType is used as-is; any other DataType is wrapped into a single-field
        # row named "value"; with verify_schema=False the verifier is a no-op.
        if isinstance(schema, RowType):
            verify_func = _create_type_verifier(schema) if verify_schema else lambda _: True

            def verify_obj(obj):
                verify_func(obj)
                return obj
        elif isinstance(schema, DataType):
            data_type = schema
            schema = RowType().add("value", schema)

            verify_func = _create_type_verifier(
                data_type, name="field value") if verify_schema else lambda _: True

            def verify_obj(obj):
                verify_func(obj)
                return obj
        else:
            # No DataType schema (None or a list/tuple of names): nothing to verify.
            def verify_obj(obj):
                return obj

        # Infer the schema from the data when only field names (or nothing) were given.
        if schema is None or isinstance(schema, (list, tuple)):
            schema = _infer_schema_from_data(elements, names=schema)
            converter = _create_converter(schema)
            elements = map(converter, elements)
        elif not isinstance(schema, RowType):
            raise TypeError(
                "schema should be RowType, list, tuple or None, but got: %s" % schema)

        # Materialize so the Expression checks below can scan the elements twice.
        elements = list(elements)

        # Fast path: if every element is an Expression, build the table directly via
        # fromValues instead of the serialization round-trip in _from_elements.
        if len(elements) > 0 and all(isinstance(elem, Expression) for elem in elements):
            if schema is None:
                return Table(self._j_tenv.fromValues(to_expression_jarray(elements)), self)
            else:
                return Table(self._j_tenv.fromValues(_to_java_data_type(schema),
                                                     to_expression_jarray(elements)),
                             self)
        elif any(isinstance(elem, Expression) for elem in elements):
            # Mixing Expression and plain elements is ambiguous and rejected.
            raise ValueError("It doesn't support part of the elements are Expression, while the "
                             "others are not.")

        # verifies the elements against the specified schema
        elements = map(verify_obj, elements)
        # converts python data to sql data
        elements = [schema.to_sql_type(element) for element in elements]
        return self._from_elements(elements, schema)
    def _from_elements(self, elements: List, schema: Union[DataType, List[str]]) -> Table:
        """
        Creates a table from a collection of elements.

        The elements are serialized to a temporary file on the Python side, which the
        Java side then reads back as an input-format table source.

        :param elements: The elements to create a table from.
        :param schema: The schema describing the elements.
        :return: The result :class:`~pyflink.table.Table`.
        """
        # serializes to a file, and we read the file in java
        temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
        serializer = BatchedSerializer(self._serializer)
        try:
            with temp_file:
                serializer.serialize(elements, temp_file)
            row_type_info = _to_java_type(schema)
            execution_config = self._get_j_env().getConfig()
            gateway = get_gateway()
            j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, True)
            # The blink planner ships its own copies of the bridge utility classes.
            if self._is_blink_planner:
                PythonTableUtils = gateway.jvm \
                    .org.apache.flink.table.planner.utils.python.PythonTableUtils
                PythonInputFormatTableSource = gateway.jvm \
                    .org.apache.flink.table.planner.utils.python.PythonInputFormatTableSource
            else:
                PythonTableUtils = gateway.jvm.PythonTableUtils
                PythonInputFormatTableSource = gateway.jvm.PythonInputFormatTableSource
            j_input_format = PythonTableUtils.getInputFormat(
                j_objs, row_type_info, execution_config)
            j_table_source = PythonInputFormatTableSource(
                j_input_format, row_type_info)

            return Table(self._j_tenv.fromTableSource(j_table_source), self)
        finally:
            # The Java side has already read the data by the time fromTableSource
            # returns, so the temp file can be removed unconditionally.
            os.unlink(temp_file.name)
    def from_pandas(self, pdf,
                    schema: Union[RowType, List[str], Tuple[str], List[DataType],
                                  Tuple[DataType]] = None,
                    splits_num: int = 1) -> Table:
        """
        Creates a table from a pandas DataFrame.

        Example:
        ::

            >>> pdf = pd.DataFrame(np.random.rand(1000, 2))
            # use the second parameter to specify custom field names
            >>> table_env.from_pandas(pdf, ["a", "b"])
            # use the second parameter to specify custom field types
            >>> table_env.from_pandas(pdf, [DataTypes.DOUBLE(), DataTypes.DOUBLE()]))
            # use the second parameter to specify custom table schema
            >>> table_env.from_pandas(pdf,
            ...                       DataTypes.ROW([DataTypes.FIELD("a", DataTypes.DOUBLE()),
            ...                                      DataTypes.FIELD("b", DataTypes.DOUBLE())]))

        :param pdf: The pandas DataFrame.
        :param schema: The schema of the converted table.
        :param splits_num: The number of splits the given Pandas DataFrame will be
                           split into. It determines the number of parallel source
                           tasks. If not specified, the default parallelism will be
                           used.
        :return: The result table.

        .. versionadded:: 1.11.0
        """
        # The old planner's batch mode has no Arrow source support.
        if not self._is_blink_planner and isinstance(self, BatchTableEnvironment):
            raise TypeError("It doesn't support to convert from Pandas DataFrame in the batch "
                            "mode of old planner")

        import pandas as pd
        if not isinstance(pdf, pd.DataFrame):
            raise TypeError("Unsupported type, expected pandas.DataFrame, got %s" % type(pdf))

        import pyarrow as pa
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)

        # Derive the Flink row type: either directly from the given schema, by pairing
        # user-supplied names with Arrow-inferred types, or by pairing Arrow-inferred
        # names with user-supplied types; with no schema, take everything from Arrow.
        if schema is not None:
            if isinstance(schema, RowType):
                result_type = schema
            elif isinstance(schema, (list, tuple)) and isinstance(schema[0], str):
                result_type = RowType(
                    [RowField(field_name, from_arrow_type(field.type, field.nullable))
                     for field_name, field in zip(schema, arrow_schema)])
            elif isinstance(schema, (list, tuple)) and isinstance(schema[0], DataType):
                result_type = RowType(
                    [RowField(field_name, field_type) for field_name, field_type in zip(
                        arrow_schema.names, schema)])
            else:
                raise TypeError("Unsupported schema type, it could only be of RowType, a "
                                "list of str or a list of DataType, got %s" % schema)
        else:
            result_type = RowType([RowField(field.name, from_arrow_type(field.type, field.nullable))
                                   for field in arrow_schema])

        # serializes to a file, and we read the file in java
        temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
        import pytz
        serializer = ArrowSerializer(
            create_arrow_schema(result_type.field_names(), result_type.field_types()),
            result_type,
            pytz.timezone(self.get_config().get_local_timezone()))
        # Ceiling division: each split gets at most `step` rows.
        step = -(-len(pdf) // splits_num)
        pdf_slices = [pdf.iloc[start:start + step] for start in range(0, len(pdf), step)]
        # NOTE(review): DataFrame.iteritems() was removed in pandas 2.0 — confirm the
        # supported pandas version range for this code path.
        data = [[c for (_, c) in pdf_slice.iteritems()] for pdf_slice in pdf_slices]
        try:
            with temp_file:
                serializer.serialize(data, temp_file)
            jvm = get_gateway().jvm

            data_type = jvm.org.apache.flink.table.types.utils.TypeConversions\
                .fromLegacyInfoToDataType(_to_java_type(result_type)).notNull()
            if self._is_blink_planner:
                # The blink planner operates on the internal RowData representation.
                data_type = data_type.bridgedTo(
                    load_java_class('org.apache.flink.table.data.RowData'))

            j_arrow_table_source = \
                jvm.org.apache.flink.table.runtime.arrow.ArrowUtils.createArrowTableSource(
                    data_type, temp_file.name)
            return Table(self._j_tenv.fromTableSource(j_arrow_table_source), self)
        finally:
            os.unlink(temp_file.name)
def _set_python_executable_for_local_executor(self):
jvm = get_gateway().jvm
j_config = get_j_env_configuration(self._get_j_env())
if not j_config.containsKey(jvm.PythonOptions.PYTHON_EXECUTABLE.key()) \
and is_local_deployment(j_config):
j_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), sys.executable)
def _add_jars_to_j_env_config(self, config_key):
jvm = get_gateway().jvm
jar_urls = self.get_config().get_configuration().get_string(config_key, None)
if jar_urls is not None:
# normalize and remove duplicates
jar_urls_set = set([jvm.java.net.URL(url).toString() for url in jar_urls.split(";")])
j_configuration = get_j_env_configuration(self._get_j_env())
if j_configuration.containsKey(config_key):
for url in j_configuration.getString(config_key, "").split(";"):
jar_urls_set.add(url)
j_configuration.setString(config_key, ";".join(jar_urls_set))
def _get_j_env(self):
if self._is_blink_planner:
return self._j_tenv.getPlanner().getExecEnv()
else:
try:
return self._j_tenv.execEnv()
except:
return self._j_tenv.getPlanner().getExecutionEnvironment()
@staticmethod
def _is_table_function(java_function):
java_function_class = java_function.getClass()
j_table_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.TableFunction)
return j_table_function_class.isAssignableFrom(java_function_class)
@staticmethod
def _is_aggregate_function(java_function):
java_function_class = java_function.getClass()
j_aggregate_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.ImperativeAggregateFunction)
return j_aggregate_function_class.isAssignableFrom(java_function_class)
def _register_table_function(self, name, table_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfTableFunction(table_function)
function_catalog.registerTempSystemTableFunction(name, table_function, result_type)
def _register_aggregate_function(self, name, aggregate_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfAggregateFunction(aggregate_function)
acc_type = helper.getAccumulatorTypeOfAggregateFunction(aggregate_function)
function_catalog.registerTempSystemAggregateFunction(
name, aggregate_function, result_type, acc_type)
def _get_function_catalog(self):
function_catalog_field = self._j_tenv.getClass().getDeclaredField("functionCatalog")
function_catalog_field.setAccessible(True)
function_catalog = function_catalog_field.get(self._j_tenv)
return function_catalog
def _before_execute(self):
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
self._add_jars_to_j_env_config(jars_key)
self._add_jars_to_j_env_config(classpaths_key)
def _wrap_aggregate_function_if_needed(self, function) -> UserDefinedFunctionWrapper:
if isinstance(function, (AggregateFunction, TableAggregateFunction,
UserDefinedAggregateFunctionWrapper)):
if not self._is_blink_planner:
raise Exception("Python UDAF and UDTAF are only supported in blink planner")
if isinstance(function, AggregateFunction):
function = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
elif isinstance(function, TableAggregateFunction):
function = udtaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
return function
class StreamTableEnvironment(TableEnvironment):
    """A :class:`TableEnvironment` bound to a streaming
    :class:`~pyflink.datastream.StreamExecutionEnvironment`, adding conversions
    between DataStream and Table."""

    def __init__(self, j_tenv):
        super(StreamTableEnvironment, self).__init__(j_tenv)
        # Keep a direct handle to the Java StreamTableEnvironment for the
        # stream-specific methods below.
        self._j_tenv = j_tenv

    @staticmethod
    def create(stream_execution_environment: StreamExecutionEnvironment = None,  # type: ignore
               table_config: TableConfig = None,
               environment_settings: EnvironmentSettings = None) -> 'StreamTableEnvironment':
        """
        Creates a :class:`~pyflink.table.StreamTableEnvironment`.

        Example:
        ::

            # create with StreamExecutionEnvironment.
            >>> env = StreamExecutionEnvironment.get_execution_environment()
            >>> table_env = StreamTableEnvironment.create(env)
            # create with StreamExecutionEnvironment and TableConfig.
            >>> table_config = TableConfig()
            >>> table_config.set_null_check(False)
            >>> table_env = StreamTableEnvironment.create(env, table_config)
            # create with StreamExecutionEnvironment and EnvironmentSettings.
            >>> environment_settings = EnvironmentSettings.new_instance().use_blink_planner() \\
            ...     .build()
            >>> table_env = StreamTableEnvironment.create(
            ...     env, environment_settings=environment_settings)
            # create with EnvironmentSettings.
            >>> table_env = StreamTableEnvironment.create(environment_settings=environment_settings)


        :param stream_execution_environment: The
                                             :class:`~pyflink.datastream.StreamExecutionEnvironment`
                                             of the TableEnvironment.
        :param table_config: The configuration of the TableEnvironment, optional.
        :param environment_settings: The environment settings used to instantiate the
                                     TableEnvironment. It provides the interfaces about planner
                                     selection(flink or blink), optional.
        :return: The StreamTableEnvironment created from given StreamExecutionEnvironment and
                 configuration.
        """
        # Validate the argument combinations: at least one of the env/settings must be
        # given, a bare table_config is not enough, and table_config and
        # environment_settings are mutually exclusive.
        if stream_execution_environment is None and \
                table_config is None and \
                environment_settings is None:
            raise ValueError("No argument found, the param 'stream_execution_environment' "
                             "or 'environment_settings' is required.")
        elif stream_execution_environment is None and \
                table_config is not None and \
                environment_settings is None:
            raise ValueError("Only the param 'table_config' is found, "
                             "the param 'stream_execution_environment' is also required.")
        if table_config is not None and \
                environment_settings is not None:
            raise ValueError("The param 'table_config' and "
                             "'environment_settings' cannot be used at the same time")

        gateway = get_gateway()
        if environment_settings is not None:
            if not environment_settings.is_streaming_mode():
                raise ValueError("The environment settings for StreamTableEnvironment must be "
                                 "set to streaming mode.")
            # Without an explicit stream env, fall back to the unified factory and
            # let it create the environment from the settings alone.
            if stream_execution_environment is None:
                j_tenv = gateway.jvm.TableEnvironment.create(
                    environment_settings._j_environment_settings)
            else:
                j_tenv = gateway.jvm.StreamTableEnvironment.create(
                    stream_execution_environment._j_stream_execution_environment,
                    environment_settings._j_environment_settings)
        else:
            if table_config is not None:
                j_tenv = gateway.jvm.StreamTableEnvironment.create(
                    stream_execution_environment._j_stream_execution_environment,
                    table_config._j_table_config)
            else:
                j_tenv = gateway.jvm.StreamTableEnvironment.create(
                    stream_execution_environment._j_stream_execution_environment)
        return StreamTableEnvironment(j_tenv)

    def from_data_stream(self, data_stream: DataStream, *fields: Union[str, Expression]) -> Table:
        """
        Converts the given DataStream into a Table with specified field names.

        There are two modes for mapping original fields to the fields of the Table:

        1. Reference input fields by name:
        All fields in the schema definition are referenced by name (and possibly renamed using
        and alias (as). Moreover, we can define proctime and rowtime attributes at arbitrary
        positions using arbitrary names (except those that exist in the result schema). In this
        mode, fields can be reordered and projected out. This mode can be used for any input
        type.

        2. Reference input fields by position:
        In this mode, fields are simply renamed. Event-time attributes can replace the field on
        their position in the input data (if it is of correct type) or be appended at the end.
        Proctime attributes must be appended at the end. This mode can only be used if the input
        type has a defined field order (tuple, case class, Row) and none of the fields
        references a field of the input type.

        :param data_stream: The datastream to be converted.
        :param fields: The fields expressions to map original fields of the DataStream to the
                       fields of the Table
        :return: The converted Table.

        .. versionadded:: 1.12.0
        """
        j_data_stream = data_stream._j_data_stream
        # Declare managed memory for any Python operators in the upstream transformation.
        JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
        JPythonConfigUtil.declareManagedMemory(
            j_data_stream.getTransformation(),
            self._get_j_env(),
            self._j_tenv.getConfig())
        # Dispatch on how the fields were supplied: none, Expressions, or a legacy
        # (deprecated) comma-separated string.
        if len(fields) == 0:
            return Table(j_table=self._j_tenv.fromDataStream(j_data_stream), t_env=self)
        elif all(isinstance(f, Expression) for f in fields):
            return Table(j_table=self._j_tenv.fromDataStream(
                j_data_stream, to_expression_jarray(fields)), t_env=self)
        elif len(fields) == 1 and isinstance(fields[0], str):
            warnings.warn(
                "Deprecated in 1.12. Use from_data_stream(DataStream, *Expression) instead.",
                DeprecationWarning)
            return Table(j_table=self._j_tenv.fromDataStream(j_data_stream, fields[0]), t_env=self)
        raise ValueError("Invalid arguments for 'fields': %r" % fields)

    def to_append_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
        """
        Converts the given Table into a DataStream of a specified type. The Table must only have
        insert (append) changes. If the Table is also modified by update or delete changes, the
        conversion will fail.

        The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields
        are mapped by position, field types must match.

        :param table: The Table to convert.
        :param type_info: The TypeInformation that specifies the type of the DataStream.
        :return: The converted DataStream.

        .. versionadded:: 1.12.0
        """
        j_data_stream = self._j_tenv.toAppendStream(table._j_table, type_info.get_java_type_info())
        return DataStream(j_data_stream=j_data_stream)

    def to_retract_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
        """
        Converts the given Table into a DataStream of add and retract messages. The message will
        be encoded as Tuple. The first field is a boolean flag, the second field holds the record
        of the specified type.

        A true flag indicates an add message, a false flag indicates a retract message.

        The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields
        are mapped by position, field types must match.

        :param table: The Table to convert.
        :param type_info: The TypeInformation of the requested record type.
        :return: The converted DataStream.

        .. versionadded:: 1.12.0
        """
        j_data_stream = self._j_tenv.toRetractStream(table._j_table, type_info.get_java_type_info())
        return DataStream(j_data_stream=j_data_stream)
class BatchTableEnvironment(TableEnvironment):
    """
    .. note:: BatchTableEnvironment will be dropped in Flink 1.14 because it only supports the old
              planner. Use the unified :class:`~pyflink.table.TableEnvironment` instead, which
              supports both batch and streaming. More advanced operations previously covered by
              the DataSet API can now use the DataStream API in BATCH execution mode.
    """

    def __init__(self, j_tenv):
        super(BatchTableEnvironment, self).__init__(j_tenv)
        # Direct handle to the Java BatchTableEnvironment (or blink
        # TableEnvironmentImpl) for the methods below.
        self._j_tenv = j_tenv

    def connect(self, connector_descriptor: ConnectorDescriptor) -> \
            Union[BatchTableDescriptor, StreamTableDescriptor]:
        """
        Creates a temporary table from a descriptor.

        Descriptors allow for declaring the communication to external systems in an
        implementation-agnostic way. The classpath is scanned for suitable table factories that
        match the desired configuration.

        The following example shows how to read from a connector using a JSON format and
        registering a temporary table as "MyTable":
        ::

            >>> table_env \\
            ...     .connect(ExternalSystemXYZ()
            ...              .version("0.11")) \\
            ...     .with_format(Json()
            ...                  .json_schema("{...}")
            ...                  .fail_on_missing_field(False)) \\
            ...     .with_schema(Schema()
            ...                  .field("user-name", "VARCHAR")
            ...                  .from_origin_field("u_name")
            ...                  .field("count", "DECIMAL")) \\
            ...     .create_temporary_table("MyTable")

        :param connector_descriptor: Connector descriptor describing the external system.
        :return: A :class:`~pyflink.table.descriptors.BatchTableDescriptor` or a
                 :class:`~pyflink.table.descriptors.StreamTableDescriptor` (for blink planner)
                 used to build the temporary table.

        .. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
        """
        warnings.warn("Deprecated in 1.11. Use execute_sql instead.", DeprecationWarning)
        gateway = get_gateway()
        # A blink-planner batch environment is backed by TableEnvironmentImpl and
        # yields stream-style descriptors; the old planner yields batch descriptors.
        blink_t_env_class = get_java_class(
            gateway.jvm.org.apache.flink.table.api.internal.TableEnvironmentImpl)
        if blink_t_env_class == self._j_tenv.getClass():
            return StreamTableDescriptor(
                self._j_tenv.connect(connector_descriptor._j_connector_descriptor))
        else:
            return BatchTableDescriptor(
                self._j_tenv.connect(connector_descriptor._j_connector_descriptor))

    @staticmethod
    def create(execution_environment: ExecutionEnvironment = None,  # type: ignore
               table_config: TableConfig = None,
               environment_settings: EnvironmentSettings = None) -> 'BatchTableEnvironment':
        """
        Creates a :class:`~pyflink.table.BatchTableEnvironment`.

        Example:
        ::

            # create with ExecutionEnvironment.
            >>> env = ExecutionEnvironment.get_execution_environment()
            >>> table_env = BatchTableEnvironment.create(env)
            # create with ExecutionEnvironment and TableConfig.
            >>> table_config = TableConfig()
            >>> table_config.set_null_check(False)
            >>> table_env = BatchTableEnvironment.create(env, table_config)
            # create with EnvironmentSettings.
            >>> environment_settings = EnvironmentSettings.new_instance().in_batch_mode() \\
            ...     .use_blink_planner().build()
            >>> table_env = BatchTableEnvironment.create(environment_settings=environment_settings)

        :param execution_environment: The batch :class:`~pyflink.dataset.ExecutionEnvironment` of
                                      the TableEnvironment.
        :param table_config: The configuration of the TableEnvironment, optional.
        :param environment_settings: The environment settings used to instantiate the
                                     TableEnvironment. It provides the interfaces about planner
                                     selection(flink or blink), optional.
        :return: The BatchTableEnvironment created from given ExecutionEnvironment and
                 configuration.

        .. note:: This part of the API will be dropped in Flink 1.14 because it only supports the
                  old planner. Use the unified :class:`~pyflink.table.TableEnvironment` instead,
                  it supports both batch and streaming. For more advanced operations, the new
                  batch mode of the DataStream API might be useful.
        """
        warnings.warn(
            "Deprecated in 1.14. Use the unified TableEnvironment instead.",
            DeprecationWarning)
        # Validate the argument combinations (same rules as StreamTableEnvironment.create,
        # plus: execution_environment and environment_settings are mutually exclusive).
        if execution_environment is None and \
                table_config is None and \
                environment_settings is None:
            raise ValueError("No argument found, the param 'execution_environment' "
                             "or 'environment_settings' is required.")
        elif execution_environment is None and \
                table_config is not None and \
                environment_settings is None:
            raise ValueError("Only the param 'table_config' is found, "
                             "the param 'execution_environment' is also required.")
        elif execution_environment is not None and \
                environment_settings is not None:
            raise ValueError("The param 'execution_environment' and "
                             "'environment_settings' cannot be used at the same time")
        elif table_config is not None and \
                environment_settings is not None:
            raise ValueError("The param 'table_config' and "
                             "'environment_settings' cannot be used at the same time")

        gateway = get_gateway()
        if environment_settings is not None:
            if environment_settings.is_streaming_mode():
                raise ValueError("The environment settings for BatchTableEnvironment must be "
                                 "set to batch mode.")
            # Compare the planner class configured in the settings against the old
            # planner's class name to decide which factory to use.
            JEnvironmentSettings = get_gateway().jvm.org.apache.flink.table.api.EnvironmentSettings

            old_planner_class_name = EnvironmentSettings.new_instance().in_batch_mode() \
                .use_old_planner().build()._j_environment_settings \
                .toPlannerProperties()[JEnvironmentSettings.CLASS_NAME]
            planner_properties = environment_settings._j_environment_settings.toPlannerProperties()
            if JEnvironmentSettings.CLASS_NAME in planner_properties and \
                    planner_properties[JEnvironmentSettings.CLASS_NAME] == old_planner_class_name:
                # The Java EnvironmentSettings API does not support creating table environment with
                # old planner. Create it from other API.
                j_tenv = gateway.jvm.BatchTableEnvironment.create(
                    ExecutionEnvironment.get_execution_environment()._j_execution_environment)
            else:
                j_tenv = gateway.jvm.TableEnvironment.create(
                    environment_settings._j_environment_settings)
        else:
            if table_config is None:
                j_tenv = gateway.jvm.BatchTableEnvironment.create(
                    execution_environment._j_execution_environment)
            else:
                j_tenv = gateway.jvm.BatchTableEnvironment.create(
                    execution_environment._j_execution_environment,
                    table_config._j_table_config)
        return BatchTableEnvironment(j_tenv)
| apache-2.0 |
kernc/scikit-learn | examples/model_selection/randomized_search.py | 44 | 3253 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data: the 8x8 handwritten-digits dataset (1797 samples, 64 features)
digits = load_digits()
X, y = digits.data, digits.target

# build a classifier; n_estimators is fixed (time/quality tradeoff, see module docstring)
clf = RandomForestClassifier(n_estimators=20)
def report(grid_scores, n_top=3):
    """Print a short summary of the ``n_top`` best-scoring models.

    Each entry of ``grid_scores`` is expected to carry ``parameters``,
    ``mean_validation_score`` and ``cv_validation_scores`` attributes and be
    indexable at position 1 by the mean score (the legacy ``grid_scores_``
    tuples satisfy both).
    """
    best = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
    for rank, entry in enumerate(best, start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
            entry.mean_validation_score,
            np.std(entry.cv_validation_scores)))
        print("Parameters: {0}".format(entry.parameters))
        print("")
# specify parameters and distributions to sample from
# NOTE(review): min_samples_split=1 is only accepted by old scikit-learn
# releases; modern versions require >= 2 -- confirm the targeted version.
param_dist = {"max_depth": [3, None],
              "max_features": sp_randint(1, 11),
              "min_samples_split": sp_randint(1, 11),
              "min_samples_leaf": sp_randint(1, 11),
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
                                   n_iter=n_iter_search)

start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
      " parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)

# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
              "max_features": [1, 3, 10],
              "min_samples_split": [1, 3, 10],
              "min_samples_leaf": [1, 3, 10],
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)

print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
      % (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
zbanga/trading-with-python | lib/interactivebrokers.py | 77 | 18140 | """
Copyright: Jev Kuznetsov
Licence: BSD
Interface to interactive brokers together with gui widgets
"""
import sys
# import os
from time import sleep
from PyQt4.QtCore import (SIGNAL, SLOT)
from PyQt4.QtGui import (QApplication, QFileDialog, QDialog, QVBoxLayout, QHBoxLayout, QDialogButtonBox,
QTableView, QPushButton, QWidget, QLabel, QLineEdit, QGridLayout, QHeaderView)
import ib
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from ib.ext.Order import Order
import logger as logger
from qtpandas import DataFrameModel, TableView
from eventSystem import Sender
import numpy as np
import pandas
from pandas import DataFrame, Index
from datetime import datetime
import os
import datetime as dt
import time
# IB tick-type id -> column name used in the Subscriptions price table
priceTicks = {1: 'bid', 2: 'ask', 4: 'last', 6: 'high', 7: 'low', 9: 'close', 14: 'open'}

timeFormat = "%Y%m%d %H:%M:%S"  # intraday bar timestamps
dateFormat = "%Y%m%d"  # daily bar timestamps
def createContract(symbol, secType='STK', exchange='SMART', currency='USD'):
    """Factory for an IB ``Contract`` describing one tradable instrument."""
    c = Contract()
    c.m_symbol = symbol
    c.m_secType = secType
    c.m_exchange = exchange
    c.m_currency = currency
    return c
def _str2datetime(s):
""" convert string to datetime """
return datetime.strptime(s, '%Y%m%d')
def readActivityFlex(fName):
    """
    parse trade log in a csv file produced by IB 'Activity Flex Query'

    the file should contain these columns:
    ['Symbol','TradeDate','Quantity','TradePrice','IBCommission']

    Returns:
        A DataFrame with parsed trade data
    """
    import csv

    rows = []

    # 'rb' mode: this module targets Python 2 (csv wants bytes there)
    with open(fName, 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            rows.append(row)

    header = ['TradeDate', 'Symbol', 'Quantity', 'TradePrice', 'IBCommission']

    types = dict(zip(header, [_str2datetime, str, int, float, float]))  # column -> parser
    idx = dict(zip(header, [rows[0].index(h) for h in header]))  # column -> position in the csv
    data = dict(zip(header, [[] for h in header]))  # column -> parsed values

    # first row is the header; parse every data row column by column
    for row in rows[1:]:
        print row
        for col in header:
            val = types[col](row[idx[col]])
            data[col].append(val)

    # NOTE(review): DataFrame.sort(column=...) is the legacy (pandas < 0.17)
    # API; newer pandas uses sort_values(by=...).
    return DataFrame(data)[header].sort(column='TradeDate')
class Subscriptions(DataFrameModel, Sender):
    """ a data table containing price & subscription data """

    def __init__(self, tws=None):
        """Create an empty table; if a ``tws`` connection is supplied, register
        price and portfolio callbacks on it immediately."""
        super(Subscriptions, self).__init__()

        self.df = DataFrame()  # this property holds the data in a table format
        self._nextId = 1  # next free subscription id
        self._id2symbol = {}  # id-> symbol lookup dict
        self._header = ['id', 'position', 'bid', 'ask', 'last']  # columns of the _data table

        # register callbacks
        if tws is not None:
            tws.register(self.priceHandler, message.TickPrice)
            tws.register(self.accountHandler, message.UpdatePortfolio)

    def add(self, symbol, subId=None):
        """
        Add a subscription to data table
        return : subscription id
        """
        if subId is None:
            subId = self._nextId

        data = dict(zip(self._header, [subId, 0, np.nan, np.nan, np.nan]))
        row = DataFrame(data, index=Index([symbol]))
        self.df = self.df.append(row[self._header])  # append data and set correct column order
        self._nextId = subId + 1
        self._rebuildIndex()
        self.emit(SIGNAL("layoutChanged()"))
        return subId

    def priceHandler(self, msg):
        """ handler function for price updates. register this with ibConnection class """
        if priceTicks[msg.field] not in self._header:  # do nothing for ticks that are not in _data table
            return

        # NOTE(review): chained indexing (df[col][row] = ...) relies on the
        # old-pandas write-through behavior; newer pandas would need .loc here.
        self.df[priceTicks[msg.field]][self._id2symbol[msg.tickerId]] = msg.price

        # notify viewer that the single affected cell changed
        col = self._header.index(priceTicks[msg.field])
        row = self.df.index.tolist().index(self._id2symbol[msg.tickerId])
        idx = self.createIndex(row, col)
        self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), idx, idx)

    def accountHandler(self, msg):
        # update the 'position' column from portfolio updates for known symbols
        if msg.contract.m_symbol in self.df.index.tolist():
            self.df['position'][msg.contract.m_symbol] = msg.position

    def _rebuildIndex(self):
        """ update lookup dictionary id-> symbol """
        symbols = self.df.index.tolist()
        ids = self.df['id'].values.tolist()
        self._id2symbol = dict(zip(ids, symbols))

    def __repr__(self):
        return str(self.df)
class Broker(object):
    """
    Wrapper around an ibPy ``ibConnection``.

    Tracks currently subscribed contracts and exposes a ``Subscriptions``
    data model that GUI viewers can bind to.
    """

    def __init__(self, name='broker'):
        """Connect to TWS and start receiving account updates.

        name : logger name for this broker instance.
        """
        self.name = name
        self.log = logger.getLogger(self.name)

        self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
        self.contracts = {}  # a dict to keep track of subscribed contracts

        self.tws = ibConnection()  # tws interface
        self.nextValidOrderId = None  # filled in asynchronously by nextValidIdHandler

        self.dataModel = Subscriptions(self.tws)  # data container

        self.tws.registerAll(self.defaultHandler)
        #self.tws.register(self.debugHandler,message.TickPrice)
        self.tws.register(self.nextValidIdHandler, 'NextValidId')
        self.log.debug('Connecting to tws')
        self.tws.connect()

        self.tws.reqAccountUpdates(True, '')

    def subscribeStk(self, symbol, secType='STK', exchange='SMART', currency='USD'):
        """ subscribe to stock data """
        self.log.debug('Subscribing to ' + symbol)
        # if symbol in self.data.symbols:
        #     print 'Already subscribed to {0}'.format(symbol)
        #     return

        c = Contract()
        c.m_symbol = symbol
        c.m_secType = secType
        c.m_exchange = exchange
        c.m_currency = currency

        # register in the data table first so the table id doubles as ticker id
        subId = self.dataModel.add(symbol)
        self.tws.reqMktData(subId, c, '', False)

        self.contracts[symbol] = c

        return subId

    @property
    def data(self):
        # convenience accessor for the underlying price DataFrame
        return self.dataModel.df

    def placeOrder(self, symbol, shares, limit=None, exchange='SMART', transmit=0):
        """ place an order on already subscribed contract """
        if symbol not in self.contracts.keys():
            self.log.error("Can't place order, not subscribed to %s" % symbol)
            return

        action = {-1: 'SELL', 1: 'BUY'}

        o = Order()
        o.m_orderId = self.getOrderId()
        o.m_action = action[cmp(shares, 0)]  # sign of `shares` selects BUY/SELL (Python 2 cmp)
        o.m_totalQuantity = abs(shares)
        o.m_transmit = transmit

        if limit is not None:
            o.m_orderType = 'LMT'
            o.m_lmtPrice = limit

        self.log.debug('Placing %s order for %i %s (id=%i)' % (o.m_action, o.m_totalQuantity, symbol, o.m_orderId))

        self.tws.placeOrder(o.m_orderId, self.contracts[symbol], o)

    def getOrderId(self):
        # hand out sequential order ids, starting from the id TWS reported
        self.nextValidOrderId += 1
        return self.nextValidOrderId - 1

    def unsubscribeStk(self, symbol):
        # TODO: market-data unsubscription is not implemented yet
        self.log.debug('Function not implemented')

    def disconnect(self):
        self.tws.disconnect()

    def __del__(self):
        """destructor, clean up """
        print 'Broker is cleaning up after itself.'
        self.tws.disconnect()

    def debugHandler(self, msg):
        # dump raw TWS messages (register manually when debugging)
        print msg

    def defaultHandler(self, msg):
        """ default message handler """
        #print msg.typeName
        if msg.typeName == 'Error':
            self.log.error(msg)

    def nextValidIdHandler(self, msg):
        # TWS pushes the first usable order id right after connecting
        self.nextValidOrderId = msg.orderId
        self.log.debug('Next valid order id:{0}'.format(self.nextValidOrderId))

    def saveData(self, fname):
        """ save current dataframe to csv """
        self.log.debug("Saving data to {0}".format(fname))
        self.dataModel.df.to_csv(fname)

    # def __getattr__(self, name):
    #     """ x.__getattr__('name') <==> x.name
    #     an easy way to call ibConnection methods
    #     @return named attribute from instance tws
    #     """
    #     return getattr(self.tws, name)
class _HistDataHandler(object):
    """ handles incoming messages """

    def __init__(self, tws):
        self._log = logger.getLogger('DH')
        tws.register(self.msgHandler, message.HistoricalData)
        self.reset()

    def reset(self):
        """Discard previously collected bars before a new request."""
        self._log.debug('Resetting data')
        self.dataReady = False  # set True once the 'finished' sentinel arrives
        self._timestamp = []
        self._data = {'open': [], 'high': [], 'low': [], 'close': [], 'volume': [], 'count': [], 'WAP': []}

    def msgHandler(self, msg):
        #print '[msg]', msg
        # TWS marks the end of a batch with a date string starting with 'finished'
        if msg.date[:8] == 'finished':
            self._log.debug('Data recieved')
            self.dataReady = True
            return

        # intraday bars carry a full timestamp, daily bars only a date
        if len(msg.date) > 8:
            self._timestamp.append(dt.datetime.strptime(msg.date, timeFormat))
        else:
            self._timestamp.append(dt.datetime.strptime(msg.date, dateFormat))

        for k in self._data.keys():
            self._data[k].append(getattr(msg, k))

    @property
    def data(self):
        """ return downloaded data as a DataFrame """
        df = DataFrame(data=self._data, index=Index(self._timestamp))
        return df
class Downloader(object):
    """Historical-data downloader that respects the TWS pacing limits."""

    def __init__(self, debug=False):
        self._log = logger.getLogger('DLD')
        self._log.debug(
            'Initializing data dwonloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__, ib.version))

        self.tws = ibConnection()
        self._dataHandler = _HistDataHandler(self.tws)

        if debug:
            # echo everything except the (very verbose) historical-data stream
            self.tws.registerAll(self._debugHandler)
            self.tws.unregister(self._debugHandler, message.HistoricalData)

        self._log.debug('Connecting to tws')
        self.tws.connect()

        self._timeKeeper = TimeKeeper()  # keep track of past requests
        self._reqId = 1  # current request id

    def _debugHandler(self, msg):
        print '[debug]', msg

    def requestData(self, contract, endDateTime, durationStr='1 D', barSizeSetting='30 secs', whatToShow='TRADES',
                    useRTH=1, formatDate=1):
        """Request one batch of historical bars and block until they arrive.

        Returns the bars as a DataFrame (possibly incomplete on timeout).
        """
        self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol, endDateTime))

        # TWS allows at most 60 historical-data requests per 10 minutes
        while self._timeKeeper.nrRequests(timeSpan=600) > 59:
            print 'Too many requests done. Waiting... '
            time.sleep(10)

        self._timeKeeper.addRequest()
        self._dataHandler.reset()
        self.tws.reqHistoricalData(self._reqId, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH,
                                   formatDate)
        self._reqId += 1

        # wait for data
        startTime = time.time()
        timeout = 3
        while not self._dataHandler.dataReady and (time.time() - startTime < timeout):
            sleep(2)

        if not self._dataHandler.dataReady:
            self._log.error('Data timeout')

        print self._dataHandler.data

        return self._dataHandler.data

    def getIntradayData(self, contract, dateTuple):
        """ get full day data on 1-s interval
            date: a tuple of (yyyy,mm,dd)
        """
        # NOTE(review): the 16h/22h offsets look like UTC bounds for US market
        # hours -- confirm before reusing in another timezone.
        openTime = dt.datetime(*dateTuple) + dt.timedelta(hours=16)
        closeTime = dt.datetime(*dateTuple) + dt.timedelta(hours=22)

        timeRange = pandas.date_range(openTime, closeTime, freq='30min')

        datasets = []
        for t in timeRange:
            datasets.append(self.requestData(contract, t.strftime(timeFormat)))

        return pandas.concat(datasets)

    def disconnect(self):
        self.tws.disconnect()
class TimeKeeper(object):
    """Persist request timestamps to disk and count the recent ones.

    Used to stay under the TWS historical-data pacing limit across runs.
    """

    def __init__(self):
        self._log = logger.getLogger('TK')
        dataDir = os.path.expanduser('~') + '/twpData'
        if not os.path.exists(dataDir):
            os.mkdir(dataDir)
        self._timeFormat = "%Y%m%d %H:%M:%S"
        self.dataFile = os.path.normpath(os.path.join(dataDir, 'requests.txt'))
        self._log.debug('Data file: {0}'.format(self.dataFile))

    def addRequest(self):
        """Append the current timestamp to the request log."""
        stamp = dt.datetime.now().strftime(self._timeFormat)
        with open(self.dataFile, 'a') as f:
            f.write(stamp + '\n')

    def nrRequests(self, timeSpan=600):
        """Return how many logged requests fall within the past ``timeSpan`` seconds."""
        window = dt.timedelta(seconds=timeSpan)
        cutoff = dt.datetime.now()
        with open(self.dataFile, 'r') as f:
            stamps = [dt.datetime.strptime(line.strip(), self._timeFormat) for line in f.readlines()]
        requests = sum(1 for t in stamps if cutoff - t < window)
        if requests == 0:  # erase all contents if no requests are relevant
            open(self.dataFile, 'w').close()
        self._log.debug('past requests: {0}'.format(requests))
        return requests
#---------------test functions-----------------
def dummyHandler(msg):
    """Echo every incoming TWS message to stdout (Python 2 print statement)."""
    print msg
def testConnection():
    """ a simple test to check working of streaming prices etc """
    tws = ibConnection()
    tws.registerAll(dummyHandler)  # echo every message while testing

    tws.connect()

    c = createContract('SPY')
    tws.reqMktData(1, c, '', False)
    sleep(3)  # give TWS a moment to stream a few ticks

    print 'testConnection done.'
def testSubscriptions():
    """Build a Subscriptions table offline (no TWS connection) and print it."""
    s = Subscriptions()
    s.add('SPY')
    #s.add('XLE')

    print s
def testBroker():
    """Smoke test: start a Broker, subscribe a few symbols, place a dummy order."""
    broker = Broker()
    sleep(2)
    for sym in ('SPY', 'XLE', 'GOOG'):
        broker.subscribeStk(sym)

    broker.placeOrder('ABC', 125, 55.1)
    sleep(3)
    return broker
#---------------------GUI stuff--------------------------------------------
class AddSubscriptionDlg(QDialog):
    """Modal dialog asking for symbol/secType/exchange/currency of a new subscription."""

    def __init__(self, parent=None):
        super(AddSubscriptionDlg, self).__init__(parent)

        # input fields with sensible defaults
        symbolLabel = QLabel('Symbol')
        self.symbolEdit = QLineEdit()

        secTypeLabel = QLabel('secType')
        self.secTypeEdit = QLineEdit('STK')

        exchangeLabel = QLabel('exchange')
        self.exchangeEdit = QLineEdit('SMART')

        currencyLabel = QLabel('currency')
        self.currencyEdit = QLineEdit('USD')

        buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
                                     QDialogButtonBox.Cancel)

        lay = QGridLayout()
        lay.addWidget(symbolLabel, 0, 0)
        lay.addWidget(self.symbolEdit, 0, 1)
        lay.addWidget(secTypeLabel, 1, 0)
        lay.addWidget(self.secTypeEdit, 1, 1)
        lay.addWidget(exchangeLabel, 2, 0)
        lay.addWidget(self.exchangeEdit, 2, 1)
        lay.addWidget(currencyLabel, 3, 0)
        lay.addWidget(self.currencyEdit, 3, 1)
        lay.addWidget(buttonBox, 4, 0, 1, 2)
        self.setLayout(lay)

        # old-style PyQt4 signal/slot wiring
        self.connect(buttonBox, SIGNAL("accepted()"),
                     self, SLOT("accept()"))
        self.connect(buttonBox, SIGNAL("rejected()"),
                     self, SLOT("reject()"))

        self.setWindowTitle("Add subscription")
class BrokerWidget(QWidget):
    """Widget showing the live price table plus add-symbol / save-data buttons."""

    def __init__(self, broker, parent=None):
        super(BrokerWidget, self).__init__(parent)

        self.broker = broker

        # table view bound to the broker's Subscriptions data model
        self.dataTable = TableView()
        self.dataTable.setModel(self.broker.dataModel)
        self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
        #self.dataTable.resizeColumnsToContents()
        dataLabel = QLabel('Price Data')
        dataLabel.setBuddy(self.dataTable)

        dataLayout = QVBoxLayout()
        dataLayout.addWidget(dataLabel)
        dataLayout.addWidget(self.dataTable)

        addButton = QPushButton("&Add Symbol")
        saveDataButton = QPushButton("&Save Data")
        #deleteButton = QPushButton("&Delete")

        buttonLayout = QVBoxLayout()
        buttonLayout.addWidget(addButton)
        buttonLayout.addWidget(saveDataButton)
        buttonLayout.addStretch()

        layout = QHBoxLayout()
        layout.addLayout(dataLayout)
        layout.addLayout(buttonLayout)
        self.setLayout(layout)

        self.connect(addButton, SIGNAL('clicked()'), self.addSubscription)
        self.connect(saveDataButton, SIGNAL('clicked()'), self.saveData)
        #self.connect(deleteButton,SIGNAL('clicked()'),self.deleteSubscription)

    def addSubscription(self):
        """Prompt for a new symbol and subscribe it through the broker."""
        dialog = AddSubscriptionDlg(self)
        if dialog.exec_():
            self.broker.subscribeStk(str(dialog.symbolEdit.text()), str(dialog.secTypeEdit.text()),
                                     str(dialog.exchangeEdit.text()), str(dialog.currencyEdit.text()))

    def saveData(self):
        """ save data to a .csv file """
        fname = unicode(QFileDialog.getSaveFileName(self, caption="Save data to csv", filter='*.csv'))
        if fname:
            self.broker.saveData(fname)

    # def deleteSubscription(self):
    #     pass
class Form(QDialog):
    """Top-level test dialog: owns a Broker and embeds a BrokerWidget."""

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        self.resize(640, 480)
        self.setWindowTitle('Broker test')

        self.broker = Broker()

        # subscribe a few symbols so the table has something to show
        self.broker.subscribeStk('SPY')
        self.broker.subscribeStk('XLE')
        self.broker.subscribeStk('GOOG')

        brokerWidget = BrokerWidget(self.broker, self)
        lay = QVBoxLayout()
        lay.addWidget(brokerWidget)
        self.setLayout(lay)
def startGui():
    """Create the Qt application, show the main dialog and enter the event loop."""
    app = QApplication(sys.argv)
    dialog = Form()
    dialog.show()
    app.exec_()
if __name__ == "__main__":
    import ib
    # Python 2 print statements below
    print 'iby version:', ib.version

    # offline / online smoke tests -- enable manually as needed
    #testConnection()
    #testBroker()
    #testSubscriptions()
    print message.messageTypeNames()
    startGui()
    print 'All done'
| bsd-3-clause |
architecture-building-systems/CityEnergyAnalyst | cea/plots/demand/energy_balance.py | 2 | 12475 |
import plotly.graph_objs as go
from plotly.offline import plot
from cea.plots.variable_naming import LOGO, COLOR, NAMING
import cea.plots.demand
import pandas as pd
import numpy as np
__author__ = "Gabriel Happle"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Gabriel Happle", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "2.8"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
class EnergyBalancePlot(cea.plots.demand.DemandSingleBuildingPlotBase):
    """Monthly energy-balance bar chart (gains vs. losses) for a single building."""
    name = "Energy balance"

    def __init__(self, project, parameters, cache):
        super(EnergyBalancePlot, self).__init__(project, parameters, cache)
        # single-building plot: silently keep only the first selected building
        if len(self.buildings) > 1:
            self.buildings = [self.buildings[0]]
        # heat-flux fields (kWh) that make up the balance
        self.analysis_fields = ['I_sol_kWh',
                                'Qhs_tot_sen_kWh',
                                'Qhs_loss_sen_kWh',
                                'Q_gain_lat_peop_kWh',
                                'Q_gain_sen_light_kWh',
                                'Q_gain_sen_app_kWh',
                                'Q_gain_sen_data_kWh',
                                'Q_gain_sen_peop_kWh',
                                'Q_gain_sen_wall_kWh',
                                'Q_gain_sen_base_kWh',
                                'Q_gain_sen_roof_kWh',
                                'Q_gain_sen_wind_kWh',
                                'Q_gain_sen_vent_kWh',
                                'Q_gain_lat_vent_kWh',
                                'I_rad_kWh',
                                'Qcs_tot_sen_kWh',
                                'Qcs_tot_lat_kWh',
                                'Qcs_loss_sen_kWh',
                                'Q_loss_sen_wall_kWh',
                                'Q_loss_sen_base_kWh',
                                'Q_loss_sen_roof_kWh',
                                'Q_loss_sen_wind_kWh',
                                'Q_loss_sen_vent_kWh',
                                'Q_loss_sen_ref_kWh']
        self.__data_frame_month = None  # lazy cache for the monthly aggregation

    @property
    def layout(self):
        return go.Layout(barmode='relative',
                         yaxis=dict(title='Energy balance [kWh/m2_GFA]'))

    @property
    def data_frame_month(self):
        """Hourly loads of the selected building aggregated to months and
        normalized by gross floor area (computed once, then cached)."""
        if self.__data_frame_month is None:
            gfa_m2 = self.yearly_loads.set_index('Name').loc[self.buildings[0]]['GFA_m2']
            hourly_loads_for_buildings = self.hourly_loads[self.hourly_loads['Name'].isin(self.buildings)]
            self.__data_frame_month = calc_monthly_energy_balance(hourly_loads_for_buildings, gfa_m2)
        return self.__data_frame_month

    def calc_graph(self):
        """Return one relative bar trace per analysis field."""
        data_frame_month = self.data_frame_month
        traces = []
        for field in self.analysis_fields:
            y = data_frame_month[field]
            trace = go.Bar(x=data_frame_month.index, y=y, name=field.split('_kWh', 1)[0],
                           marker=dict(color=COLOR[field]))  # , text = total_perc_txt)
            traces.append(trace)
        return traces

    def calc_table(self):
        """
        draws table of monthly energy balance

        :param self
        :return: table_df
        """
        data_frame_month = self.data_frame_month

        # create table arrays (monthly values plus a yearly total row)
        name_month = np.append(data_frame_month.index, ['YEAR'])
        total_heat = np.append(data_frame_month['Q_heat_sum'].values, data_frame_month['Q_heat_sum'].sum())
        total_cool = np.append(data_frame_month['Q_cool_sum'], data_frame_month['Q_cool_sum'].sum())
        balance = np.append(data_frame_month['Q_balance'], data_frame_month['Q_balance'].sum().round(2))

        # draw table
        column_names = ['Month', 'Total heat [kWh/m2_GFA]', 'Total cool [kWh/m2_GFA]', 'Delta [kWh/m2_GFA]']
        column_data = [name_month, total_heat, total_cool, balance]
        table_df = pd.DataFrame({cn: cd for cn, cd in zip(column_names, column_data)}, columns=column_names)
        return table_df
def energy_balance(data_frame, analysis_fields, normalize_value, title, output_path):
    """Render the monthly energy-balance plot (bars + summary table) to ``output_path``.

    Returns a dict with the plotly ``data`` traces and ``layout``.
    """
    # aggregate hourly demand data to a monthly balance, normalized (e.g. by GFA)
    monthly = calc_monthly_energy_balance(data_frame, normalize_value)

    # bar traces for every field, then the summary table appended as last trace
    traces = calc_graph(analysis_fields, monthly)
    traces.append(calc_table(monthly))

    layout = go.Layout(images=LOGO, title=title, barmode='relative',
                       yaxis=dict(title='Energy balance [kWh/m2_GFA]', domain=[0.35, 1.0]))
    fig = go.Figure(data=traces, layout=layout)
    plot(fig, auto_open=False, filename=output_path)

    return {'data': traces, 'layout': layout}
def calc_table(data_frame_month):
    """Build a plotly table summarizing the monthly heating/cooling balance.

    :param data_frame_month: data frame of monthly building energy balance
    :return: a ``go.Table`` trace with one row per month plus a YEAR total row
    """
    months = np.append(data_frame_month.index, ['YEAR'])
    heat = np.append(data_frame_month['Q_heat_sum'].values, data_frame_month['Q_heat_sum'].sum())
    cool = np.append(data_frame_month['Q_cool_sum'], data_frame_month['Q_cool_sum'].sum())
    delta = np.append(data_frame_month['Q_balance'], data_frame_month['Q_balance'].sum().round(2))

    header = ['Month', 'Total heat [kWh/m2_GFA]', 'Total cool [kWh/m2_GFA]',
              'Delta [kWh/m2_GFA]']
    return go.Table(domain=dict(x=[0, 1], y=[0.0, 0.2]),
                    header=dict(values=header),
                    cells=dict(values=[months, heat, cool, delta]))
def calc_graph(analysis_fields, data_frame):
    """Return one relative bar trace per analysis field of the monthly balance.

    :param analysis_fields: column names (``*_kWh``) to plot
    :param data_frame: monthly balance DataFrame indexed by month name
    :return: list of ``go.Bar`` traces
    """
    return [go.Bar(x=data_frame.index,
                   y=data_frame[field],
                   name=field.split('_kWh', 1)[0],
                   marker=dict(color=COLOR[field]))
            for field in analysis_fields]
def calc_monthly_energy_balance(data_frame, normalize_value):
    """
    calculates heat flux balance for buildings on hourly basis

    :param data_frame: demand information of building in pd.DataFrame
    :param normalize_value: value for normalization of thermal energy fluxes, usually GFA
    :return: monthly balance DataFrame (kWh per unit of normalize_value), rounded to 2 decimals

    NOTE(review): this function mutates ``data_frame`` in place (sign flips,
    added columns, index conversion) -- callers should not reuse the input.
    """
    # invert the sign of I_rad. I_rad is a positive term in ISO 13790 and therefore positive in the rest of the code.
    # I_rad is energy being irradiated from the building to the sky. It is a heat loss in the energy balance.
    data_frame['I_rad_kWh'] = -data_frame['I_rad_kWh']

    # calculate losses of heating and cooling system in data frame and adjust signs
    data_frame['Qhs_loss_sen_kWh'] = -abs(data_frame['Qhs_em_ls_kWh'] + data_frame['Qhs_dis_ls_kWh'])
    data_frame['Qhs_tot_sen_kWh'] = data_frame['Qhs_sen_sys_kWh'] + abs(data_frame['Qhs_loss_sen_kWh'])
    data_frame['Qcs_loss_sen_kWh'] = -data_frame['Qcs_em_ls_kWh'] - data_frame['Qcs_dis_ls_kWh']
    data_frame['Qcs_tot_sen_kWh'] = data_frame['Qcs_sen_sys_kWh'] - abs(data_frame['Qcs_loss_sen_kWh'])

    # calculate the latent heat load. the latent heat load is assumed to be the load that the system serves
    data_frame['Qcs_tot_lat_kWh'] = -data_frame['Qcs_lat_sys_kWh']

    # split up R-C model heat fluxes into heating (positive) and cooling (negative) contributions
    data_frame['Q_loss_sen_wall_kWh'] = data_frame["Q_gain_sen_wall_kWh"][data_frame["Q_gain_sen_wall_kWh"] < 0]
    data_frame['Q_gain_sen_wall_kWh'] = data_frame["Q_gain_sen_wall_kWh"][data_frame["Q_gain_sen_wall_kWh"] > 0]
    data_frame['Q_loss_sen_base_kWh'] = data_frame["Q_gain_sen_base_kWh"][data_frame["Q_gain_sen_base_kWh"] < 0]
    data_frame['Q_gain_sen_base_kWh'] = data_frame["Q_gain_sen_base_kWh"][data_frame["Q_gain_sen_base_kWh"] > 0]
    data_frame['Q_loss_sen_roof_kWh'] = data_frame["Q_gain_sen_roof_kWh"][data_frame["Q_gain_sen_roof_kWh"] < 0]
    data_frame['Q_gain_sen_roof_kWh'] = data_frame["Q_gain_sen_roof_kWh"][data_frame["Q_gain_sen_roof_kWh"] > 0]
    data_frame['Q_loss_sen_vent_kWh'] = data_frame["Q_gain_sen_vent_kWh"][data_frame["Q_gain_sen_vent_kWh"] < 0]
    data_frame['Q_gain_sen_vent_kWh'] = data_frame["Q_gain_sen_vent_kWh"][data_frame["Q_gain_sen_vent_kWh"] > 0]
    data_frame['Q_loss_sen_wind_kWh'] = data_frame["Q_gain_sen_wind_kWh"][data_frame["Q_gain_sen_wind_kWh"] < 0]
    data_frame['Q_gain_sen_wind_kWh'] = data_frame["Q_gain_sen_wind_kWh"][data_frame["Q_gain_sen_wind_kWh"] > 0]

    # the filtered columns contain NaN where the other sign applied; zero them
    data_frame['Q_gain_sen_wall_kWh'].fillna(0, inplace=True)
    data_frame['Q_gain_sen_base_kWh'].fillna(0, inplace=True)
    data_frame['Q_gain_sen_roof_kWh'].fillna(0, inplace=True)
    data_frame['Q_loss_sen_wall_kWh'].fillna(0, inplace=True)
    data_frame['Q_loss_sen_base_kWh'].fillna(0, inplace=True)
    data_frame['Q_loss_sen_roof_kWh'].fillna(0, inplace=True)
    data_frame['Q_gain_sen_vent_kWh'].fillna(0, inplace=True)
    data_frame['Q_loss_sen_vent_kWh'].fillna(0, inplace=True)
    data_frame['Q_gain_sen_wind_kWh'].fillna(0, inplace=True)
    data_frame['Q_loss_sen_wind_kWh'].fillna(0, inplace=True)

    # convert to monthly
    data_frame.index = pd.to_datetime(data_frame.index)
    data_frame_month = data_frame.resample("M").sum()  # still kWh
    data_frame_month["month"] = data_frame_month.index.strftime("%B")
    data_frame_month.set_index("month", inplace=True)

    # calculate latent heat gains of people that are covered by the cooling system
    # FIXME: This is kind of a fake balance, as months are compared (could be a significant share not in heating or cooling season)
    for index, row in data_frame_month.iterrows():
        # completely covered
        if row['Qcs_tot_lat_kWh'] < 0 and abs(row['Qcs_lat_sys_kWh']) >= row['Q_gain_lat_peop_kWh']:
            data_frame_month.at[index, 'Q_gain_lat_peop_kWh'] = row['Q_gain_lat_peop_kWh']
        # partially covered (rest is ignored)
        elif row['Qcs_tot_lat_kWh'] < 0 and abs(row['Qcs_tot_lat_kWh']) < row['Q_gain_lat_peop_kWh']:
            data_frame_month.at[index, 'Q_gain_lat_peop_kWh'] = abs(row['Qcs_tot_lat_kWh'])
        # no latent gains
        elif row['Qcs_tot_lat_kWh'] == 0:
            data_frame_month.at[index, 'Q_gain_lat_peop_kWh'] = 0.0
        else:
            data_frame_month.at[index, 'Q_gain_lat_peop_kWh'] = 0.0

    # remaining latent load is attributed to ventilation
    data_frame_month['Q_gain_lat_vent_kWh'] = abs(data_frame_month['Qcs_lat_sys_kWh']) - data_frame_month[
        'Q_gain_lat_peop_kWh']

    # balance of heating
    data_frame_month['Q_heat_sum'] = data_frame_month['Qhs_tot_sen_kWh'] + data_frame_month['Q_gain_sen_wall_kWh'] \
        + data_frame_month['Q_gain_sen_base_kWh'] + data_frame_month['Q_gain_sen_roof_kWh'] \
        + data_frame_month['Q_gain_sen_vent_kWh'] + data_frame_month['Q_gain_sen_wind_kWh'] \
        + data_frame_month["Q_gain_sen_app_kWh"] + data_frame_month['Q_gain_sen_light_kWh'] \
        + data_frame_month['Q_gain_sen_peop_kWh'] + data_frame_month['Q_gain_sen_data_kWh'] \
        + data_frame_month['I_sol_kWh'] + data_frame_month['Qcs_loss_sen_kWh'] \
        + data_frame_month['Q_gain_lat_peop_kWh'] + data_frame_month['Q_gain_lat_vent_kWh']

    # balance of cooling
    data_frame_month['Q_cool_sum'] = data_frame_month['Qcs_tot_sen_kWh'] + data_frame_month['Q_loss_sen_wall_kWh'] \
        + data_frame_month['Q_loss_sen_base_kWh'] + data_frame_month['Q_loss_sen_roof_kWh'] \
        + data_frame_month['Q_loss_sen_vent_kWh'] + data_frame_month['Q_loss_sen_wind_kWh'] \
        + data_frame_month['I_rad_kWh'] + data_frame_month['Qhs_loss_sen_kWh'] \
        + data_frame_month['Q_loss_sen_ref_kWh'] + data_frame_month['Qcs_tot_lat_kWh']

    # total balance (close to zero when the balance closes)
    data_frame_month['Q_balance'] = data_frame_month['Q_heat_sum'] + data_frame_month['Q_cool_sum']

    # normalize by GFA
    data_frame_month = data_frame_month / normalize_value
    data_frame_month = data_frame_month.round(2)

    return data_frame_month
def main():
    """Entry point for manual testing: plot the energy balance of the
    building configured in the CEA configuration file."""
    import cea.config
    import cea.plots.cache
    config = cea.config.Configuration()
    cache = cea.plots.cache.NullPlotCache()
    EnergyBalancePlot(config.project, {'building': config.plots.building,
                                       'scenario-name': config.scenario_name},
                      cache).plot(auto_open=True)


if __name__ == '__main__':
    main()
| mit |
alphacsc/alphacsc | examples/csc/plot_simulate_csc.py | 1 | 3484 | """
=============================
Vanilla CSC on simulated data
=============================
This example demonstrates `vanilla` CSC on simulated data.
Note that `vanilla` CSC is just a special case of alphaCSC with alpha=2.
"""
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Tom Dupre La Tour <tom.duprelatour@telecom-paristech.fr>
# Umut Simsekli <umut.simsekli@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
###############################################################################
# Let us first define the parameters of our model.
# model dimensions
n_times_atom = 64  # L
n_times = 512  # T
n_atoms = 2  # K
n_trials = 100  # N
n_iter = 50

reg = 0.1  # regularization strength of the CSC objective

###############################################################################
# Here, we simulate the data

from alphacsc.simulate import simulate_data  # noqa

random_state_simulate = 1
X, ds_true, z_true = simulate_data(n_trials, n_times, n_times_atom,
                                   n_atoms, random_state_simulate)

###############################################################################
# Add some noise and corrupt some trials

from scipy.stats import levy_stable  # noqa
from alphacsc import check_random_state  # noqa

# Add stationary noise:
fraction_corrupted = 0.02
n_corrupted_trials = int(fraction_corrupted * n_trials)
rng = check_random_state(random_state_simulate)
X += 0.01 * rng.randn(*X.shape)
# trials selected here receive impulsive alpha-stable noise further below
idx_corrupted = rng.randint(0, n_trials,
                            size=n_corrupted_trials)

###############################################################################
# Let us look at the first 10 trials to see how they look.

from alphacsc.viz.callback import plot_data  # noqa
plot_data([X[:10]])

###############################################################################
# Note that the atoms don't always have the same amplitude or occur at the
# same time instant.
#
# Now, we run vanilla CSC on the data.

from alphacsc import learn_d_z  # noqa

random_state = 60

pobj, times, d_hat, z_hat, reg = learn_d_z(
    X, n_atoms, n_times_atom, reg=reg, n_iter=n_iter,
    solver_d_kwargs=dict(factr=100), random_state=random_state,
    n_jobs=1, verbose=1)
print('Vanilla CSC')

###############################################################################
# Finally, let's compare the results.

import matplotlib.pyplot as plt  # noqa
plt.figure()
plt.plot(d_hat.T)  # learned atoms
plt.plot(ds_true.T, 'k--')  # ground-truth atoms (dashed)

###############################################################################
# We can also visualize the learned activations

plot_data([z[:10] for z in z_hat], ['stem'] * n_atoms)

###############################################################################
# Note if the data is corrupted with impulsive noise, this method may not be
# the best. Check out our :ref:`example using alphacsc
# <sphx_glr_auto_examples_plot_simulate_alphacsc.py>` to learn how to deal with
# such data.

alpha = 1.2
noise_level = 0.005
# inject heavy-tailed alpha-stable noise into the selected trials
X[idx_corrupted] += levy_stable.rvs(alpha, 0, loc=0, scale=noise_level,
                                    size=(n_corrupted_trials, n_times),
                                    random_state=random_state_simulate)

pobj, times, d_hat, z_hat, reg = learn_d_z(
    X, n_atoms, n_times_atom, reg=reg, n_iter=n_iter,
    solver_d_kwargs=dict(factr=100), random_state=random_state,
    n_jobs=1, verbose=1)

plt.figure()
plt.plot(d_hat.T)
plt.plot(ds_true.T, 'k--')
plt.show()
| bsd-3-clause |
plaidml/plaidml | networks/keras/examples/reuters_mlp_relu_vs_selu.py | 1 | 5648 | '''Compares self-normalizing MLPs with regular MLPs.
Compares the performance of a simple MLP using two
different activation functions: RELU and SELU
on the Reuters newswire topic classification task.
# Reference:
Klambauer, G., Unterthiner, T., Mayr, A., & Hochreiter, S. (2017).
Self-Normalizing Neural Networks. arXiv preprint arXiv:1706.02515.
https://arxiv.org/abs/1706.02515
'''
from __future__ import print_function
import numpy as np
# import matplotlib.pyplot as plt
import keras
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers.noise import AlphaDropout
from keras.preprocessing.text import Tokenizer
from example_correctness_test_utils import StopwatchManager
max_words = 1000  # vocabulary size used by the tokenizer
batch_size = 16
epochs = 1
plot = True  # whether to plot the final comparison
def create_network(n_dense=6,
                   dense_units=16,
                   activation='selu',
                   dropout=AlphaDropout,
                   dropout_rate=0.1,
                   kernel_initializer='lecun_normal',
                   optimizer='adam',
                   num_classes=1,
                   max_words=max_words):
    """Generic function to create a fully-connected neural network.
    # Arguments
        n_dense: int > 0. Number of dense layers.
        dense_units: int > 0. Number of dense units per layer.
        activation: str/callable. Activation applied after each dense layer.
        dropout: keras.layers.Layer. A dropout layer to apply.
        dropout_rate: 0 <= float <= 1. The rate of dropout.
        kernel_initializer: str. The initializer for the weights.
        optimizer: str/keras.optimizers.Optimizer. The optimizer to use.
        num_classes: int > 0. The number of classes to predict.
        max_words: int > 0. The maximum number of words per data point.
    # Returns
        A Keras model instance (compiled).
    """
    # Assemble all layers first, then hand them to Sequential in one go.
    # Only the first dense layer needs the input shape; the rest infer it.
    layers = [
        Dense(dense_units, input_shape=(max_words,),
              kernel_initializer=kernel_initializer),
        Activation(activation),
        dropout(dropout_rate),
    ]
    for _ in range(n_dense - 1):
        layers.append(Dense(dense_units, kernel_initializer=kernel_initializer))
        layers.append(Activation(activation))
        layers.append(dropout(dropout_rate))
    # Softmax head over the target classes.
    layers.append(Dense(num_classes))
    layers.append(Activation('softmax'))
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
# Baseline MLP configuration: ReLU activations, ordinary Dropout and
# Glorot-uniform initialization (the non-self-normalizing setup).
network1 = {
    'n_dense': 6,
    'dense_units': 16,
    'activation': 'relu',
    'dropout': Dropout,
    'dropout_rate': 0.5,
    'kernel_initializer': 'glorot_uniform',
    'optimizer': 'sgd'
}
# Self-normalizing MLP configuration: SELU activations, AlphaDropout and
# LeCun-normal initialization, as prescribed for SNNs.
network2 = {
    'n_dense': 6,
    'dense_units': 16,
    'activation': 'selu',
    'dropout': AlphaDropout,
    'dropout_rate': 0.1,
    'kernel_initializer': 'lecun_normal',
    'optimizer': 'sgd'
}
print('Loading data...')
# Reuters newswire topics, vocabulary capped at the `max_words` most
# frequent tokens; 20% of the corpus is held out for testing.
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words, test_split=0.2)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
# Labels are 0-based class indices, so the class count is max(label) + 1.
num_classes = np.max(y_train) + 1
print(num_classes, 'classes')
print('Vectorizing sequence data...')
# Binary bag-of-words encoding: one column per vocabulary word.
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Convert class vector to binary class matrix ' '(for use with categorical_crossentropy)')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
sw_manager = StopwatchManager(stop_watch, compile_stop_watch)
print('\nBuilding network 1...')
model1 = create_network(num_classes=num_classes, **network1)
history_model1 = model1.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1,
callbacks=[sw_manager])
score_model1 = model1.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print('\nBuilding network 2...')
model2 = create_network(num_classes=num_classes, **network2)
history_model2 = model2.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1,
callbacks=[sw_manager])
score_model2 = model2.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print('\nNetwork 1 results')
print('Hyperparameters:', network1)
print('Test score:', score_model1[0])
print('Test accuracy:', score_model1[1])
print('Network 2 results')
print('Hyperparameters:', network2)
print('Test score:', score_model2[0])
print('Test accuracy:', score_model2[1])
output.contents = np.array([score_model1[0], score_model1[1], score_model2[0], score_model2[1]])
"""
plt.plot(range(epochs),
history_model1.history['val_loss'],
'g-',
label='Network 1 Val Loss')
plt.plot(range(epochs),
history_model2.history['val_loss'],
'r-',
label='Network 2 Val Loss')
plt.plot(range(epochs),
history_model1.history['loss'],
'g--',
label='Network 1 Loss')
plt.plot(range(epochs),
history_model2.history['loss'],
'r--',
label='Network 2 Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('comparison_of_networks.png')
"""
| apache-2.0 |
ChanderG/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
    """Fitted DPGMM/VBGMM should concentrate weight on used components."""
    # Simple three-cluster dataset.
    X, _ = make_blobs(random_state=1)
    for model_class in (DPGMM, VBGMM):
        mixture = model_class(n_components=10, random_state=1, alpha=20,
                              n_iter=50)
        mixture.fit(X)
        # Mark which of the 10 components actually receive assignments.
        used = np.zeros(10, dtype=np.bool)
        used[np.unique(mixture.predict(X))] = True
        # Components in use carry substantial weight ...
        assert_array_less(.1, mixture.weights_[used])
        # ... while unused ones are nearly empty.
        assert_array_less(mixture.weights_[~used], .05)
def test_verbose_boolean():
    """verbose=True and verbose=1 must print identical first-level output."""
    # Simple three-cluster dataset.
    X, _ = make_blobs(random_state=1)
    for model_class in (DPGMM, VBGMM):
        model_flag = model_class(n_components=10, random_state=1, alpha=20,
                                 n_iter=50, verbose=True)
        model_int = model_class(n_components=10, random_state=1, alpha=20,
                                n_iter=50, verbose=1)
        saved_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            # Capture the first line printed with the boolean flag ...
            model_flag.fit(X)
            captured = sys.stdout
            captured.seek(0)
            bool_line = captured.readline()
            # ... and with the integer flag, then compare.
            model_int.fit(X)
            captured = sys.stdout
            captured.seek(0)
            int_line = captured.readline()
            assert_equal(bool_line, int_line)
        finally:
            sys.stdout = saved_stdout
def test_verbose_first_level():
    """Fitting with verbose=1 must run cleanly while stdout is captured."""
    # Simple three-cluster dataset.
    X, _ = make_blobs(random_state=1)
    for model_class in (DPGMM, VBGMM):
        mixture = model_class(n_components=10, random_state=1, alpha=20,
                              n_iter=50, verbose=1)
        saved_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            mixture.fit(X)
        finally:
            # Always restore stdout, even if fit() raises.
            sys.stdout = saved_stdout
def test_verbose_second_level():
    """Fitting with verbose=2 must run cleanly while stdout is captured."""
    # Simple three-cluster dataset.
    X, _ = make_blobs(random_state=1)
    for model_class in (DPGMM, VBGMM):
        mixture = model_class(n_components=10, random_state=1, alpha=20,
                              n_iter=50, verbose=2)
        saved_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            mixture.fit(X)
        finally:
            # Always restore stdout, even if fit() raises.
            sys.stdout = saved_stdout
def test_log_normalize():
    """log_normalize must recover a distribution from scaled log values."""
    expected = np.array([0.1, 0.8, 0.01, 0.09])
    # The factor of 2 applied before the log must be normalized away.
    log_scaled = np.log(2 * expected)
    assert np.allclose(expected, log_normalize(log_scaled), rtol=0.01)
def do_model(self, **kwds):
    # Model factory plugged into VBGMMTester.model below; the tester calls it
    # like an unbound method, hence the otherwise unused ``self`` parameter.
    return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
    # Reuse the generic GMM test-suite against the DPGMM model; the
    # eval-specific test is not applicable here.
    model = DPGMM
    do_test_eval = False
    def score(self, g, train_obs):
        # DPGMM quality is measured via its variational lower bound.
        _, z = g.score_samples(train_obs)
        return g.lower_bound(train_obs, z)
# Instantiate the DPGMM test-suite once per supported covariance type.
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'spherical'
    setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'diag'
    setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'tied'
    setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'full'
    setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
    # Reuse the generic GMM test-suite against VBGMM, built via the
    # do_model factory defined above (keeps the model quiet).
    model = do_model
    do_test_eval = False
    def score(self, g, train_obs):
        # VBGMM quality is measured via its variational lower bound.
        _, z = g.score_samples(train_obs)
        return g.lower_bound(train_obs, z)
# Instantiate the VBGMM test-suite once per supported covariance type.
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'spherical'
    setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'diag'
    setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'tied'
    setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'full'
    setUp = GMMTester._setUp
| bsd-3-clause |
OshynSong/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh
# Create color maps: light shades for decision regions, saturated for points
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Fit and plot once per neighbor-weighting scheme.
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))
plt.show()
| bsd-3-clause |
GbalsaC/bitnamiP | venv/share/doc/networkx-1.7/examples/graph/atlas.py | 20 | 2637 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
#from networkx import *
#from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
    """Return the atlas of all connected graphs of 6 nodes or less.

    Attempt to check for isomorphisms and remove.
    """
    atlas = nx.graph_atlas_g()[0:208]  # 208
    # Drop isolated nodes so only connected graphs remain, accumulating
    # everything into one disjoint union.
    union = nx.Graph()
    for graph in atlas:
        for node in [n for n in graph if graph.degree(n) == 0]:
            graph.remove_node(node)
        union = nx.disjoint_union(union, graph)
    # Each connected component is one candidate graph.
    components = nx.connected_component_subgraphs(union)
    # Quick isomorphic-like filter (not a true isomorphism checker).
    result = nx.Graph()
    nonisomorphic = []
    for graph in components:
        if not iso(graph, nonisomorphic):
            nonisomorphic.append(graph)
            result = nx.disjoint_union(result, graph)
    return result
def iso(G1, glist):
    """Quick and dirty nonisomorphism checker used to check isomorphisms."""
    # True as soon as any graph in the list looks isomorphic to G1.
    return any(isomorphic(G1, candidate) for candidate in glist)
if __name__ == '__main__':
    import networkx as nx
    # Build the de-duplicated atlas and report its size.
    G=atlas6()
    print("graph has %d nodes with %d edges"\
          %(nx.number_of_nodes(G),nx.number_of_edges(G)))
    print(nx.number_connected_components(G),"connected components")
    try:
        from networkx import graphviz_layout
    except ImportError:
        raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
    import matplotlib.pyplot as plt
    plt.figure(1,figsize=(8,8))
    # layout graphs with positions using graphviz neato
    pos=nx.graphviz_layout(G,prog="neato")
    # color nodes the same in each connected subgraph
    C=nx.connected_component_subgraphs(G)
    for g in C:
        c=[random.random()]*nx.number_of_nodes(g) # random color...
        nx.draw(g,
             pos,
             node_size=40,
             node_color=c,
             vmin=0.0,
             vmax=1.0,
             with_labels=False
             )
    plt.savefig("atlas.png",dpi=75)
| agpl-3.0 |
rhiever/tpot | tests/one_hot_encoder_tests.py | 2 | 12412 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2014, Matthias Feurer
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import scipy.sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import load_iris, load_boston
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score, KFold
from nose.tools import assert_equal
from tpot.builtins import OneHotEncoder, auto_select_categorical_features, _transform_selected
# Shared fixtures: each ``*_1h`` constant is the expected one-hot encoding of
# the matrix it is named after, under the option spelled out in its suffix.
iris_data = load_iris().data
dense1 = np.array([[0, 1, 0],
                   [0, 0, 0],
                   [1, 1, 0]])
dense1_1h = np.array([[1, 0, 0, 1, 1],
                      [1, 0, 1, 0, 1],
                      [0, 1, 0, 1, 1]])
dense1_1h_minimum_fraction = np.array([[0, 1, 0, 1, 1],
                                       [0, 1, 1, 0, 1],
                                       [1, 0, 0, 1, 1]])
# Including NaNs
dense2 = np.array([[0, np.NaN, 0],
                   [np.NaN, 0, 2],
                   [1, 1, 1],
                   [np.NaN, 0, 1]])
dense2_1h = np.array([[0, 1, 0, 1, 0, 0, 1, 0, 0],
                      [1, 0, 0, 0, 1, 0, 0, 0, 1],
                      [0, 0, 1, 0, 0, 1, 0, 1, 0],
                      [1, 0, 0, 0, 1, 0, 0, 1, 0]])
dense2_1h_minimum_fraction = np.array([[1, 0, 1, 0, 1, 0],
                                       [0, 1, 0, 1, 1, 0],
                                       [1, 0, 1, 0, 0, 1],
                                       [0, 1, 0, 1, 0, 1]])
# Expected result when only the first two columns are treated as categorical.
dense2_partial_1h = np.array([[0., 1., 0., 1., 0., 0., 0.],
                              [1., 0., 0., 0., 1., 0., 2.],
                              [0., 0., 1., 0., 0., 1., 1.],
                              [1., 0., 0., 0., 1., 0., 1.]])
dense2_1h_minimum_fraction_as_sparse = np.array([[0, 0, 1, 0, 0, 0],
                                                 [0, 1, 0, 0, 1, 0],
                                                 [1, 0, 0, 1, 0, 1],
                                                 [0, 1, 0, 0, 0, 1]])
# All NaN slice
dense3 = np.array([[0, 1, np.NaN],
                   [1, 0, np.NaN]])
dense3_1h = np.array([[1, 0, 0, 1, 1],
                      [0, 1, 1, 0, 1]])
sparse1 = scipy.sparse.csc_matrix(([3, 2, 1, 1, 2, 3],
                                   ((1, 4, 5, 2, 3, 5),
                                    (0, 0, 0, 1, 1, 1))), shape=(6, 2))
sparse1_1h = scipy.sparse.csc_matrix(([1, 1, 1, 1, 1, 1],
                                      ((5, 4, 1, 2, 3, 5),
                                       (0, 1, 2, 3, 4, 5))), shape=(6, 6))
sparse1_paratial_1h = scipy.sparse.csc_matrix(([1, 1, 1, 1, 2, 3],
                                               ((5, 4, 1, 2, 3, 5),
                                                (0, 1, 2, 3, 3, 3))),
                                              shape=(6, 4))
# All zeros slice
sparse2 = scipy.sparse.csc_matrix(([2, 1, 0, 0, 0, 0],
                                   ((1, 4, 5, 2, 3, 5),
                                    (0, 0, 0, 1, 1, 1))), shape=(6, 2))
sparse2_1h = scipy.sparse.csc_matrix(([1, 1, 1, 1, 1, 1],
                                      ((5, 4, 1, 2, 3, 5),
                                       (0, 1, 2, 3, 3, 3))), shape=(6, 4))
sparse2_csr = scipy.sparse.csr_matrix(([2, 1, 0, 0, 0, 0],
                                       ((1, 4, 5, 2, 3, 5),
                                        (0, 0, 0, 1, 1, 1))), shape=(6, 2))
sparse2_csr_1h = scipy.sparse.csr_matrix(([1, 1, 1, 1, 1, 1],
                                          ((5, 4, 1, 2, 3, 5),
                                           (0, 1, 2, 3, 3, 3))), shape=(6, 4))
def fit_then_transform(expected, input, categorical_features='all',
                       minimum_fraction=None):
    """Check the sparse encoder via fit_transform and via fit + transform."""
    # Combined fit_transform path.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            minimum_fraction=minimum_fraction)
    result = encoder.fit_transform(input.copy())
    assert_array_almost_equal(expected.astype(float), result.todense())
    # Separate fit followed by transform must agree with the combined path.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            minimum_fraction=minimum_fraction)
    encoder.fit(input.copy())
    result = encoder.transform(input.copy())
    assert_array_almost_equal(expected, result.todense())
def fit_then_transform_dense(expected, input,
                             categorical_features='all',
                             minimum_fraction=None):
    """Check the dense (sparse=False) encoder via both fitting paths."""
    # Combined fit_transform path.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            sparse=False, minimum_fraction=minimum_fraction)
    result = encoder.fit_transform(input.copy())
    assert_array_almost_equal(expected, result)
    # Separate fit followed by transform must agree with the combined path.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            sparse=False, minimum_fraction=minimum_fraction)
    encoder.fit(input.copy())
    result = encoder.transform(input.copy())
    assert_array_almost_equal(expected, result)
def test_auto_detect_categorical():
    """Assert that automatic selection of categorical features works as expected with a threshold of 10."""
    # On the first 16 iris rows, only the last two columns have few enough
    # distinct values to be classified as categorical under threshold 10.
    chosen = auto_select_categorical_features(iris_data[0:16, :], threshold=10)
    assert_equal(chosen, [False, False, True, True])
def test_dense1():
    """Test fit_transform a dense matrix."""
    fit_then_transform(dense1_1h, dense1)
    fit_then_transform_dense(dense1_1h, dense1)
def test_dense1_minimum_fraction():
    """Test fit_transform a dense matrix with minimum_fraction=0.5."""
    fit_then_transform(dense1_1h_minimum_fraction, dense1, minimum_fraction=0.5)
    fit_then_transform_dense(dense1_1h_minimum_fraction, dense1, minimum_fraction=0.5)
def test_dense2():
    """Test fit_transform a dense matrix including NaNs."""
    fit_then_transform(dense2_1h, dense2)
    fit_then_transform_dense(dense2_1h, dense2)
def test_dense2_minimum_fraction():
    """Test fit_transform a dense matrix including NaNs with minimum_fraction=0.3"""
    fit_then_transform(
        dense2_1h_minimum_fraction,
        dense2,
        minimum_fraction=0.3
    )
    fit_then_transform_dense(
        dense2_1h_minimum_fraction,
        dense2,
        minimum_fraction=0.3
    )
def test_dense2_with_non_sparse_components():
    """Test fit_transform a dense matrix including NaNs with specifying categorical_features."""
    fit_then_transform(
        dense2_partial_1h,
        dense2,
        categorical_features=[True, True, False]
    )
    fit_then_transform_dense(
        dense2_partial_1h,
        dense2,
        categorical_features=[True, True, False]
    )
def test_sparse_on_dense2_minimum_fraction():
    """Test fit_transform a dense matrix with minimum_fraction as sparse"""
    sparse = scipy.sparse.csr_matrix(dense2)
    fit_then_transform(
        dense2_1h_minimum_fraction_as_sparse,
        sparse,
        minimum_fraction=0.5
    )
    fit_then_transform_dense(
        dense2_1h_minimum_fraction_as_sparse,
        sparse,
        minimum_fraction=0.5
    )
# Minimum fraction is not too interesting here...
def test_dense3():
    """Test fit_transform a dense matrix including all NaN slice."""
    fit_then_transform(dense3_1h, dense3)
    fit_then_transform_dense(dense3_1h, dense3)
def test_sparse1():
    """Test fit_transform a sparse matrix."""
    fit_then_transform(sparse1_1h.todense(), sparse1)
    fit_then_transform_dense(sparse1_1h.todense(), sparse1)
def test_sparse1_minimum_fraction():
    """Test fit_transform a sparse matrix with minimum_fraction=0.5."""
    # Rare categories are folded together, leaving two indicator columns
    # per feature plus the "infrequent" column.
    expected = np.array([[0, 1, 0, 0, 1, 1],
                         [0, 0, 1, 1, 0, 1]], dtype=float).transpose()
    fit_then_transform(
        expected,
        sparse1,
        minimum_fraction=0.5
    )
    fit_then_transform_dense(
        expected,
        sparse1,
        minimum_fraction=0.5
    )
def test_sparse1_with_non_sparse_components():
    """Test fit_transform a sparse matrix with specifying categorical_features."""
    fit_then_transform(
        sparse1_paratial_1h.todense(),
        sparse1,
        categorical_features=[True, False]
    )
def test_sparse2():
    """Test fit_transform a sparse matrix including all zeros slice."""
    fit_then_transform(sparse2_1h.todense(), sparse2)
    fit_then_transform_dense(sparse2_1h.todense(), sparse2)
def test_sparse2_minimum_fraction():
    """Test fit_transform a sparse matrix including all zeros slice with minimum_fraction=0.5."""
    expected = np.array([[0, 1, 0, 0, 1, 1],
                         [0, 0, 1, 1, 0, 1]], dtype=float).transpose()
    fit_then_transform(
        expected,
        sparse2,
        minimum_fraction=0.5
    )
    fit_then_transform_dense(
        expected,
        sparse2,
        minimum_fraction=0.5
    )
def test_sparse2_csr():
    """Test fit_transform another sparse matrix including all zeros slice."""
    fit_then_transform(sparse2_csr_1h.todense(), sparse2_csr)
    fit_then_transform_dense(sparse2_csr_1h.todense(), sparse2_csr)
def test_transform():
    """Test OneHotEncoder with both dense and sparse matrixes."""
    train = np.array(((0, 1, 2, 3, 4, 5), (0, 1, 2, 3, 4, 5))).transpose()
    unseen = np.array(((0, 1, 2, 6), (0, 1, 6, 7))).transpose()
    # Dense path: transform data containing categories never seen in fit.
    encoder = OneHotEncoder()
    encoder.fit(train)
    assert np.sum(encoder.transform(unseen).todense()) == 5
    # Sparse path: same check starting from a CSR training matrix.
    encoder = OneHotEncoder()
    encoder.fit(scipy.sparse.csr_matrix(train))
    unseen_sparse = scipy.sparse.csr_matrix(unseen)
    assert np.sum(encoder.transform(unseen_sparse).todense()) == 3
def test_transform_selected():
    """Assert _transform_selected return original X when selected is empty list"""
    encoder = OneHotEncoder(categorical_features=[])
    # With nothing selected the helper must hand the input back unchanged.
    result = _transform_selected(dense1, encoder._fit_transform,
                                 encoder.categorical_features, copy=True)
    assert np.allclose(result, dense1)
def test_transform_selected_2():
    """Assert _transform_selected return original X when selected is a list of False values"""
    encoder = OneHotEncoder(categorical_features=[False, False, False])
    # An all-False mask selects nothing, so the input passes through as-is.
    result = _transform_selected(dense1, encoder._fit_transform,
                                 encoder.categorical_features, copy=True)
    assert np.allclose(result, dense1)
def test_k_fold_cv():
    """Test OneHotEncoder with categorical_features='auto'."""
    boston = load_boston()
    # Encoder with automatic feature detection feeding a linear model;
    # the test passes if 10-fold CV runs without error.
    pipeline = make_pipeline(
        OneHotEncoder(
            categorical_features='auto',
            sparse=False,
            minimum_fraction=0.05
        ),
        LinearRegression(),
    )
    cross_val_score(pipeline, boston.data, boston.target,
                    cv=KFold(n_splits=10, shuffle=True))
| lgpl-3.0 |
madjelan/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data: a smooth random image whose ground-truth weights live in
# two square regions of interest (one negative, one positive corner).
n_samples = 200
size = 40  # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X:  # smooth data
    x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
# Scale the noise so the target reaches the requested SNR (in dB).
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise  # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2)  # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
                            memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y)  # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression)  # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y)  # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image:
# true weights vs. the maps recovered by each strategy, side by side.
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
nest/nest-simulator | pynest/examples/glif_cond_neuron.py | 14 | 9655 | # -*- coding: utf-8 -*-
#
# glif_cond_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Conductance-based generalized leaky integrate and fire (GLIF) neuron example
----------------------------------------------------------------------------
Simple example of how to use the ``glif_cond`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and spikes impinging
Voltage traces, injecting current traces, threshold traces, synaptic
conductance traces and spikes are shown.
KEYWORDS: glif_cond
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize the nest and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_cond`` node. The node is
# created by setting relative model mechanism parameters. Other neuron
# parameters are set as default. The five ``glif_cond`` node handles are
# combined as a list. Note that the default number of synaptic ports
# is two for spike inputs. One port is excitation receptor with time
# constant being 0.2 ms and reversal potential being 0.0 mV. The other port is
# inhibition receptor with time constant being 2.0 ms and -85.0 mV.
# Note that users can set as many synaptic ports as needed for ``glif_cond``
# by setting array parameters ``tau_syn`` and ``E_rev`` of the model.
n_lif = nest.Create("glif_cond",
                    params={"spike_dependent_threshold": False,
                            "after_spike_currents": False,
                            "adapting_threshold": False})
n_lif_r = nest.Create("glif_cond",
                      params={"spike_dependent_threshold": True,
                              "after_spike_currents": False,
                              "adapting_threshold": False})
n_lif_asc = nest.Create("glif_cond",
                        params={"spike_dependent_threshold": False,
                                "after_spike_currents": True,
                                "adapting_threshold": False})
n_lif_r_asc = nest.Create("glif_cond",
                          params={"spike_dependent_threshold": True,
                                  "after_spike_currents": True,
                                  "adapting_threshold": False})
n_lif_r_asc_a = nest.Create("glif_cond",
                            params={"spike_dependent_threshold": True,
                                    "after_spike_currents": True,
                                    "adapting_threshold": True})
# Concatenating NodeCollections yields one collection with all five neurons.
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
###############################################################################
# For the stimulation input to the glif_cond neurons, we create one excitation
# spike generator and one inhibition spike generator, each of which generates
# three spikes; we also create one step current generator and a Poisson
# generator, a parrot neuron(to be paired with the Poisson generator).
# The three different injections are spread to three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current. Configuration of
# the Poisson generator includes the definition of the start and stop times and
# the rate of the injected spike train.
espikes = nest.Create("spike_generator",
                      params={"spike_times": [10., 100., 150.],
                              "spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
                      params={"spike_times": [15., 99., 150.],
                              "spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
                 params={"amplitude_values": [400., ],
                         "amplitude_times": [200., ],
                         "start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
                 params={"rate": 15000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect current generator to receptor 0, the excitation spike generator
# and the Poisson generator (via parrot neuron) to receptor 1, and the
# inhibition spike generator to receptor 2 of the GLIF neurons.
# Note that Poisson generator is connected to parrot neuron to transit the
# spikes to the glif_cond neuron.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
             syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
             syn_spec={"delay": resolution, "receptor_type": 2})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 1})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
                 params={"interval": resolution,
                         "record_from": ["V_m", "I", "g_1", "g_2",
                                         "threshold",
                                         "threshold_spike",
                                         "threshold_voltage",
                                         "ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_recorder`` is created and connected to the neurons record the
# spikes generated by the glif_cond neurons.
sr = nest.Create("spike_recorder")
nest.Connect(neurons, sr)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike recorder.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sr.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue) and
# the overall threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of threshold (in yellow) and the voltage component of
# threshold (in black) in another panel; the injected currents (in strong blue),
# the sum of after spike currents (in cyan) in the third panel; and the synaptic
# conductances of the two receptors (in blue and orange) in responding to the
# spike inputs to the neurons in the fourth panel. We plot all these four
# panels for each level of GLIF model in a separated figure.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
    glif_model = glif_models[i]
    node_id = neurons[i].global_id
    plt.figure(glif_model)
    gs = gridspec.GridSpec(4, 1, height_ratios=[2, 1, 1, 1])
    t = data["times"][senders == 1]
    # Panel 1: membrane potential, threshold and emitted spikes.
    ax1 = plt.subplot(gs[0])
    plt.plot(t, data["V_m"][senders == node_id], "b")
    plt.plot(t, data["threshold"][senders == node_id], "g--")
    plt.plot(spikes[spike_senders == node_id],
             [max(data["threshold"][senders == node_id]) * 0.95] *
             len(spikes[spike_senders == node_id]), "r.")
    plt.legend(["V_m", "threshold", "spike"])
    plt.ylabel("V (mV)")
    plt.title("Simulation of glif_cond neuron of " + glif_model)
    # Panel 2: the two threshold components.
    ax2 = plt.subplot(gs[1])
    plt.plot(t, data["threshold_spike"][senders == node_id], "y")
    plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
    plt.legend(["threshold_spike", "threshold_voltage"])
    plt.ylabel("V (mV)")
    # Panel 3: injected current and summed after-spike currents.
    ax3 = plt.subplot(gs[2])
    plt.plot(t, data["I"][senders == node_id], "--")
    plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
    plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
    plt.ylabel("I (pA)")
    plt.xlabel("t (ms)")
    # Panel 4: synaptic conductances of both receptor ports.
    ax4 = plt.subplot(gs[3])
    plt.plot(t, data["g_1"][senders == node_id], "-")
    plt.plot(t, data["g_2"][senders == node_id], "--")
    plt.legend(["G_1", "G_2"])
    plt.ylabel("G (nS)")
    plt.xlabel("t (ms)")
plt.show()
| gpl-2.0 |
cauchycui/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    """Check _hungarian on square, rectangular and degenerate matrices.

    The total cost of the returned assignment must equal the known
    optimum, and must be invariant to transposing the cost matrix.
    """
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),
        # n == 2, m == 0 matrix
        ([[], []],
         0
         ),
    ]
    for cost_matrix, expected_total in matrices:
        cost_matrix = np.array(cost_matrix)
        # _hungarian returns (row, col) pairs; summing the selected entries
        # gives the total assignment cost (0 for the empty assignment).
        total_cost = sum(cost_matrix[r, c]
                         for r, c in _hungarian(cost_matrix))
        assert expected_total == total_cost
        # Transposed input: pairs come back as (col, row) of the original
        # matrix, and the optimal cost must be unchanged.
        total_cost = sum(cost_matrix[r, c]
                         for c, r in _hungarian(cost_matrix.T))
        assert expected_total == total_cost
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/pipeline.py | 13 | 30670 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# License: BSD
from abc import ABCMeta, abstractmethod
from collections import Counter, defaultdict

import numpy as np
from scipy import sparse

from .base import clone, BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed, Memory
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
__all__ = ['Pipeline', 'FeatureUnion']
class _BasePipeline(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Handles parameter management for classifiers composed of named steps.
    """
    @abstractmethod
    def __init__(self):
        pass
    def _replace_step(self, steps_attr, name, new_val):
        """Replace the estimator registered under ``name`` with ``new_val``.

        Assumes ``name`` is a valid step name in ``getattr(self, steps_attr)``.
        """
        # Work on a copy so the steps attribute is swapped atomically.
        new_steps = getattr(self, steps_attr)[:]
        for i, (step_name, _) in enumerate(new_steps):
            if step_name == name:
                new_steps[i] = (name, new_val)
                break
        setattr(self, steps_attr, new_steps)
    def _get_params(self, steps_attr, deep=True):
        """Return own params plus, when ``deep``, each step and its params
        prefixed with ``<step_name>__``."""
        out = super(_BasePipeline, self).get_params(deep=False)
        if not deep:
            return out
        steps = getattr(self, steps_attr)
        out.update(steps)
        for name, estimator in steps:
            # None marks a disabled step; it has no parameters of its own.
            if estimator is None:
                continue
            for key, value in six.iteritems(estimator.get_params(deep=True)):
                out['%s__%s' % (name, key)] = value
        return out
    def _set_params(self, steps_attr, **params):
        """Set parameters, resolving whole-steps-list, single-step
        replacement and nested step parameters in that order."""
        # Ensure strict ordering of parameter setting:
        # 1. All steps
        if steps_attr in params:
            setattr(self, steps_attr, params.pop(steps_attr))
        # 2. Step replacement
        step_names, _ = zip(*getattr(self, steps_attr))
        for name in list(six.iterkeys(params)):
            if '__' not in name and name in step_names:
                self._replace_step(steps_attr, name, params.pop(name))
        # 3. Step parameters and other initialisation arguments
        super(_BasePipeline, self).set_params(**params)
        return self
    def _validate_names(self, names):
        """Raise ValueError when step names are duplicated, collide with
        constructor arguments, or contain the ``__`` separator."""
        if len(set(names)) != len(names):
            raise ValueError('Names provided are not unique: '
                             '{0!r}'.format(list(names)))
        invalid_names = set(names).intersection(self.get_params(deep=False))
        if invalid_names:
            raise ValueError('Step names conflict with constructor arguments: '
                             '{0!r}'.format(sorted(invalid_names)))
        invalid_names = [name for name in names if '__' in name]
        if invalid_names:
            raise ValueError('Step names must not contain __: got '
                             '{0!r}'.format(invalid_names))
class Pipeline(_BasePipeline):
    """Pipeline of transforms with a final estimator.
    Sequentially apply a list of transforms and a final estimator.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.
    The transformers in the pipeline can be cached using ```memory`` argument.
    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters.
    For this, it enables setting parameters of the various steps using their
    names and the parameter name separated by a '__', as in the example below.
    A step's estimator may be replaced entirely by setting the parameter
    with its name to another estimator, or a transformer removed by setting
    to None.
    Read more in the :ref:`User Guide <pipeline>`.
    Parameters
    ----------
    steps : list
        List of (name, transform) tuples (implementing fit/transform) that are
        chained, in the order in which they are chained, with the last object
        an estimator.
    memory : Instance of joblib.Memory or string, optional (default=None)
        Used to caching the fitted transformers of the transformer of the
        pipeline. By default, no cache is performed.
        If a string is given, it is the path to the caching directory.
        Enabling caching triggers a clone of the transformers before fitting.
        Therefore, the transformer instance given to the pipeline cannot be
        inspected directly. Use the attribute ``named_steps`` or ``steps``
        to inspect estimators within the pipeline.
        Caching the transformers is advantageous when fitting is time
        consuming.
    Attributes
    ----------
    named_steps : dict
        Read-only attribute to access any step parameter by user given name.
        Keys are step names and values are steps parameters.
    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.datasets import samples_generator
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.feature_selection import f_regression
    >>> from sklearn.pipeline import Pipeline
    >>> # generate some data to play with
    >>> X, y = samples_generator.make_classification(
    ...     n_informative=5, n_redundant=0, random_state=42)
    >>> # ANOVA SVM-C
    >>> anova_filter = SelectKBest(f_regression, k=5)
    >>> clf = svm.SVC(kernel='linear')
    >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
    >>> # You can set the parameters using the names issued
    >>> # For instance, fit using a k of 10 in the SelectKBest
    >>> # and a parameter 'C' of the svm
    >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
    ...                      # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Pipeline(memory=None,
             steps=[('anova', SelectKBest(...)),
                    ('svc', SVC(...))])
    >>> prediction = anova_svm.predict(X)
    >>> anova_svm.score(X, y)                        # doctest: +ELLIPSIS
    0.829...
    >>> # getting the selected features chosen by anova_filter
    >>> anova_svm.named_steps['anova'].get_support()
    ... # doctest: +NORMALIZE_WHITESPACE
    array([False, False,  True,  True, False, False, True,  True, False,
           True,  False,  True,  True, False, True,  False, True, True,
           False, False], dtype=bool)
    """
    # BaseEstimator interface
    def __init__(self, steps, memory=None):
        # shallow copy of steps
        self.steps = tosequence(steps)
        self._validate_steps()
        self.memory = memory
    def get_params(self, deep=True):
        """Get parameters for this estimator.
        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        return self._get_params('steps', deep=deep)
    def set_params(self, **kwargs):
        """Set the parameters of this estimator.
        Valid parameter keys can be listed with ``get_params()``.
        Returns
        -------
        self
        """
        self._set_params('steps', **kwargs)
        return self
    def _validate_steps(self):
        """Check that all intermediate steps are transformers (or None) and
        that the last step at least implements ``fit``."""
        names, estimators = zip(*self.steps)
        # validate names
        self._validate_names(names)
        # validate estimators
        transformers = estimators[:-1]
        estimator = estimators[-1]
        for t in transformers:
            if t is None:
                continue
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All intermediate steps should be "
                                "transformers and implement fit and transform."
                                " '%s' (type %s) doesn't" % (t, type(t)))
        # We allow last estimator to be None as an identity transformation
        if estimator is not None and not hasattr(estimator, "fit"):
            raise TypeError("Last step of Pipeline should implement fit. "
                            "'%s' (type %s) doesn't"
                            % (estimator, type(estimator)))
    @property
    def _estimator_type(self):
        # The pipeline is a classifier/regressor/etc. according to its
        # final estimator.
        return self.steps[-1][1]._estimator_type
    @property
    def named_steps(self):
        # Read-only mapping of step name -> estimator.
        return dict(self.steps)
    @property
    def _final_estimator(self):
        # The last step; may be None (identity) by construction.
        return self.steps[-1][1]
    # Estimator interface
    def _fit(self, X, y=None, **fit_params):
        """Fit (and transform through) all steps but the last.

        Returns the transformed ``Xt`` and the fit parameters destined for
        the final estimator. Fitted transformers are written back into
        ``self.steps`` (required when they are loaded from the cache).
        """
        self._validate_steps()
        # Setup the memory
        memory = self.memory
        if memory is None:
            memory = Memory(cachedir=None, verbose=0)
        elif isinstance(memory, six.string_types):
            memory = Memory(cachedir=memory, verbose=0)
        elif not isinstance(memory, Memory):
            raise ValueError("'memory' should either be a string or"
                             " a joblib.Memory instance, got"
                             " 'memory={!r}' instead.".format(memory))
        fit_transform_one_cached = memory.cache(_fit_transform_one)
        fit_params_steps = dict((name, {}) for name, step in self.steps
                                if step is not None)
        for pname, pval in six.iteritems(fit_params):
            # 'stepname__param' routes param to the matching step.
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for step_idx, (name, transformer) in enumerate(self.steps[:-1]):
            if transformer is None:
                pass
            else:
                if memory.cachedir is None:
                    # we do not clone when caching is disabled to preserve
                    # backward compatibility
                    cloned_transformer = transformer
                else:
                    cloned_transformer = clone(transformer)
                # Fit or load from cache the current transformer
                Xt, fitted_transformer = fit_transform_one_cached(
                    cloned_transformer, None, Xt, y,
                    **fit_params_steps[name])
                # Replace the transformer of the step with the fitted
                # transformer. This is necessary when loading the transformer
                # from the cache.
                self.steps[step_idx] = (name, fitted_transformer)
        if self._final_estimator is None:
            return Xt, {}
        return Xt, fit_params_steps[self.steps[-1][0]]
    def fit(self, X, y=None, **fit_params):
        """Fit the model
        Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.
        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of each step, where
            each parameter name is prefixed such that parameter ``p`` for step
            ``s`` has key ``s__p``.
        Returns
        -------
        self : Pipeline
            This estimator
        """
        Xt, fit_params = self._fit(X, y, **fit_params)
        if self._final_estimator is not None:
            self._final_estimator.fit(Xt, y, **fit_params)
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Fit the model and transform with the final estimator
        Fits all the transforms one after the other and transforms the
        data, then uses fit_transform on transformed data with the final
        estimator.
        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of each step, where
            each parameter name is prefixed such that parameter ``p`` for step
            ``s`` has key ``s__p``.
        Returns
        -------
        Xt : array-like, shape = [n_samples, n_transformed_features]
            Transformed samples
        """
        last_step = self._final_estimator
        Xt, fit_params = self._fit(X, y, **fit_params)
        if hasattr(last_step, 'fit_transform'):
            return last_step.fit_transform(Xt, y, **fit_params)
        elif last_step is None:
            # final step disabled: the pipeline acts as a pure transformer
            return Xt
        else:
            return last_step.fit(Xt, y, **fit_params).transform(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def predict(self, X):
        """Apply transforms to the data, and predict with the final estimator
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step
            of the pipeline.
        Returns
        -------
        y_pred : array-like
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            if transform is not None:
                Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def fit_predict(self, X, y=None, **fit_params):
        """Applies fit_predict of last step in pipeline after transforms.
        Applies fit_transforms of a pipeline to the data, followed by the
        fit_predict method of the final estimator in the pipeline. Valid
        only if the final estimator implements fit_predict.
        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of
            the pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps
            of the pipeline.
        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of each step, where
            each parameter name is prefixed such that parameter ``p`` for step
            ``s`` has key ``s__p``.
        Returns
        -------
        y_pred : array-like
        """
        Xt, fit_params = self._fit(X, y, **fit_params)
        return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
    @if_delegate_has_method(delegate='_final_estimator')
    def predict_proba(self, X):
        """Apply transforms, and predict_proba of the final estimator
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step
            of the pipeline.
        Returns
        -------
        y_proba : array-like, shape = [n_samples, n_classes]
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            if transform is not None:
                Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_proba(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def decision_function(self, X):
        """Apply transforms, and decision_function of the final estimator
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step
            of the pipeline.
        Returns
        -------
        y_score : array-like, shape = [n_samples, n_classes]
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            if transform is not None:
                Xt = transform.transform(Xt)
        return self.steps[-1][-1].decision_function(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def predict_log_proba(self, X):
        """Apply transforms, and predict_log_proba of the final estimator
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step
            of the pipeline.
        Returns
        -------
        y_score : array-like, shape = [n_samples, n_classes]
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            if transform is not None:
                Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_log_proba(Xt)
    @property
    def transform(self):
        """Apply transforms, and transform with the final estimator
        This also works where final estimator is ``None``: all prior
        transformations are applied.
        Parameters
        ----------
        X : iterable
            Data to transform. Must fulfill input requirements of first step
            of the pipeline.
        Returns
        -------
        Xt : array-like, shape = [n_samples, n_transformed_features]
        """
        # _final_estimator is None or has transform, otherwise attribute error
        if self._final_estimator is not None:
            self._final_estimator.transform
        return self._transform
    def _transform(self, X):
        """Run X through the ``transform`` of every (non-None) step."""
        Xt = X
        for name, transform in self.steps:
            if transform is not None:
                Xt = transform.transform(Xt)
        return Xt
    @property
    def inverse_transform(self):
        """Apply inverse transformations in reverse order
        All estimators in the pipeline must support ``inverse_transform``.
        Parameters
        ----------
        Xt : array-like, shape = [n_samples, n_transformed_features]
            Data samples, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features. Must fulfill
            input requirements of last step of pipeline's
            ``inverse_transform`` method.
        Returns
        -------
        Xt : array-like, shape = [n_samples, n_features]
        """
        # raise AttributeError if necessary for hasattr behaviour
        for name, transform in self.steps:
            if transform is not None:
                transform.inverse_transform
        return self._inverse_transform
    def _inverse_transform(self, X):
        """Run X through ``inverse_transform`` of each step, last-to-first."""
        Xt = X
        for name, transform in self.steps[::-1]:
            if transform is not None:
                Xt = transform.inverse_transform(Xt)
        return Xt
    @if_delegate_has_method(delegate='_final_estimator')
    def score(self, X, y=None, sample_weight=None):
        """Apply transforms, and score with the final estimator
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step
            of the pipeline.
        y : iterable, default=None
            Targets used for scoring. Must fulfill label requirements for all
            steps of the pipeline.
        sample_weight : array-like, default=None
            If not None, this argument is passed as ``sample_weight`` keyword
            argument to the ``score`` method of the final estimator.
        Returns
        -------
        score : float
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            if transform is not None:
                Xt = transform.transform(Xt)
        score_params = {}
        if sample_weight is not None:
            score_params['sample_weight'] = sample_weight
        return self.steps[-1][-1].score(Xt, y, **score_params)
    @property
    def classes_(self):
        # Exposed for classifier pipelines; delegates to the final estimator.
        return self.steps[-1][-1].classes_
    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
    """Construct a Pipeline from the given estimators.
    This is a shorthand for the Pipeline constructor; it does not require, and
    does not permit, naming the estimators. Instead, their names will be set
    to the lowercase of their types automatically.
    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
    ...     # doctest: +NORMALIZE_WHITESPACE
    Pipeline(memory=None,
             steps=[('standardscaler',
                     StandardScaler(copy=True, with_mean=True, with_std=True)),
                    ('gaussiannb', GaussianNB(priors=None))])
    Returns
    -------
    p : Pipeline
    """
    # Derive unique step names from the estimator types, then build the
    # pipeline from the resulting (name, estimator) pairs.
    named_steps = _name_estimators(steps)
    return Pipeline(named_steps)
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, weight, X):
res = transformer.transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight
def _fit_transform_one(transformer, weight, X, y,
**fit_params):
if hasattr(transformer, 'fit_transform'):
res = transformer.fit_transform(X, y, **fit_params)
else:
res = transformer.fit(X, y, **fit_params).transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res, transformer
return res * weight, transformer
class FeatureUnion(_BasePipeline, TransformerMixin):
    """Concatenates results of multiple transformer objects.
    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.
    Parameters of the transformers may be set using its name and the parameter
    name separated by a '__'. A transformer may be replaced entirely by
    setting the parameter with its name to another transformer,
    or removed by setting to ``None``.
    Read more in the :ref:`User Guide <feature_union>`.
    Parameters
    ----------
    transformer_list : list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.
    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).
    transformer_weights : dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = tosequence(transformer_list)
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights
        self._validate_transformers()
    def get_params(self, deep=True):
        """Get parameters for this estimator.
        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        return self._get_params('transformer_list', deep=deep)
    def set_params(self, **kwargs):
        """Set the parameters of this estimator.
        Valid parameter keys can be listed with ``get_params()``.
        Returns
        -------
        self
        """
        self._set_params('transformer_list', **kwargs)
        return self
    def _validate_transformers(self):
        """Check that each transformer (other than None) implements both
        fit (or fit_transform) and transform."""
        names, transformers = zip(*self.transformer_list)
        # validate names
        self._validate_names(names)
        # validate estimators
        for t in transformers:
            if t is None:
                continue
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All estimators should implement fit and "
                                "transform. '%s' (type %s) doesn't" %
                                (t, type(t)))
    def _iter(self):
        """Generate (name, est, weight) tuples excluding None transformers
        """
        get_weight = (self.transformer_weights or {}).get
        return ((name, trans, get_weight(name))
                for name, trans in self.transformer_list
                if trans is not None)
    def get_feature_names(self):
        """Get feature names from all transformers.
        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        feature_names = []
        for name, trans, weight in self._iter():
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s (type %s) does not "
                                     "provide get_feature_names."
                                     % (str(name), type(trans).__name__))
            # Prefix each feature with its transformer's name for uniqueness.
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names
    def fit(self, X, y=None):
        """Fit all transformers using X.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data, used to fit transformers.
        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.
        Returns
        -------
        self : FeatureUnion
            This estimator
        """
        self._validate_transformers()
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for _, trans, _ in self._iter())
        self._update_transformer_list(transformers)
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers, transform the data and concatenate results.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data to be transformed.
        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        self._validate_transformers()
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, weight, X, y,
                                        **fit_params)
            for name, trans, weight in self._iter())
        if not result:
            # All transformers are None
            return np.zeros((X.shape[0], 0))
        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        # Stack sparse if any block is sparse, otherwise stack dense.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, weight, X)
            for name, trans, weight in self._iter())
        if not Xs:
            # All transformers are None
            return np.zeros((X.shape[0], 0))
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
    def _update_transformer_list(self, transformers):
        """Write fitted transformers back into ``transformer_list``,
        leaving the disabled (None) entries in place."""
        transformers = iter(transformers)
        self.transformer_list[:] = [
            (name, None if old is None else next(transformers))
            for name, old in self.transformer_list
        ]
def make_union(*transformers, **kwargs):
    """Construct a FeatureUnion from the given transformers.
    This is a shorthand for the FeatureUnion constructor; it does not require,
    and does not permit, naming the transformers. Instead, they will be given
    names automatically based on their types. It also does not allow weighting.
    Parameters
    ----------
    *transformers : list of estimators
    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).
    Returns
    -------
    f : FeatureUnion
    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> from sklearn.pipeline import make_union
    >>> make_union(PCA(), TruncatedSVD())    # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
           transformer_list=[('pca',
                              PCA(copy=True, iterated_power='auto',
                                  n_components=None, random_state=None,
                                  svd_solver='auto', tol=0.0, whiten=False)),
                             ('truncatedsvd',
                              TruncatedSVD(algorithm='randomized',
                              n_components=2, n_iter=5,
                              random_state=None, tol=0.0))],
           transformer_weights=None)
    """
    n_jobs = kwargs.pop('n_jobs', 1)
    if kwargs:
        # We do not currently support `transformer_weights` as we may want to
        # change its type spec in make_union
        unexpected = list(kwargs.keys())[0]
        raise TypeError('Unknown keyword arguments: "{}"'.format(unexpected))
    return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs)
| bsd-3-clause |
TakayukiSakai/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 5 | 8987 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
# Base URL of Matt Mahoney's text-compression corpus mirror.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
  """Fetch `filename` from the corpus base `url` unless it already exists
  locally, then verify that its on-disk size equals `expected_bytes`."""
  if not os.path.exists(filename):
    # urlretrieve returns (local_path, headers); keep only the path.
    filename, _ = urllib.request.urlretrieve(url + filename, filename)
  actual_bytes = os.stat(filename).st_size
  if actual_bytes != expected_bytes:
    print(actual_bytes)
    raise Exception(
        'Failed to verify ' + filename + '. Can you get to it with a browser?')
  print('Found and verified', filename)
  return filename
# Fetch the ~31 MB text8 corpus (downloads only on the first run).
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
  """Return the corpus held in the first member of the zip archive as a
  list of whitespace-separated word strings."""
  with zipfile.ZipFile(filename) as f:
    first_member = f.namelist()[0]
    return tf.compat.as_str(f.read(first_member)).split()
# Load the whole corpus into memory as a flat list of word strings.
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
# Only the 50000 most frequent words get their own id.
vocabulary_size = 50000
def build_dataset(words, vocabulary_size=50000):
  """Map the corpus to integer word ids.

  The `vocabulary_size - 1` most frequent words keep their own id; every
  other word is mapped to the 'UNK' token (id 0). `vocabulary_size` was
  previously a hard-coded module global; it is now a parameter whose
  default matches the module-level setting, so existing calls behave
  identically.

  Args:
    words: list of word strings.
    vocabulary_size: total vocabulary size including the UNK bucket.

  Returns:
    data: list of word ids, one per input word.
    count: list of [word, frequency] pairs; count[0] is ['UNK', n_unk].
    dictionary: mapping word -> id.
    reverse_dictionary: mapping id -> word.
  """
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
  dictionary = dict()
  for word, _ in count:
    # ids are assigned in decreasing frequency order, UNK first.
    dictionary[word] = len(dictionary)
  data = list()
  unk_count = 0
  for word in words:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count += 1
    data.append(index)
  # Back-fill the UNK frequency now that it is known.
  count[0][1] = unk_count
  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
  return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
# Module-level cursor into `data`; advanced in place by generate_batch.
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
  """Produce one skip-gram training batch from the global `data` stream.

  Each center word yields `num_skips` (center, context) pairs, with the
  context word drawn at random (without repetition) from a window of
  `skip_window` words on either side. Advances the module-level
  `data_index` cursor, wrapping around at the end of the corpus.

  Returns:
    batch: int32 array of shape (batch_size,) of center-word ids.
    labels: int32 array of shape (batch_size, 1) of context-word ids.
  """
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  # Sliding window over the corpus; maxlen makes appends drop the oldest.
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [ skip_window ]
    for j in range(num_skips):
      # Rejection-sample a context position not used for this center yet.
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels
# Smoke-check the batcher: print eight (center -> context) pairs.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
  print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64    # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)
    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
      tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
                     num_sampled, vocabulary_size))
  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
  # Compute the cosine similarity between minibatch examples and all embeddings.
  # L2-normalising the embeddings reduces cosine similarity to one matmul.
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)
  # Add variable initializer.
  init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
  # We must initialize all variables before we use them.
  init.run()
  print("Initialized")
  average_loss = 0
  for step in xrange(num_steps):
    batch_inputs, batch_labels = generate_batch(
        batch_size, num_skips, skip_window)
    feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += loss_val
    if step % 2000 == 0:
      if step > 0:
        average_loss /= 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print("Average loss at step ", step, ": ", average_loss)
      average_loss = 0
    # Note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in xrange(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8  # number of nearest neighbors
        # Skip index 0: the most similar word to any word is itself.
        nearest = (-sim[i, :]).argsort()[1:top_k+1]
        log_str = "Nearest to %s:" % valid_word
        for k in xrange(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log_str = "%s %s," % (log_str, close_word)
        print(log_str)
  final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
  """Scatter-plot 2-D embedding coordinates, annotating each point.

  low_dim_embs is an array with one (x, y) row per label; the figure is
  written to *filename*. Requires matplotlib.pyplot bound as ``plt``.
  """
  assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
  plt.figure(figsize=(18, 18))  # in inches
  for label, (x, y) in zip(labels, low_dim_embs):
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
  plt.savefig(filename)
# Visualize the learned embeddings with t-SNE when the optional plotting
# dependencies are installed; otherwise print an install hint. Uses
# final_embeddings and reverse_dictionary from the training section above.
try:
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt

  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
  # Only project the first 500 words to keep t-SNE runtime reasonable.
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels)

except ImportError:
  print("Please install sklearn and matplotlib to visualize embeddings.")
| apache-2.0 |
pavel-paulau/perfreports | perfreports/plotter.py | 1 | 1221 | import matplotlib
# Select the non-interactive Agg backend before pyplot is imported.
matplotlib.use('Agg')

# One consolidated rcParams update; keys and values are exactly the ones
# the original per-key updates applied, in the same order.
matplotlib.rcParams.update({
    'font.size': 5,
    'lines.linewidth': 0.5,
    'lines.marker': '.',
    'lines.markersize': 3,
    'lines.linestyle': 'None',
    'axes.linewidth': 0.5,
    'axes.grid': True,
    'axes.formatter.limits': (-6, 6),
    'legend.numpoints': 1,
    'legend.fancybox': True,
    'legend.markerscale': 1.5,
    'legend.loc': 0,
    'legend.frameon': True,
})
import matplotlib.pyplot as plt
from perfreports.constants import PALETTE
def plot(series, ylabel, fname):
    """Render *series* (a pandas Series) as a time plot and save to *fname*.

    The x axis is the series index (elapsed seconds); the figure is written
    as a 200 dpi image and the pyplot figure is closed afterwards.
    """
    fig = plt.figure(figsize=(4.66, 2.625))
    ax = fig.add_subplot(1, 1, 1)
    ax.ticklabel_format(useOffset=False)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Time elapsed, sec')
    ax.plot(series.index, series, color=PALETTE[0])

    # Pin the bottom of the y axis at zero and leave ~5% headroom on top
    # (at least 1 so a flat-zero series still gets a visible axis).
    _, top = ax.get_ylim()
    plt.ylim(ymin=0, ymax=max(1, top * 1.05))

    fig.tight_layout()
    fig.savefig(fname, dpi=200)
    plt.close()
| apache-2.0 |
dopplershift/MetPy | src/metpy/io/gini.py | 1 | 17130 | # Copyright (c) 2015,2016,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools to process GINI-formatted products."""
import contextlib
from datetime import datetime
from enum import Enum
from io import BytesIO
from itertools import repeat # noqa: I202
import logging
import re
import numpy as np
from xarray import Variable
from xarray.backends.common import AbstractDataStore
from xarray.core.utils import FrozenDict
from ._tools import Bits, IOBuffer, NamedStruct, open_as_needed, zlib_decompress_all_frames
from ..package_tools import Exporter
exporter = Exporter(globals())
log = logging.getLogger(__name__)
def _make_datetime(s):
r"""Convert 7 bytes from a GINI file to a `datetime` instance."""
year, month, day, hour, minute, second, cs = s
if year < 70:
year += 100
return datetime(1900 + year, month, day, hour, minute, second, 10000 * cs)
def _scaled_int(s):
    r"""Convert a 3 byte string to a signed integer value."""
    # The top bit of the first byte is the sign: map 0 -> +1, 1 -> -1.
    sign = 1 - ((s[0] & 0x80) >> 6)
    # The magnitude is the remaining 23 bits, big-endian.
    magnitude = ((s[0] & 0x7f) << 16) | (s[1] << 8) | s[2]
    log.debug('Source: %s Int: %x Sign: %d', ' '.join(hex(c) for c in s), magnitude, sign)
    # Values are stored scaled by 1e4 in the file format.
    return (sign * magnitude) / 10000.
def _name_lookup(names):
r"""Create an io helper to convert an integer to a named value."""
mapper = dict(zip(range(len(names)), names))
def lookup(val):
return mapper.get(val, 'UnknownValue')
return lookup
class GiniProjection(Enum):
    r"""Represents projection values in GINI files."""

    # Numeric codes are the values stored in the product description
    # block's projection field (read in GiniFile.prod_desc_fmt).
    mercator = 1
    lambert_conformal = 3
    polar_stereographic = 5
@exporter.export
class GiniFile(AbstractDataStore):
    """A class that handles reading the GINI format satellite images from the NWS.

    This class attempts to decode every byte that is in a given GINI file.

    Notes
    -----
    The internal data structures that things are decoded into are subject to change.
    """

    # Raster value that flags a missing pixel (used as missing_value below).
    missing = 255

    # Matches a WMO header: group 1 is the product id (e.g. 'TIGE05'),
    # group 2 the 3-letter site id, terminated by CR CR LF.
    wmo_finder = re.compile('(T\\w{3}\\d{2})[\\s\\w\\d]+\\w*(\\w{3})\r\r\n')

    # Lookup tables indexed by the corresponding byte in the product
    # description block (see prod_desc_fmt below).
    crafts = ['Unknown', 'Unknown', 'Miscellaneous', 'JERS', 'ERS/QuikSCAT', 'POES/NPOESS',
              'Composite', 'DMSP', 'GMS', 'METEOSAT', 'GOES-7', 'GOES-8', 'GOES-9',
              'GOES-10', 'GOES-11', 'GOES-12', 'GOES-13', 'GOES-14', 'GOES-15', 'GOES-16']

    sectors = ['NH Composite', 'East CONUS', 'West CONUS', 'Alaska Regional',
               'Alaska National', 'Hawaii Regional', 'Hawaii National', 'Puerto Rico Regional',
               'Puerto Rico National', 'Supernational', 'NH Composite', 'Central CONUS',
               'East Floater', 'West Floater', 'Central Floater', 'Polar Floater']

    channels = ['Unknown', 'Visible', 'IR (3.9 micron)', 'WV (6.5/6.7 micron)',
                'IR (11 micron)', 'IR (12 micron)', 'IR (13 micron)', 'IR (1.3 micron)',
                'Reserved', 'Reserved', 'Reserved', 'Reserved', 'Reserved', 'LI (Imager)',
                'PW (Imager)', 'Surface Skin Temp (Imager)', 'LI (Sounder)', 'PW (Sounder)',
                'Surface Skin Temp (Sounder)', 'CAPE', 'Land-sea Temp', 'WINDEX',
                'Dry Microburst Potential Index', 'Microburst Day Potential Index',
                'Convective Inhibition', 'Volcano Imagery', 'Scatterometer', 'Cloud Top',
                'Cloud Amount', 'Rainfall Rate', 'Surface Wind Speed', 'Surface Wetness',
                'Ice Concentration', 'Ice Type', 'Ice Edge', 'Cloud Water Content',
                'Surface Type', 'Snow Indicator', 'Snow/Water Content', 'Volcano Imagery',
                'Reserved', 'Sounder (14.71 micron)', 'Sounder (14.37 micron)',
                'Sounder (14.06 micron)', 'Sounder (13.64 micron)', 'Sounder (13.37 micron)',
                'Sounder (12.66 micron)', 'Sounder (12.02 micron)', 'Sounder (11.03 micron)',
                'Sounder (9.71 micron)', 'Sounder (7.43 micron)', 'Sounder (7.02 micron)',
                'Sounder (6.51 micron)', 'Sounder (4.57 micron)', 'Sounder (4.52 micron)',
                'Sounder (4.45 micron)', 'Sounder (4.13 micron)', 'Sounder (3.98 micron)',
                # Percent Normal TPW found empirically from Service Change Notice 20-03
                'Sounder (3.74 micron)', 'Sounder (Visible)', 'Percent Normal TPW']

    # Binary layouts ('>' = big-endian) for the pieces of the product
    # description block; third tuple entries convert raw bytes on read.
    prod_desc_fmt = NamedStruct([('source', 'b'),
                                 ('creating_entity', 'b', _name_lookup(crafts)),
                                 ('sector_id', 'b', _name_lookup(sectors)),
                                 ('channel', 'b', _name_lookup(channels)),
                                 ('num_records', 'H'), ('record_len', 'H'),
                                 ('datetime', '7s', _make_datetime),
                                 ('projection', 'b', GiniProjection), ('nx', 'H'), ('ny', 'H'),
                                 ('la1', '3s', _scaled_int), ('lo1', '3s', _scaled_int)
                                 ], '>', 'ProdDescStart')

    lc_ps_fmt = NamedStruct([('reserved', 'b'), ('lov', '3s', _scaled_int),
                             ('dx', '3s', _scaled_int), ('dy', '3s', _scaled_int),
                             ('proj_center', 'b')], '>', 'LambertOrPolarProjection')

    mercator_fmt = NamedStruct([('resolution', 'b'), ('la2', '3s', _scaled_int),
                                ('lo2', '3s', _scaled_int), ('di', 'H'), ('dj', 'H')
                                ], '>', 'MercatorProjection')

    prod_desc2_fmt = NamedStruct([('scanning_mode', 'b', Bits(3)),
                                  ('lat_in', '3s', _scaled_int), ('resolution', 'b'),
                                  ('compression', 'b'), ('version', 'b'), ('pdb_size', 'H'),
                                  ('nav_cal', 'b')], '>', 'ProdDescEnd')

    nav_fmt = NamedStruct([('sat_lat', '3s', _scaled_int), ('sat_lon', '3s', _scaled_int),
                           ('sat_height', 'H'), ('ur_lat', '3s', _scaled_int),
                           ('ur_lon', '3s', _scaled_int)], '>', 'Navigation')

    def __init__(self, filename):
        r"""Create an instance of `GiniFile`.

        Parameters
        ----------
        filename : str or file-like object
            If str, the name of the file to be opened. Gzip-ed files are
            recognized with the extension ``'.gz'``, as are bzip2-ed files with
            the extension ``'.bz2'`` If `filename` is a file-like object,
            this will be read from directly.
        """
        fobj = open_as_needed(filename)

        # Just read in the entire set of data at once
        with contextlib.closing(fobj):
            self._buffer = IOBuffer.fromfile(fobj)

        # Pop off the WMO header if we find it
        self.wmo_code = ''
        self._process_wmo_header()
        log.debug('First wmo code: %s', self.wmo_code)

        # Decompress the data if necessary, and if so, pop off new header
        log.debug('Length before decompression: %s', len(self._buffer))
        self._buffer = IOBuffer(self._buffer.read_func(zlib_decompress_all_frames))
        log.debug('Length after decompression: %s', len(self._buffer))

        # Process WMO header inside compressed data if necessary
        self._process_wmo_header()
        log.debug('2nd wmo code: %s', self.wmo_code)

        # Read product description start; the mark lets us jump past the
        # full (pdb_size bytes) block later regardless of what we parsed.
        start = self._buffer.set_mark()

        #: :desc: Decoded first section of product description block
        #: :type: namedtuple
        self.prod_desc = self._buffer.read_struct(self.prod_desc_fmt)
        log.debug(self.prod_desc)

        #: :desc: Decoded geographic projection information
        #: :type: namedtuple
        self.proj_info = None

        # Handle projection-dependent parts
        if self.prod_desc.projection in (GiniProjection.lambert_conformal,
                                         GiniProjection.polar_stereographic):
            self.proj_info = self._buffer.read_struct(self.lc_ps_fmt)
        elif self.prod_desc.projection == GiniProjection.mercator:
            self.proj_info = self._buffer.read_struct(self.mercator_fmt)
        else:
            log.warning('Unknown projection: %d', self.prod_desc.projection)
        log.debug(self.proj_info)

        # Read the rest of the guaranteed product description block (PDB)
        #: :desc: Decoded second section of product description block
        #: :type: namedtuple
        self.prod_desc2 = self._buffer.read_struct(self.prod_desc2_fmt)
        log.debug(self.prod_desc2)

        if self.prod_desc2.nav_cal not in (0, -128):  # TODO: See how GEMPAK/MCIDAS parses
            # Only warn if there actually seems to be useful navigation data
            if self._buffer.get_next(self.nav_fmt.size) != b'\x00' * self.nav_fmt.size:
                log.warning('Navigation/Calibration unhandled: %d', self.prod_desc2.nav_cal)
            if self.prod_desc2.nav_cal in (1, 2):
                self.navigation = self._buffer.read_struct(self.nav_fmt)
                log.debug(self.navigation)

        # Catch bad PDB with size set to 0
        if self.prod_desc2.pdb_size == 0:
            log.warning('Adjusting bad PDB size from 0 to 512.')
            self.prod_desc2 = self.prod_desc2._replace(pdb_size=512)

        # Jump past the remaining empty bytes in the product description block
        self._buffer.jump_to(start, self.prod_desc2.pdb_size)

        # Read the actual raster--unless it's PNG compressed, in which case that happens later
        blob = self._buffer.read(self.prod_desc.num_records * self.prod_desc.record_len)

        # Check for end marker: one record's worth of repeated 0xff 0x00.
        end = self._buffer.read(self.prod_desc.record_len)
        if end != b''.join(repeat(b'\xff\x00', self.prod_desc.record_len // 2)):
            log.warning('End marker not as expected: %s', end)

        # Check to ensure that we processed all of the data
        if not self._buffer.at_end():
            if not blob:
                # Empty blob + leftover bytes implies a PNG-compressed
                # raster; decode it via matplotlib's image reader.
                log.debug('No data read yet, trying to decompress remaining data as an image.')
                from matplotlib.image import imread
                blob = (imread(BytesIO(self._buffer.read())) * 255).astype('uint8')
            else:
                log.warning('Leftover unprocessed data beyond EOF marker: %s',
                            self._buffer.get_next(10))

        # Raster stored top row first, shape (ny, nx).
        self.data = np.array(blob).reshape((self.prod_desc.ny,
                                            self.prod_desc.nx))

    def _process_wmo_header(self):
        """Read off the WMO header from the file, if necessary."""
        # Look for the header pattern in the next 64 bytes; on a match,
        # record the codes and skip the buffer past the header.
        data = self._buffer.get_next(64).decode('utf-8', 'ignore')
        match = self.wmo_finder.search(data)
        if match:
            self.wmo_code = match.groups()[0]
            self.siteID = match.groups()[-1]
            self._buffer.skip(match.end())

    def __str__(self):
        """Return a string representation of the product."""
        parts = [self.__class__.__name__ + ': {0.creating_entity} {0.sector_id} {0.channel}',
                 'Time: {0.datetime}', 'Size: {0.ny}x{0.nx}',
                 'Projection: {0.projection.name}',
                 'Lower Left Corner (Lon, Lat): ({0.lo1}, {0.la1})',
                 'Resolution: {1.resolution}km']
        return '\n\t'.join(parts).format(self.prod_desc, self.prod_desc2)

    def _make_proj_var(self):
        """Build the CF grid-mapping variable describing the projection."""
        proj_info = self.proj_info
        prod_desc2 = self.prod_desc2
        attrs = {'earth_radius': 6371200.0}
        if self.prod_desc.projection == GiniProjection.lambert_conformal:
            attrs['grid_mapping_name'] = 'lambert_conformal_conic'
            attrs['standard_parallel'] = prod_desc2.lat_in
            attrs['longitude_of_central_meridian'] = proj_info.lov
            attrs['latitude_of_projection_origin'] = prod_desc2.lat_in
        elif self.prod_desc.projection == GiniProjection.polar_stereographic:
            attrs['grid_mapping_name'] = 'polar_stereographic'
            attrs['straight_vertical_longitude_from_pole'] = proj_info.lov
            # proj_center selects the pole: nonzero -> south, zero -> north.
            attrs['latitude_of_projection_origin'] = -90 if proj_info.proj_center else 90
            attrs['standard_parallel'] = 60.0  # See Note 2 for Table 4.4A in ICD
        elif self.prod_desc.projection == GiniProjection.mercator:
            attrs['grid_mapping_name'] = 'mercator'
            attrs['longitude_of_projection_origin'] = self.prod_desc.lo1
            attrs['latitude_of_projection_origin'] = self.prod_desc.la1
            attrs['standard_parallel'] = prod_desc2.lat_in
        else:
            raise NotImplementedError(
                f'Unhandled GINI Projection: {self.prod_desc.projection}')
        return 'projection', Variable((), 0, attrs)

    def _make_time_var(self):
        """Build a scalar time variable: seconds since midnight of the product day."""
        base_time = self.prod_desc.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        offset = self.prod_desc.datetime - base_time
        time_var = Variable((), data=offset.seconds + offset.microseconds / 1e6,
                            attrs={'units': 'seconds since ' + base_time.isoformat()})
        return 'time', time_var

    def _get_proj_and_res(self):
        """Return (pyproj.Proj, dx, dy) matching the file's projection (dx/dy in km)."""
        import pyproj

        proj_info = self.proj_info
        prod_desc2 = self.prod_desc2
        kwargs = {'a': 6371200.0, 'b': 6371200.0}
        if self.prod_desc.projection == GiniProjection.lambert_conformal:
            kwargs['proj'] = 'lcc'
            kwargs['lat_0'] = prod_desc2.lat_in
            kwargs['lon_0'] = proj_info.lov
            kwargs['lat_1'] = prod_desc2.lat_in
            kwargs['lat_2'] = prod_desc2.lat_in
            dx, dy = proj_info.dx, proj_info.dy
        elif self.prod_desc.projection == GiniProjection.polar_stereographic:
            kwargs['proj'] = 'stere'
            kwargs['lon_0'] = proj_info.lov
            kwargs['lat_0'] = -90 if proj_info.proj_center else 90
            kwargs['lat_ts'] = 60.0  # See Note 2 for Table 4.4A in ICD
            # NOTE(review): False is used where the numeric offset 0 is
            # meant; it works only because False == 0 in Python.
            kwargs['x_0'] = False  # Easting
            kwargs['y_0'] = False  # Northing
            dx, dy = proj_info.dx, proj_info.dy
        elif self.prod_desc.projection == GiniProjection.mercator:
            kwargs['proj'] = 'merc'
            kwargs['lat_0'] = self.prod_desc.la1
            kwargs['lon_0'] = self.prod_desc.lo1
            kwargs['lat_ts'] = prod_desc2.lat_in
            kwargs['x_0'] = False  # Easting
            kwargs['y_0'] = False  # Northing
            dx, dy = prod_desc2.resolution, prod_desc2.resolution
        return pyproj.Proj(**kwargs), dx, dy

    def _make_coord_vars(self):
        """Build projected x/y coordinate variables plus 2-D lon/lat grids."""
        proj, dx, dy = self._get_proj_and_res()

        # Get projected location of lower left point
        x0, y0 = proj(self.prod_desc.lo1, self.prod_desc.la1)

        # Coordinate variable for x (dx is in km, locations in meters)
        xlocs = x0 + np.arange(self.prod_desc.nx) * (1000. * dx)
        attrs = {'units': 'm', 'long_name': 'x coordinate of projection',
                 'standard_name': 'projection_x_coordinate'}
        x_var = Variable(('x',), xlocs, attrs)

        # Now y--Need to flip y because we calculated from the lower left corner,
        # but the raster data is stored with top row first.
        ylocs = (y0 + np.arange(self.prod_desc.ny) * (1000. * dy))[::-1]
        attrs = {'units': 'm', 'long_name': 'y coordinate of projection',
                 'standard_name': 'projection_y_coordinate'}
        y_var = Variable(('y',), ylocs, attrs)

        # Get the two-D lon,lat grid as well
        x, y = np.meshgrid(xlocs, ylocs)
        lon, lat = proj(x, y, inverse=True)
        lon_var = Variable(('y', 'x'), data=lon,
                           attrs={'long_name': 'longitude', 'units': 'degrees_east'})
        lat_var = Variable(('y', 'x'), data=lat,
                           attrs={'long_name': 'latitude', 'units': 'degrees_north'})
        return [('x', x_var), ('y', y_var), ('lon', lon_var), ('lat', lat_var)]

    def get_variables(self):
        """Get all variables in the file.

        This is used by `xarray.open_dataset`.
        """
        variables = [self._make_time_var()]
        proj_var_name, proj_var = self._make_proj_var()
        variables.append((proj_var_name, proj_var))
        variables.extend(self._make_coord_vars())

        # Now the data: strip any parenthetical from the channel name,
        # e.g. 'IR (11 micron)' -> 'IR', to get the variable name.
        name = self.prod_desc.channel
        if '(' in name:
            name = name.split('(')[0].rstrip()

        # Mask pixels equal to the class-level `missing` sentinel (255).
        missing_val = self.missing
        attrs = {'long_name': self.prod_desc.channel, 'missing_value': missing_val,
                 'coordinates': 'y x time', 'grid_mapping': proj_var_name}
        data_var = Variable(('y', 'x'),
                            data=np.ma.array(self.data,
                                             mask=self.data == missing_val),
                            attrs=attrs)
        variables.append((name, data_var))

        return FrozenDict(variables)

    def get_attrs(self):
        """Get the global attributes.

        This is used by `xarray.open_dataset`.
        """
        return FrozenDict(satellite=self.prod_desc.creating_entity,
                          sector=self.prod_desc.sector_id)
| bsd-3-clause |
louispotok/pandas | pandas/tests/indexes/timedeltas/test_timedelta_range.py | 3 | 3021 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Second
from pandas import to_timedelta, timedelta_range
class TestTimedeltas(object):
    # NOTE(review): this test module uses pandas.util.testing (tm) and
    # tm.assert_raises_regex, which exist only in older pandas releases.

    def test_timedelta_range(self):
        # periods + freq construction matches to_timedelta over a range.
        expected = to_timedelta(np.arange(5), unit='D')
        result = timedelta_range('0 days', periods=5, freq='D')
        tm.assert_index_equal(result, expected)

        # start + end + freq.
        expected = to_timedelta(np.arange(11), unit='D')
        result = timedelta_range('0 days', '10 days', freq='D')
        tm.assert_index_equal(result, expected)

        # Endpoints carrying a time-of-day component.
        expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
        result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
                                 freq='D')
        tm.assert_index_equal(result, expected)

        # Multiple-of-a-day frequency.
        expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
        result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
        tm.assert_index_equal(result, expected)

        # Minute-based frequency.
        expected = to_timedelta(np.arange(50), unit='T') * 30
        result = timedelta_range('0 days', freq='30T', periods=50)
        tm.assert_index_equal(result, expected)

        # GH 11776: to_timedelta must reject 2-D input (ndarray or frame)
        # for every `errors` mode.
        arr = np.arange(10).reshape(2, 5)
        df = pd.DataFrame(np.arange(10).reshape(2, 5))
        for arg in (arr, df):
            with tm.assert_raises_regex(TypeError, "1-d array"):
                to_timedelta(arg)
            for errors in ['ignore', 'raise', 'coerce']:
                with tm.assert_raises_regex(TypeError, "1-d array"):
                    to_timedelta(arg, errors=errors)

        # issue10583: string-based partial slicing on a timedelta index.
        df = pd.DataFrame(np.random.normal(size=(10, 4)))
        df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
        expected = df.loc[pd.Timedelta('0s'):, :]
        result = df.loc['0s':, :]
        tm.assert_frame_equal(expected, result)

    @pytest.mark.parametrize('periods, freq', [
        (3, '2D'), (5, 'D'), (6, '19H12T'), (7, '16H'), (9, '12H')])
    def test_linspace_behavior(self, periods, freq):
        # GH 20976: start/end/periods behaves like np.linspace -- the
        # implied frequency must equal the explicit one.
        result = timedelta_range(start='0 days', end='4 days', periods=periods)
        expected = timedelta_range(start='0 days', end='4 days', freq=freq)
        tm.assert_index_equal(result, expected)

    def test_errors(self):
        # not enough params
        msg = ('Of the four parameters: start, end, periods, and freq, '
               'exactly three must be specified')
        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(start='0 days')

        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(end='5 days')

        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(periods=2)

        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range()

        # too many params
        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(start='0 days', end='5 days', periods=10, freq='H')
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)

# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error

###############################################################################
# Load data
# NOTE(review): datasets.load_boston was deprecated in scikit-learn 1.0 and
# removed in 1.2; this example only runs on older releases -- confirm.
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
# Hold out the last 10% of the shuffled data as the test set.
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]

###############################################################################
# Fit regression model
# NOTE(review): modern scikit-learn rejects min_samples_split=1 (minimum
# is 2); verify against the targeted sklearn version.
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
          'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)

clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)

###############################################################################
# Plot training deviance

# compute test set deviance after each boosting iteration
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)

for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
    test_score[i] = clf.loss_(y_test, y_pred)

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')

###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
google-research/google-research | automl_zero/generate_datasets.py | 1 | 11970 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for generating the binary classification datasets from CIFAR10/MNIST.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
import sklearn.preprocessing
import tensorflow_datasets as tfds
import task_pb2
# Command-line flags controlling which binary classification datasets are
# generated and where they are written.
flags.DEFINE_string(
    'data_dir', '/tmp/binary_cifar10_data/',
    'Path of the folder to save the datasets.')
flags.DEFINE_string(
    'tfds_data_dir', '/tmp/',
    'Path for tensorflow_datasets to cache downloaded datasets, '
    'only used in local runs.')
flags.DEFINE_integer('num_train_examples', 8000,
                     'Number of training examples in each dataset.')
flags.DEFINE_integer('num_valid_examples', 1000,
                     'Number of validation examples in each dataset.')
flags.DEFINE_integer('num_test_examples', 1000,
                     'Number of test examples in each dataset.')
flags.DEFINE_integer('projected_dim', 16,
                     'The dimensionality to project the data into.')
# NOTE(review): "generatee" typo in the help string below (runtime text,
# intentionally left unmodified here).
flags.DEFINE_string('dataset_name', 'cifar10',
                    'Name of the dataset to generatee '
                    'more binary classification datasets.')
flags.DEFINE_integer('min_data_seed', 0,
                     'Generate one dataset for each seed in '
                     '[min_data_seed, max_data_seed).')
flags.DEFINE_integer('max_data_seed', 100,
                     'Generate one dataset for each seed in '
                     '[min_data_seed, max_data_seed).')
flags.DEFINE_list('class_ids', '0,1,2,3,4,5,6,7,8,9',
                  'Classes included to generate binary'
                  ' classification datasets.')

FLAGS = flags.FLAGS
def create_projected_binary_dataset(
    dataset_name, positive_class, negative_class,
    num_train_examples, num_valid_examples, num_test_examples,
    projected_dim, seed, load_fn):
  """Create a projected binary dataset from the given spec and seed.

  Draws examples of the two classes from `dataset_name`, relabels them to
  1 (positive) / 0 (negative), splits into train/valid/test, applies a
  seeded random Gaussian projection to `projected_dim` dimensions, and
  standardizes using training-set statistics.

  Args:
    dataset_name: source dataset name ('mnist' or 'cifar10').
    positive_class: class id relabeled to 1.
    negative_class: class id relabeled to 0.
    num_train_examples: number of training examples to keep.
    num_valid_examples: number of validation examples to keep.
    num_test_examples: number of test examples to keep (may be 0).
    projected_dim: output dimensionality of the random projection.
    seed: seed for both the split and the projection matrix.
    load_fn: dataset loading function (tfds.load or a test stub).

  Returns:
    A task_pb2.ScalarLabelDataset proto holding the projected splits.
  """
  num_samples = (
      num_train_examples +
      num_valid_examples +
      num_test_examples)
  pos = positive_class
  neg = negative_class
  # Only support training data from MNIST and CIFAR10 for experiments.
  data, labels, _, _ = get_dataset(
      dataset_name,
      int(num_samples / 2), [pos, neg], load_fn=load_fn)
  # Relabel pos -> 1 and neg -> 0, going through -1 as a temporary
  # sentinel so the two rewrites cannot collide (e.g. when neg == 1).
  labels[np.where(labels == pos)] = -1
  labels[np.where(labels == neg)] = 0
  labels[np.where(labels == -1)] = 1
  (train_data, train_labels, valid_data, valid_labels,
   test_data, test_labels) = train_valid_test_split(
       data, labels,
       num_train_examples,
       num_valid_examples,
       num_test_examples,
       seed)
  # Seeded random Gaussian projection; test_data is None when
  # num_test_examples == 0.
  np.random.seed(seed)
  random_mat = np.random.randn(
      train_data.shape[-1], projected_dim)
  train_data = np.dot(train_data, random_mat)
  valid_data = np.dot(valid_data, random_mat)
  if test_data is not None:
    test_data = np.dot(test_data, random_mat)
  # Standardize with statistics computed on the training split only.
  scaler = sklearn.preprocessing.StandardScaler()
  scaler.fit(train_data)
  train_data = scaler.transform(train_data)
  valid_data = scaler.transform(valid_data)
  if test_data is not None:
    test_data = scaler.transform(test_data)
  # Serialize the three splits into the ScalarLabelDataset proto.
  dataset = task_pb2.ScalarLabelDataset()
  for i in range(train_data.shape[0]):
    train_feature = dataset.train_features.add()
    train_feature.features.extend(list(train_data[i]))
    dataset.train_labels.append(train_labels[i])
  for i in range(valid_data.shape[0]):
    valid_feature = dataset.valid_features.add()
    valid_feature.features.extend(list(valid_data[i]))
    dataset.valid_labels.append(valid_labels[i])
  if test_data is not None:
    for i in range(test_data.shape[0]):
      test_feature = dataset.test_features.add()
      test_feature.features.extend(list(test_data[i]))
      dataset.test_labels.append(test_labels[i])
  return dataset
def _proto_rows_to_arrays(features, labels):
  """Convert parallel repeated proto rows into (2-D data, 1-D labels) arrays.

  All rows in `features` are expected to have the same length; `labels`
  is a flat sequence of scalars. Returns float64 numpy arrays.
  """
  # One vectorized conversion instead of element-by-element assignment.
  data = np.array([list(feature.features) for feature in features],
                  dtype=np.float64)
  label_arr = np.array(list(labels), dtype=np.float64)
  return data, label_arr


def load_projected_binary_dataset(saved_dataset):
  """Load the binary dataset saved in a ScalarLabelDataset proto.

  Args:
    saved_dataset: a task_pb2.ScalarLabelDataset (or any object exposing
      the same train/valid/test feature and label fields).

  Returns:
    Tuple (train_data, train_labels, valid_data, valid_labels, test_data,
    test_labels) of float64 numpy arrays; the test pair is (None, None)
    when the proto holds no test examples.

  Raises:
    ValueError: if the train or valid split is empty.
  """
  num_train = len(saved_dataset.train_labels)
  assert len(saved_dataset.train_labels) == len(saved_dataset.train_features)
  num_valid = len(saved_dataset.valid_labels)
  assert len(saved_dataset.valid_labels) == len(saved_dataset.valid_features)
  num_test = len(saved_dataset.test_labels)
  assert len(saved_dataset.test_labels) == len(saved_dataset.test_features)
  if num_train == 0 or num_valid == 0:
    raise ValueError('Number of train/valid examples'
                     ' must be more than zero.')
  train_data, train_labels = _proto_rows_to_arrays(
      saved_dataset.train_features, saved_dataset.train_labels)
  valid_data, valid_labels = _proto_rows_to_arrays(
      saved_dataset.valid_features, saved_dataset.valid_labels)
  if num_test > 0:
    test_data, test_labels = _proto_rows_to_arrays(
        saved_dataset.test_features, saved_dataset.test_labels)
  else:
    test_data = None
    test_labels = None
  return (train_data, train_labels, valid_data, valid_labels,
          test_data, test_labels)
def get_dataset(
    name, num_samples_per_class=None, class_ids=None, load_fn=tfds.load,
    data_dir=None):
  """Get the subset of the MNIST/CIFAR10 dataset containing selected classes.

  Args:
    name: name of the dataset. Currently support mnist and cifar10.
    num_samples_per_class: number of samples for each class.
    class_ids: a list of class ids that will be included. Set to None to
      include all the classes.
    load_fn: function to load datasets, used for unit test.
    data_dir: the folder to load data from if it is already there, otherwise
      download data to this folder.

  Returns:
    train_data: a matrix of all the flattened training images.
    train_labels: a vector of all the training labels.
    test_data: a matrix of all the flattened test images.
    test_labels: a vector of all the test labels.

  Raises:
    ValueError: if `name` is not a supported dataset.
  """
  # Load datasets.
  dataset_dict = load_fn(
      name, data_dir=data_dir, batch_size=-1)
  # Whether the dataset is from tfds or given in unit test.
  if load_fn == tfds.load:
    train_set = tfds.as_numpy(dataset_dict[tfds.Split.TRAIN])
    test_set = tfds.as_numpy(dataset_dict[tfds.Split.TEST])
  else:
    train_set = dataset_dict[tfds.Split.TRAIN]
    test_set = dataset_dict[tfds.Split.TEST]
  train_data, train_labels = train_set['image'], train_set['label']
  test_data, test_labels = test_set['image'], test_set['label']
  # BUGFIX: np.float was a deprecated alias of the builtin float and was
  # removed in NumPy 1.24; casting via float yields the same float64 dtype.
  train_data = train_data.astype(float)
  test_data = test_data.astype(float)
  assert train_data.shape[0] == train_labels.shape[0]
  assert test_data.shape[0] == test_labels.shape[0]
  if name == 'mnist':
    width = 28
    height = 28
    channel = 1
  elif name == 'cifar10':
    width = 32
    height = 32
    channel = 3
  else:
    raise ValueError('Dataset {} not supported!'.format(name))
  # Flatten each image into a vector of length width * height * channel.
  dim = width * height * channel
  train_data = train_data.reshape([-1, dim])
  test_data = test_data.reshape([-1, dim])
  if class_ids is not None:
    def select_classes(data, labels):
      # Keep at most num_samples_per_class examples of each requested
      # class, concatenated in class_ids order.
      data_list = [
          data[labels == class_id][:num_samples_per_class]
          for class_id in class_ids]
      labels_list = [
          labels[labels == class_id][:num_samples_per_class]
          for class_id in class_ids]
      selected_data = np.concatenate(data_list, axis=0)
      selected_labels = np.concatenate(labels_list, axis=0)
      return selected_data, selected_labels
    train_data, train_labels = select_classes(train_data, train_labels)
    test_data, test_labels = select_classes(test_data, test_labels)
  assert train_data.shape[0] == train_labels.shape[0]
  assert test_data.shape[0] == test_labels.shape[0]
  return (train_data, train_labels, test_data, test_labels)
def train_valid_test_split(
    data, labels,
    num_train_examples, num_valid_examples, num_test_examples,
    seed, use_stratify=True):
  """Split data into train, valid and test with given seed.

  The test split (when requested) is peeled off first; the remainder is
  then carved into train and valid. When `use_stratify` is set, each
  split is stratified on the labels of the pool being split.
  """
  if num_test_examples > 0:
    train_data, test_data, train_labels, test_labels = (
        sklearn.model_selection.train_test_split(
            data, labels,
            train_size=(
                num_train_examples +
                num_valid_examples),
            test_size=num_test_examples,
            random_state=seed,
            stratify=labels if use_stratify else None))
  else:
    # No test split requested: everything stays in the train/valid pool.
    train_data, train_labels = data, labels
    test_data = None
    test_labels = None
  train_data, valid_data, train_labels, valid_labels = (
      sklearn.model_selection.train_test_split(
          train_data, train_labels,
          train_size=num_train_examples,
          test_size=num_valid_examples,
          random_state=seed,
          stratify=train_labels if use_stratify else None))
  return (
      train_data, train_labels,
      valid_data, valid_labels,
      test_data, test_labels)
def main(unused_argv):
  """Create and save the datasets."""
  del unused_argv
  if not os.path.exists(FLAGS.data_dir):
    os.makedirs(FLAGS.data_dir)
  # Download/load the source dataset once and convert both splits to
  # numpy up front so every get_dataset call hits this in-memory cache.
  tfds_cached_dict = {}
  data_dir = FLAGS.tfds_data_dir if FLAGS.tfds_data_dir else None
  name = FLAGS.dataset_name
  tfds_cached_dict[name] = tfds.load(name, batch_size=-1, data_dir=data_dir)
  dataset_dict = tfds_cached_dict[name]
  dataset_dict[tfds.Split.TRAIN] = tfds.as_numpy(
      dataset_dict[tfds.Split.TRAIN])
  dataset_dict[tfds.Split.TEST] = tfds.as_numpy(
      dataset_dict[tfds.Split.TEST])
  # To mock the API of tfds.load to cache the downloaded datasets.
  # Used as an argument to `get_dataset`.
  def load_fn(name, data_dir=None, batch_size=-1):
    # This function will always return the whole dataset.
    assert batch_size == -1
    del data_dir
    del batch_size
    return tfds_cached_dict[name]
  class_ids = sorted([int(x) for x in FLAGS.class_ids])
  num_classes = len(class_ids)
  # One dataset per unordered class pair per seed.
  for i in range(num_classes):
    for j in range(i+1, num_classes):
      # NOTE(review): this prints the loop indices i/j, not the class ids;
      # compare with `filename` below, which uses the actual class ids.
      print('Generating pos {} neg {}'.format(i, j))
      positive_class = class_ids[i]
      negative_class = class_ids[j]
      random_seeds = range(FLAGS.min_data_seed, FLAGS.max_data_seed)
      for seed in random_seeds:
        dataset = create_projected_binary_dataset(
            FLAGS.dataset_name, positive_class, negative_class,
            FLAGS.num_train_examples, FLAGS.num_valid_examples,
            FLAGS.num_test_examples, FLAGS.projected_dim, seed, load_fn)
        # Serialize each dataset proto to its own spec-identifying file.
        filename = 'binary_{}-pos_{}-neg_{}-dim_{}-seed_{}'.format(
            FLAGS.dataset_name, positive_class, negative_class,
            FLAGS.projected_dim, seed)
        serialized_dataset = dataset.SerializeToString()
        with open(os.path.join(FLAGS.data_dir, filename), 'wb') as f:
          f.write(serialized_dataset)


if __name__ == '__main__':
  app.run(main)
| apache-2.0 |
ProkopHapala/SimpleSimulationEngine | python/pyMolecular/plotUtils.py | 1 | 1362 |
import numpy as np
import matplotlib.pyplot as plt
from elements import ELEMENTS
from matplotlib import collections as mc
def plotAtoms( es, xs, ys, scale=0.9, edge=True, ec='k', color='w' ):
    """Draw atoms as filled circles on the current matplotlib axes.

    es      : element numbers (indices into ELEMENTS)
    xs, ys  : atom coordinates
    edge/ec : draw circle outlines in colour `ec`; when edge is False the
              outline reuses the element fill colour.
    NOTE(review): `scale` and `color` are currently unused — confirm intent.
    """
    plt.fig = plt.gcf()
    axes = plt.fig.gca()
    # scatter establishes sensible axis limits before the circles are added
    plt.scatter(xs, ys)
    for i, e in enumerate(es):
        info = ELEMENTS[int(e)]
        radius = info[7] * 0.4
        face = '#%02x%02x%02x' % info[8]
        outline = ec if edge else face
        axes.add_artist(plt.Circle((xs[i], ys[i]), radius,
                                   fc=face, ec=outline))
def plotBonds( bonds, xs, ys, color='k', width=1 ):
    """Draw bonds as line segments between the bonded atom positions.

    bonds : iterable of (i, j) atom-index pairs.
    """
    segments = [((xs[i], ys[i]), (xs[j], ys[j])) for i, j in bonds]
    collection = mc.LineCollection(segments, colors=color, linewidths=width)
    plt.fig = plt.gcf()
    plt.fig.gca().add_collection(collection)
'''
for b in bonds:
i=b[0]; j=b[1]
lines = [ ( )])
#plt.arrow( xs[i], ys[i], xs[j]-xs[i], xs[j]-ys[i], head_width=0.0, head_length=0.0, fc='k', ec='k', lw= 1.0,ls='solid' )
plt.draw.line(((xs[i],ys[i]),(xs[j],ys[j])), fill=color, width=width )
'''
| mit |
annayqho/TheCannon | presentations/madrid_meeting/madrid_talk.py | 1 | 1579 | import numpy as np
import pyfits
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif')
# --- APOGEE combined spectrum -------------------------------------------
# FIX: the bare pyplot names used before (plot, title, ylabel, xlabel,
# savefig, xlim, axhline) are undefined without a pylab star-import; all
# calls now go through the imported ``plt`` module.
apstar_file = 'apStar-r5-2M07101078+2931576.fits'
file_in = pyfits.open(apstar_file)
flux = file_in[1].data[0,:]
err = file_in[2].data[0,:]
# Mask pixels whose reported uncertainty is too large.
bad = err > 4.5
flux_masked = flux[~bad]
npixels = len(flux)
# Reconstruct the wavelength grid from the FITS header (log10 dispersion).
start_wl = file_in[1].header['CRVAL1']
diff_wl = file_in[1].header['CDELT1']
val = diff_wl * (npixels) + start_wl
wl_full_log = np.arange(start_wl, val, diff_wl)
apogee_wl = 10 ** wl_full_log  # vectorized; replaces the list comprehension
wl = apogee_wl[~bad]
plt.plot(wl, flux_masked, c='k', linewidth=0.5)
plt.title("APOGEE Combined Spectrum", fontsize=30)
plt.ylabel("Flux", fontsize=30)
plt.xlabel(r"Wavelength (Angstroms)", fontsize=30)
plt.tick_params(axis='both', which='major', labelsize=30)
plt.savefig("apogee_combined_spec.png")

# --- LAMOST spectrum ----------------------------------------------------
lamost_wl = np.load("../examples/test_training_overlap/tr_data_raw.npz")['arr_1']
tr_flux_norm = np.load("../examples/test_training_overlap/tr_norm.npz")['arr_0']
tr_flux = np.load("../examples/test_training_overlap/tr_data_raw.npz")['arr_2']
tr_cont = np.load("../examples/test_training_overlap/tr_cont.npz")['arr_0']
lamost_flux_norm = tr_flux_norm[2,:]
lamost_flux = tr_flux[2,:]
cont = tr_cont[2,:]
# NOTE(review): this plots the raw flux although the title says
# "Normalized" — confirm whether lamost_flux_norm was intended.
plt.plot(lamost_wl, lamost_flux, c='k', linewidth=0.5)
plt.xlim(3800, 9100)
plt.axhline(y=1, c='r')
plt.title("LAMOST Normalized Spectrum", fontsize=30)
plt.ylabel("Flux", fontsize=30)
plt.xlabel(r"Wavelength (Angstroms)", fontsize=30)
plt.tick_params(axis='both', which='major', labelsize=30)
plt.savefig("lamost_normalized_spec.png")
| mit |
Winand/pandas | pandas/core/computation/ops.py | 15 | 15900 | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
from pandas.core.dtypes.common import is_list_like, is_scalar
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
# Names of reductions and numpy math functions that eval() expressions may
# call; FuncNode validates against _mathops below.
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
                   'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
                   'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
# Prefix used to mangle local (@-prefixed) variable names inside the
# eval scope so they cannot collide with column names.
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
    """NameError raised when an eval expression references an unknown name.

    Local (``@``-prefixed) variables get a more specific message than
    ordinary names.
    """

    def __init__(self, name, is_local):
        template = ('local variable {0!r} is not defined' if is_local
                    else 'name {0!r} is not defined')
        super(UndefinedVariableError, self).__init__(template.format(name))
class Term(StringMixin):
    """A leaf node of an eval expression tree: a named variable resolved
    against a Scope, or (via the Constant subclass) a literal value."""
    def __new__(cls, name, env, side=None, encoding=None):
        # Non-string "names" are literals: transparently construct a
        # Constant instead of a Term.
        klass = Constant if not isinstance(name, string_types) else cls
        supr_new = super(Term, klass).__new__
        return supr_new(klass)
    def __init__(self, name, env, side=None, encoding=None):
        self._name = name
        self.env = env
        self.side = side
        tname = text_type(name)
        # Locals are @-prefixed variables (mangled with _LOCAL_TAG) or
        # names from the default eval globals.
        self.is_local = (tname.startswith(_LOCAL_TAG) or
                         tname in _DEFAULT_GLOBALS)
        self._value = self._resolve_name()
        self.encoding = encoding
    @property
    def local_name(self):
        """The user-visible name, with the local mangling prefix removed."""
        return self.name.replace(_LOCAL_TAG, '')
    def __unicode__(self):
        return pprint_thing(self.name)
    def __call__(self, *args, **kwargs):
        # Evaluating a term simply yields its resolved value.
        return self.value
    def evaluate(self, *args, **kwargs):
        # Terms are already "pre-evaluated"; nothing to do.
        return self
    def _resolve_name(self):
        """Look the name up in the scope and cache the resolved value."""
        res = self.env.resolve(self.local_name, is_local=self.is_local)
        self.update(res)
        if hasattr(res, 'ndim') and res.ndim > 2:
            raise NotImplementedError("N-dimensional objects, where N > 2,"
                                      " are not supported with eval")
        return res
    def update(self, value):
        """
        search order for local (i.e., @variable) variables:
        scope, key_variable
        [('locals', 'local_name'),
         ('globals', 'local_name'),
         ('locals', 'key'),
         ('globals', 'key')]
        """
        key = self.name
        # if it's a variable name (otherwise a constant)
        if isinstance(key, string_types):
            self.env.swapkey(self.local_name, key, new_value=value)
        self.value = value
    @property
    def isscalar(self):
        return is_scalar(self._value)
    @property
    def type(self):
        """The dtype (for array-likes) or Python type of the value."""
        try:
            # potentially very slow for large, mixed dtype frames
            return self._value.values.dtype
        except AttributeError:
            try:
                # ndarray
                return self._value.dtype
            except AttributeError:
                # scalar
                return type(self._value)
    # alias: for a leaf the result type is just the value's type
    return_type = type
    @property
    def raw(self):
        """Debug representation including the resolved type."""
        return pprint_thing('{0}(name={1!r}, type={2})'
                            ''.format(self.__class__.__name__, self.name,
                                      self.type))
    @property
    def is_datetime(self):
        try:
            t = self.type.type
        except AttributeError:
            t = self.type
        return issubclass(t, (datetime, np.datetime64))
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, new_value):
        self._value = new_value
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, new_name):
        self._name = new_name
    @property
    def ndim(self):
        return self._value.ndim
class Constant(Term):
    """Term subclass representing a literal value in an expression tree."""

    def __init__(self, value, env, side=None, encoding=None):
        super(Constant, self).__init__(value, env, side=side,
                                       encoding=encoding)

    def _resolve_name(self):
        # A constant resolves to itself; no scope lookup is required.
        return self._name

    @property
    def name(self):
        # For a literal, the "name" and the value are the same thing.
        return self.value

    def __unicode__(self):
        # in python 2 str() of float
        # can truncate shorter than repr()
        return repr(self.name)
# Map textual boolean operators onto their bitwise (vectorized) equivalents.
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
    """Hold an operator of arbitrary arity
    """
    def __init__(self, op, operands, *args, **kwargs):
        # textual boolean ops ('and'/'or'/'not') are rewritten to their
        # vectorized bitwise forms
        self.op = _bool_op_map.get(op, op)
        self.operands = operands
        self.encoding = kwargs.get('encoding', None)
    def __iter__(self):
        return iter(self.operands)
    def __unicode__(self):
        """Print a generic n-ary operator and its operands using infix
        notation"""
        # recurse over the operands
        parened = ('({0})'.format(pprint_thing(opr))
                   for opr in self.operands)
        return pprint_thing(' {0} '.format(self.op).join(parened))
    @property
    def return_type(self):
        # clobber types to bool if the op is a boolean operator
        if self.op in (_cmp_ops_syms + _bool_ops_syms):
            return np.bool_
        return _result_type_many(*(term.type for term in com.flatten(self)))
    @property
    def has_invalid_return_type(self):
        # object result is only invalid when not all operands were object
        # dtype to begin with
        types = self.operand_types
        obj_dtype_set = frozenset([np.dtype('object')])
        return self.return_type == object and types - obj_dtype_set
    @property
    def operand_types(self):
        return frozenset(term.type for term in com.flatten(self))
    @property
    def isscalar(self):
        # an expression is scalar only if every operand is scalar
        return all(operand.isscalar for operand in self.operands)
    @property
    def is_datetime(self):
        try:
            t = self.return_type.type
        except AttributeError:
            t = self.return_type
        return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
# Operator token -> implementation tables.  'in'/'not in' use the
# vectorized helpers above; 'and'/'or' map onto the bitwise operators so
# they work elementwise on boolean arrays.
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
                    op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
# These arithmetic ops need special dtype-casting handling in some engines.
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
                                        _special_case_arith_ops_funcs))
# Merged token -> function lookup used by BinOp.
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
    _binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
    """Return True when *obj* is a Term (leaf node) of an expression tree."""
    return isinstance(obj, Term)
class BinOp(Op):
    """Hold a binary operator and its operands
    Parameters
    ----------
    op : str
    left : Term or Op
    right : Term or Op
    """
    def __init__(self, op, lhs, rhs, **kwargs):
        super(BinOp, self).__init__(op, (lhs, rhs))
        self.lhs = lhs
        self.rhs = rhs
        # validation/coercion must run before the func lookup so errors
        # surface against the original operands
        self._disallow_scalar_only_bool_ops()
        self.convert_values()
        try:
            self.func = _binary_ops_dict[op]
        except KeyError:
            # has to be made a list for python3
            keys = list(_binary_ops_dict.keys())
            raise ValueError('Invalid binary operator {0!r}, valid'
                             ' operators are {1}'.format(op, keys))
    def __call__(self, env):
        """Recursively evaluate an expression in Python space.
        Parameters
        ----------
        env : Scope
        Returns
        -------
        object
            The result of an evaluated expression.
        """
        # handle truediv
        if self.op == '/' and env.scope['truediv']:
            self.func = op.truediv
        # recurse over the left/right nodes
        left = self.lhs(env)
        right = self.rhs(env)
        return self.func(left, right)
    def evaluate(self, env, engine, parser, term_type, eval_in_python):
        """Evaluate a binary operation *before* being passed to the engine.
        Parameters
        ----------
        env : Scope
        engine : str
        parser : str
        term_type : type
        eval_in_python : list
            Operator tokens that must be evaluated in Python space rather
            than by the (e.g. numexpr) engine.
        Returns
        -------
        term_type
            The "pre-evaluated" expression as an instance of ``term_type``
        """
        if engine == 'python':
            res = self(env)
        else:
            # recurse over the left/right nodes
            left = self.lhs.evaluate(env, engine=engine, parser=parser,
                                     term_type=term_type,
                                     eval_in_python=eval_in_python)
            right = self.rhs.evaluate(env, engine=engine, parser=parser,
                                      term_type=term_type,
                                      eval_in_python=eval_in_python)
            # base cases
            if self.op in eval_in_python:
                res = self.func(left.value, right.value)
            else:
                res = pd.eval(self, local_dict=env, engine=engine,
                              parser=parser)
        # stash the intermediate result in the scope under a fresh name
        name = env.add_tmp(res)
        return term_type(name, env=env)
    def convert_values(self):
        """Convert datetimes to a comparable value in an expression.
        """
        def stringify(value):
            # numbers compared against datetimes are stringified first so
            # Timestamp can parse them
            if self.encoding is not None:
                encoder = partial(pprint_thing_encoded,
                                  encoding=self.encoding)
            else:
                encoder = pprint_thing
            return encoder(value)
        lhs, rhs = self.lhs, self.rhs
        # coerce a scalar compared against a datetime term to a UTC
        # Timestamp, on whichever side the scalar appears
        if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
            v = rhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.rhs.update(v)
        if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
            v = lhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.lhs.update(v)
    def _disallow_scalar_only_bool_ops(self):
        # boolean ops between scalars are only allowed when both operands
        # are genuinely boolean typed
        if ((self.lhs.isscalar or self.rhs.isscalar) and
            self.op in _bool_ops_dict and
                (not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
                      issubclass(self.lhs.return_type, (bool, np.bool_))))):
            raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
    """Return True when *dtype* coerces to a numeric numpy dtype."""
    return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
    """Div operator to special case casting.
    Parameters
    ----------
    lhs, rhs : Term or Op
        The Terms or Ops in the ``/`` expression.
    truediv : bool
        Whether or not to use true division. With Python 3 this happens
        regardless of the value of ``truediv``.
    """
    def __init__(self, lhs, rhs, truediv, *args, **kwargs):
        super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
        # division is only defined for numeric operand types
        if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
            raise TypeError("unsupported operand type(s) for {0}:"
                            " '{1}' and '{2}'".format(self.op,
                                                      lhs.return_type,
                                                      rhs.return_type))
        if truediv or PY3:
            # do not upcast float32s to float64 un-necessarily
            acceptable_dtypes = [np.float32, np.float_]
            _cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
# Unary operator tokens and implementations; 'not' maps to bitwise invert
# so it works elementwise on boolean arrays.
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
    """Hold a unary operator and its operands
    Parameters
    ----------
    op : str
        The token used to represent the operator.
    operand : Term or Op
        The Term or Op operand to the operator.
    Raises
    ------
    ValueError
        * If no function associated with the passed operator token is found.
    """
    def __init__(self, op, operand):
        super(UnaryOp, self).__init__(op, (operand,))
        self.operand = operand
        try:
            self.func = _unary_ops_dict[op]
        except KeyError:
            raise ValueError('Invalid unary operator {0!r}, valid operators '
                             'are {1}'.format(op, _unary_ops_syms))
    def __call__(self, env):
        # evaluate the operand first, then apply the operator function
        operand = self.operand(env)
        return self.func(operand)
    def __unicode__(self):
        return pprint_thing('{0}({1})'.format(self.op, self.operand))
    @property
    def return_type(self):
        # unary ops over boolean-typed or comparison/boolean sub-expressions
        # stay boolean; everything else is treated as int
        operand = self.operand
        if operand.return_type == np.dtype('bool'):
            return np.dtype('bool')
        if (isinstance(operand, Op) and
                (operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
            return np.dtype('bool')
        return np.dtype('int')
class MathCall(Op):
    """Expression node for a call to a supported numpy math function."""

    def __init__(self, func, args):
        super(MathCall, self).__init__(func.name, args)
        self.func = func

    def __call__(self, env):
        # Evaluate the arguments first, then apply the numpy function with
        # floating-point warnings suppressed.
        evaluated = [operand(env) for operand in self.operands]
        with np.errstate(all='ignore'):
            return self.func.func(*evaluated)

    def __unicode__(self):
        rendered_args = ','.join(map(str, self.operands))
        return pprint_thing('{0}({1})'.format(self.op, rendered_args))
class FuncNode(object):
    """Wrapper around a supported numpy math function for eval expressions."""

    def __init__(self, name):
        # only functions explicitly whitelisted in _mathops are allowed
        if name not in _mathops:
            raise ValueError(
                '"{0}" is not a supported function'.format(name))
        self.name = name
        self.func = getattr(np, name)

    def __call__(self, *args):
        # calling the node builds a MathCall expression over the arguments
        return MathCall(self, args)
| bsd-3-clause |
sho-87/python-machine-learning | ANN/xor.py | 1 | 2575 | # XOR gate. ANN with 1 hidden layer
# Trains a 2-3-1 feedforward network on the XOR truth table with Theano
# using plain gradient descent, then plots the training curve.
# NOTE(review): Python 2 syntax (print statements) -- runs under py2 only.
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
# Set inputs and correct output values
inputs = [[0,0], [1,1], [0,1], [1,0]]
outputs = [0, 0, 1, 1]
# Set training parameters
alpha = 0.1 # Learning rate
training_iterations = 50000
hidden_layer_nodes = 3
# Define tensors (x: input batch, y: target vector, b1/b2: layer biases)
x = T.matrix("x")
y = T.vector("y")
b1 = theano.shared(value=1.0, name='b1')
b2 = theano.shared(value=1.0, name='b2')
# Set random seed (fixed so weight init is reproducible)
rng = np.random.RandomState(2345)
# Initialize weights uniformly in [-1, 1)
w1_array = np.asarray(
    rng.uniform(low=-1, high=1, size=(2, hidden_layer_nodes)),
    dtype=theano.config.floatX) # Force type to 32bit float for GPU
w1 = theano.shared(value=w1_array, name='w1', borrow=True)
w2_array = np.asarray(
    rng.uniform(low=-1, high=1, size=(hidden_layer_nodes, 1)),
    dtype=theano.config.floatX) # Force type to 32bit float for GPU
w2 = theano.shared(value=w2_array, name='w2', borrow=True)
# Theano symbolic expressions (sigmoid activations on both layers)
a1 = T.nnet.sigmoid(T.dot(x, w1) + b1) # Input -> Hidden
a2 = T.nnet.sigmoid(T.dot(a1, w2) + b2) # Hidden -> Output
hypothesis = T.flatten(a2) # This needs to be flattened so
                           # hypothesis (matrix) and
                           # y (vector) have the same shape
# cost = T.sum((y - hypothesis) ** 2) # Quadratic/squared error loss
# cost = -(y*T.log(hypothesis) + (1-y)*T.log(1-hypothesis)).sum() # Manual CE
# cost = T.nnet.categorical_crossentropy(hypothesis, y) # Categorical CE
cost = T.nnet.binary_crossentropy(hypothesis, y).mean() # Binary CE
# Vanilla gradient-descent update rules for all four parameters
updates_rules = [
    (w1, w1 - alpha * T.grad(cost, wrt=w1)),
    (w2, w2 - alpha * T.grad(cost, wrt=w2)),
    (b1, b1 - alpha * T.grad(cost, wrt=b1)),
    (b2, b2 - alpha * T.grad(cost, wrt=b2))
]
# Theano compiled functions
train = theano.function(inputs=[x, y], outputs=[hypothesis, cost],
                        updates=updates_rules)
predict = theano.function(inputs=[x], outputs=[hypothesis])
# Training
cost_history = []
for i in range(training_iterations):
    if (i+1) % 5000 == 0:
        print "Iteration #%s: " % str(i+1)
        # NOTE(review): 'cost' here is the numeric value from the PREVIOUS
        # iteration (train() below rebinds it), so the report lags by one.
        print "Cost: %s" % str(cost)
    h, cost = train(inputs, outputs)
    cost_history.append(cost)
# Plot training curve
plt.plot(range(1, len(cost_history)+1), cost_history)
plt.grid(True)
plt.xlim(1, len(cost_history))
plt.ylim(0, max(cost_history))
plt.title("Training Curve")
plt.xlabel("Iteration #")
plt.ylabel("Cost")
# Predictions on the full truth table (no separate test set for XOR)
test_data = [[0,0], [1,1], [0,1], [1,0]]
predictions = predict(test_data)
print predictions
ViDA-NYU/data-polygamy | sigmod16/performance-evaluation/nyc-urban/pruning/pruning.py | 1 | 5741 | # Copyright (C) 2016 New York University
# This file is part of Data Polygamy which is released under the Revised BSD License
# See file LICENSE for full license details.
import os
import sys
import math
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', family='sans-serif')
import matplotlib.pyplot as plt
import matplotlib.font_manager as font
from operator import itemgetter
# Command-line interface: positional args, no flag parsing.
# NOTE(review): Python 2 print statement -- py2-only script.
if sys.argv[1] == "help":
    print "[dir] [events] [permutation] [temp res] [spatial res] [y-axis-label]"
    sys.exit(0)
# Score thresholds plotted, and their line colours.
scores = ["0.6", "0.8"]
scores_color = {"0.6": "#0099CC",
                "0.8": "#66CCFF"}
dir = sys.argv[1]  # NOTE(review): shadows the builtin dir()
events = sys.argv[2]
perm = sys.argv[3]
temp_res = sys.argv[4]
spatial_res = sys.argv[5]
# NOTE(review): eval() of a CLI argument executes arbitrary code; expected
# to be "True"/"False" -- consider a safer boolean parse.
use_y_label = eval(sys.argv[6])
def generate_data(score, events, perm, temp_res, spatial_res):
    """Collect relationship counts from the result files matching the given
    parameters, sorted by number of datasets.

    Files are named '<score>-<events>-<perm>-<temp>-<spatial>-<n>.out' and
    expected to contain 'label: value' lines: dataset count, attribute
    count, then max / significant / scored edge counts, in that order.
    Returns ((x, log10) series and (x, raw) series for max, significant and
    scored edges).  log10 values are -1 when the count is 0 or unparsable.
    """
    partial_filename = score + "-" + events + "-" + perm
    partial_filename += "-" + temp_res + "-" + spatial_res + "-"
    # searching for files in the module-level `dir` (CLI argument 1)
    sparsity_files = []
    files = os.listdir(dir)
    for file in files:
        if file.startswith(partial_filename):
            n_datasets = int(file.split("-")[5].replace(".out",""))
            sparsity_files.append((n_datasets, os.path.join(dir,file)))
    sparsity_files = sorted(sparsity_files, key=itemgetter(0))
    # getting x and y axis
    sign_edges_data = []
    sign_edges_ = []
    score_edges_data = []
    score_edges_ = []
    max_edges_data = []
    max_edges_ = []
    for file in sparsity_files:
        f = open(file[1])
        x = int(f.readline().split(":")[1].strip())
        att = int(f.readline().split(":")[1].strip())
        max_edges_str = f.readline().split(":")
        max_edges = float(max_edges_str[1].strip())
        sign_edges_str = f.readline().split(":")
        sign_edges = float(sign_edges_str[1].strip())
        score_edges_str = f.readline().split(":")
        score_edges = float(score_edges_str[1].strip())
        f.close()
        # log10 of 0 raises; the bare excepts deliberately keep the -1
        # sentinel (rendered below the axis) for empty counts
        sign_edges_log = -1
        try:
            sign_edges_log = math.log10(sign_edges)
        except:
            pass
        score_edges_log = -1
        try:
            score_edges_log = math.log10(score_edges)
        except:
            pass
        max_edges_log = -1
        try:
            max_edges_log = math.log10(max_edges)
        except:
            pass
        sign_edges_data.append((x,sign_edges_log))
        sign_edges_.append((x,sign_edges))
        score_edges_data.append((x,score_edges_log))
        score_edges_.append((x,score_edges))
        max_edges_data.append((x,max_edges_log))
        max_edges_.append((x,max_edges))
    return (max_edges_data, sign_edges_data, score_edges_data,
            max_edges_, sign_edges_, score_edges_)
# plots: one curve per score threshold, plus the significant-edge and
# possible-edge baselines, on a log10 y-axis (tick labels 1..10K).
xlabel = "Number of Data Sets"
ylabel = "Number of Relationships"
plt.figure(figsize=(8, 6), dpi=80)
f, ax = plt.subplots()
ax.set_axis_bgcolor("#E0E0E0")
output = ""
refs = []
legend = []
sign_edges_data = []
max_edges_data = []
for score in scores:
    score_data = []
    data = generate_data(score, events, perm, temp_res, spatial_res)
    # the max/significant series are identical across scores; the last
    # loop iteration's values are the ones plotted below
    (max_edges_data, sign_edges_data, score_data, max_edges_, sign_edges_, score_edges_) = data
    line, = ax.plot([x[0] for x in score_data],
                    [x[1] for x in score_data],
                    color=scores_color[score],
                    linestyle='-',
                    linewidth=2.0)
    output += score + ": " + str(score_edges_) + "\n"
    refs.append(line)
    legend.append(r"Significant Relationships, $|\tau| \geq %.2f$" %(float(score)))
output += "Significant: " + str(sign_edges_) + "\n"
output += "Max: " + str(max_edges_) + "\n"
# baseline: all significant relationships
line1, = ax.plot([x[0] for x in sign_edges_data],
                 [x[1] for x in sign_edges_data],
                 color="#003399",
                 linestyle='-',
                 linewidth=2.0)
# baseline: all possible relationships (dashed)
line2, = ax.plot([x[0] for x in max_edges_data],
                 [x[1] for x in max_edges_data],
                 'k--',
                 linewidth=2.0)
refs = [line2, line1] + refs
legend = ["Possible Relationships", "Significant Relationships"] + refs and ["Possible Relationships", "Significant Relationships"] + legend
plot_legend = ax.legend(refs, legend, handlelength=3, loc='upper left', borderpad=0.5, shadow=True, prop={'size':15,'style':'italic'})
plot_legend.get_frame().set_lw(0.5)
# y axis holds log10 counts; relabel ticks with the raw counts
ax.set_yticks([0,1,2,3,4])
ax.set_yticklabels(["1","10","100","1K","10K"])
ax.set_ylim(-1.1, 5)
ax.tick_params(axis='both', labelsize=22)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.set_xlim(xmin=2)
ax.set_xlabel(xlabel,fontproperties=font.FontProperties(size=22,weight='bold'))
if (use_y_label):
    ax.set_ylabel(ylabel,fontproperties=font.FontProperties(size=22,weight='bold'))
ax.grid(b=True, axis='both', color='w', linestyle='-', linewidth=0.7)
ax.set_axisbelow(True)
# save the figure and the collected raw numbers next to each other
filename = events + "-" + perm + "-" + temp_res + "-" + spatial_res
plt.savefig(filename + ".png", bbox_inches='tight', pad_inches=0.05)
f = open(filename + ".out", "w")
f.write(output)
f.close()
plt.clf()
| bsd-3-clause |
LeeYiFang/Carkinos | src/cv.py | 1 | 2729 | from pathlib import Path
import pandas as pd
import numpy as np
import django
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'Carkinos.settings.local'
django.setup()
from probes.models import Dataset,Platform,Sample,CellLine,ProbeID
root=Path('../').resolve()
u133a_path=root.joinpath('src','raw','Affy_U133A_probe_info.csv')
plus2_path=root.joinpath('src','raw','Affy_U133plus2_probe_info.csv')
u133a=pd.read_csv(u133a_path.as_posix())
plus2=pd.read_csv(plus2_path.as_posix())
sanger_val_pth=Path('../').resolve().joinpath('src','sanger_cell_line_proj.npy')
nci_val_pth=Path('../').resolve().joinpath('src','nci60.npy')
gse_val_pth=Path('../').resolve().joinpath('src','GSE36133.npy')
sanger_val=np.load(sanger_val_pth.as_posix(),mmap_mode='r')
nci_val=np.load(nci_val_pth.as_posix(),mmap_mode='r')
gse_val=np.load(gse_val_pth.as_posix(),mmap_mode='r')
plus2.SYMBOL.fillna('', inplace=True)
u133a.SYMBOL.fillna('', inplace=True)
#this is now for all platform that can find
#ugene=list(set(list(pd.unique(plus2.SYMBOL))+list(pd.unique(u133a.SYMBOL))))
ugene=list(pd.unique(u133a.SYMBOL))
ugene.remove('')
#ugene has all the gene symbols in U133A and U133PlUS2
#a_uni=list(pd.unique(u133a.SYMBOL))
#two_uni=list(pd.unique(plus2.SYMBOL))
#ugene=list(set(a_uni).intersection(two_uni))
#ugene.remove('')
#sanger=798,nci60=174,gse=917
sanger_offset=Sample.objects.filter(dataset_id=1).values_list('offset',flat=True)
nci60_offset=Sample.objects.filter(dataset_id__name__in=['NCI60']).values_list('offset',flat=True)
gse_offset=Sample.objects.filter(dataset_id__name__in=['GSE36133']).values_list('offset',flat=True)
# Find the gene with the smallest coefficient of variation (std/mean)
# across the Sanger (U133A) and NCI60/GSE36133 (U133Plus2) datasets.
# NOTE(review): `min` shadows the builtin min().
min=10000000000
min_gene=[]
for gene in ugene:
    # probes for this gene on each platform (1 = U133A, 3 = U133Plus2)
    aprobe=ProbeID.objects.filter(platform=1,Gene_symbol=gene)
    pprobe=ProbeID.objects.filter(platform=3,Gene_symbol=gene)
    aoffset=aprobe.values_list('offset',flat=True)
    aprobe_length=len(aoffset)
    poffset=pprobe.values_list('offset',flat=True)
    pprobe_length=len(poffset)
    # expression submatrices: (probes for gene) x (samples per dataset)
    sanger_sample=sanger_val[np.ix_(aoffset,sanger_offset)]
    sanger_sum=np.sum(sanger_sample)
    nci_sample=nci_val[np.ix_(poffset,nci60_offset)]
    nci_sum=np.sum(nci_sample)
    gse_sample=gse_val[np.ix_(poffset,gse_offset)]
    gse_sum=np.sum(gse_sample)
    # 798 and 1091 are the hard-coded sample counts (sanger; nci60+gse)
    # NOTE(review): divides by zero if a gene has no probes on either
    # platform, and mean could be 0 -- confirm inputs rule this out.
    mean=(sanger_sum+nci_sum+gse_sum)/(aprobe_length*798+pprobe_length*1091)
    sanger_square=np.sum(np.square(np.subtract(sanger_sample,mean)))
    nci_square=np.sum(np.square(np.subtract(nci_sample,mean)))
    gse_square=np.sum(np.square(np.subtract(gse_sample,mean)))
    std=((sanger_square+nci_square+gse_square)/(aprobe_length*798+pprobe_length*1091))**0.5
    temp=std/mean
    if min>temp:
        min=temp
        min_gene=gene
print(min)
print(min_gene)
| mit |
ryklith/pyltesim | plotting/JSACplot.py | 1 | 2333 | #!/usr/bin/env python
''' Recreates the central JSAC plot: Data rate per user vs supply power consumption.
File: JSACplot.py
'''
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"
import results.resultshandler as rh
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import datetime
def savePlot(filename):
    """Save the current matplotlib figure to a timestamped PDF and PNG.

    filename is the path prefix; None falls back to "JSACplot".
    NOTE(review): Python 2 print statements -- py2-only module.
    """
    timesuffix = datetime.datetime.now().strftime("_%y_%m_%d_%I-%M-%S%p")
    if filename is None:
        filename = "JSACplot"
    plt.savefig(filename+timesuffix+'.pdf', format='pdf')
    print filename+timesuffix+'.pdf saved'
    plt.savefig(filename+timesuffix+'.png', format='png')
    print filename+timesuffix+'.png saved'
def showPlot():
    """Display the current matplotlib figure on screen (blocking)."""
    plt.show()
def plotFromData(filename=None):
    """Load the most recent results file and draw the rate-vs-power plot.

    Expects the loaded 2d array (after transpose) to have user rates in
    column 0 and one power-consumption series per remaining column.
    NOTE(review): the `filename` parameter is effectively unsupported --
    any non-None value raises NotImplementedError.
    """
    import itertools
    colors = itertools.cycle(['r','g','b','c','m','y','k'])
    if filename is None:
        data, filename = rh.loadBin() # loads most recent npz file. data is 2d-array
    else:
        raise NotImplementedError('Cannot load particular filename yet')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.title('Power consumption as a function of user rate\n' + filename)
    plt.xlabel('User rate in bps')
    plt.ylabel('Supply power consumption in Watt')
    data = data.transpose()
    xdata = data[:,0]
    p = []
    for i in np.arange(1,np.shape(data)[1] ):
        # .next() is py2-only iterator advancement
        color = colors.next()
        p.append(plt.plot(xdata, data[:,i],color,label='test')[0])
    # legend assumes exactly four data series beyond the x column
    plt.legend([p[0], p[1], p[2], p[3]], ['Theoretical bound','SOTA','After optimization','After quantization'], loc=4) # bottom right
    return ax, filename # TODO the return values are not used, as pyplot keeps track of 'current plot' internally
if __name__ == '__main__':
    import sys
    # If this is called as a script, it plots the most recent results file.
    # NOTE(review): the argv filename is parsed but plotFromData raises
    # NotImplementedError for any non-None filename.
    if len(sys.argv) >= 2:
        filename = sys.argv[1]
    else:
        filename = None
    path = 'plotting/'
    targetfilename = 'JSAC'
    # Create plot
    plot = plotFromData(filename)
    # Draw to screen and save
    savePlot(path+targetfilename)
#    showPlot()
| gpl-2.0 |
pgm/StarCluster | utils/scimage_12_04.py | 20 | 17216 | #!/usr/bin/env python
"""
This script is meant to be run inside of a ubuntu cloud image available at
uec-images.ubuntu.com::
$ EC2_UBUNTU_IMG_URL=http://uec-images.ubuntu.com/precise/current
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-amd64.tar.gz
or::
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-i386.tar.gz
After downloading a Ubuntu cloud image the next step is to extract the image::
$ tar xvzf precise-server-cloudimg-amd64.tar.gz
Then resize it to 10GB::
$ e2fsck -f precise-server-cloudimg-amd64.img
$ resize2fs precise-server-cloudimg-amd64.img 10G
Next you need to mount the image::
$ mkdir /tmp/img-mount
$ mount precise-server-cloudimg-amd64.img /tmp/img-mount
$ mount -t proc none /tmp/img-mount/proc
$ mount -t sysfs none /tmp/img-mount/sys
$ mount -o bind /dev /tmp/img-mount/dev
$ mount -t devpts none /tmp/img-mount/dev/pts
$ mount -o rbind /var/run/dbus /tmp/img-mount/var/run/dbus
Copy /etc/resolv.conf and /etc/mtab to the image::
$ mkdir -p /tmp/img-mount/var/run/resolvconf
$ cp /etc/resolv.conf /tmp/img-mount/var/run/resolvconf/resolv.conf
$ grep -v rootfs /etc/mtab > /tmp/img-mount/etc/mtab
Next copy this script inside the image::
$ cp /path/to/scimage.py /tmp/img-mount/root/scimage.py
Finally chroot inside the image and run this script:
$ chroot /tmp/img-mount /bin/bash
$ cd $HOME
$ python scimage.py
"""
import os
import sys
import glob
import shutil
import fileinput
import subprocess
import multiprocessing
SRC_DIR = "/usr/local/src"
APT_SOURCES_FILE = "/etc/apt/sources.list"
BUILD_UTILS_PKGS = "build-essential devscripts debconf debconf-utils dpkg-dev "
BUILD_UTILS_PKGS += "gfortran llvm-3.2-dev swig cdbs patch python-dev "
BUILD_UTILS_PKGS += "python-distutils-extra python-setuptools python-pip "
BUILD_UTILS_PKGS += "python-nose"
CLOUD_CFG_FILE = '/etc/cloud/cloud.cfg'
GRID_SCHEDULER_GIT = 'git://github.com/jtriley/gridscheduler.git'
CLOUDERA_ARCHIVE_KEY = 'http://archive.cloudera.com/debian/archive.key'
CLOUDERA_APT = 'http://archive.cloudera.com/debian maverick-cdh3u5 contrib'
CONDOR_APT = 'http://www.cs.wisc.edu/condor/debian/development lenny contrib'
NUMPY_SCIPY_SITE_CFG = """\
[DEFAULT]
library_dirs = /usr/lib
include_dirs = /usr/include:/usr/include/suitesparse
[blas_opt]
libraries = ptf77blas, ptcblas, atlas
[lapack_opt]
libraries = lapack, ptf77blas, ptcblas, atlas
[amd]
amd_libs = amd
[umfpack]
umfpack_libs = umfpack
[fftw]
libraries = fftw3
"""
STARCLUSTER_MOTD = """\
#!/bin/sh
cat<<"EOF"
_ _ _
__/\_____| |_ __ _ _ __ ___| |_ _ ___| |_ ___ _ __
\ / __| __/ _` | '__/ __| | | | / __| __/ _ \ '__|
/_ _\__ \ || (_| | | | (__| | |_| \__ \ || __/ |
\/ |___/\__\__,_|_| \___|_|\__,_|___/\__\___|_|
StarCluster Ubuntu 12.04 AMI
Software Tools for Academics and Researchers (STAR)
Homepage: http://star.mit.edu/cluster
Documentation: http://star.mit.edu/cluster/docs/latest
Code: https://github.com/jtriley/StarCluster
Mailing list: starcluster@mit.edu
This AMI Contains:
* Open Grid Scheduler (OGS - formerly SGE) queuing system
* Condor workload management system
* OpenMPI compiled with Open Grid Scheduler support
* OpenBLAS - Highly optimized Basic Linear Algebra Routines
* NumPy/SciPy linked against OpenBlas
* IPython 0.13 with parallel and notebook support
* and more! (use 'dpkg -l' to show all installed packages)
Open Grid Scheduler/Condor cheat sheet:
* qstat/condor_q - show status of batch jobs
* qhost/condor_status- show status of hosts, queues, and jobs
* qsub/condor_submit - submit batch jobs (e.g. qsub -cwd ./job.sh)
* qdel/condor_rm - delete batch jobs (e.g. qdel 7)
* qconf - configure Open Grid Scheduler system
Current System Stats:
EOF
landscape-sysinfo | grep -iv 'graph this data'
"""
CLOUD_INIT_CFG = """\
user: ubuntu
disable_root: 0
preserve_hostname: False
# datasource_list: [ "NoCloud", "OVF", "Ec2" ]
cloud_init_modules:
- bootcmd
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- rsyslog
- ssh
cloud_config_modules:
- mounts
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- timezone
- puppet
- chef
- mcollective
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- keys-to-console
- final-message
apt_sources:
- source: deb $MIRROR $RELEASE multiverse
- source: deb %(CLOUDERA_APT)s
- source: deb-src %(CLOUDERA_APT)s
- source: deb %(CONDOR_APT)s
""" % dict(CLOUDERA_APT=CLOUDERA_APT, CONDOR_APT=CONDOR_APT)
def run_command(cmd, ignore_failure=False, failure_callback=None,
                get_output=False):
    """Run *cmd* through the shell, optionally streaming/capturing output.

    :param cmd: shell command string executed with ``shell=True``.
    :param ignore_failure: if True, a non-zero exit only logs to stderr
        instead of raising.
    :param failure_callback: called with the exit status on failure; its
        boolean return value overrides ``ignore_failure``.
    :param get_output: if True, capture stdout/stderr (echoing each line)
        and return ``(retval, output_string)`` instead of just ``retval``.
    :raises Exception: on non-zero exit when the failure is not ignored.
    """
    kwargs = {}
    if get_output:
        kwargs.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
    p = subprocess.Popen(cmd, shell=True, **kwargs)
    output = []
    if get_output:
        # Echo stdout line by line while the process runs, then drain stderr.
        line = None
        while line != '':
            line = p.stdout.readline()
            if line != '':
                output.append(line)
                print line,
        for line in p.stderr.readlines():
            if line != '':
                output.append(line)
                print line,
    retval = p.wait()
    if retval != 0:
        errmsg = "command '%s' failed with status %d" % (cmd, retval)
        # The callback gets the final say on whether this failure is fatal.
        if failure_callback:
            ignore_failure = failure_callback(retval)
        if not ignore_failure:
            raise Exception(errmsg)
        else:
            sys.stderr.write(errmsg + '\n')
    if get_output:
        return retval, ''.join(output)
    return retval
def apt_command(cmd):
    """Run an apt-get subcommand non-interactively, forcing new conffiles."""
    full_cmd = ("DEBIAN_FRONTEND='noninteractive' "
                "apt-get -o Dpkg::Options::='--force-confnew' "
                "-y --force-yes %s" % cmd)
    run_command(full_cmd)
def apt_install(pkgs):
    """Install the given space-separated package spec string via apt-get."""
    install_cmd = 'install %s' % pkgs
    apt_command(install_cmd)
def chdir(directory):
    """Change the working directory to the one directory matching a glob.

    :param directory: glob pattern expected to match exactly one directory.
    :raises Exception: if the pattern matches no directory (previously a
        bare IndexError) or more than one directory.
    """
    matches = glob.glob(directory)
    isdirlist = [o for o in matches if os.path.isdir(o)]
    if not isdirlist:
        # BUG FIX: previously fell through to isdirlist[0] and raised an
        # opaque IndexError when nothing matched.
        raise Exception("no directory matches: %s" % directory)
    if len(isdirlist) > 1:
        raise Exception("more than one dir matches: %s" % directory)
    os.chdir(isdirlist[0])
def _fix_atlas_rules(rules_file='debian/rules'):
    """Strip any line containing 'ATLAS=None' from the given rules file.

    fileinput with inplace=1 redirects stdout into the file, so printing
    a line keeps it and not printing a line deletes it.
    """
    for line in fileinput.input(rules_file, inplace=1):
        if 'ATLAS=None' not in line:
            print line,
def configure_apt_sources():
    """Extend apt sources with deb-src twins of every deb line plus the
    Cloudera and Condor repositories, and import their signing keys.
    """
    srcfile = open(APT_SOURCES_FILE)
    contents = srcfile.readlines()
    srcfile.close()
    srclines = []
    # Build a deb-src counterpart for every active (non-comment) deb entry.
    for line in contents:
        if not line.strip() or line.startswith('#'):
            continue
        parts = line.split()
        if parts[0] == 'deb':
            parts[0] = 'deb-src'
        srclines.append(' '.join(parts).strip())
    # Rewrite sources.list: original contents, then the deb-src lines,
    # then the third-party repositories.
    srcfile = open(APT_SOURCES_FILE, 'w')
    srcfile.write(''.join(contents))
    srcfile.write('\n'.join(srclines) + '\n')
    srcfile.write('deb %s\n' % CLOUDERA_APT)
    srcfile.write('deb-src %s\n' % CLOUDERA_APT)
    srcfile.write('deb %s\n' % CONDOR_APT)
    srcfile.close()
    run_command('add-apt-repository ppa:staticfloat/julia-deps -y')
    # Import repository signing keys (0F932C9C is the Condor repo key;
    # the Cloudera key comes from CLOUDERA_ARCHIVE_KEY).
    run_command('gpg --keyserver keyserver.ubuntu.com --recv-keys 0F932C9C')
    run_command('curl -s %s | sudo apt-key add -' % CLOUDERA_ARCHIVE_KEY)
    apt_install('debian-archive-keyring')
def upgrade_packages():
    """Refresh the package lists and upgrade all installed packages."""
    for subcommand in ('update', 'upgrade'):
        apt_command(subcommand)
def install_build_utils():
    """Install the compiler/tooling packages required for source builds."""
    apt_install(BUILD_UTILS_PKGS)
def install_gridscheduler():
    """Build and install Open Grid Scheduler (the SGE fork) into
    /opt/sge6-fresh.

    Uses a pre-built tarball when one is present in SRC_DIR; otherwise
    clones the develop branch and compiles from source.
    """
    chdir(SRC_DIR)
    apt_command('build-dep gridengine')
    # Fast path: unpack a previously built tree if available.
    if os.path.isfile('gridscheduler-scbuild.tar.gz'):
        run_command('tar xvzf gridscheduler-scbuild.tar.gz')
        run_command('mv gridscheduler /opt/sge6-fresh')
        return
    run_command('git clone %s' % GRID_SCHEDULER_GIT)
    # Derive JAVA_HOME from the resolved location of the java binary.
    sts, out = run_command('readlink -f `which java`', get_output=True)
    java_home = out.strip().split('/jre')[0]
    chdir(os.path.join(SRC_DIR, 'gridscheduler', 'source'))
    run_command('git checkout -t -b develop origin/develop')
    env = 'JAVA_HOME=%s' % java_home
    # aimk build sequence: dependency generation, then the actual build.
    run_command('%s ./aimk -only-depend' % env)
    run_command('%s scripts/zerodepend' % env)
    run_command('%s ./aimk depend' % env)
    run_command('%s ./aimk -no-secure -no-gui-inst' % env)
    sge_root = '/opt/sge6-fresh'
    os.mkdir(sge_root)
    env += ' SGE_ROOT=%s' % sge_root
    run_command('%s scripts/distinst -all -local -noexit -y -- man' % env)
def install_condor():
    """Install and pin Condor 7.7.2, preparing its log/run directories."""
    chdir(SRC_DIR)
    # /var/lock interferes with the condor package's postinst handling.
    run_command("rm /var/lock")
    apt_install('condor=7.7.2-1')
    # Hold the package so apt upgrades do not replace the pinned version.
    run_command('echo condor hold | dpkg --set-selections')
    run_command('ln -s /etc/condor/condor_config /etc/condor_config.local')
    run_command('mkdir /var/lib/condor/log')
    run_command('mkdir /var/lib/condor/run')
    run_command('chown -R condor:condor /var/lib/condor/log')
    run_command('chown -R condor:condor /var/lib/condor/run')
def install_torque():
    """Install the TORQUE resource manager (server, MOM, client)."""
    chdir(SRC_DIR)
    apt_install('torque-server torque-mom torque-client')
def install_pydrmaa():
    """Install the Python DRMAA bindings via pip."""
    chdir(SRC_DIR)
    run_command('pip install drmaa')
def install_blas_lapack():
    """Install OpenBLAS as the system BLAS/LAPACK implementation."""
    chdir(SRC_DIR)
    apt_install("libopenblas-dev")
def install_numpy_scipy():
    """Build numpy and scipy from source against the system OpenBLAS."""
    chdir(SRC_DIR)
    # Download (but don't install) the numpy sdist, then unpack it.
    run_command('pip install -d . numpy')
    run_command('unzip numpy*.zip')
    # Patch numpy's core setup so its BLAS detection does not bail out
    # early ('return None' -> 'pass').
    run_command("sed -i 's/return None #/pass #/' numpy*/numpy/core/setup.py")
    run_command('pip install scipy')
def install_pandas():
    """Install pandas via pip, pulling in its Debian build deps first."""
    chdir(SRC_DIR)
    apt_command('build-dep pandas')
    run_command('pip install pandas')
def install_matplotlib():
    """Install matplotlib via pip."""
    chdir(SRC_DIR)
    run_command('pip install matplotlib')
def install_julia():
    """Build Julia from git, linking against system-provided libraries."""
    apt_install("libsuitesparse-dev libncurses5-dev "
                "libopenblas-dev libarpack2-dev libfftw3-dev libgmp-dev "
                "libunwind7-dev libreadline-dev zlib1g-dev")
    # Shell snippet exporting BUILDOPTS with USE_SYSTEM_<lib>=1 for every
    # dependency, so make uses the apt-installed libraries above.
    buildopts = """\
BUILDOPTS="LLVM_CONFIG=llvm-config-3.2 USE_QUIET=0 USE_LIB64=0"; for lib in \
LLVM ZLIB SUITESPARSE ARPACK BLAS FFTW LAPACK GMP LIBUNWIND READLINE GLPK \
NGINX; do export BUILDOPTS="$BUILDOPTS USE_SYSTEM_$lib=1"; done"""
    chdir(SRC_DIR)
    if not os.path.exists("julia"):
        run_command("git clone git://github.com/JuliaLang/julia.git")
    run_command("%s && cd julia && make $BUILDOPTS PREFIX=/usr install" %
                buildopts)
def install_mpi():
    """Build OpenMPI with Open Grid Scheduler (SGE) support and install it.

    Rebuilds the Debian source package with --with-sge enabled, installs
    the resulting debs, verifies gridengine support is present, and holds
    the packages so apt upgrades do not replace the custom build.
    """
    chdir(SRC_DIR)
    apt_install('mpich2')
    apt_command('build-dep openmpi')
    apt_install('blcr-util')
    if glob.glob('*openmpi*.deb'):
        # Reuse previously built packages when present.
        run_command('dpkg -i *openmpi*.deb')
    else:
        apt_command('source openmpi')
        chdir('openmpi*')
        # Inject the SGE configure flag into debian/rules right after the
        # heterogeneous flag (fileinput inplace=1 rewrites the file).
        for line in fileinput.input('debian/rules', inplace=1):
            print line,
            if '--enable-heterogeneous' in line:
                print ' --with-sge \\'
        def _deb_failure_callback(retval):
            # Treat a non-zero build exit as success as long as the .deb
            # packages were actually produced.
            if not glob.glob('../*openmpi*.deb'):
                return False
            return True
        run_command('dch --local=\'+custom\' '
                    '"custom build on: `uname -s -r -v -m -p -i -o`"')
        run_command('dpkg-buildpackage -rfakeroot -b',
                    failure_callback=_deb_failure_callback)
        run_command('dpkg -i ../*openmpi*.deb')
    # Sanity check: fail loudly if gridengine support is missing.
    sts, out = run_command('ompi_info | grep -i grid', get_output=True)
    if 'gridengine' not in out:
        raise Exception("failed to build OpenMPI with "
                        "Open Grid Scheduler support")
    # Hold every OpenMPI package at the custom-built version.
    run_command('echo libopenmpi1.3 hold | dpkg --set-selections')
    run_command('echo libopenmpi-dev hold | dpkg --set-selections')
    run_command('echo libopenmpi-dbg hold | dpkg --set-selections')
    run_command('echo openmpi-bin hold | dpkg --set-selections')
    run_command('echo openmpi-checkpoint hold | dpkg --set-selections')
    run_command('echo openmpi-common hold | dpkg --set-selections')
    run_command('echo openmpi-doc hold | dpkg --set-selections')
    run_command('pip install mpi4py')
def install_hadoop():
    """Install the Hadoop 0.20 daemons (from Cloudera's repo) plus dumbo."""
    chdir(SRC_DIR)
    hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
                   'secondarynamenode']
    # Base package plus one package per daemon role.
    pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
    apt_install(' '.join(pkgs))
    run_command('easy_install dumbo')
def install_ipython():
    """Install IPython with notebook/parallel support and a local MathJax."""
    chdir(SRC_DIR)
    apt_install('libzmq-dev')
    run_command('pip install ipython tornado pygments pyzmq')
    # Fetch MathJax so notebooks render math without internet access.
    mjax_install = 'from IPython.external.mathjax import install_mathjax'
    mjax_install += '; install_mathjax()'
    run_command("python -c '%s'" % mjax_install)
def configure_motd():
    """Replace the stock update-motd scripts with the StarCluster banner."""
    for f in glob.glob('/etc/update-motd.d/*'):
        os.unlink(f)
    motd = open('/etc/update-motd.d/00-starcluster', 'w')
    motd.write(STARCLUSTER_MOTD)
    motd.close()
    # NOTE: 0755 is a Python 2 octal literal (would be 0o755 in Python 3).
    os.chmod(motd.name, 0755)
def configure_cloud_init():
    """Write the module-level CLOUD_INIT_CFG to /etc/cloud/cloud.cfg.

    Uses a context manager so the file handle is closed even if the
    write fails (the original closed it manually).
    """
    with open('/etc/cloud/cloud.cfg', 'w') as cloudcfg:
        cloudcfg.write(CLOUD_INIT_CFG)
def configure_bash():
    """Enable system-wide bash completion and add a root alias.

    The stock /etc/bash.bashrc ships with the bash_completion block
    commented out; this uncomments the matching line and the line that
    immediately follows it (fileinput inplace=1 rewrites the file).
    """
    completion_line_found = False
    for line in fileinput.input('/etc/bash.bashrc', inplace=1):
        if 'bash_completion' in line and line.startswith('#'):
            print line.replace('#', ''),
            completion_line_found = True
        elif completion_line_found:
            # Uncomment exactly one follow-up line, then reset the flag.
            print line.replace('#', ''),
            completion_line_found = False
        else:
            print line,
    aliasfile = open('/root/.bash_aliases', 'w')
    aliasfile.write("alias ..='cd ..'\n")
    aliasfile.close()
def setup_environ():
    """Prepare the build environment.

    Enables parallel make, silences debconf prompts, and temporarily
    replaces upstart's initctl with /bin/true so package postinst scripts
    cannot start services during the image build (restored by cleanup()).
    """
    num_cpus = multiprocessing.cpu_count()
    os.environ['MAKEFLAGS'] = '-j%d' % (num_cpus + 1)
    os.environ['DEBIAN_FRONTEND'] = "noninteractive"
    if os.path.isfile('/sbin/initctl') and not os.path.islink('/sbin/initctl'):
        run_command('mv /sbin/initctl /sbin/initctl.bak')
        run_command('ln -s /bin/true /sbin/initctl')
def install_nfs():
    """Install the NFS kernel server and expose it as /etc/init.d/nfs."""
    chdir(SRC_DIR)
    run_command('initctl reload-configuration')
    apt_install('nfs-kernel-server')
    # StarCluster expects the service under the shorter 'nfs' name.
    run_command('ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs')
def install_default_packages():
    """Install the baseline developer/tooling package set.

    Preseeds debconf with empty MySQL root passwords first so that
    mysql-server installs without an interactive prompt.
    """
    # Stop mysql from interactively asking for a password during install.
    preseedf = '/tmp/mysql-preseed.txt'
    preseeds = """\
mysql-server mysql-server/root_password select
mysql-server mysql-server/root_password seen true
mysql-server mysql-server/root_password_again select
mysql-server mysql-server/root_password_again seen true
"""
    # Context manager replaces the manual open/close pair.
    with open(preseedf, 'w') as mysqlpreseed:
        mysqlpreseed.write(preseeds)
    run_command('debconf-set-selections < %s' % preseedf)
    run_command('rm %s' % preseedf)
    pkgs = ["git", "mercurial", "subversion", "cvs", "vim", "vim-scripts",
            "emacs", "tmux", "screen", "zsh", "ksh", "csh", "tcsh", "encfs",
            "keychain", "unzip", "rar", "unace", "ec2-api-tools",
            "ec2-ami-tools", "mysql-server", "mysql-client", "apache2",
            "libapache2-mod-wsgi", "sysv-rc-conf", "pssh", "cython", "irssi",
            "htop", "mosh", "default-jdk", "xvfb", "python-imaging",
            "python-ctypes"]
    apt_install(' '.join(pkgs))
def install_python_packges():
    """Install a set of Python packages via pip, pulling in the build
    dependencies of the corresponding Debian packages first.
    """
    pypkgs = ['python-boto', 'python-paramiko', 'python-django',
              'python-pudb']
    for pypkg in pypkgs:
        if pypkg.startswith('python-'):
            # The pip project name is the Debian name minus 'python-'.
            pip_name = pypkg.split('python-')[1]
            apt_command('build-dep %s' % pip_name)
        else:
            pip_name = pypkg
        # BUG FIX: the original executed the literal string
        # 'pip install %s' -- the format argument was missing entirely.
        run_command('pip install %s' % pip_name)
def configure_init():
    """Remove boot-time rc.d links for these services so they don't start
    automatically; StarCluster manages them at cluster start instead."""
    for script in ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql']:
        run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
    """Strip logs, caches, credentials, and build artifacts before the
    AMI snapshot, and restore the real initctl saved by setup_environ()."""
    run_command('rm -f /etc/resolv.conf')
    run_command('rm -rf /var/run/resolvconf')
    run_command('rm -f /etc/mtab')
    run_command('rm -rf /root/*')
    # Remove root's dotfiles except the shell config we want to keep.
    exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
    for dot in glob.glob("/root/.*"):
        if dot not in exclude:
            run_command('rm -rf %s' % dot)
    for path in glob.glob('/usr/local/src/*'):
        if os.path.isdir(path):
            shutil.rmtree(path)
    run_command('rm -f /var/cache/apt/archives/*.deb')
    run_command('rm -f /var/cache/apt/archives/partial/*')
    # BUG FIX: the original globbed the bare directory '/etc/profile.d',
    # which only ever matched the directory itself, so the byobu profile
    # scripts were never removed. Glob the directory contents instead.
    for f in glob.glob('/etc/profile.d/*'):
        if 'byobu' in f:
            run_command('rm -f %s' % f)
    if os.path.islink('/sbin/initctl') and os.path.isfile('/sbin/initctl.bak'):
        run_command('mv -f /sbin/initctl.bak /sbin/initctl')
def main():
    """Build the StarCluster AMI: configure the base system, then install
    the scheduler, scientific, and supporting software stacks in order."""
    # Everything below writes to /etc and installs packages; require root.
    if os.getuid() != 0:
        sys.stderr.write('you must be root to run this script\n')
        return
    setup_environ()
    configure_motd()
    configure_cloud_init()
    configure_bash()
    configure_apt_sources()
    upgrade_packages()
    install_build_utils()
    install_default_packages()
    install_gridscheduler()
    install_condor()
    #install_torque()
    install_pydrmaa()
    install_blas_lapack()
    install_numpy_scipy()
    install_matplotlib()
    install_pandas()
    install_ipython()
    install_mpi()
    install_hadoop()
    install_nfs()
    install_julia()
    configure_init()
    cleanup()
# Script entry point: build and configure the StarCluster AMI.
if __name__ == '__main__':
    main()
| gpl-3.0 |
jendap/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 27 | 11083 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of `Estimator` API (deprecated).
This module is deprecated. Please use
`tf.contrib.factorization.KMeansClustering` instead of
`tf.contrib.learn.KMeansClustering`. It has a similar interface, but uses the
`tf.estimator.Estimator` API instead of `tf.contrib.learn.Estimator`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.deprecation import deprecated
# Deprecation notice pointing users at the tf.contrib.factorization
# replacement; attached to every public entry point via @deprecated.
_USE_TF_CONTRIB_FACTORIZATION = (
    'Please use tf.contrib.factorization.KMeansClustering instead of'
    ' tf.contrib.learn.KMeansClustering. It has a similar interface, but uses'
    ' the tf.estimator.Estimator API instead of tf.contrib.learn.Estimator.')
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
  """Stops when the change in loss goes below a tolerance."""
  def __init__(self, tolerance):
    """Initializes _LossRelativeChangeHook.
    Args:
      tolerance: A relative tolerance of change between iterations.
    """
    self._tolerance = tolerance
    # Loss observed on the previous step; None until the first step runs.
    self._prev_loss = None
  def begin(self):
    # Resolve the named loss tensor once the graph has been built.
    self._loss_tensor = ops.get_default_graph().get_tensor_by_name(
        KMeansClustering.LOSS_OP_NAME + ':0')
    assert self._loss_tensor is not None
  def before_run(self, run_context):
    del run_context
    # Fetch the current loss value alongside every session run.
    return SessionRunArgs(
        fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
  def after_run(self, run_context, run_values):
    loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
    assert loss is not None
    if self._prev_loss is not None:
      # Relative change; the +1 in the denominator guards near-zero losses.
      relative_change = (abs(loss - self._prev_loss) /
                         (1 + abs(self._prev_loss)))
      if relative_change < self._tolerance:
        run_context.request_stop()
    self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
  """Initializes clusters or waits for cluster initialization."""
  def __init__(self, init_op, is_initialized_op, is_chief):
    # init_op: op performing cluster initialization (run by chief only).
    # is_initialized_op: boolean tensor, True once clusters exist.
    # is_chief: whether this worker is responsible for initialization.
    self._init_op = init_op
    self._is_chief = is_chief
    self._is_initialized_op = is_initialized_op
  def after_create_session(self, session, _):
    assert self._init_op.graph == ops.get_default_graph()
    assert self._is_initialized_op.graph == self._init_op.graph
    # The chief runs the init op; other workers poll once per second
    # until initialization is visible.
    while True:
      try:
        if session.run(self._is_initialized_op):
          break
        elif self._is_chief:
          session.run(self._init_op)
        else:
          time.sleep(1)
      except RuntimeError as e:
        logging.info(e)
def _parse_tensor_or_dict(features):
  """Helper function to parse features.

  If *features* is a dict of tensors, concatenates them column-wise in
  sorted key order (colocated with the first tensor) into one matrix;
  otherwise the input is returned unchanged.
  """
  if isinstance(features, dict):
    keys = sorted(features.keys())
    with ops.colocate_with(features[keys[0]]):
      features = array_ops.concat([features[k] for k in keys], 1)
  return features
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
  """Model function for KMeansClustering estimator.

  Builds the k-means training graph from `params`, wires up global-step
  incrementing and initialization/early-stopping hooks, and returns a
  ModelFnOps with cluster-index and per-cluster-score predictions.
  """
  # Clustering is unsupervised: no labels are expected.
  assert labels is None, labels
  (all_scores, model_predictions, losses,
   is_initialized, init_op, training_op) = clustering_ops.KMeans(
       _parse_tensor_or_dict(features),
       params.get('num_clusters'),
       initial_clusters=params.get('training_initial_clusters'),
       distance_metric=params.get('distance_metric'),
       use_mini_batch=params.get('use_mini_batch'),
       mini_batch_steps_per_iteration=params.get(
           'mini_batch_steps_per_iteration'),
       random_seed=params.get('random_seed'),
       kmeans_plus_plus_num_retries=params.get(
           'kmeans_plus_plus_num_retries')).training_graph()
  incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
  # Name the summed loss so _LossRelativeChangeHook can look it up by name.
  loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
  summary.scalar('loss/raw', loss)
  # Couple the global-step increment to every training step.
  training_op = with_dependencies([training_op, incr_step], loss)
  predictions = {
      KMeansClustering.ALL_SCORES: all_scores[0],
      KMeansClustering.CLUSTER_IDX: model_predictions[0],
  }
  eval_metric_ops = {KMeansClustering.SCORES: loss}
  # Make sure clusters are initialized before any training step runs.
  training_hooks = [_InitializeClustersHook(
      init_op, is_initialized, config.is_chief)]
  relative_tolerance = params.get('relative_tolerance')
  if relative_tolerance is not None:
    training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
  return ModelFnOps(
      mode=mode,
      predictions=predictions,
      eval_metric_ops=eval_metric_ops,
      loss=loss,
      train_op=training_op,
      training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
  """An Estimator for K-Means clustering.
  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.
  """
  # Distance metrics and initialization schemes, re-exported from
  # clustering_ops for caller convenience.
  SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
  COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
  RANDOM_INIT = clustering_ops.RANDOM_INIT
  KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
  # Keys used in the predictions/eval dicts and as named graph ops.
  SCORES = 'scores'
  CLUSTER_IDX = 'cluster_idx'
  CLUSTERS = 'clusters'
  ALL_SCORES = 'all_scores'
  LOSS_OP_NAME = 'kmeans_loss'
  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def __init__(self,
               num_clusters,
               model_dir=None,
               initial_clusters=RANDOM_INIT,
               distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
               random_seed=0,
               use_mini_batch=True,
               mini_batch_steps_per_iteration=1,
               kmeans_plus_plus_num_retries=2,
               relative_tolerance=None,
               config=None):
    """Creates a model for running KMeans training and inference.
    Args:
      num_clusters: number of clusters to train.
      model_dir: the directory to save the model results and log files.
      initial_clusters: specifies how to initialize the clusters for training.
        See clustering_ops.kmeans for the possible values.
      distance_metric: the distance metric used for clustering.
        See clustering_ops.kmeans for the possible values.
      random_seed: Python integer. Seed for PRNG used to initialize centers.
      use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
        full batch.
      mini_batch_steps_per_iteration: number of steps after which the updated
        cluster centers are synced back to a master copy. See clustering_ops.py
        for more details.
      kmeans_plus_plus_num_retries: For each point that is sampled during
        kmeans++ initialization, this parameter specifies the number of
        additional points to draw from the current distribution before selecting
        the best. If a negative value is specified, a heuristic is used to
        sample O(log(num_to_sample)) additional points.
      relative_tolerance: A relative tolerance of change in the loss between
        iterations. Stops learning if the loss changes less than this amount.
        Note that this may not work correctly if use_mini_batch=True.
      config: See Estimator
    """
    # All clustering options are forwarded to the model_fn via params.
    params = {}
    params['num_clusters'] = num_clusters
    params['training_initial_clusters'] = initial_clusters
    params['distance_metric'] = distance_metric
    params['random_seed'] = random_seed
    params['use_mini_batch'] = use_mini_batch
    params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
    params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
    params['relative_tolerance'] = relative_tolerance
    super(KMeansClustering, self).__init__(
        model_fn=_kmeans_clustering_model_fn,
        params=params,
        model_dir=model_dir,
        config=config)
  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def predict_cluster_idx(self, input_fn=None):
    """Yields predicted cluster indices."""
    key = KMeansClustering.CLUSTER_IDX
    results = super(KMeansClustering, self).predict(
        input_fn=input_fn, outputs=[key])
    for result in results:
      yield result[key]
  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def score(self, input_fn=None, steps=None):
    """Predict total sum of distances to nearest clusters.
    Note that this function is different from the corresponding one in sklearn
    which returns the negative of the sum of distances.
    Args:
      input_fn: see predict.
      steps: see predict.
    Returns:
      Total sum of distances to nearest clusters.
    """
    return np.sum(
        self.evaluate(
            input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])
  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def transform(self, input_fn=None, as_iterable=False):
    """Transforms each element to distances to cluster centers.
    Note that this function is different from the corresponding one in sklearn.
    For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
    EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
    distance.
    Args:
      input_fn: see predict.
      as_iterable: see predict
    Returns:
      Array with same number of rows as x, and num_clusters columns, containing
      distances to the cluster centers.
    """
    key = KMeansClustering.ALL_SCORES
    results = super(KMeansClustering, self).predict(
        input_fn=input_fn,
        outputs=[key],
        as_iterable=as_iterable)
    if not as_iterable:
      return results[key]
    else:
      return results
  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def clusters(self):
    """Returns cluster centers."""
    return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
| apache-2.0 |
georgetown-analytics/housing-risk | code/prediction/run_dc_models.py | 1 | 2169 |
import run_models
import pickle
import pandas
def get_dc_decisions_table():
    """Load the DC buildings decision table from the project database.

    Reads the SQL from select_dc_buildings.sql and executes it against
    the configured database connection.

    Returns:
        pandas.DataFrame with one row per DC building decision record.
    """
    database_connection = run_models.data_utilities.database_management.get_database_connection('database')
    query_path = "select_dc_buildings.sql"
    # Context manager guarantees the SQL file is closed even if reading
    # fails (the original left the handle open on error).
    with open(query_path, 'r') as query_file:
        query_text = query_file.read()
    query_dataframe = run_models.pandas.read_sql(query_text, database_connection)
    return query_dataframe
def predict_dc_models(dataframe):
    """Run the saved modeler's models over the DC feature columns.

    Args:
        dataframe: full decisions DataFrame; only the model-input columns
            are passed to the models.

    Returns:
        The unpickled modeler, updated with an ``answers`` DataFrame of
        per-model predictions (index-aligned with *dataframe*).
    """
    import os
    only_testing_fields_dataframe = dataframe[['median_rent', 'contract_term_months_qty',
                                               'previous_contract_term_months', 'assisted_units_count',
                                               'rent_to_fmr_ratio', 'br0_count', 'br1_count', 'br2_count', 'br3_count',
                                               'br4_count', 'br5_count', 'program_type_group_name',
                                               'is_hud_administered_ind', 'is_acc_old_ind',
                                               'is_acc_performance_based_ind', 'is_hud_owned_ind',
                                               'owner_company_type', 'mgmt_agent_company_type',
                                               'primary_financing_type']]
    # Portability fix: the original hard-coded a Windows backslash
    # separator ("completed_models\\..."), which breaks on POSIX hosts.
    filename = os.path.join("completed_models",
                            "for_presentation_under_sampling_modeler.pickle")
    # NOTE: unpickling executes arbitrary code -- only load trusted,
    # project-generated model files here.
    with open(filename, 'rb') as f:
        modeler = pickle.load(f)
    updated_modeler = run_models.predict_all_models(only_testing_fields_dataframe, modeler, debug=False)
    print(updated_modeler.answers.head())
    return updated_modeler
if __name__ == '__main__':
    # Load the full DC decisions table from the database.
    dataframe = get_dc_decisions_table()
    # Columns that identify each record but are not model inputs.
    only_identifying_fields = dataframe[['decision_data_year', 'altered_decision_data_year', 'rent_snapshot_id',
                                         'contract_snapshot_id', 'contract_number', 'property_name_text',
                                         'owner_organization_name', 'address', 'city', 'state', 'geoid',
                                         'geo_id2']]
    # Run every saved model over the feature columns.
    updated_modeler = predict_dc_models(dataframe)
    predicted_answers = updated_modeler.answers
    # Re-attach identifying columns to the per-model predictions
    # (column-wise concat relies on both frames sharing the same index).
    rejoined_dataframe = pandas.concat([only_identifying_fields, predicted_answers], axis=1)
    simple_dataframe_predictions = rejoined_dataframe[['contract_number','property_name_text','RandomForest']]
    simple_dataframe_predictions.to_csv('predicted_dc_answers.csv')
| mit |
NeuroDataDesign/seelviz | Tony/scripts/atlasregiongraphWithLabels.py | 2 | 3220 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function
__author__ = 'seelviz'
from plotly.offline import download_plotlyjs
from plotly.graph_objs import *
from plotly import tools
import plotly
import os
#os.chdir('C:/Users/L/Documents/Homework/BME/Neuro Data I/Data/')
import csv,gc # garbage memory collection :)
import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import axes3d
# from mpl_toolkits.mplot3d import axes3d
# from collections import namedtuple
import csv
import re
import matplotlib
import time
import seaborn as sns
from collections import OrderedDict
class atlasregiongraph(object):
    """Class for generating the color coded atlas region graphs.

    Loads <token>.csv (columns x, y, z, <skipped>, region) and renders one
    plotly 3-D scatter trace per atlas region, written to an HTML file.
    """
    def __init__(self, token, path=None):
        """Load the point cloud for *token* from <path or token>/<token>.csv.

        :param token: dataset name; also used as the CSV base filename.
        :param path: directory containing the CSV; defaults to a directory
            named after the token.
        """
        self._token = token
        # NOTE(review): when path is None, self._path stays None and the
        # output-filename concatenation in generate_atlas_region_graph()
        # (self._path + '/') will raise TypeError -- confirm intended usage.
        self._path = path
        data_txt = ""
        if path == None:
            data_txt = token + '/' + token + '.csv'
        else:
            data_txt = path + '/' + token + '.csv'
        # Columns 0-2 are x/y/z coordinates, column 4 is the region id.
        self._data = np.genfromtxt(data_txt, delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])
    def generate_atlas_region_graph(self, path=None, numRegions = 10):
        """Render the region-colored 3-D scatter plot to an HTML file.

        :param path: if given, reload the CSV from <token>/<token>.csv
            instead of using the cached data.
        :param numRegions: number of distinct husl palette colors; must be
            >= the number of distinct regions or indexing will fail.
        """
        font = {'weight' : 'bold',
                'size' : 18}
        matplotlib.rc('font', **font)
        thedata = self._data
        if path == None:
            thedata = self._data
        else:
            ### load data
            thedata = np.genfromtxt(self._token + '/' + self._token + '.csv', delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])
        # Group points by region, keyed by the region's atlas name.
        region_dict = OrderedDict()
        for l in thedata:
            # NOTE(review): atlasCCF is not defined in this module --
            # presumably a region-id -> name mapping imported elsewhere;
            # as written this raises NameError. TODO confirm.
            trace = atlasCCF[str(l[3])]
            #trace = 'trace' + str(l[3])
            if trace not in region_dict:
                region_dict[trace] = np.array([[l[0], l[1], l[2], l[3]]])
            else:
                tmp = np.array([[l[0], l[1], l[2], l[3]]])
                region_dict[trace] = np.concatenate((region_dict.get(trace, np.zeros((1,4))), tmp), axis=0)
        # One visually distinct color per region.
        current_palette = sns.color_palette("husl", numRegions)
        # print current_palette
        data = []
        for i, key in enumerate(region_dict):
            trace = region_dict[key]
            tmp_col = current_palette[i]
            # plotly expects a CSS-like 'rgb(r, g, b)' string.
            tmp_col_lit = 'rgb' + str(tmp_col)
            trace_scatter = Scatter3d(
                x = trace[:,0],
                y = trace[:,1],
                z = trace[:,2],
                mode='markers',
                marker=dict(
                    size=1.2,
                    color=tmp_col_lit, # one fixed color per region trace
                    colorscale='Viridis', # choose a colorscale
                    opacity=0.15
                )
            )
            data.append(trace_scatter)
        # Black background, no margins.
        layout = Layout(
            margin=dict(
                l=0,
                r=0,
                b=0,
                t=0
            ),
            paper_bgcolor='rgb(0,0,0)',
            plot_bgcolor='rgb(0,0,0)'
        )
        fig = Figure(data=data, layout=layout)
        plotly.offline.plot(fig, filename= self._path + '/' + self._token + "_region_color.html")
| apache-2.0 |
krischer/LASIF | lasif/window_selection.py | 1 | 36635 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Window selection algorithm.
This module aims to provide a window selection algorithm suitable for
calculating phase misfits between two seismic waveforms.
The main function is the select_windows() function. The selection process is a
multi-stage process. Initially all time steps are considered to be valid in
the sense as being suitable for window selection. Then a number of selectors
is applied, progressively excluding more and more time steps.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
import itertools
import math
import numpy as np
from obspy import geodetics
import obspy.signal.filter
from scipy.signal import argrelextrema
def flatnotmasked_contiguous(time_windows):
    """
    Wrapper around numpy.ma.flatnotmasked_contiguous() that always returns
    an iterable, so callers can loop even when every sample is masked.
    """
    contiguous_slices = np.ma.flatnotmasked_contiguous(time_windows)
    # Older numpy releases return None when no unmasked values exist;
    # normalize that to an empty list.
    return [] if contiguous_slices is None else contiguous_slices
def find_local_extrema(data):
    """
    Function finding local extrema. It can also deal with flat extrema,
    e.g. a flat top or bottom. In that case the first index of all flat
    values will be returned.
    Returns a tuple of maxima and minima indices.
    """
    length = len(data) - 1
    diff = np.diff(data)
    # Indices where the series is locally flat (zero first difference).
    flats = np.argwhere(diff == 0)
    # Discard neighbouring flat points.
    new_flats = list(flats[0:1])
    for i, j in zip(flats[:-1], flats[1:]):
        if j - i == 1:
            continue
        new_flats.append(j)
    flats = new_flats
    maxima = []
    minima = []
    # Go over each flats position and check if its a maxima/minima:
    # march left and right from the flat run until a non-zero slope is
    # found on each side; a flat maximum rises then falls, a flat
    # minimum falls then rises. Matching side types confirm an extremum.
    for idx in flats:
        l_type = "left"
        r_type = "right"
        for i in itertools.count():
            this_idx = idx - i - 1
            if diff[this_idx] < 0:
                l_type = "minima"
                break
            elif diff[this_idx] > 0:
                l_type = "maxima"
                break
        for i in itertools.count():
            this_idx = idx + i + 1
            if this_idx >= len(diff):
                break
            if diff[this_idx] < 0:
                r_type = "maxima"
                break
            elif diff[this_idx] > 0:
                r_type = "minima"
                break
        if r_type != l_type:
            continue
        if r_type == "maxima":
            maxima.append(int(idx))
        else:
            minima.append(int(idx))
    # Merge the flat extrema with the strict extrema from scipy.
    maxs = set(list(argrelextrema(data, np.greater)[0]))
    mins = set(list(argrelextrema(data, np.less)[0]))
    peaks, troughs = (
        sorted(list(maxs.union(set(maxima)))),
        sorted(list(mins.union(set(minima)))))
    # Special case handling for missing one or the other.
    if not peaks and not troughs:
        return np.array([], dtype=np.int32), np.array([], dtype=np.int32)
    elif not peaks:
        if 0 not in troughs:
            peaks.insert(0, 0)
        if length not in troughs:
            peaks.append(length)
        return (np.array(peaks, dtype=np.int32),
                np.array(troughs, dtype=np.int32))
    elif not troughs:
        if 0 not in peaks:
            troughs.insert(0, 0)
        if length not in peaks:
            troughs.append(length)
        return (np.array(peaks, dtype=np.int32),
                np.array(troughs, dtype=np.int32))
    # Mark the first and last values as well to facilitate the peak and
    # trough marching algorithm
    if 0 not in peaks and 0 not in troughs:
        if peaks[0] < troughs[0]:
            troughs.insert(0, 0)
        else:
            peaks.insert(0, 0)
    if length not in peaks and length not in troughs:
        if peaks[-1] < troughs[-1]:
            peaks.append(length)
        else:
            troughs.append(length)
    return (np.array(peaks, dtype=np.int32),
            np.array(troughs, dtype=np.int32))
def find_closest(ref_array, target):
    """
    For every value in target, find the index of ref_array to which
    the value is closest.
    from http://stackoverflow.com/a/8929827/1657047
    :param ref_array: The reference array. Must be sorted!
    :type ref_array: :class:`numpy.ndarray`
    :param target: The target array.
    :type target: :class:`numpy.ndarray`
    >>> ref_array = np.arange(0, 20.)
    >>> target = np.array([-2, 100., 2., 2.4, 2.5, 2.6])
    >>> find_closest(ref_array, target)
    array([ 0, 19,  2,  2,  3,  3])
    """
    # Insertion positions, clamped so pos-1 and pos are both valid indices.
    pos = np.clip(ref_array.searchsorted(target), 1, len(ref_array) - 1)
    below = ref_array[pos - 1]
    above = ref_array[pos]
    # Step back one index wherever the lower neighbour is strictly closer.
    closer_below = target - below < above - target
    return pos - closer_below
def _plot_mask(new_mask, old_mask, name=None):
    """
    Helper function plotting the remaining time segments after an elimination
    stage.
    Useful to figure out which stage is responsible for a certain window
    being picked/rejected.
    :param new_mask: The mask after the elimination stage.
    :param old_mask: The mask before the elimination stage.
    :param name: The name of the elimination stage.
    :return:
    """
    # Lazy imports as not needed by default.
    import matplotlib.pylab as plt  # NOQA
    import matplotlib.patheffects as PathEffects  # NOQA
    old_mask = old_mask.copy()
    new_mask = new_mask.copy()
    # XOR keeps only the samples that this particular stage eliminated.
    new_mask.mask = np.bitwise_xor(old_mask.mask, new_mask.mask)
    old_mask.mask = np.invert(old_mask.mask)
    # Gray: segments still valid before this stage ran.
    for i in flatnotmasked_contiguous(old_mask):
        plt.fill_between((i.start, i.stop), (-1.0, -1.0), (2.0, 2.0),
                         color="gray", alpha=0.3, lw=0)
    new_mask.mask = np.invert(new_mask.mask)
    # Red: segments rejected by this stage.
    for i in flatnotmasked_contiguous(new_mask):
        plt.fill_between((i.start, i.stop), (-1.0, -1.0), (2.0, 2.0),
                         color="#fb9a99", lw=0)
    if name:
        # Label the stage at the right edge with a white outline for
        # readability over the colored bands.
        plt.text(len(new_mask) - 1 - 20, 0.5, name, verticalalignment="center",
                 horizontalalignment="right",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")],
                 fontweight=500)
    plt.xlim(0, len(new_mask) - 1)
    plt.ylim(0, 1)
    plt.yticks([])
    plt.gca().xaxis.set_ticklabels([])
def _window_generator(data_length, window_width):
"""
Simple generator yielding start and stop indices for sliding windows.
:param data_length: The complete length of the data series over which to
slide the window.
:param window_width: The desired window width.
"""
window_start = 0
while True:
window_end = window_start + window_width
if window_end > data_length:
break
yield (window_start, window_end, window_start + window_width // 2)
window_start += 1
def _log_window_selection(tr_id, msg):
"""
Helper function for consistent output during the window selection.
:param tr_id: The id of the current trace.
:param msg: The message to be printed.
"""
print "[Window selection for %s] %s" % (tr_id, msg)
# Dictionary to cache the TauPyModel so there is no need to reinitialize it
# each time which is a fairly expensive operation.
# Lives for the whole process; entries are only ever added, never evicted.
TAUPY_MODEL_CACHE = {}
def select_windows(data_trace, synthetic_trace, event_latitude,
                   event_longitude, event_depth_in_km,
                   station_latitude, station_longitude, minimum_period,
                   maximum_period,
                   min_cc=0.10, max_noise=0.10, max_noise_window=0.4,
                   min_velocity=2.4, threshold_shift=0.30,
                   threshold_correlation=0.75, min_length_period=1.5,
                   min_peaks_troughs=2, max_energy_ratio=10.0,
                   min_envelope_similarity=0.2,
                   verbose=False, plot=False):
    """
    Window selection algorithm for picking windows suitable for misfit
    calculation based on phase differences.

    Returns a list of windows which might be empty due to various reasons.

    This function is really long and a lot of things. For a more detailed
    description, please see the LASIF paper.

    :param data_trace: The data trace.
    :type data_trace: :class:`~obspy.core.trace.Trace`
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: :class:`~obspy.core.trace.Trace`
    :param event_latitude: The event latitude.
    :type event_latitude: float
    :param event_longitude: The event longitude.
    :type event_longitude: float
    :param event_depth_in_km: The event depth in km.
    :type event_depth_in_km: float
    :param station_latitude: The station latitude.
    :type station_latitude: float
    :param station_longitude: The station longitude.
    :type station_longitude: float
    :param minimum_period: The minimum period of the data in seconds.
    :type minimum_period: float
    :param maximum_period: The maximum period of the data in seconds.
    :type maximum_period: float
    :param min_cc: Minimum normalised correlation coefficient of the
        complete traces.
    :type min_cc: float
    :param max_noise: Maximum relative noise level for the whole trace.
        Measured from maximum amplitudes before and after the first arrival.
    :type max_noise: float
    :param max_noise_window: Maximum relative noise level for individual
        windows.
    :type max_noise_window: float
    :param min_velocity: All arrivals later than those corresponding to the
        threshold velocity [km/s] will be excluded.
    :type min_velocity: float
    :param threshold_shift: Maximum allowable time shift within a window,
        as a fraction of the minimum period.
    :type threshold_shift: float
    :param threshold_correlation: Minimum normalised correlation coeeficient
        within a window.
    :type threshold_correlation: float
    :param min_length_period: Minimum length of the time windows relative to
        the minimum period.
    :type min_length_period: float
    :param min_peaks_troughs: Minimum number of extrema in an individual
        time window (excluding the edges).
    :type min_peaks_troughs: float
    :param max_energy_ratio: Maximum energy ratio between data and
        synthetics within a time window. Don't make this too small!
    :type max_energy_ratio: float
    :param min_envelope_similarity: The minimum similarity of the envelopes of
        both data and synthetics. This essentially assures that the
        amplitudes of data and synthetics can not diverge too much within a
        window. It is a bit like the inverse of the ratio of both envelopes
        so a value of 0.2 makes sure neither amplitude can be more then 5
        times larger than the other.
    :type min_envelope_similarity: float
    :param verbose: No output by default.
    :type verbose: bool
    :param plot: Create a plot of the algortihm while it does its work.
    :type plot: bool
    """
    # Shortcuts to frequently accessed variables.
    data_starttime = data_trace.stats.starttime
    data_delta = data_trace.stats.delta
    dt = data_trace.stats.delta
    npts = data_trace.stats.npts
    synth = synthetic_trace.data
    data = data_trace.data
    times = data_trace.times()

    # Fill cache if necessary.
    if not TAUPY_MODEL_CACHE:
        from obspy.taup import TauPyModel  # NOQA
        TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
    model = TAUPY_MODEL_CACHE["model"]

    # -------------------------------------------------------------------------
    # Geographical calculations and the time of the first arrival.
    # -------------------------------------------------------------------------
    dist_in_deg = geodetics.locations2degrees(station_latitude,
                                              station_longitude,
                                              event_latitude, event_longitude)
    dist_in_km = geodetics.calc_vincenty_inverse(
        station_latitude, station_longitude, event_latitude,
        event_longitude)[0] / 1000.0

    # Get only a couple of P phases which should be the first arrival
    # for every epicentral distance. Its quite a bit faster than calculating
    # the arrival times for every phase.
    # Assumes the first sample is the centroid time of the event.
    tts = model.get_travel_times(source_depth_in_km=event_depth_in_km,
                                 distance_in_degree=dist_in_deg,
                                 phase_list=["ttp"])
    # Sort just as a safety measure.
    tts = sorted(tts, key=lambda x: x.time)
    first_tt_arrival = tts[0].time

    # -------------------------------------------------------------------------
    # Window settings
    # -------------------------------------------------------------------------
    # Number of samples in the sliding window. Currently, the length of the
    # window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an uneven number; just to have a trivial midpoint
    # definition and one sample does not matter much in any case.
    window_length = int(round(float(2 * minimum_period) / dt))
    if not window_length % 2:
        window_length += 1

    # Use a Hanning window. No particular reason for it but its a well-behaved
    # window and has nice spectral properties.
    taper = np.hanning(window_length)

    # =========================================================================
    # check if whole seismograms are sufficiently correlated and estimate
    # noise level
    # =========================================================================

    # Overall Correlation coefficient.
    norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2))
    cc = np.sum(data * synth) / norm
    if verbose:
        _log_window_selection(data_trace.id,
                              "Correlation Coefficient: %.4f" % cc)

    # Estimate noise level from waveforms prior to the first arrival.
    idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
    idx_end = max(10, idx_end)
    idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
    idx_start = max(10, idx_start)

    if idx_start >= idx_end:
        idx_start = max(0, idx_end - 10)

    abs_data = np.abs(data)
    noise_absolute = abs_data[idx_start:idx_end].max()
    noise_relative = noise_absolute / abs_data.max()

    if verbose:
        _log_window_selection(data_trace.id,
                              "Absolute Noise Level: %e" % noise_absolute)
        _log_window_selection(data_trace.id,
                              "Relative Noise Level: %e" % noise_relative)

    # Basic global rejection criteria.  ``accept_traces`` is either the
    # literal ``True`` or a string holding the rejection reason, hence the
    # ``is True`` / ``is not True`` identity checks below.
    accept_traces = True
    if (cc < min_cc) and (noise_relative > max_noise / 3.0):
        msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc)
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    if noise_relative > max_noise:
        msg = "Noise level %.3f is above threshold of %.3f" % (
            noise_relative, max_noise)
        if verbose:
            _log_window_selection(
                data_trace.id, msg)
        accept_traces = msg

    # Calculate the envelope of both data and synthetics. This is to make sure
    # that the amplitude of both is not too different over time and is
    # used as another selector. Only calculated if the trace is generally
    # accepted as it is fairly slow.
    if accept_traces is True:
        data_env = obspy.signal.filter.envelope(data)
        synth_env = obspy.signal.filter.envelope(synth)

    # -------------------------------------------------------------------------
    # Initial Plot setup.
    # -------------------------------------------------------------------------
    # All the plot calls are interleaved. I realize this is really ugly but
    # the alternative would be to either have two functions (one with plots,
    # one without) or split the plotting function in various subfunctions,
    # neither of which are acceptable in my opinion. The impact on
    # performance is minimal if plotting is turned off: all imports are lazy
    # and a couple of conditionals are cheap.
    if plot:
        import matplotlib.pylab as plt  # NOQA
        import matplotlib.patheffects as PathEffects  # NOQA

        if accept_traces is True:
            plt.figure(figsize=(18, 12))
            plt.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.95,
                                wspace=None, hspace=0.0)
            grid = (31, 1)

            # Axes showing the data.
            data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
        else:
            # Only show one axes it the traces are not accepted.
            plt.figure(figsize=(18, 3))

        # Plot envelopes if needed.
        if accept_traces is True:
            plt.plot(times, data_env, color="black", alpha=0.5, lw=0.4,
                     label="data envelope")
            plt.plot(synthetic_trace.times(), synth_env, color="#e41a1c",
                     alpha=0.4, lw=0.5, label="synthetics envelope")

        plt.plot(times, data, color="black", label="data", lw=1.5)
        plt.plot(synthetic_trace.times(), synth, color="#e41a1c",
                 label="synthetics", lw=1.5)

        # Symmetric around y axis.
        middle = data.mean()
        d_max, d_min = data.max(), data.min()
        r = max(d_max - middle, middle - d_min) * 1.1
        ylim = (middle - r, middle + r)
        xlim = (times[0], times[-1])
        plt.ylim(*ylim)
        plt.xlim(*xlim)

        offset = (xlim[1] - xlim[0]) * 0.005
        plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival + offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "first arrival", verticalalignment="top",
                 horizontalalignment="left", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.vlines(first_tt_arrival - minimum_period / 2.0, ylim[0], ylim[1],
                   colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival - minimum_period / 2.0 - offset,
                 ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                 "first arrival - min period / 2", verticalalignment="bottom",
                 horizontalalignment="right", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        for velocity in [6, 5, 4, 3, min_velocity]:
            tt = dist_in_km / velocity
            plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
            if velocity == min_velocity:
                hal = "right"
                o_s = -1.0 * offset
            else:
                hal = "left"
                o_s = offset
            plt.text(tt + o_s, ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                     str(velocity) + " km/s", verticalalignment="bottom",
                     horizontalalignment=hal, color="0.15")
        plt.vlines(dist_in_km / min_velocity + minimum_period / 2.0,
                   ylim[0], ylim[1], colors="gray", lw=2)
        plt.text(dist_in_km / min_velocity + minimum_period / 2.0 - offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "min surface velocity + min period / 2",
                 verticalalignment="top",
                 horizontalalignment="right", color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.hlines(noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.hlines(-noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.text(offset, noise_absolute + (ylim[1] - ylim[0]) * 0.01,
                 "noise level", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")
        plt.gca().xaxis.set_ticklabels([])

        # Plot the basic global information.
        ax = plt.gca()
        txt = (
            "Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f"
            % (cc, noise_absolute, noise_relative))
        ax.text(0.01, 0.95, txt, transform=ax.transAxes,
                fontdict=dict(fontsize="small", ha='left', va='top'),
                bbox=dict(boxstyle="round", fc="w", alpha=0.8))
        plt.suptitle("Channel %s" % data_trace.id, fontsize="larger")

        # Show plot and return if not accepted.
        if accept_traces is not True:
            txt = "Rejected: %s" % (accept_traces)
            ax.text(0.99, 0.95, txt, transform=ax.transAxes,
                    fontdict=dict(fontsize="small", ha='right', va='top'),
                    bbox=dict(boxstyle="round", fc="red", alpha=1.0))
            plt.show()
    if accept_traces is not True:
        return []

    # Initialise masked arrays. The mask will be set to True where no
    # windows are chosen.
    time_windows = np.ma.ones(npts)
    time_windows.mask = False
    if plot:
        old_time_windows = time_windows.copy()

    # Elimination Stage 1: Eliminate everything half a period before or
    # after the minimum and maximum travel times, respectively.
    # theoretical arrival as positive.
    min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt)
    max_idx = int(math.ceil((
        dist_in_km / min_velocity + minimum_period / 2.0) / dt))
    time_windows.mask[:min_idx + 1] = True
    time_windows.mask[max_idx:] = True
    if plot:
        plt.subplot2grid(grid, (8, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TRAVELTIME ELIMINATION")
        old_time_windows = time_windows.copy()

    # -------------------------------------------------------------------------
    # Compute sliding time shifts and correlation coefficients for time
    # frames that passed the traveltime elimination stage.
    # -------------------------------------------------------------------------
    # Allocate arrays to collect the time dependent values.
    sliding_time_shift = np.ma.zeros(npts, dtype="float32")
    sliding_time_shift.mask = True
    max_cc_coeff = np.ma.zeros(npts, dtype="float32")
    max_cc_coeff.mask = True

    for start_idx, end_idx, midpoint_idx in _window_generator(npts,
                                                              window_length):
        if not min_idx < midpoint_idx < max_idx:
            continue

        # Slice windows. Create a copy to be able to taper without affecting
        # the original time series.
        data_window = data[start_idx: end_idx].copy() * taper
        synthetic_window = \
            synth[start_idx: end_idx].copy() * taper

        # Elimination Stage 2: Skip windows that have essentially no energy
        # to avoid instabilities. No windows can be picked in these.
        if synthetic_window.ptp() < synth.ptp() * 0.001:
            time_windows.mask[midpoint_idx] = True
            continue

        # Calculate the time shift. Here this is defined as the shift of the
        # synthetics relative to the data. So a value of 2, for instance, means
        # that the synthetics are 2 timesteps later then the data.
        cc = np.correlate(data_window, synthetic_window, mode="full")

        time_shift = cc.argmax() - window_length + 1
        # Express the time shift in fraction of the minimum period.
        sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period

        # Normalized cross correlation.
        max_cc_value = cc.max() / np.sqrt((synthetic_window ** 2).sum() *
                                          (data_window ** 2).sum())
        max_cc_coeff[midpoint_idx] = max_cc_value

    if plot:
        plt.subplot2grid(grid, (9, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="NO ENERGY IN CC WINDOW")
        # Axes with the CC coeffs
        plt.subplot2grid(grid, (15, 0), rowspan=4)
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.hlines(-threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.hlines(threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, -threshold_shift - (2) * 0.03,
                 "threshold", verticalalignment="top",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, sliding_time_shift, color="#377eb8",
                 label="Time shift in fraction of minimum period", lw=1.5)
        ylim = plt.ylim()
        plt.yticks([-0.75, 0, 0.75])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
        plt.ylim(-1.0, 1.0)
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

        plt.subplot2grid(grid, (10, 0), rowspan=4)
        plt.hlines(threshold_correlation, xlim[0], xlim[1], color="0.15",
                   linestyle="--")
        plt.hlines(1, xlim[0], xlim[1], color="lightgray")
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.text(5, threshold_correlation + (1.4) * 0.01,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, max_cc_coeff, color="#4daf4a",
                 label="Maximum CC coefficient", lw=1.5)
        plt.ylim(-0.2, 1.2)
        plt.yticks([0, 0.5, 1])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # Elimination Stage 3: Mark all areas where the normalized cross
    # correlation coefficient is under threshold_correlation as negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[max_cc_coeff < threshold_correlation] = True
    if plot:
        plt.subplot2grid(grid, (14, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="CORRELATION COEFF THRESHOLD ELIMINATION")

    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than # threshold_shift times the dominant period as
    # negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = True
    if plot:
        plt.subplot2grid(grid, (19, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT THRESHOLD ELIMINATION")

    # Elimination Stage 5: Mark the area around every "travel time shift
    # jump" (based on the traveltime time difference) negative. The width of
    # the area is currently chosen to be a tenth of a dominant period to
    # each side.
    if plot:
        old_time_windows = time_windows.copy()
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
    indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer: index + sample_buffer] = True
    if plot:
        plt.subplot2grid(grid, (20, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT JUMPS ELIMINATION")

    # Clip both to avoid large numbers by division.
    stacked = np.vstack([
        np.ma.clip(synth_env, synth_env.max() * min_envelope_similarity * 0.5,
                   synth_env.max()),
        np.ma.clip(data_env, data_env.max() * min_envelope_similarity * 0.5,
                   data_env.max())])
    # Ratio.
    ratio = stacked.min(axis=0) / stacked.max(axis=0)

    # Elimination Stage 6: Make sure the amplitudes of both don't vary too
    # much.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[ratio < min_envelope_similarity] = True
    if plot:
        plt.subplot2grid(grid, (25, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION")

    if plot:
        plt.subplot2grid(grid, (21, 0), rowspan=4)
        plt.hlines(min_envelope_similarity, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, min_envelope_similarity + (2) * 0.03,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, ratio, color="#9B59B6",
                 label="Envelope amplitude similarity", lw=1.5)
        plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
        plt.ylim(0.05, 1.05)
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # First minimum window length elimination stage. This is cheap and if
    # not done it can easily destabilize the peak-and-trough marching stage
    # which would then have to deal with way more edge cases.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less then
        # min_length_period the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (26, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 1")

    # -------------------------------------------------------------------------
    # Peak and trough marching algorithm
    # -------------------------------------------------------------------------
    final_windows = []
    for i in flatnotmasked_contiguous(time_windows):
        # Cut respective windows.
        window_npts = i.stop - i.start
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]

        # Find extrema in the data and the synthetics.
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)

        window_mask = np.ones(window_npts, dtype="bool")

        closest_peaks = find_closest(data_p, synth_p)
        diffs = np.diff(closest_peaks)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_p[idx - 1]
            else:
                start = 0
            if idx < (len(synth_p) - 1):
                end = synth_p[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        closest_troughs = find_closest(data_t, synth_t)
        diffs = np.diff(closest_troughs)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_t[idx - 1]
            else:
                start = 0
            if idx < (len(synth_t) - 1):
                end = synth_t[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        window_mask = np.ma.masked_array(window_mask,
                                         mask=window_mask)

        if window_mask.mask.all():
            continue

        for j in flatnotmasked_contiguous(window_mask):
            final_windows.append((i.start + j.start, i.start + j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (27, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK AND TROUGH MARCHING ELIMINATION")

    # Loop through all the time windows, remove windows not satisfying the
    # minimum number of peaks and troughs per window. Acts mainly as a
    # safety guard.
    old_time_windows = time_windows.copy()
    for i in flatnotmasked_contiguous(old_time_windows):
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)
        if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
                min_peaks_troughs:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (28, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK/TROUGH COUNT ELIMINATION")

    # Second minimum window length elimination stage.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less then
        # min_length_period the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (29, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 2")

    # Final step, eliminating windows with little energy.
    final_windows = []
    for j in flatnotmasked_contiguous(time_windows):
        # Again assert a certain minimal length.
        if (j.stop - j.start) < min_length:
            continue

        # Compare the energy in the data window and the synthetic window.
        data_energy = (data[j.start: j.stop] ** 2).sum()
        synth_energy = (synth[j.start: j.stop] ** 2).sum()
        energies = sorted([data_energy, synth_energy])
        if energies[1] > max_energy_ratio * energies[0]:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to energy ratio between "
                    "data and synthetics.")
            continue

        # Check that amplitudes in the data are above the noise
        if noise_absolute / data[j.start: j.stop].ptp() > \
                max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due having no amplitude above the "
                    "signal to noise ratio.")
            # BUG FIX: the deselection above was only logged but never
            # applied -- without this ``continue`` the window was appended
            # regardless of the noise check.
            continue
        final_windows.append((j.start, j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (30, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="LITTLE ENERGY ELIMINATION")

    if verbose:
        _log_window_selection(
            data_trace.id,
            "Done, Selected %i window(s)" % len(final_windows))

    # Final step is to convert the index value windows to actual times.
    windows = []
    for start, stop in final_windows:
        start = data_starttime + start * data_delta
        stop = data_starttime + stop * data_delta
        windows.append((start, stop))

    if plot:
        # Plot the final windows to the data axes.
        import matplotlib.transforms as mtransforms  # NOQA
        ax = data_plot
        trans = mtransforms.blended_transform_factory(ax.transData,
                                                      ax.transAxes)
        for start, stop in final_windows:
            ax.fill_between([start * data_delta, stop * data_delta], 0, 1,
                            facecolor="#CDDC39", alpha=0.5, transform=trans)
        plt.show()

    return windows
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| gpl-3.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/io/tests/json/test_ujson.py | 7 | 56342 | # -*- coding: utf-8 -*-
from unittest import TestCase
try:
import json
except ImportError:
import simplejson as json
import math
import nose
import platform
import sys
import time
import datetime
import calendar
import re
import decimal
from functools import partial
from pandas.compat import range, zip, StringIO, u
import pandas.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex
import pandas.util.testing as tm
def _skip_if_python_ver(skip_major, skip_minor=None):
major, minor = sys.version_info[:2]
if major == skip_major and (skip_minor is None or minor == skip_minor):
raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
# json.dumps already produces text on Python 3; on Python 2 force utf-8
# encoding so both interpreters yield equivalent JSON for unicode input.
json_unicode = (json.dumps if compat.PY3
                else partial(json.dumps, encoding="utf-8"))
class UltraJSONTests(TestCase):
    def test_encodeDecimal(self):
        # decimal.Decimal must round-trip through ujson as a plain float.
        sut = decimal.Decimal("1337.1337")
        encoded = ujson.encode(sut, double_precision=15)
        decoded = ujson.decode(encoded)
        self.assertEqual(decoded, 1337.1337)
    def test_encodeStringConversion(self):
        # Verify escaping of control characters and, depending on the
        # encode_html_chars flag, of the HTML-sensitive characters < > &.
        input = "A string \\ / \b \f \n \r \t </script> &"
        not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
                            '\\r \\t <\\/script> &"')
        html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
                        '\\u003c\\/script\\u003e \\u0026"')

        def helper(expected_output, **encode_kwargs):
            # Encode, then check round-trip against both json and ujson.
            output = ujson.encode(input, **encode_kwargs)
            self.assertEqual(input, json.loads(output))
            self.assertEqual(output, expected_output)
            self.assertEqual(input, ujson.decode(output))

        # Default behavior assumes encode_html_chars=False.
        helper(not_html_encoded, ensure_ascii=True)
        helper(not_html_encoded, ensure_ascii=False)

        # Make sure explicit encode_html_chars=False works.
        helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
        helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)

        # Make sure explicit encode_html_chars=True does the encoding.
        helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
        helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
    def test_doubleLongIssue(self):
        # Large negative integers must survive both json and ujson round-trips.
        sut = {u('a'): -4342969734183514}
        encoded = json.dumps(sut)
        decoded = json.loads(encoded)
        self.assertEqual(sut, decoded)
        encoded = ujson.encode(sut, double_precision=15)
        decoded = ujson.decode(encoded)
        self.assertEqual(sut, decoded)
    def test_doubleLongDecimalIssue(self):
        # High-magnitude doubles with many decimals must round-trip intact.
        sut = {u('a'): -12345678901234.56789012}
        encoded = json.dumps(sut)
        decoded = json.loads(encoded)
        self.assertEqual(sut, decoded)
        encoded = ujson.encode(sut, double_precision=15)
        decoded = ujson.decode(encoded)
        self.assertEqual(sut, decoded)
def test_encodeNonCLocale(self):
import locale
savedlocale = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, 'it_IT.UTF-8')
except:
try:
locale.setlocale(locale.LC_NUMERIC, 'Italian_Italy')
except:
raise nose.SkipTest('Could not set locale for testing')
self.assertEqual(ujson.loads(ujson.dumps(4.78e60)), 4.78e60)
self.assertEqual(ujson.loads('4.78', precise_float=True), 4.78)
locale.setlocale(locale.LC_NUMERIC, savedlocale)
    def test_encodeDecodeLongDecimal(self):
        # Smoke test: decoding a high-precision double must not raise.
        sut = {u('a'): -528656961.4399388}
        encoded = ujson.dumps(sut, double_precision=15)
        ujson.decode(encoded)
    def test_decimalDecodeTestPrecise(self):
        # precise_float=True must reproduce the exact original value.
        sut = {u('a'): 4.56}
        encoded = ujson.encode(sut)
        decoded = ujson.decode(encoded, precise_float=True)
        self.assertEqual(sut, decoded)
    def test_encodeDoubleTinyExponential(self):
        # Doubles with tiny exponents must round-trip; known broken on
        # 64-bit Windows under Python 2, hence the skip.
        if compat.is_platform_windows() and not compat.PY3:
            raise nose.SkipTest("buggy on win-64 for py2")

        num = 1e-40
        self.assertEqual(num, ujson.decode(ujson.encode(num)))
        num = 1e-100
        self.assertEqual(num, ujson.decode(ujson.encode(num)))
        num = -1e-45
        self.assertEqual(num, ujson.decode(ujson.encode(num)))
        num = -1e-145
        # Only approximately equal for this value, so allow float tolerance.
        self.assertTrue(np.allclose(num, ujson.decode(ujson.encode(num))))
    def test_encodeDictWithUnicodeKeys(self):
        # Smoke test: encoding dicts with unicode keys must not raise.
        # NOTE(review): each literal repeats the same key, so the dicts
        # collapse to a single entry — presumably intentional as a pure
        # smoke test, but worth confirming.
        input = {u("key1"): u("value1"), u("key1"):
                 u("value1"), u("key1"): u("value1"),
                 u("key1"): u("value1"), u("key1"):
                 u("value1"), u("key1"): u("value1")}
        output = ujson.encode(input)

        input = {u("بن"): u("value1"), u("بن"): u("value1"),
                 u("بن"): u("value1"), u("بن"): u("value1"),
                 u("بن"): u("value1"), u("بن"): u("value1"),
                 u("بن"): u("value1")}
        output = ujson.encode(input)  # noqa
    def test_encodeDoubleConversion(self):
        # pi must round-trip to at least 5 decimal places.
        input = math.pi
        output = ujson.encode(input)
        self.assertEqual(round(input, 5), round(json.loads(output), 5))
        self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
    def test_encodeWithDecimal(self):
        # A whole-number float must keep its ".0" suffix when encoded.
        input = 1.0
        output = ujson.encode(input)
        self.assertEqual(output, "1.0")
    def test_encodeDoubleNegConversion(self):
        # Negative doubles must round-trip to at least 5 decimal places.
        input = -math.pi
        output = ujson.encode(input)
        self.assertEqual(round(input, 5), round(json.loads(output), 5))
        self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
    def test_encodeArrayOfNestedArrays(self):
        # Deeply nested empty lists must round-trip, both as Python lists
        # and via the numpy=True decode path.
        input = [[[[]]]] * 20
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        # self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        input = np.array(input)
        tm.assert_numpy_array_equal(input, ujson.decode(
            output, numpy=True, dtype=input.dtype))
    def test_encodeArrayOfDoubles(self):
        # A long list of doubles must round-trip, including via numpy=True.
        input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        # self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        tm.assert_numpy_array_equal(
            np.array(input), ujson.decode(output, numpy=True))
    def test_doublePrecisionTest(self):
        # double_precision must control how many decimals are emitted.
        input = 30.012345678901234
        output = ujson.encode(input, double_precision=15)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(input, ujson.decode(output))

        output = ujson.encode(input, double_precision=9)
        self.assertEqual(round(input, 9), json.loads(output))
        self.assertEqual(round(input, 9), ujson.decode(output))

        output = ujson.encode(input, double_precision=3)
        self.assertEqual(round(input, 3), json.loads(output))
        self.assertEqual(round(input, 3), ujson.decode(output))
    def test_invalidDoublePrecision(self):
        # Out-of-range precisions raise ValueError; wrong types, TypeError.
        input = 30.12345678901234567890
        self.assertRaises(ValueError, ujson.encode, input, double_precision=20)
        self.assertRaises(ValueError, ujson.encode, input, double_precision=-1)

        # will throw typeError
        self.assertRaises(TypeError, ujson.encode, input, double_precision='9')
        # will throw typeError
        self.assertRaises(TypeError, ujson.encode,
                          input, double_precision=None)
    def test_encodeStringConversion2(self):
        # Control characters must be escaped exactly as the json module does.
        input = "A string \\ / \b \f \n \r \t"
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, '"A string \\\\ \\/ \\b \\f \\n \\r \\t"')
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_decodeUnicodeConversion(self):
        # Placeholder — intentionally empty.
        pass
    def test_encodeUnicodeConversion1(self):
        # Mixed Latin/Arabic text must encode like json.dumps and round-trip.
        input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeControlEscaping(self):
        # A bare control character must be escaped and round-trip cleanly.
        input = "\x19"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(input, dec)
        self.assertEqual(enc, json_unicode(input))
    def test_encodeUnicodeConversion2(self):
        # Non-ASCII byte sequence must match json's encoding and round-trip.
        input = "\xe6\x97\xa5\xd1\x88"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeUnicodeSurrogatePair(self):
        # Characters outside the BMP (encoded as surrogate pairs) must
        # round-trip; not supported on Python 2.5/2.6.
        _skip_if_python_ver(2, 5)
        _skip_if_python_ver(2, 6)
        input = "\xf0\x90\x8d\x86"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)

        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeUnicode4BytesUTF8(self):
        # 4-byte UTF-8 sequences followed by ASCII must round-trip;
        # not supported on Python 2.5/2.6.
        _skip_if_python_ver(2, 5)
        _skip_if_python_ver(2, 6)
        input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)

        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeUnicode4BytesUTF8Highest(self):
        # The highest valid 4-byte UTF-8 sequence must round-trip;
        # not supported on Python 2.5/2.6.
        _skip_if_python_ver(2, 5)
        _skip_if_python_ver(2, 6)
        input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
        enc = ujson.encode(input)

        dec = ujson.decode(enc)

        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
def test_encodeArrayInArray(self):
input = [[[[]]]]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeIntConversion(self):
input = 31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeIntNegConversion(self):
input = -31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeLongNegConversion(self):
input = -9223372036854775808
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
def test_encodeListConversion(self):
input = [1, 2, 3, 4]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeDictConversion(self):
input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(input) # noqa
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeNoneConversion(self):
input = None
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeTrueConversion(self):
input = True
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeFalseConversion(self):
input = False
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
def test_encodeDatetimeConversion(self):
ts = time.time()
input = datetime.datetime.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
expected = calendar.timegm(input.utctimetuple())
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeDateConversion(self):
ts = time.time()
input = datetime.date.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
tup = (input.year, input.month, input.day, 0, 0, 0)
expected = calendar.timegm(tup)
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeTimeConversion(self):
tests = [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
]
for test in tests:
output = ujson.encode(test)
expected = '"%s"' % test.isoformat()
self.assertEqual(expected, output)
    def test_encodeTimeConversion_pytz(self):
        """A pytz-aware time encodes via isoformat without crashing."""
        # GH11473 to_json segfaults with timezone-aware datetimes
        tm._skip_if_no_pytz()
        import pytz
        test = datetime.time(10, 12, 15, 343243, pytz.utc)
        output = ujson.encode(test)
        expected = '"%s"' % test.isoformat()
        self.assertEqual(expected, output)
    def test_encodeTimeConversion_dateutil(self):
        """A dateutil-aware time encodes via isoformat without crashing."""
        # GH11473 to_json segfaults with timezone-aware datetimes
        tm._skip_if_no_dateutil()
        import dateutil
        test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
        output = ujson.encode(test)
        expected = '"%s"' % test.isoformat()
        self.assertEqual(expected, output)
def test_nat(self):
input = NaT
assert ujson.encode(input) == 'null', "Expected null"
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise nose.SkipTest("numpy version < 1.7.0, is "
"{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
def test_datetime_units(self):
from pandas.lib import Timestamp
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
self.assertEqual(roundtrip, stamp.value // 10**9)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
self.assertEqual(roundtrip, stamp.value // 10**6)
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
self.assertEqual(roundtrip, stamp.value // 10**3)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
self.assertEqual(roundtrip, stamp.value)
self.assertRaises(ValueError, ujson.encode, val, date_unit='foo')
def test_encodeToUTF8(self):
_skip_if_python_ver(2, 5)
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input, ensure_ascii=False)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input, ensure_ascii=False))
self.assertEqual(dec, json.loads(enc))
def test_decodeFromUnicode(self):
input = u("{\"obj\": 31337}")
dec1 = ujson.decode(input)
dec2 = ujson.decode(str(input))
self.assertEqual(dec1, dec2)
def test_encodeRecursionMax(self):
# 8 is the max recursion depth
class O2:
member = 0
pass
class O1:
member = 0
pass
input = O1()
input.member = O2()
input.member.member = input
try:
output = ujson.encode(input) # noqa
assert False, "Expected overflow exception"
except(OverflowError):
pass
def test_encodeDoubleNan(self):
input = np.nan
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleInf(self):
input = np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleNegInf(self):
input = -np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_decodeJibberish(self):
input = "fdsa sda v9sa fdsa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayStart(self):
input = "["
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectStart(self):
input = "{"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayEnd(self):
input = "]"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeArrayDepthTooBig(self):
input = '[' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectEnd(self):
input = "}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeObjectDepthTooBig(self):
input = '{' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUnterminated(self):
input = "\"TESTING"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUntermEscapeSequence(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringBadEscape(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeTrueBroken(self):
input = "tru"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeFalseBroken(self):
input = "fa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNullBroken(self):
input = "n"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenDictKeyTypeLeakTest(self):
input = '{{1337:""}}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except ValueError:
continue
assert False, "Wrong exception"
def test_decodeBrokenDictLeakTest(self):
input = '{{"key":"}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeBrokenListLeakTest(self):
input = '[[[true'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeDictWithNoKey(self):
input = "{{{{31337}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoColonOrValue(self):
input = "{{{{\"key\"}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoValue(self):
input = "{{{{\"key\":}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNumericIntPos(self):
input = "31337"
self.assertEqual(31337, ujson.decode(input))
def test_decodeNumericIntNeg(self):
input = "-31337"
self.assertEqual(-31337, ujson.decode(input))
def test_encodeUnicode4BytesUTF8Fail(self):
_skip_if_python_ver(3)
input = "\xfd\xbf\xbf\xbf\xbf\xbf"
try:
enc = ujson.encode(input) # noqa
assert False, "Expected exception"
except OverflowError:
pass
def test_encodeNullCharacter(self):
input = "31337 \x00 1337"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = "\x00"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
self.assertEqual('" \\u0000\\r\\n "', ujson.dumps(u(" \u0000\r\n ")))
pass
def test_decodeNullCharacter(self):
input = "\"31337 \\u0000 31337\""
self.assertEqual(ujson.decode(input), json.loads(input))
def test_encodeListLongConversion(self):
input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807, 9223372036854775807]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input),
ujson.decode(output, numpy=True,
dtype=np.int64))
pass
def test_encodeLongConversion(self):
input = 9223372036854775807
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_numericIntExp(self):
input = "1337E40"
output = ujson.decode(input)
self.assertEqual(output, json.loads(input))
def test_numericIntFrcExp(self):
input = "1.337E40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpEPLUS(self):
input = "1337E+9"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpePLUS(self):
input = "1.337e+40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpE(self):
input = "1337E40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpe(self):
input = "1337e40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpEMinus(self):
input = "1.337E-4"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpeMinus(self):
input = "1.337e-4"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_dumpToFile(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
self.assertEqual("[1,2,3]", f.getvalue())
def test_dumpToFileLikeObject(self):
class filelike:
def __init__(self):
self.bytes = ''
def write(self, bytes):
self.bytes += bytes
f = filelike()
ujson.dump([1, 2, 3], f)
self.assertEqual("[1,2,3]", f.bytes)
def test_dumpFileArgsError(self):
try:
ujson.dump([], '')
except TypeError:
pass
else:
assert False, 'expected TypeError'
def test_loadFile(self):
f = StringIO("[1,2,3,4]")
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = StringIO("[1,2,3,4]")
tm.assert_numpy_array_equal(
np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileLikeObject(self):
class filelike:
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
f = filelike()
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = filelike()
tm.assert_numpy_array_equal(
np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileArgsError(self):
try:
ujson.load("[]")
except TypeError:
pass
else:
assert False, "expected TypeError"
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encodeNumericOverflow(self):
try:
ujson.encode(12839128391289382193812939)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_encodeNumericOverflowNested(self):
for n in range(0, 100):
class Nested:
x = 12839128391289382193812939
nested = Nested()
try:
ujson.encode(nested)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_decodeNumberWith32bitSignBit(self):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
boundary1 = 2**31 # noqa
boundary2 = 2**32 # noqa
docs = (
'{"id": 3590016419}',
'{"id": %s}' % 2**31,
'{"id": %s}' % 2**32,
'{"id": %s}' % ((2**32) - 1),
)
results = (3590016419, 2**31, 2**32, 2**32 - 1)
for doc, result in zip(docs, results):
self.assertEqual(ujson.decode(doc)['id'], result)
def test_encodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
input = base * 1024 * 1024 * 2
output = ujson.encode(input) # noqa
def test_decodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
quote = compat.str_to_bytes("\"")
input = quote + (base * 1024 * 1024 * 2) + quote
output = ujson.decode(input) # noqa
def test_toDict(self):
d = {u("key"): 31337}
class DictTest:
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
self.assertEqual(dec, d)
    def test_defaultHandler(self):
        """default_handler converts otherwise-unencodable objects.

        Without a handler, encoding an arbitrary object raises
        OverflowError; with one, the handler's return value is encoded
        (recursively, so it may itself return another object).
        """
        class _TestObject(object):

            def __init__(self, val):
                self.val = val

            # Returns a fresh unencodable object, so a handler that keeps
            # returning objects would recurse.
            @property
            def recursive_attr(self):
                return _TestObject("recursive_attr")

            def __str__(self):
                return str(self.val)

        # No handler: plain objects cannot be encoded.
        self.assertRaises(OverflowError, ujson.encode, _TestObject("foo"))
        self.assertEqual('"foo"', ujson.encode(_TestObject("foo"),
                                               default_handler=str))

        def my_handler(obj):
            return "foobar"
        self.assertEqual('"foobar"', ujson.encode(_TestObject("foo"),
                                                  default_handler=my_handler))

        # Exceptions raised inside the handler propagate to the caller.
        def my_handler_raises(obj):
            raise TypeError("I raise for anything")
        with tm.assertRaisesRegexp(TypeError, "I raise for anything"):
            ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)

        def my_int_handler(obj):
            return 42
        self.assertEqual(
            42, ujson.decode(ujson.encode(_TestObject("foo"),
                                          default_handler=my_int_handler)))

        # Handler may return a type ujson knows natively (datetime here).
        def my_obj_handler(obj):
            return datetime.datetime(2013, 2, 3)
        self.assertEqual(
            ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))),
            ujson.decode(ujson.encode(_TestObject("foo"),
                                      default_handler=my_obj_handler)))

        # The handler is applied per-element inside containers.
        l = [_TestObject("foo"), _TestObject("bar")]
        self.assertEqual(json.loads(json.dumps(l, default=str)),
                         ujson.decode(ujson.encode(l, default_handler=str)))
class NumpyJSONTests(TestCase):
    """Round-trip tests for ujson's numpy scalar and array support."""

    def testBool(self):
        # A numpy bool scalar round-trips through encode/decode.
        b = np.bool(True)
        self.assertEqual(ujson.decode(ujson.encode(b)), b)

    def testBoolArray(self):
        # A bool array round-trips once the decoded list is re-cast.
        inpt = np.array([True, False, True, True, False, True, False, False],
                        dtype=np.bool)
        outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
        tm.assert_numpy_array_equal(inpt, outp)

    def testInt(self):
        # Every signed/unsigned fixed-width int scalar round-trips.
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)

    def testIntArray(self):
        # Arrays of every int dtype round-trip after re-casting.
        arr = np.arange(100, dtype=np.int)
        dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
                  np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
        for dtype in dtypes:
            inpt = arr.astype(dtype)
            outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
            tm.assert_numpy_array_equal(inpt, outp)

    def testIntMax(self):
        # The maximum value of each int dtype round-trips exactly.
        num = np.int(np.iinfo(np.int).max)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        # 64-bit maxima only checked on 64-bit platforms.
        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)

    def testFloat(self):
        # Float scalars round-trip at each precision.
        num = np.float(256.2013)
        self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)

        num = np.float32(256.2013)
        self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)

        num = np.float64(256.2013)
        self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)

    def testFloatArray(self):
        # Float arrays round-trip to within double_precision=15 digits.
        arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
        dtypes = (np.float, np.float32, np.float64)
        for dtype in dtypes:
            inpt = arr.astype(dtype)
            outp = np.array(ujson.decode(ujson.encode(
                inpt, double_precision=15)), dtype=dtype)
            tm.assert_almost_equal(inpt, outp)

    def testFloatMax(self):
        # Near-maximum float values round-trip approximately.
        num = np.float(np.finfo(np.float).max / 10)
        tm.assert_almost_equal(np.float(ujson.decode(
            ujson.encode(num, double_precision=15))), num, 15)

        num = np.float32(np.finfo(np.float32).max / 10)
        tm.assert_almost_equal(np.float32(ujson.decode(
            ujson.encode(num, double_precision=15))), num, 15)

        num = np.float64(np.finfo(np.float64).max / 10)
        tm.assert_almost_equal(np.float64(ujson.decode(
            ujson.encode(num, double_precision=15))), num, 15)

    def testArrays(self):
        # Multi-dimensional arrays round-trip in plain and numpy decode modes.
        arr = np.arange(100)

        arr = arr.reshape((10, 10))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)

        arr = arr.reshape((5, 5, 4))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)

        arr = arr.reshape((100, 1))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)

        arr = np.arange(96)
        arr = arr.reshape((2, 2, 2, 2, 3, 2))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)

        # Object arrays (mixed element types) also round-trip.
        l = ['a', list(), dict(), dict(), list(),
             42, 97.8, ['a', 'b'], {'key': 'val'}]
        arr = np.array(l)
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)

        arr = np.arange(100.202, 200.202, 1, dtype=np.float32)
        arr = arr.reshape((5, 5, 4))
        outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
        tm.assert_almost_equal(arr, outp)
        outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
        tm.assert_almost_equal(arr, outp)

    def testOdArray(self):
        # A zero-dimensional array is not encodable and raises TypeError.
        def will_raise():
            ujson.encode(np.array(1))

        self.assertRaises(TypeError, will_raise)

    def testArrayNumpyExcept(self):
        # Heterogeneous/ragged input cannot be decoded in numpy mode;
        # each case must raise the exact expected exception type.
        input = ujson.dumps([42, {}, 'a'])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(TypeError):
            pass
        except:
            assert False, "Wrong exception"

        input = ujson.dumps(['a', 'b', [], 'c'])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

        input = ujson.dumps([['a'], 42])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

        input = ujson.dumps([42, ['a'], 42])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

        input = ujson.dumps([{}, []])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

        input = ujson.dumps([42, None])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(TypeError):
            pass
        except:
            assert False, "Wrong exception"

        # Labelled mode has additional shape requirements.
        input = ujson.dumps([{'a': 'b'}])
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

        input = ujson.dumps({'a': {'b': {'c': 42}}})
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

        input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

    def testArrayNumpyLabelled(self):
        # Labelled numpy decode returns (values, row labels, column labels).
        input = {'a': []}
        output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
        self.assertTrue((np.empty((1, 0)) == output[0]).all())
        self.assertTrue((np.array(['a']) == output[1]).all())
        self.assertTrue(output[2] is None)

        input = [{'a': 42}]
        output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
        self.assertTrue((np.array([42]) == output[0]).all())
        self.assertTrue(output[1] is None)
        self.assertTrue((np.array([u('a')]) == output[2]).all())

        # Write out the dump explicitly so there is no dependency on iteration
        # order GH10837
        input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
                       '{"a": 2.4, "b": 78}]')
        output = ujson.loads(input_dumps, numpy=True, labelled=True)
        expectedvals = np.array(
            [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
        self.assertTrue((expectedvals == output[0]).all())
        self.assertTrue(output[1] is None)
        self.assertTrue((np.array([u('a'), 'b']) == output[2]).all())

        input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
                       '"3": {"a": 2.4, "b": 78}}')
        output = ujson.loads(input_dumps, numpy=True, labelled=True)
        expectedvals = np.array(
            [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
        self.assertTrue((expectedvals == output[0]).all())
        self.assertTrue((np.array(['1', '2', '3']) == output[1]).all())
        self.assertTrue((np.array(['a', 'b']) == output[2]).all())
class PandasJSONTests(TestCase):
    def testDataFrame(self):
        """A 2x3 DataFrame round-trips under every orient."""
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
                       'a', 'b'], columns=['x', 'y', 'z'])

        # column indexed
        outp = DataFrame(ujson.decode(ujson.encode(df)))
        self.assertTrue((df == outp).values.all())
        tm.assert_index_equal(df.columns, outp.columns)
        tm.assert_index_equal(df.index, outp.index)

        # split orient decodes to constructor kwargs.
        dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
        outp = DataFrame(**dec)
        self.assertTrue((df == outp).values.all())
        tm.assert_index_equal(df.columns, outp.columns)
        tm.assert_index_equal(df.index, outp.index)

        # records orient loses the index; restore it before comparing.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
        outp.index = df.index
        self.assertTrue((df == outp).values.all())
        tm.assert_index_equal(df.columns, outp.columns)

        # values orient keeps only the data.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
        outp.index = df.index
        self.assertTrue((df.values == outp.values).all())

        # index orient produces the transpose.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
        self.assertTrue((df.transpose() == outp).values.all())
        tm.assert_index_equal(df.transpose().columns, outp.columns)
        tm.assert_index_equal(df.transpose().index, outp.index)
    def testDataFrameNumpy(self):
        """DataFrame round-trips also hold with numpy-mode decoding."""
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
                       'a', 'b'], columns=['x', 'y', 'z'])

        # column indexed
        outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
        self.assertTrue((df == outp).values.all())
        tm.assert_index_equal(df.columns, outp.columns)
        tm.assert_index_equal(df.index, outp.index)

        # split orient decodes to constructor kwargs.
        dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
                                       numpy=True))
        outp = DataFrame(**dec)
        self.assertTrue((df == outp).values.all())
        tm.assert_index_equal(df.columns, outp.columns)
        tm.assert_index_equal(df.index, outp.index)

        # index orient produces the transpose.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"),
                                      numpy=True))
        self.assertTrue((df.transpose() == outp).values.all())
        tm.assert_index_equal(df.transpose().columns, outp.columns)
        tm.assert_index_equal(df.transpose().index, outp.index)
def testDataFrameNested(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
'a', 'b'], columns=['x', 'y', 'z'])
nested = {'df1': df, 'df2': df.copy()}
exp = {'df1': ujson.decode(ujson.encode(df)),
'df2': ujson.decode(ujson.encode(df))}
self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
'df2': ujson.decode(ujson.encode(df, orient="index"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="index")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
'df2': ujson.decode(ujson.encode(df, orient="records"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="records")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
'df2': ujson.decode(ujson.encode(df, orient="values"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="values")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
'df2': ujson.decode(ujson.encode(df, orient="split"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="split")) == exp)
    def testDataFrameNumpyLabelled(self):
        """Labelled numpy decode returns (values, index, columns) tuples
        that rebuild the frame (transposed for the default orient)."""
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
                       'a', 'b'], columns=['x', 'y', 'z'])

        # column indexed
        outp = DataFrame(*ujson.decode(ujson.encode(df),
                                       numpy=True, labelled=True))
        self.assertTrue((df.T == outp).values.all())
        tm.assert_index_equal(df.T.columns, outp.columns)
        tm.assert_index_equal(df.T.index, outp.index)

        # records orient loses the index; restore it before comparing.
        outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"),
                                       numpy=True, labelled=True))
        outp.index = df.index
        self.assertTrue((df == outp).values.all())
        tm.assert_index_equal(df.columns, outp.columns)

        outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"),
                                       numpy=True, labelled=True))
        self.assertTrue((df == outp).values.all())
        tm.assert_index_equal(df.columns, outp.columns)
        tm.assert_index_equal(df.index, outp.index)
    def testSeries(self):
        """A named Series round-trips under every orient (name is lost;
        default/index orients stringify the index keys)."""
        s = Series([10, 20, 30, 40, 50, 60], name="series",
                   index=[6, 7, 8, 9, 10, 15]).sort_values()

        # column indexed
        outp = Series(ujson.decode(ujson.encode(s))).sort_values()
        exp = Series([10, 20, 30, 40, 50, 60],
                     index=['6', '7', '8', '9', '10', '15'])
        tm.assert_series_equal(outp, exp)

        outp = Series(ujson.decode(ujson.encode(s), numpy=True)).sort_values()
        tm.assert_series_equal(outp, exp)

        # split orient decodes to constructor kwargs (keeps int index).
        dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
        outp = Series(**dec)
        tm.assert_series_equal(outp, s)

        dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
                                       numpy=True))
        outp = Series(**dec)
        # NOTE(review): this numpy-mode split round-trip is never asserted;
        # `outp` is rebuilt below before any comparison — consider adding
        # tm.assert_series_equal(outp, s) here.

        exp_np = Series(np.array([10, 20, 30, 40, 50, 60]))
        exp_pd = Series([10, 20, 30, 40, 50, 60])
        outp = Series(ujson.decode(ujson.encode(s, orient="records"),
                                   numpy=True))
        tm.assert_series_equal(outp, exp_np)

        outp = Series(ujson.decode(ujson.encode(s, orient="records")))
        exp = Series([10, 20, 30, 40, 50, 60])
        tm.assert_series_equal(outp, exp_pd)

        outp = Series(ujson.decode(ujson.encode(s, orient="values"),
                                   numpy=True))
        tm.assert_series_equal(outp, exp_np)

        outp = Series(ujson.decode(ujson.encode(s, orient="values")))
        tm.assert_series_equal(outp, exp_pd)

        # index orient stringifies keys like the default orient.
        outp = Series(ujson.decode(ujson.encode(
            s, orient="index"))).sort_values()
        exp = Series([10, 20, 30, 40, 50, 60],
                     index=['6', '7', '8', '9', '10', '15'])
        tm.assert_series_equal(outp, exp)

        outp = Series(ujson.decode(ujson.encode(
            s, orient="index"), numpy=True)).sort_values()
        tm.assert_series_equal(outp, exp)
def testSeriesNested(self):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {'s1': s, 's2': s.copy()}
exp = {'s1': ujson.decode(ujson.encode(s)),
's2': ujson.decode(ujson.encode(s))}
self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
's2': ujson.decode(ujson.encode(s, orient="split"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="split")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
's2': ujson.decode(ujson.encode(s, orient="records"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="records")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
's2': ujson.decode(ujson.encode(s, orient="values"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="values")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
's2': ujson.decode(ujson.encode(s, orient="index"))}
self.assertTrue(ujson.decode(
ujson.encode(nested, orient="index")) == exp)
    def testIndex(self):
        """A named Index round-trips under every orient (name must be
        re-supplied except for split, which carries it)."""
        i = Index([23, 45, 18, 98, 43, 11], name="index")

        # column indexed
        outp = Index(ujson.decode(ujson.encode(i)), name='index')
        tm.assert_index_equal(i, outp)

        outp = Index(ujson.decode(ujson.encode(i), numpy=True), name='index')
        tm.assert_index_equal(i, outp)

        # split orient keeps the name in the decoded dict.
        dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
        outp = Index(**dec)
        tm.assert_index_equal(i, outp)
        self.assertTrue(i.name == outp.name)

        dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
                                       numpy=True))
        outp = Index(**dec)
        tm.assert_index_equal(i, outp)
        self.assertTrue(i.name == outp.name)

        outp = Index(ujson.decode(ujson.encode(i, orient="values")),
                     name='index')
        tm.assert_index_equal(i, outp)

        outp = Index(ujson.decode(ujson.encode(i, orient="values"),
                                  numpy=True), name='index')
        tm.assert_index_equal(i, outp)

        outp = Index(ujson.decode(ujson.encode(i, orient="records")),
                     name='index')
        tm.assert_index_equal(i, outp)

        outp = Index(ujson.decode(ujson.encode(i, orient="records"),
                                  numpy=True), name='index')
        tm.assert_index_equal(i, outp)

        outp = Index(ujson.decode(ujson.encode(i, orient="index")),
                     name='index')
        tm.assert_index_equal(i, outp)

        outp = Index(ujson.decode(ujson.encode(i, orient="index"),
                                  numpy=True), name='index')
        tm.assert_index_equal(i, outp)
    def test_datetimeindex(self):
        """A DatetimeIndex and a datetime-indexed Series round-trip with
        date_unit='ns' (decoded index comes back as int64 nanoseconds)."""
        from pandas.tseries.index import date_range

        rng = date_range('1/1/2000', periods=20)

        encoded = ujson.encode(rng, date_unit='ns')
        decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
        tm.assert_index_equal(rng, decoded)

        ts = Series(np.random.randn(len(rng)), index=rng)
        decoded = Series(ujson.decode(ujson.encode(ts, date_unit='ns')))
        # Re-interpret the decoded integer keys as nanosecond timestamps.
        idx_values = decoded.index.values.astype(np.int64)
        decoded.index = DatetimeIndex(idx_values)
        tm.assert_series_equal(ts, decoded)
def test_decodeArrayTrailingCommaFail(self):
input = "[31337,]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayLeadingCommaFail(self):
input = "[,31337]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayOnlyCommaFail(self):
    """An array containing only a comma is invalid JSON -> ValueError."""
    self.assertRaises(ValueError, ujson.decode, "[,]")
def test_decodeArrayUnmatchedBracketFail(self):
    """An unmatched closing bracket is invalid JSON -> ValueError."""
    self.assertRaises(ValueError, ujson.decode, "[]]")
def test_decodeArrayEmpty(self):
    """'[]' decodes to an empty list.

    The original was a pure smoke test; assert the decoded value too.
    """
    self.assertEqual([], ujson.decode("[]"))
def test_decodeArrayOneItem(self):
    """A single-element array decodes to the matching one-element list.

    The original was a pure smoke test; assert the decoded value too.
    """
    self.assertEqual([31337], ujson.decode("[31337]"))
def test_decodeBigValue(self):
    """int64 max (2**63 - 1) decodes successfully and exactly.

    The original was a pure smoke test; assert the decoded value too.
    """
    self.assertEqual(9223372036854775807, ujson.decode("9223372036854775807"))
def test_decodeSmallValue(self):
    """int64 min (-2**63) decodes successfully and exactly.

    The original was a pure smoke test; assert the decoded value too.
    """
    self.assertEqual(-9223372036854775808,
                     ujson.decode("-9223372036854775808"))
def test_decodeTooBigValue(self):
    """One past int64 max cannot be decoded -> ValueError."""
    self.assertRaises(ValueError, ujson.decode, "9223372036854775808")
def test_decodeTooSmallValue(self):
    """A value below int64 min cannot be decoded -> ValueError."""
    self.assertRaises(ValueError, ujson.decode, "-90223372036854775809")
def test_decodeVeryTooBigValue(self):
    """A value far above int64 max cannot be decoded -> ValueError.

    NOTE(review): the input is identical to test_decodeTooBigValue;
    presumably a "very" larger magnitude was intended — confirm upstream
    before changing the literal.
    """
    self.assertRaises(ValueError, ujson.decode, "9223372036854775808")
def test_decodeVeryTooSmallValue(self):
    """A value far below int64 min cannot be decoded -> ValueError.

    NOTE(review): the input duplicates test_decodeTooSmallValue — confirm
    upstream before changing the literal.
    """
    self.assertRaises(ValueError, ujson.decode, "-90223372036854775809")
def test_decodeWithTrailingWhitespaces(self):
    """Trailing whitespace after a JSON value is tolerated.

    The original was a pure smoke test; assert the decoded value too.
    """
    self.assertEqual({}, ujson.decode("{}\n\t "))
def test_decodeWithTrailingNonWhitespaces(self):
    """Trailing non-whitespace garbage after a JSON value -> ValueError."""
    self.assertRaises(ValueError, ujson.decode, "{}\n\t a")
def test_decodeArrayWithBigInt(self):
    """An array element beyond the supported integer range -> ValueError."""
    self.assertRaises(ValueError, ujson.loads, '[18446098363113800555]')
def test_decodeArrayFaultyUnicode(self):
    """Decoding the faulty input raises ValueError.

    NOTE(review): despite the name, the input is byte-for-byte the same
    big-integer array used in test_decodeArrayWithBigInt — it looks
    copy-pasted and probably should contain malformed unicode instead;
    confirm upstream before changing it.
    """
    self.assertRaises(ValueError, ujson.loads, '[18446098363113800555]')
def test_decodeFloatingPointAdditionalTests(self):
    """ujson.loads parses decimal literals to ~15 significant places."""
    places = 15
    # Same literals as before, now table-driven: the expected value is
    # Python's own parse of the identical text.
    literals = [
        "-1.1234567893", "-1.234567893", "-1.34567893", "-1.4567893",
        "-1.567893", "-1.67893", "-1.7893", "-1.893", "-1.3",
        "1.1234567893", "1.234567893", "1.34567893", "1.4567893",
        "1.567893", "1.67893", "1.7893", "1.893", "1.3",
    ]
    for text in literals:
        self.assertAlmostEqual(float(text), ujson.loads(text), places=places)
def test_encodeBigSet(self):
    """Encoding a large (100k element) set must not raise."""
    ujson.encode(set(range(100000)))
def test_encodeEmptySet(self):
    """An empty set encodes as an empty JSON array."""
    self.assertEqual("[]", ujson.encode(set()))
def test_encodeSet(self):
    """Every decoded element of an encoded set is a member of the original."""
    original = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
    decoded = ujson.decode(ujson.encode(original))
    for element in decoded:
        self.assertTrue(element in original)
def _clean_dict(d):
    """Return a copy of *d* with every key coerced to str."""
    return {str(key): value for key, value in compat.iteritems(d)}
# Allow running this test module directly; nose drops into pdb on errors
# and failures (--pdb, --pdb-failure) and leaves the interpreter alive
# afterwards (exit=False).
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| apache-2.0 |
deepfield/ibis | ibis/tests/all/test_temporal.py | 1 | 10827 | import sys
import pytest
import warnings
from pytest import param
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ibis
import ibis.expr.datatypes as dt
import ibis.tests.util as tu
from ibis.tests.backends import MapD
from ibis.pandas.execution.temporal import day_name
@pytest.mark.parametrize('attr', [
    'year', 'month', 'day',
])
@tu.skipif_unsupported
def test_date_extract(backend, alltypes, df, attr):
    """Extracting a date component from a date column matches pandas."""
    date_expr = getattr(alltypes.timestamp_col.date(), attr)()
    actual = date_expr.execute()
    pandas_vals = getattr(df.timestamp_col.dt, attr).astype('int32')
    pandas_vals = backend.default_series_rename(pandas_vals)
    backend.assert_series_equal(actual, pandas_vals)
@pytest.mark.parametrize('attr', [
    'year', 'month', 'day',
    'hour', 'minute', 'second'
])
@tu.skipif_unsupported
def test_timestamp_extract(backend, alltypes, df, attr):
    """Extracting a timestamp component matches pandas' .dt accessor."""
    component_expr = getattr(alltypes.timestamp_col, attr)()
    actual = component_expr.execute()
    pandas_vals = getattr(df.timestamp_col.dt, attr).astype('int32')
    pandas_vals = backend.default_series_rename(pandas_vals)
    backend.assert_series_equal(actual, pandas_vals)
@pytest.mark.parametrize('unit', [
    'Y', 'M', 'D',
    param('W', marks=pytest.mark.xfail),
    'h', 'm', 's', 'ms', 'us', 'ns'
])
@tu.skipif_unsupported
@tu.skipif_backend(MapD)
def test_timestamp_truncate(backend, alltypes, df, unit):
    """Truncating a timestamp column matches numpy datetime64 truncation."""
    truncated = alltypes.timestamp_col.truncate(unit)
    np_dtype = 'datetime64[{}]'.format(unit)
    pandas_result = pd.Series(df.timestamp_col.values.astype(np_dtype))
    pandas_result = backend.default_series_rename(pandas_result)
    backend.assert_series_equal(truncated.execute(), pandas_result)
@pytest.mark.parametrize('unit', [
    'Y', 'M', 'D',
    param('W', marks=pytest.mark.xfail)
])
@tu.skipif_unsupported
def test_date_truncate(backend, alltypes, df, unit):
    """Truncating a date column matches numpy datetime64 truncation."""
    truncated = alltypes.timestamp_col.date().truncate(unit)
    np_dtype = 'datetime64[{}]'.format(unit)
    pandas_result = pd.Series(df.timestamp_col.values.astype(np_dtype))
    pandas_result = backend.default_series_rename(pandas_result)
    backend.assert_series_equal(truncated.execute(), pandas_result)
@pytest.mark.parametrize('unit', [
    'Y',
    # Applying marks directly to a parameter value (pytest.mark.xfail('Q'))
    # is deprecated and removed in pytest 4; use pytest.param with marks=,
    # consistent with the rest of this module.
    param('Q', marks=pytest.mark.xfail),
    'M', 'W', 'D', 'h', 'm', 's',
    param('ms', marks=pytest.mark.xfail),
    param('us', marks=pytest.mark.xfail),
])
@tu.skipif_unsupported
def test_integer_to_interval_timestamp(backend, con, alltypes, df, unit):
    """Adding an int-column-derived interval to a timestamp matches pandas."""
    interval = alltypes.int_col.to_interval(unit=unit)
    expr = alltypes.timestamp_col + interval

    def convert_to_offset(x):
        # Map each integer to a DateOffset in the interval's resolution,
        # e.g. unit 'M' -> DateOffset(months=x)
        resolution = '{}s'.format(interval.type().resolution)
        return pd.offsets.DateOffset(**{resolution: x})

    with warnings.catch_warnings():
        # both the implementation and test code raise pandas
        # PerformanceWarning, because we use DateOffset addition
        warnings.simplefilter('ignore')
        result = con.execute(expr)
        offset = df.int_col.apply(convert_to_offset)
        expected = df.timestamp_col + offset
    expected = backend.default_series_rename(expected)
    backend.assert_series_equal(result, expected)
@pytest.mark.parametrize('unit', [
    'Y',
    # pytest.mark.xfail('Q') in a parameter list is deprecated (removed in
    # pytest 4); use pytest.param with marks=, as elsewhere in this module.
    param('Q', marks=pytest.mark.xfail),
    'M', 'W', 'D',
])
@tu.skipif_unsupported
def test_integer_to_interval_date(backend, con, alltypes, df, unit):
    """Adding an int-column-derived interval to a date column matches pandas."""
    interval = alltypes.int_col.to_interval(unit=unit)
    array = alltypes.date_string_col.split('/')
    month, day, year = array[0], array[1], array[2]
    # the original wrote `date_col = expr = ...`; the first binding of
    # `expr` was dead (immediately overwritten below), so it is dropped
    date_col = ibis.literal('-').join([
        '20' + year, month, day
    ]).cast('date')
    expr = date_col + interval
    result = con.execute(expr)

    def convert_to_offset(x):
        # Map each integer to a DateOffset in the interval's resolution
        resolution = '{}s'.format(interval.type().resolution)
        return pd.offsets.DateOffset(**{resolution: x})

    offset = df.int_col.apply(convert_to_offset)
    expected = pd.to_datetime(df.date_string_col) + offset
    expected = backend.default_series_rename(expected)
    backend.assert_series_equal(result, expected)
@pytest.mark.parametrize('unit', ['h', 'm', 's', 'ms', 'us'])
@tu.skipif_unsupported
def test_integer_to_interval_date_failure(backend, con, alltypes, df, unit):
    """Adding a sub-day interval to a date must raise TypeError."""
    interval = alltypes.int_col.to_interval(unit=unit)
    parts = alltypes.date_string_col.split('/')
    month, day, year = parts[0], parts[1], parts[2]
    date_col = ibis.literal('-').join(['20' + year, month, day]).cast('date')
    with pytest.raises(TypeError):
        date_col + interval
# Fixed scalar operands shared by the test_temporal_binop parametrization.
date_value = pd.Timestamp('2017-12-31')
timestamp_value = pd.Timestamp('2018-01-01 18:18:18')
# Each case pairs an ibis expression builder with its pandas equivalent;
# both receive (table-or-dataframe, backend).
@pytest.mark.parametrize(('expr_fn', 'expected_fn'), [
    param(lambda t, be: t.timestamp_col + ibis.interval(days=4),
          lambda t, be: t.timestamp_col + pd.Timedelta(days=4),
          id='timestamp-add-interval'),
    param(lambda t, be: t.timestamp_col - ibis.interval(days=17),
          lambda t, be: t.timestamp_col - pd.Timedelta(days=17),
          id='timestamp-subtract-interval'),
    param(lambda t, be: t.timestamp_col.date() + ibis.interval(days=4),
          lambda t, be: t.timestamp_col.dt.floor('d') + pd.Timedelta(days=4),
          id='date-add-interval'),
    param(lambda t, be: t.timestamp_col.date() - ibis.interval(days=14),
          lambda t, be: t.timestamp_col.dt.floor('d') - pd.Timedelta(days=14),
          id='date-subtract-interval'),
    param(lambda t, be: t.timestamp_col - ibis.timestamp(timestamp_value),
          # cast to the unit the backend reports so both sides compare equal
          lambda t, be: pd.Series(
              t.timestamp_col.sub(timestamp_value).values.astype(
                  'timedelta64[{}]'.format(be.returned_timestamp_unit))),
          id='timestamp-subtract-timestamp'),
    param(lambda t, be: t.timestamp_col.date() - ibis.date(date_value),
          lambda t, be: t.timestamp_col.dt.floor('d') - date_value,
          id='date-subtract-date'),
])
@tu.skipif_unsupported
def test_temporal_binop(backend, con, alltypes, df,
                        expr_fn, expected_fn):
    # Build both sides from the same table/backend pair and compare the
    # executed ibis result with the pandas computation.
    expr = expr_fn(alltypes, backend)
    expected = expected_fn(df, backend)
    result = con.execute(expr)
    expected = backend.default_series_rename(expected)
    backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    ('ibis_pattern', 'pandas_pattern'),
    [
        ('%Y%m%d', '%Y%m%d')
    ]
)
@tu.skipif_unsupported
def test_strftime(backend, con, alltypes, df, ibis_pattern, pandas_pattern):
    """strftime output matches pandas' dt.strftime for the same pattern."""
    formatted = alltypes.timestamp_col.strftime(ibis_pattern).execute()
    pandas_formatted = backend.default_series_rename(
        df.timestamp_col.dt.strftime(pandas_pattern))
    backend.assert_series_equal(formatted, pandas_formatted)
# Nanoseconds per unit; used by test_to_timestamp to convert a nanosecond
# timestamp into the integer resolution under test.
unit_factors = {
    's': int(1e9),
    'ms': int(1e6),
    'us': int(1e3),
}
@pytest.mark.parametrize(
    'unit', [
        'D', 's', 'ms',
        # pytest.mark.xfail('us') in a parameter list is deprecated
        # (removed in pytest 4); use pytest.param with marks=, consistent
        # with the rest of this module.
        param('us', marks=pytest.mark.xfail),
        param('ns', marks=pytest.mark.xfail),
    ]
)
@tu.skipif_unsupported
def test_to_timestamp(backend, con, alltypes, df, unit):
    """An integer in `unit` resolution converts back to the same timestamp."""
    if unit not in backend.supported_to_timestamp_units:
        pytest.skip(
            'Unit {!r} not supported by {} to_timestamp'.format(unit, backend))
    backend_unit = backend.returned_timestamp_unit
    factor = unit_factors[unit]

    ts = ibis.timestamp('2018-04-13 09:54:11.872832')
    pandas_ts = ibis.pandas.execute(ts).floor(unit).value

    # convert the now timestamp to the input unit being tested
    int_expr = ibis.literal(pandas_ts // factor)
    expr = int_expr.to_timestamp(unit)
    result = con.execute(expr)
    expected = pd.Timestamp(pandas_ts, unit='ns').floor(backend_unit)

    assert result == expected
@pytest.mark.parametrize(
    ('date', 'expected_index', 'expected_day'),
    [
        ('2017-01-01', 6, 'Sunday'),
        ('2017-01-02', 0, 'Monday'),
        ('2017-01-03', 1, 'Tuesday'),
        ('2017-01-04', 2, 'Wednesday'),
        ('2017-01-05', 3, 'Thursday'),
        ('2017-01-06', 4, 'Friday'),
        ('2017-01-07', 5, 'Saturday'),
    ]
)
@tu.skipif_unsupported
def test_day_of_week_scalar(backend, con, date, expected_index, expected_day):
    """day_of_week index() and full_name() agree with the calendar."""
    date_expr = ibis.literal(date).cast(dt.date)
    assert con.execute(date_expr.day_of_week.index()) == expected_index
    name = con.execute(date_expr.day_of_week.full_name())
    assert name.lower() == expected_day.lower()
@tu.skipif_unsupported
def test_day_of_week_column(backend, con, alltypes, df):
    """Column-wise day_of_week matches pandas dayofweek and day names."""
    dow = alltypes.timestamp_col.day_of_week
    # numeric weekday (Monday == 0)
    backend.assert_series_equal(
        dow.index().execute(),
        df.timestamp_col.dt.dayofweek.astype('int16'),
        check_names=False)
    # full weekday name
    backend.assert_series_equal(
        dow.full_name().execute(),
        day_name(df.timestamp_col.dt),
        check_names=False)
# Pair an ibis aggregate over day_of_week with the equivalent pandas
# Series computation.
@pytest.mark.parametrize(
    ('day_of_week_expr', 'day_of_week_pandas'),
    [
        (
            lambda t: t.timestamp_col.day_of_week.index().count(),
            lambda s: s.dt.dayofweek.count(),
        ),
        (
            lambda t: t.timestamp_col.day_of_week.full_name().length().sum(),
            lambda s: day_name(s.dt).str.len().sum(),
        )
    ]
)
@tu.skipif_unsupported
def test_day_of_week_column_group_by(
    backend, con, alltypes, df, day_of_week_expr, day_of_week_pandas
):
    # Aggregate per string_col group in ibis and compare against the same
    # groupby-apply done directly in pandas.
    expr = alltypes.groupby('string_col').aggregate(
        day_of_week_result=day_of_week_expr
    )
    schema = expr.schema()
    assert schema['day_of_week_result'] == dt.int64

    result = expr.execute().sort_values('string_col')
    expected = df.groupby('string_col').timestamp_col.apply(
        day_of_week_pandas
    ).reset_index().rename(columns=dict(timestamp_col='day_of_week_result'))
    # FIXME(#1536): Pandas backend should use query.schema().apply_to
    backend.assert_frame_equal(
        result,
        expected,
        check_dtype=False,
        # python 2's handling of strings is annoying here wrt sqlalchemy's
        # column name string subclass
        check_column_type=sys.version_info.major != 2
    )
@tu.skipif_unsupported
@tu.skipif_backend(MapD)
def test_now(backend, con):
    """ibis.now() executes to a pandas Timestamp in the current year."""
    now_value = con.execute(ibis.now())
    assert isinstance(now_value, pd.Timestamp)
    # this could fail if we're testing in different timezones and we're
    # testing on Dec 31st
    assert now_value.year == pd.Timestamp('now').year
@tu.skipif_unsupported
@tu.skipif_backend(MapD)
def test_now_from_projection(backend, con, alltypes, df):
    """Projecting ibis.now() yields one constant datetime value per row."""
    n = 5
    frame = alltypes[[ibis.now().name('ts')]].limit(n).execute()
    assert isinstance(frame, pd.DataFrame)
    ts = frame.ts
    assert isinstance(ts, pd.Series)
    assert issubclass(ts.dtype.type, np.datetime64)
    assert len(frame) == n
    # every row carries the same "now"
    assert ts.nunique() == 1
    current_year = pd.Timestamp('now').year
    tm.assert_series_equal(ts.dt.year,
                           pd.Series([current_year] * n, name='ts'))
| apache-2.0 |
sonnyhu/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 20 | 3180 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)

import itertools
import numpy as np
import matplotlib.pyplot as plt

from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix

# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names

# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Print and plot the confusion matrix.

    Parameters
    ----------
    cm : ndarray of shape (n_classes, n_classes)
        Confusion matrix counts (as returned by ``confusion_matrix``).
    classes : sequence
        Class labels used for the axis ticks.
    normalize : bool
        If True, scale each row to sum to 1 before plotting.
    title : str
        Figure title.
    cmap : matplotlib colormap
        Colormap for the image.
    """
    # BUG FIX: normalization must happen *before* imshow — the original
    # called imshow first, so the image showed raw counts even when
    # normalize=True (only the text annotations were normalized).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Use 2 decimals for normalized (float) cells so the text stays readable;
    # integer counts keep the plain 'd' format.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
# limit printed float precision for the normalized matrix below
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
                      title='Confusion matrix, without normalization')

# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                      title='Normalized confusion matrix')

plt.show()
| bsd-3-clause |
psychopy/versions | psychopy/app/themes/_themes.py | 1 | 42072 | import os
import subprocess
import sys
import wx
import wx.lib.agw.aui as aui
import wx.stc as stc
from psychopy.localization import _translate
from wx import py
import keyword
import builtins
from pathlib import Path
from psychopy import prefs
from psychopy import logging
import psychopy
from ...experiment import components
import json
if sys.platform=='win32':
from matplotlib import font_manager
fm = font_manager.FontManager()
thisFolder = Path(__file__).parent
iconsPath = Path(prefs.paths['resources'])
try:
FileNotFoundError
except NameError:
# Py2 has no FileNotFoundError
FileNotFoundError = IOError
allCompons = components.getAllComponents() # ensures that the icons get checked
# Create library of "on brand" colours. Each base colour is [R, G, B]
# (0-255); 'none' carries a 4th alpha value of 0 so it renders transparent.
cLib = {
    'none': [127, 127, 127, 0],
    'black': [0, 0, 0],
    'grey': [102, 102, 110],
    'white': [242, 242, 242],
    'red': [242, 84, 91],
    'green': [108, 204, 116],
    'blue': [2, 169, 234],
    'yellow': [241, 211, 2],
    'orange': [236, 151, 3],
    'purple': [195, 190, 247],
    'darker': {},
    'lighter': {},
    'very': {'lighter': {},
             'darker': {}}
}
# Derived shades: +-15 per channel for lighter/darker and a further +-30
# for the 'very' variants, always clamped to the 0-255 range.
# (`c not in` replaces the unidiomatic `not c in`; both +-15 shades are
# built in a single pass over the base colours.)
for c in cLib:
    if c not in ('darker', 'lighter', 'none', 'very'):
        cLib['darker'][c] = [max(0, n - 15) for n in cLib[c]]
        cLib['lighter'][c] = [min(255, n + 15) for n in cLib[c]]
for c in cLib['lighter']:
    cLib['very']['lighter'][c] = [min(255, n + 30) for n in cLib['lighter'][c]]
for c in cLib['darker']:
    cLib['very']['darker'][c] = [max(0, n - 30) for n in cLib['darker'][c]]
class ThemeMixin:
    """Mixin that loads PsychoPy theme specs (colours, icons, code styling)
    and applies them recursively to wx widgets."""
    # Map Scintilla lexer ids to the language names used in theme specs
    lexers = {
        stc.STC_LEX_PYTHON: "python",
        stc.STC_LEX_CPP: "c++",
        stc.STC_LEX_R: "R"
    }
    # these are populated and modified by PsychoPyApp.theme.setter
    spec = None          # raw json spec of the current theme
    codetheme = 'PsychopyLight'  # name of the current code colour scheme
    mode = 'light'       # app colour mode ('light'/'dark')
    icons = 'light'      # icon set matching the mode
    codeColors = {}      # colours for the code editor
    appColors = {}       # colours for the app chrome
    appIcons = {'components': {},
                'resources': {}}
def loadThemeSpec(self, themeName):
    """Load a theme spec (json) from disk and apply it to the class.

    Falls back to the bundled 'PsychopyLight' theme (and 'light' app
    colours) when the named files are missing. Updates the ThemeMixin
    class attributes shared by all widgets.
    """
    # a theme spec contains the spec for the *code* theme as well as a mode
    # that determines which colorscheme to load for the app (separate)
    themesPath = Path(prefs.paths['themes'])

    # first load the *theme* which contains the mode name for the app
    try:
        with open(str(themesPath / (themeName+".json")), "rb") as fp:
            ThemeMixin.spec = themeSpec = json.load(fp)
    except FileNotFoundError:
        # named theme missing -> default light theme
        with open(str(themesPath / "PsychopyLight.json"), "rb") as fp:
            ThemeMixin.spec = themeSpec = json.load(fp)

    appColorMode = themeSpec['app']
    # Get app spec
    try:
        with open(str(themesPath / "app/{}.json".format(appColorMode)), "rb") as fp:
            ThemeMixin.spec = appColors = json.load(fp)
    except FileNotFoundError:
        # named app colour file missing -> default light colours
        with open(str(themesPath / "app/light.json"), "rb") as fp:
            ThemeMixin.spec = appColors = json.load(fp)

    # Set app theme
    ThemeMixin.mode = appColorMode
    self._setAppColors(appColors)
    # Set app icons (fall back to the app mode name if not given)
    if 'icons' in themeSpec:
        ThemeMixin.icons = themeSpec['icons']
    else:
        ThemeMixin.icons = themeSpec['app']
    # Set coder theme
    codertheme = themeSpec
    ThemeMixin.codetheme = themeName
    self._setCodeColors(codertheme)
def _applyAppTheme(self, target=None):
    """Applies colorScheme recursively to the target and its children

    Parameters
    ----------
    colorScheme: the new color spec being applied (dict)
    target: the wx object to which being applied (defaults to self)
    depth: depth in the tree of wx objects
    """
    # Define subfunctions to handle different object types
    def applyToToolbar(target):
        # Toolbars get the frame background and are rebuilt from scratch
        target.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
        # Clear tools
        target.ClearTools()
        # Redraw tools
        target.makeTools()

    def applyToStatusBar(target):
        target.SetBackgroundColour(cLib['white'])

    def applyToFrame(target):
        target.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
        target.SetForegroundColour(ThemeMixin.appColors['text'])
        if hasattr(target, 'GetAuiManager'):
            target.GetAuiManager().SetArtProvider(PsychopyDockArt())
            target.GetAuiManager().Update()
        # restyle any theme-switcher submenus in the menu bar
        for menu in target.GetMenuBar().GetMenus():
            for submenu in menu[0].MenuItems:
                if isinstance(submenu.SubMenu, ThemeSwitcher):
                    submenu.SubMenu._applyAppTheme()

    def applyToPanel(target):
        target.SetBackgroundColour(ThemeMixin.appColors['panel_bg'])
        target.SetForegroundColour(ThemeMixin.appColors['text'])

    def applyToNotebook(target):
        # Dict of icons to apply to specific tabs
        tabIcons = {
            "Structure": "coderclass16.png",
            "FileBrowser": "folder-open16.png",
            "PythonShell": "coderpython16.png"
        }
        target.SetArtProvider(PsychopyTabArt())
        target.GetAuiManager().SetArtProvider(PsychopyDockArt())
        for index in range(target.GetPageCount()):
            page = target.GetPage(index)
            page.SetBackgroundColour(ThemeMixin.appColors['panel_bg'])
            if page.GetName() in tabIcons:
                bmp = IconCache.getBitmap(IconCache(), tabIcons[page.GetName()])
                target.SetPageBitmap(index, bmp)
            page._applyAppTheme()

    def applyToCodeEditor(target):
        # work on a copy so lexer-specific overrides don't leak back into
        # the shared class-level spec
        spec = ThemeMixin.codeColors.copy()
        base = spec['base']
        # Set margin size according to text size
        if not isinstance(target, wx.py.shell.Shell):
            target.SetMarginWidth(0, 4 * prefs.coder['codeFontSize'])
        # Override base font with user spec if present
        prefkey = 'outputFont' if isinstance(target, wx.py.shell.Shell) else 'codeFont'
        if prefs.coder[prefkey].lower() != "From Theme...".lower():
            for key in spec:
                if 'font' in spec[key]:
                    spec[key]['font'] = prefs.coder[prefkey] if spec[key]['font'] == base['font'] \
                        else base['font']
            base['font'] = prefs.coder[prefkey]
        # Check that key is in tag list, discard unknown keys
        invalid = []
        for key in spec:
            if key not in self.tags:
                invalid += [key]
        for key in invalid:
            del spec[key]
        # Check for language specific spec
        if target.GetLexer() in target.lexers:
            lexer = target.lexers[target.GetLexer()]
        else:
            lexer = 'invlex'
        if lexer in spec:
            # If there is lang specific spec, delete subkey...
            # NOTE(review): `spec['lexer']` looks like it should be
            # `spec[lexer]` — the guard tests the *variable* `lexer`, so
            # this line would raise KeyError whenever a language-specific
            # spec exists; confirm against upstream before relying on it.
            lang = spec['lexer'] # ...and append spec to root, overriding any generic spec
            spec.update({key: lang[key] for key in lang})
        else:
            lang = {}
        # Set style for undefined lexers
        for key in [getattr(wx._stc, item) for item in dir(wx._stc) if item.startswith("STC_LEX")]:
            target.StyleSetBackground(key, base['bg'])
            target.StyleSetForeground(key, base['fg'])
            target.StyleSetSpec(key, "face:%(font)s,size:%(size)d" % base)
        # Set style from universal data
        for key in spec:
            if target.tags[key] is not None:
                target.StyleSetBackground(target.tags[key], spec[key]['bg'])
                target.StyleSetForeground(target.tags[key], spec[key]['fg'])
                target.StyleSetSpec(target.tags[key], "face:%(font)s,size:%(size)d" % spec[key])
        # Apply keywords
        for level, val in target.lexkw.items():
            target.SetKeyWords(level, " ".join(val))
        # Set margin
        target.SetFoldMarginColour(True, spec['margin']['bg'])
        target.SetFoldMarginHiColour(True, spec['margin']['bg'])
        # Set caret colour
        target.SetCaretForeground(spec['caret']['fg'])
        target.SetCaretLineBackground(spec['caret']['bg'])
        target.SetCaretWidth(1 + ('bold' in spec['caret']['font']))
        # Set selection colour
        target.SetSelForeground(True, spec['select']['fg'])
        target.SetSelBackground(True, spec['select']['bg'])
        # Set wrap point
        target.edgeGuideColumn = target.prefs['edgeGuideColumn']
        target.edgeGuideVisible = target.edgeGuideColumn > 0
        # Set line spacing (capped at 64px)
        spacing = min(int(target.prefs['lineSpacing'] / 2), 64) # Max out at 64
        target.SetExtraAscent(spacing)
        target.SetExtraDescent(spacing)

    def applyToRichText(target):
        base = ThemeMixin.codeColors['base']
        # todo: Add element-specific styling (it must be possible...)
        # If dealing with a StdOut, set background from base
        target.SetBackgroundColour(
            self.hex2rgb(base['bg'], base['bg']))
        # Then construct default styles
        bold = wx.FONTWEIGHT_BOLD if "bold" in base['font'] else wx.FONTWEIGHT_NORMAL
        italic = wx.FONTSTYLE_ITALIC if "italic" in base['font'] else wx.FONTSTYLE_NORMAL
        # Override base font with user spec if present
        if prefs.coder['outputFont'].lower() == "From Theme...".lower():
            fontName = base['font'].replace("bold", "").replace("italic", "").replace(",", "")
        else:
            fontName = prefs.coder['outputFont']
        _font = wx.Font(
            int(prefs.coder['outputFontSize']),
            wx.FONTFAMILY_TELETYPE, italic,
            bold, False,
            faceName=fontName
        )
        _style = wx.TextAttr(
            colText=wx.Colour(
                self.hex2rgb(base['fg'], base['fg'])),
            colBack=wx.Colour(
                self.hex2rgb(base['bg'], base['bg'])),
            font=_font)
        # Then style all text as base
        i = 0
        for ln in range(target.GetNumberOfLines()):
            i += target.GetLineLength(
                ln) + 1 # +1 as \n is not included in character count
            target.SetStyle(0, i, _style)

    def applyToTextCtrl(target):
        base = ThemeMixin.codeColors['base']
        target.SetForegroundColour(base['fg'])
        target.SetBackgroundColour(base['bg'])

    # Define dict linking object types to subfunctions
    handlers = {
        wx.Frame: applyToFrame,
        wx.Panel: applyToPanel,
        aui.AuiNotebook: applyToNotebook,
        psychopy.app.coder.coder.BaseCodeEditor: applyToCodeEditor,
        wx.richtext.RichTextCtrl: applyToRichText,
        wx.py.shell.Shell: applyToCodeEditor,
        wx.ToolBar: applyToToolbar,
        wx.StatusBar: applyToStatusBar,
        wx.TextCtrl: applyToTextCtrl
    }

    # If no target supplied, default to using self
    if target is None:
        target = self

    # recursion guard counter, shared across this object's traversal
    if not hasattr(self, '_recursionDepth'):
        self._recursionDepth = 0
    else:
        self._recursionDepth += 1

    appCS = ThemeMixin.appColors
    base = ThemeMixin.codeColors['base']

    # Abort if target is immune
    if hasattr(target, 'immune'):
        return

    # Style target: run every matching handler (a widget may match more
    # than one entry, e.g. Shell is also a TextCtrl subclass)
    isHandled = False
    for thisType in handlers:
        if isinstance(target, thisType):
            handlers[thisType](target)
            isHandled = True

    if not isHandled:
        # try and set colors for target
        try:
            target.SetBackgroundColour(ThemeMixin.appColors['panel_bg'])
            target.SetForegroundColour(ThemeMixin.appColors['text'])
        except AttributeError:
            pass

    # search for children (set in a second step)
    if isinstance(target, wx.Sizer):
        sizer = target
        children = sizer.Children
    else:
        children = []
        if hasattr(target, 'Children'):
            children.extend(target.Children)
        elif hasattr(target, 'immune'):
            pass
        elif hasattr(target, 'paneManager'):
            for pane in target.paneManager.AllPanes:
                children.append(pane.window)
        elif hasattr(target, 'Sizer') and target.Sizer:
            children.append(target.Sizer)

    # NOTE(review): this loop is a no-op (body is `pass`) — possibly a
    # leftover from earlier button-restyling code; confirm before removing.
    if hasattr(self, 'btnHandles'):
        for thisBtn in self.btnHandles:
            pass

    # then apply to all children as well
    for c in children:
        if hasattr(c, '_applyAppTheme'):
            # if the object understands themes then request that
            c._applyAppTheme()
        elif self._recursionDepth>10:
            return
        else:
            # if not then use our own recursive method to search
            if hasattr(c, 'Window') and c.Window is not None:
                ThemeMixin._applyAppTheme(c.Window)
            elif hasattr(c, 'Sizer') and c.Sizer is not None:
                ThemeMixin._applyAppTheme(c.Sizer)

    # and then apply
    # try:
    #     ThemeMixin._applyAppTheme(c)
    # except AttributeError:
    #     pass

    if hasattr(target, 'Refresh'):
        target.Refresh()
    if hasattr(target, '_mgr'):
        target._mgr.Update()
@property
def lexkw(self):
    """Keyword lists for the editor's current Scintilla lexer.

    Returns a dict mapping keyword-set index (0 = keywords,
    1 = types/builtins) to a list of keyword strings, chosen from the
    lexer reported by ``self.GetLexer()`` (and ``self.filename`` for the
    JavaScript special case).
    """
    # Shared C-family keywords, also the basis for the commented-out
    # Arduino/GLSL support below.
    # NOTE(review): 'volatile' appears twice in set 0 — harmless but
    # probably unintended.
    baseC = {
        0: ['typedef', 'if', 'else', 'return', 'struct', 'for', 'while', 'do',
            'using', 'namespace', 'union', 'break', 'enum', 'new', 'case',
            'switch', 'continue', 'volatile', 'finally', 'throw', 'try',
            'delete', 'typeof', 'sizeof', 'class', 'volatile'],
        1: ['int', 'float', 'double', 'char', 'short', 'byte', 'void', 'const',
            'unsigned', 'signed', 'NULL', 'true', 'false', 'bool', 'size_t',
            'long', 'long long']
    }
    if self.GetLexer() == stc.STC_LEX_PYTHON:
        # Python (plus Cython extensions)
        keywords = {
            0: keyword.kwlist + ['cdef', 'ctypedef', 'extern', 'cimport', 'cpdef', 'include'],
            1: dir(builtins) + ['self']
        }
    elif self.GetLexer() == stc.STC_LEX_R:
        # R
        keywords = {
            1: ['function', 'for', 'repeat', 'while', 'if', 'else',
                'break', 'local', 'global'],
            0: ['NA']
        }
    elif self.GetLexer() == stc.STC_LEX_CPP:
        # C/C++
        keywords = baseC
        if hasattr(self, 'filename'):
            if self.filename.endswith('.js'):
                # JavaScript (shares the C++ lexer, keyed off the extension)
                keywords = {
                    0: ['var', 'const', 'let', 'import', 'function', 'if', 'else', 'return', 'struct', 'for', 'while', 'do',
                        'finally', 'throw', 'try', 'switch', 'case', 'break'],
                    1: ['null', 'false', 'true']
                }
    # elif self.GetLexer() == stc.STC_LEX_ARDUINO:
    #     # Arduino
    #     keywords = {
    #         0: baseC[0],
    #         1: baseC[1] + [
    #             'BIN', 'HEX', 'OCT', 'DEC', 'INPUT', 'OUTPUT', 'HIGH', 'LOW',
    #             'INPUT_PULLUP', 'LED_BUILTIN', 'string', 'array']
    #     }
    # elif self.GetLexer() == stc.STC_LEX_GLSL:
    #     # GLSL
    #     glslTypes = []
    #     baseType = ['', 'i', 'b', 'd']
    #     dim = ['2', '3', '4']
    #     name = ['vec', 'mat']
    #     for i in baseType:
    #         for j in name:
    #             for k in dim:
    #                 glslTypes.append(i + j + k)
    #     keywords = {
    #         0: baseC[0] + ['invariant', 'precision', 'highp', 'mediump', 'lowp', 'coherent',
    #                        'sampler', 'sampler2D'],
    #         1: baseC[1]
    #     }
    else:
        # unknown lexer: no keywords
        keywords = {
            0: [],
            1: []
        }
    return keywords
@property
def tags(self):
    """Map theme-spec style names to Scintilla style constants.

    Always includes the universal tags (base, margin, caret, ...); adds
    lexer-specific tags for the Python, R and C/C++ lexers. Values of
    None (caret, select) are styled by dedicated calls rather than
    StyleSet* in _applyAppTheme.
    """
    # universal tags shared by every lexer
    tags = {
        "base": stc.STC_STYLE_DEFAULT,
        "margin": stc.STC_STYLE_LINENUMBER,
        "caret": None,
        "select": None,
        "indent": stc.STC_STYLE_INDENTGUIDE,
        "brace": stc.STC_STYLE_BRACELIGHT,
        "controlchar": stc.STC_STYLE_CONTROLCHAR
    }
    if self.GetLexer() == stc.STC_LEX_PYTHON:
        # Python
        tags.update({
            "operator": stc.STC_P_OPERATOR,
            "keyword": stc.STC_P_WORD,
            "keyword2": stc.STC_P_WORD2,
            "id": stc.STC_P_IDENTIFIER,
            "num": stc.STC_P_NUMBER,
            "char": stc.STC_P_CHARACTER,
            "str": stc.STC_P_STRING,
            "openstr": stc.STC_P_STRINGEOL,
            "decorator": stc.STC_P_DECORATOR,
            "def": stc.STC_P_DEFNAME,
            "class": stc.STC_P_CLASSNAME,
            "comment": stc.STC_P_COMMENTLINE,
            "commentblock": stc.STC_P_COMMENTBLOCK,
            "documentation": stc.STC_P_TRIPLE,
            "documentation2": stc.STC_P_TRIPLEDOUBLE,
            "whitespace": stc.STC_P_DEFAULT
        })
    elif self.GetLexer() == stc.STC_LEX_R:
        # R
        tags.update({
            "operator": stc.STC_R_OPERATOR,
            "keyword": stc.STC_R_BASEKWORD,
            "keyword2": stc.STC_R_KWORD,
            "id": stc.STC_R_IDENTIFIER,
            "num": stc.STC_R_NUMBER,
            "char": stc.STC_R_STRING2,
            "str": stc.STC_R_STRING,
            "infix": stc.STC_R_INFIX,
            "openinfix": stc.STC_R_INFIXEOL,
            "comment": stc.STC_R_COMMENT,
            "whitespace": stc.STC_R_DEFAULT
        })
    elif self.GetLexer() == stc.STC_LEX_CPP:
        # C/C++ (also used for JavaScript files)
        tags.update({
            "operator": stc.STC_C_OPERATOR,
            "keyword": stc.STC_C_WORD,
            "keyword2": stc.STC_C_WORD2,
            "id": stc.STC_C_IDENTIFIER,
            "num": stc.STC_C_NUMBER,
            "char": stc.STC_C_CHARACTER,
            "str": stc.STC_C_STRING,
            "openstr": stc.STC_C_STRINGEOL,
            "class": stc.STC_C_GLOBALCLASS,
            "comment": stc.STC_C_COMMENT,
            "commentblock": stc.STC_C_COMMENTLINE,
            "commentkw": stc.STC_C_COMMENTDOCKEYWORD,
            "commenterror": stc.STC_C_COMMENTDOCKEYWORDERROR,
            "documentation": stc.STC_C_COMMENTLINEDOC,
            "documentation2": stc.STC_C_COMMENTDOC,
            "whitespace": stc.STC_C_DEFAULT
        })
    return tags
def hex2rgb(self, hex, base=(0, 0, 0, 255)):
    """Convert a '#rrggbb' hex string to a wx.Colour.

    Parameters
    ----------
    hex : str
        Colour code such as '#ff8800' (case-insensitive). The parameter
        name shadows the builtin but is kept for keyword-arg
        compatibility with existing callers.
    base : tuple
        RGBA fallback used when `hex` is not a valid 7-character
        '#rrggbb' code.

    Returns
    -------
    wx.Colour for valid (or invalid-string) input; `base` itself is
    returned unchanged when `hex` is not a str (original behaviour).
    """
    if not isinstance(hex, str):
        return base
    code = hex.lower()
    # Must be '#' followed by exactly six hex digits. The original
    # accepted *any* 7 chars drawn from [0-9a-f#], which crashed with a
    # TypeError on inputs like '00#0000' ('#' mapped to None in its
    # digit table); requiring the leading '#' and parsing with int(,16)
    # fixes that while keeping valid codes identical.
    if len(code) != 7 or not code.startswith('#'):
        return wx.Colour(base)
    try:
        r = int(code[1:3], 16)
        g = int(code[3:5], 16)
        b = int(code[5:7], 16)
    except ValueError:
        # non-hex digit somewhere in the body -> fall back, as before
        return wx.Colour(base)
    return wx.Colour(r, g, b, 255)
def shiftColour(self, col, offset=15):
    """Return a copy of `col` shifted lighter or darker by `offset`.

    Dark colours (luminance < 0.5) are brightened, light colours are
    darkened. Returns None when `col` is not a wx.Colour (original
    behaviour).
    """
    if not isinstance(col, wx.Colour):
        return
    # brighten dark colours, darken light ones
    delta = offset if col.GetLuminance() < 0.5 else -offset
    # BUG FIX: clamp each channel to 0-255; the original could push
    # channels (including alpha, which Get() returns) out of range.
    return wx.Colour([min(255, max(0, c + delta)) for c in col.Get()])
def extractFont(self, fontList, base=[]):
"""Extract specified font from theme spec"""
# Convert to list if not already
if isinstance(base, str):
base = base.split(",")
base = base if isinstance(base, list) else [base]
if isinstance(fontList, str):
fontList = fontList.split(",")
fontList = fontList if isinstance(fontList, list) else [fontList]
# Extract styles
bold, italic = [], []
if "bold" in fontList:
bold = [fontList.pop(fontList.index("bold"))]
if "italic" in fontList:
italic = [fontList.pop(fontList.index("italic"))]
# Extract styles from base, if needed
if "bold" in base:
bold = [base.pop(base.index("bold"))]
if "italic" in base:
italic = [base.pop(base.index("italic"))]
# Append base and default fonts
fontList.extend(base+["Consolas", "Monaco", "Lucida Console"])
# Set starting font in case none are found
if sys.platform == 'win32':
finalFont = [wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT).GetFaceName()]
else:
finalFont = [wx.SystemSettings.GetFont(wx.SYS_ANSI_FIXED_FONT).GetFaceName()]
# Cycle through font names, stop at first valid font
if sys.platform == 'win32':
for font in fontList:
if fm.findfont(font) not in fm.defaultFont.values():
finalFont = [font] + bold + italic
break
return ','.join(finalFont)
    def _setCodeColors(self, spec):
        """To be called from _psychopyApp only.

        Populate ThemeMixin.codeColors from a theme spec dict: fill in
        missing 'margin'/'caret'/'select' entries from spec['base'], convert
        hex colour strings to wx colours and font lists to concrete font
        specs, and discard entries that lack a full bg/fg/font definition.
        """
        #if not self.GetTopWindow() == self:
        #    psychopy.logging.warning("This function should only be called from _psychopyApp")
        base = spec['base']
        base['font'] = self.extractFont(base['font'])
        # Make sure there's some spec for margins
        if 'margin' not in spec:
            spec['margin'] = base
        # Make sure there's some spec for caret
        if 'caret' not in spec:
            spec['caret'] = base
        # Make sure there's some spec for selection
        if 'select' not in spec:
            spec['select'] = base
            # NOTE(review): spec['select'] IS base here (same dict object),
            # so this also rewrites base['bg'] — confirm that is intended
            spec['select']['bg'] = self.shiftColour(base['bg'], 30)
        # Pythonise the universal data (hex -> rgb, tag -> wx int)
        invalid = []
        for key in spec:
            # Check that full spec is defined, discard if not
            if all(subkey in spec[key] for subkey in ['bg', 'fg', 'font']):
                spec[key]['bg'] = self.hex2rgb(spec[key]['bg'], base['bg'])
                spec[key]['fg'] = self.hex2rgb(spec[key]['fg'], base['fg'])
                spec[key]['font'] = self.extractFont(spec[key]['font'], base['font'])
                spec[key]['size'] = int(prefs.coder['codeFontSize'])
            elif key in ['app', 'icons']:
                # non-colour sections are allowed through untouched
                pass
            else:
                invalid += [key]
        for key in invalid:
            del spec[key]
        # we have a valid theme so continue
        for key in spec:
            ThemeMixin.codeColors[key] = spec[key]  # class attribute for all mixin subclasses
    def _setAppColors(self, spec):
        """Populate ThemeMixin.appColors from a theme spec dict.

        Each value may be a hex string, a named colour, a 2- or 3-level
        nested colour name (looked up in cLib), optionally followed by an
        opacity given as an int (0-255) or a float. Invalid entries raise.
        """
        hexchars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                    'a', 'b', 'c', 'd', 'e', 'f']
        # Recognised spec shapes, keyed by a descriptive tag; matching is
        # done on the exact sequence of element types below.
        formats = {
            "hex|named": [str],
            "subnamed1": [str, str],
            "subnamed2": [str, str, str],
            "hex|named_opacity1": [str, int],
            "subnamed1_opacity1": [str, str, int],
            "subnamed2_opacity1": [str, str, str, int],
            "hex|named_opacity2": [str, float],
            "subnamed1_opacity2": [str, str, float],
            "subnamed2_opacity2": [str, str, str, float]
        }
        # Cycle through all values
        for key in spec:
            # if key not in output:
            #     continue
            val = spec[key]
            color = ['invalid']
            # Make sure every value is a list
            if not isinstance(val, list):
                val = [val]
            # Figure out what format current spec is in
            types = [type(v) for v in val]
            format = "invalid"
            for f in formats:
                if formats[f] == types:
                    format = f
            # Pop out opacity so that it can be assumed not present
            if "_opacity" in format:
                # NOTE(review): float opacities are rounded, not scaled to
                # 0-255 (e.g. 0.5 -> 0) — confirm that is intended
                opacity = round(val.pop(-1))
                format = format.replace("_opacity", "")
            else:
                opacity = 255
            # Tell the difference between hex and single named values
            if "hex|named" in format:
                if val[0] in cLib:
                    # Extract named colour
                    color = cLib[val[0]]
                    format = format.replace("hex|", "")
                elif len(val[0]) == 7:
                    hex = val[0]
                    if hex[0] == "#" and all([h in hexchars for h in hex[1:].lower()]):
                        # Convert hex colour
                        format = format.replace("|named", "")
                        wxcolor = ThemeMixin.hex2rgb(None, hex)
                        color = list(wxcolor[:3])
                    else:
                        format = "invalid"
                else:
                    format = "invalid"
            if "subnamed" in format:
                # nested lookups, e.g. ["blue", "dark"] or 3 levels deep
                if len(val) == 2 and all([v in cLib for v in val]):
                    color = cLib[val[0]][val[1]]
                elif len(val) == 3 and all([v in cLib for v in val]):
                    color = cLib[val[0]][val[1]][val[2]]
                else:
                    format = "invalid"
            if format == "invalid" \
                    or "color" not in locals() \
                    or "opacity" not in locals() \
                    or "invalid" in color:
                raise Exception("Invalid app colour spec")
            else:
                ThemeMixin.appColors[key] = wx.Colour(color + [opacity])
def getBitmap(name, theme, size=None,
              emblem=None, emblemPos='bottom_right'):
    """Retrieve a wx.Bitmap by name/theme/size/emblem from the shared
    module-level icon cache.

    NOTE(review): emblemPos is forwarded as a fifth positional argument,
    but IconCache.getBitmap takes no such parameter — confirm what type
    the module-level ``_allIcons`` object actually is.
    """
    # reading a module global needs no ``global`` declaration
    return _allIcons.getBitmap(name, theme, size, emblem, emblemPos)
class IconCache:
    """A class to load icons and store them just once, as a dict of
    wx.Bitmap objects keyed by name/theme/emblem/size.

    Buttons created through :meth:`makeBitmapButton` are remembered in
    ``_buttons`` so their bitmaps and background colours can be refreshed
    when the theme changes (see :meth:`setTheme`).
    """
    _theme = ThemeMixin
    _bitmaps = {}        # identifier string (see _getIdentifier) -> wx.Bitmap
    _buttons = []        # a list of info dicts for all created buttons
    _lastBGColor = None  # button background colour applied by the last setTheme()
    _lastIcons = None    # icon-set name applied by the last setTheme()

    def _findImageFile(self, name, theme, emblem=None, size=None):
        """Tries to find a valid icon in a range of places with and without
        a size suffix.

        Search order for each candidate name (plain, with size suffix, with
        "@2x" suffix): the path itself, per-theme subfolders next to it
        ('light' as fallback), the app icons folder per theme, then the app
        icons root. Returns the path as str, or None (after logging).
        """
        orig = Path(name)
        if not orig.suffix:  # check we have an image suffix
            orig = Path(name + '.png')
        if emblem:  # add the emblem into the name
            orig = orig.with_name(
                "{}_{}{}".format(orig.stem, emblem, orig.suffix))
        nameAndSize = orig.with_name(orig.stem + str(size) + orig.suffix)
        nameAndDouble = orig.with_name(orig.stem + str(size) + "@2x" + orig.suffix)
        for filename in [nameAndSize, orig, nameAndDouble]:
            # components with no themes folders (themes were added in 2020.2)
            if filename.exists():
                return str(filename)
            # components with theme folders:
            # try using the theme name (or 'light' as a default name)
            for themeName in [theme, 'light']:
                thisPath = filename.parent / themeName / filename.name
                if thisPath.exists():
                    return str(thisPath)
            # try in the app icons folder (e.g. for "run.png")
            thisPath = iconsPath / theme / filename
            if thisPath.exists():
                return str(thisPath)
            # and in the root of the app icons
            thisPath = iconsPath / filename
            if thisPath.exists():
                return str(thisPath)
        # still haven't returned any path. Out of ideas!
        logging.warning("Failed to find icon name={}, theme={}, "
                        "size={}, emblem={}"
                        .format(name, theme, size, emblem))

    def _loadBitmap(self, name, theme, size=None, emblem=None):
        """Creates wx.Bitmaps based on the image and stores them.

        png files work best, but anything that wx.Image can load should be
        fine. Doesn't return the icons, just stores them in ``_bitmaps``.
        """
        filename = self._findImageFile(name, theme, emblem, size)
        if not filename:
            # fall back to a placeholder rather than passing None to wx.Image
            filename = self._findImageFile('unknown.png', theme, emblem, size)
        # load image with wx.LogNull() to stop libpng complaining about sRGB
        nologging = wx.LogNull()
        try:
            im = wx.Image(filename)
        except TypeError:
            raise FileNotFoundError("Failed to find icon name={}, theme={}, "
                                    "size={}, emblem={}"
                                    .format(name, theme, size, emblem))
        del nologging  # turns logging back on
        pix = im.GetSize()[0]
        if pix > size:
            # NOTE(review): Scale(pix, pix) keeps the current size, so this
            # branch is a no-op — presumably Scale(size, size) was intended;
            # left unchanged pending confirmation
            im = im.Scale(pix, pix)
        nameMain = _getIdentifier(name, theme, emblem, size)
        self._bitmaps[nameMain] = wx.Bitmap(im)
        if pix > 24:  # for bigger images lets create a 1/2 size one too
            nameSmall = _getIdentifier(name, theme, emblem, pix // 2)
            self._bitmaps[nameSmall] = wx.Bitmap(im.Scale(pix // 2, pix // 2))

    def getBitmap(self, name, theme=None, size=None, emblem=None):
        """Retrieves an icon based on its name, theme, size and emblem,
        either from the cache or loading from file as needed."""
        if theme is None:
            theme = ThemeMixin.icons
        if size is None:
            size = 48
        identifier = _getIdentifier(name, theme=theme, emblem=emblem, size=size)
        # find/load the bitmaps first
        if identifier not in IconCache._bitmaps:
            # load all size icons for this name
            self._loadBitmap(name, theme, emblem=emblem, size=size)
        return IconCache._bitmaps[identifier]

    def makeBitmapButton(self, parent, filename,
                         name="",   # name of Component e.g. TextComponent
                         label="",  # label on the button, often short name
                         emblem=None,
                         toolbar=None, tip=None, size=None,
                         tbKind=wx.ITEM_NORMAL, theme=None):
        """Create a bitmap button (or toolbar tool if `toolbar` is given)
        and register it for later theme refreshes."""
        if theme is None:
            theme = ThemeMixin.icons
        bmp = self.getBitmap(filename, theme, size, emblem)
        if toolbar:
            if 'phoenix' in wx.PlatformInfo:
                button = toolbar.AddTool(wx.ID_ANY, label=label,
                                         bitmap=bmp, shortHelp=tip,
                                         kind=tbKind)
            else:
                button = toolbar.AddSimpleTool(wx.ID_ANY, label=label,
                                               bitmap=bmp, shortHelp=tip,
                                               kind=tbKind)
        else:
            button = wx.Button(parent, wx.ID_ANY,
                               label=label, name=name, style=wx.NO_BORDER)
            button.SetBitmap(bmp)
            button.SetBitmapPosition(wx.TOP)
            button.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
        # just for regular buttons (not toolbar objects) we can re-use
        buttonInfo = {'btn': button,
                      'filename': filename,
                      'size': size,
                      'emblem': emblem,
                      'theme': theme}
        self._buttons.append(buttonInfo)
        if tip:
            button.SetToolTip(wx.ToolTip(tip))
        return button

    def getComponentButton(self, parent, name, label,
                           theme=None, size=None, emblem=None,
                           tip=""):
        """Checks in the experiment.components.iconFiles for filename and
        loads it into a button.

        NOTE(review): `theme` and `emblem` are accepted but not forwarded
        to makeBitmapButton — confirm whether they should be.
        """
        if name in components.iconFiles:
            filename = components.iconFiles[name]
            btn = self.makeBitmapButton(
                parent=parent,
                filename=filename, name=name, label=label,
                tip=tip, size=size)
            return btn

    def getComponentBitmap(self, name, size=None):
        """Checks in the experiment.components.iconFiles for filename and
        loads it into a wx.Bitmap"""
        if type(name) != str:  # got a class instead of a name?
            name = name.getType()
        if name in components.iconFiles:
            filename = components.iconFiles[name]
            bmp = self.getBitmap(name=filename, size=size)
            return bmp
        else:
            print(components.iconFiles)
            raise ValueError("Failed to find '{}' in components.iconFiles"
                             .format(name))

    def setTheme(self, theme):
        """Refresh all registered buttons for a new theme: swap bitmaps if
        the icon set changed and repaint backgrounds if the colour changed."""
        if theme.icons != IconCache._lastIcons:
            for thisBtn in IconCache._buttons:
                if thisBtn['btn']:  # Check that button hasn't been deleted
                    newBmp = self.getBitmap(name=thisBtn['filename'],
                                            size=thisBtn['size'],
                                            theme=theme.icons,
                                            emblem=thisBtn['emblem'])
                    thisBtn['btn'].SetBitmap(newBmp)
                    thisBtn['btn'].SetBitmapCurrent(newBmp)
                    thisBtn['btn'].SetBitmapPressed(newBmp)
                    thisBtn['btn'].SetBitmapFocus(newBmp)
                    thisBtn['btn'].SetBitmapDisabled(newBmp)
                    thisBtn['btn'].SetBitmapPosition(wx.TOP)
            IconCache._lastIcons = theme.icons
        if theme.appColors['frame_bg'] != IconCache._lastBGColor:
            for thisBtn in IconCache._buttons:
                try:
                    thisBtn['btn'].SetBackgroundColour(
                        theme.appColors['frame_bg'])
                except RuntimeError:
                    # the underlying C++ widget may already be destroyed
                    pass
            # BUG FIX: this used to store the whole theme object, so the
            # colour comparison above could never match and every call
            # repainted all buttons; store the colour actually applied.
            IconCache._lastBGColor = theme.appColors['frame_bg']
def _getIdentifier(name, theme, emblem, size=None):
if size is None:
return "{}_{}_{}".format(name, theme, emblem)
else:
return "{}_{}_{}_{}".format(name, theme, emblem, size)
class PsychopyTabArt(aui.AuiDefaultTabArt, ThemeMixin):
    # AUI tab renderer that draws notebook tabs using the current app theme.
    def __init__(self):
        aui.AuiDefaultTabArt.__init__(self)
        self.SetDefaultColours()
        # suppress the dotted focus rectangle on tabs
        self.SetAGWFlags(aui.AUI_NB_NO_TAB_FOCUS)

    def SetDefaultColours(self):
        """
        Sets the default colours, which are calculated from the given base colour.

        :param `base_colour`: an instance of :class:`wx.Colour`. If defaulted to ``None``, a colour
         is generated accordingly to the platform and theme.
        """
        # all colours come from the shared theme palette
        cs = ThemeMixin.appColors
        self.SetBaseColour( wx.Colour(cs['tab_bg']) )
        self._background_top_colour = wx.Colour(cs['panel_bg'])
        self._background_bottom_colour = wx.Colour(cs['panel_bg'])
        # text colour is a callable taking the page (same colour for all)
        self._tab_text_colour = lambda page: cs['text']
        self._tab_top_colour = wx.Colour(cs['tab_bg'])
        self._tab_bottom_colour = wx.Colour(cs['tab_bg'])
        self._tab_gradient_highlight_colour = wx.Colour(cs['tab_bg'])
        self._border_colour = wx.Colour(cs['tab_bg'])
        self._border_pen = wx.Pen(self._border_colour)
        self._tab_disabled_text_colour = cs['text']
        self._tab_inactive_top_colour = wx.Colour(cs['panel_bg'])
        self._tab_inactive_bottom_colour = wx.Colour(cs['panel_bg'])

    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Extends AuiDefaultTabArt.DrawTab to add a transparent border to inactive tabs
        """
        # only the active tab gets a visible border
        if page.active:
            self._border_pen = wx.Pen(self._border_colour)
        else:
            self._border_pen = wx.TRANSPARENT_PEN
        out_tab_rect, out_button_rect, x_extent = aui.AuiDefaultTabArt.DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control)
        return out_tab_rect, out_button_rect, x_extent
class PsychopyDockArt(aui.AuiDefaultDockArt):
    # AUI dock renderer that flattens all chrome into the app theme colours.
    def __init__(self):
        aui.AuiDefaultDockArt.__init__(self)
        cs = ThemeMixin.appColors
        # Gradient (disabled: flat look)
        self._gradient_type = aui.AUI_GRADIENT_NONE
        # Background
        self._background_colour = wx.Colour(cs['frame_bg'])
        self._background_gradient_colour = wx.Colour(cs['frame_bg'])
        self._background_brush = wx.Brush(self._background_colour)
        # Border (hidden: zero width, frame-coloured pen)
        self._border_size = 0
        self._border_pen = wx.Pen(cs['frame_bg'])
        # Sash
        self._draw_sash = True
        self._sash_size = 5
        self._sash_brush = wx.Brush(cs['frame_bg'])
        # Gripper (effectively invisible: zero size, frame-coloured)
        self._gripper_brush = wx.Brush(cs['frame_bg'])
        self._gripper_pen1 = wx.Pen(cs['frame_bg'])
        self._gripper_pen2 = wx.Pen(cs['frame_bg'])
        self._gripper_pen3 = wx.Pen(cs['frame_bg'])
        self._gripper_size = 0
        # Hint
        self._hint_background_colour = wx.Colour(cs['frame_bg'])
        # Caption bar (same colours whether active or not)
        self._inactive_caption_colour = wx.Colour(cs['docker_bg'])
        self._inactive_caption_gradient_colour = wx.Colour(cs['docker_bg'])
        self._inactive_caption_text_colour = wx.Colour(cs['docker_fg'])
        self._active_caption_colour = wx.Colour(cs['docker_bg'])
        self._active_caption_gradient_colour = wx.Colour(cs['docker_bg'])
        self._active_caption_text_colour = wx.Colour(cs['docker_fg'])
        # self._caption_font
        self._caption_size = 25
        self._button_size = 20
class ThemeSwitcher(wx.Menu):
    """Class to make a submenu for switching theme, meaning that the menu
    will always be the same across frames."""

    def __init__(self, frame):
        # Get list of themes from the prefs themes folder
        themePath = Path(prefs.paths['themes'])
        themeList = {}
        for themeFile in themePath.glob("*.json"):
            try:
                with open(themeFile, "rb") as fp:
                    theme = json.load(fp)
                # Add themes to list only if min spec is defined
                base = theme['base']
                if all(key in base for key in ['bg', 'fg', 'font']):
                    themeList[themeFile.stem] = theme['info'] if "info" in theme else ""
            except (FileNotFoundError, IsADirectoryError, KeyError, ValueError):
                # ValueError covers malformed JSON (json.JSONDecodeError);
                # KeyError covers themes with no 'base' section — neither
                # used to be caught and would abort menu construction.
                pass
        # Make menu
        wx.Menu.__init__(self)
        # Make priority theme buttons first, in a fixed order
        priority = ["PsychopyDark", "PsychopyLight", "ClassicDark", "Classic"]
        for theme in priority:
            if theme not in themeList:
                # BUG FIX: themeList.pop(theme) raised KeyError when a
                # bundled theme file was missing or invalid; skip instead.
                continue
            tooltip = themeList.pop(theme)
            item = self.AppendRadioItem(wx.ID_ANY, _translate(theme), tooltip)
            # Bind to theme change method
            frame.Bind(wx.EVT_MENU, frame.app.onThemeChange, item)
        # Make other theme buttons
        for theme in themeList:
            item = self.AppendRadioItem(wx.ID_ANY, _translate(theme), help=themeList[theme])
            frame.Bind(wx.EVT_MENU, frame.app.onThemeChange, item)
        self.AppendSeparator()
        # Add Theme Folder button
        item = self.Append(wx.ID_ANY, _translate("Open theme folder"))
        frame.Bind(wx.EVT_MENU, self.openThemeFolder, item)

    def openThemeFolder(self, event):
        """Open the themes folder in the platform's file browser."""
        # Choose a command according to OS
        if sys.platform == 'win32':
            comm = "explorer"
        elif sys.platform == 'darwin':
            comm = "open"
        else:
            # BUG FIX: previously only 'linux'/'linux2' were handled (with
            # the KDE-only 'dolphin'), leaving `comm` undefined elsewhere;
            # xdg-open opens the default file manager on any XDG desktop.
            comm = "xdg-open"
        # List argv + shell=False avoids quoting/injection issues when the
        # themes path contains spaces or shell metacharacters.
        subprocess.call([comm, prefs.paths['themes']])

    def _applyAppTheme(self):
        # Tick the radio item matching the active code theme
        for item in self.GetMenuItems():
            if item.IsRadio():  # will not attempt to check the separator
                item.Check(item.ItemLabel.lower() == ThemeMixin.codetheme.lower())
| gpl-3.0 |
bert9bert/statsmodels | statsmodels/tsa/arima_process.py | 4 | 30886 | '''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker
for AR numbers are close but not identical to yule_walker
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values, but standard errors in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practice of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptotically (as T -> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy import signal, optimize, linalg
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn,
                         burnin=0):
    """
    Generate a random sample of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        Coefficients of the autoregressive lag polynomial, including the
        zero lag.
    ma : array_like, 1d
        Coefficients of the moving-average lag polynomial, including the
        zero lag.
    nsample : int
        Length of the simulated time series.
    sigma : float
        Standard deviation of the noise.
    distrvs : callable
        Random number generator taking the sample size as its argument
        (default: np.random.randn).
    burnin : int, optional
        Number of leading observations generated and then discarded, to
        reduce the effect of initial conditions (default 0).

    Returns
    -------
    sample : array
        Sample of the ARMA process given by ar, ma, of length nsample.

    Notes
    -----
    Both lag polynomials include the zero-lag coefficient (typically 1).
    Following the signal-processing convention used by ``signal.lfilter``,
    the AR parameters carry the opposite sign of the usual statistics
    notation, e.g. ``ar = np.r_[1, -arparams]``.
    """
    # TODO: change distrvs to take a size argument (as noted upstream)
    # Draw nsample + burnin innovations, filter them, drop the burn-in.
    noise = sigma * distrvs(nsample + burnin)
    simulated = signal.lfilter(ma, ar, noise)
    return simulated[burnin:]
def arma_acovf(ar, ma, nobs=10):
    '''theoretical autocovariance function of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficients of the autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficients of the moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acovf

    Returns
    -------
    acovf : array
        autocovariance of the ARMA process given by ar, ma

    See Also
    --------
    arma_acf
    acovf

    Notes
    -----
    Tries some crude numerical speed improvements for cases with high
    persistence. The algorithm is slow if the process is highly persistent
    and only a few autocovariances are requested.
    '''
    # Near a unit root the MA(inf) coefficients decay slowly, so start with
    # a longer impulse response (how long is really needed is unknown).
    near_unit_root = np.abs(np.sum(ar) - 1) > 0.9
    n_ir = max(1000 if near_unit_root else 100, 2 * nobs)
    ir = arma_impulse_response(ar, ma, nobs=n_ir)
    # Better safe than sorry: extend until the tail is negligible.
    # The required precision was only checked for AR(1).
    while ir[-1] > 5 * 1e-5:
        n_ir *= 10
        ir = arma_impulse_response(ar, ma, nobs=n_ir)
    # For a very long impulse response and few requested lags, direct dot
    # products beat the full correlation (speed break points are guesses).
    if n_ir > 50000 and nobs < 1001:
        acovf = np.array([np.dot(ir[:nobs - lag], ir[lag:nobs])
                          for lag in range(nobs)])
    else:
        acovf = np.correlate(ir, ir, 'full')[len(ir) - 1:]
    return acovf[:nobs]
def arma_acf(ar, ma, nobs=10):
    '''theoretical autocorrelation function of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficients of the autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficients of the moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acf

    Returns
    -------
    acf : array
        autocorrelation of the ARMA process given by ar, ma

    See Also
    --------
    arma_acovf
    acf
    acovf
    '''
    # Autocorrelation is the autocovariance normalised by the variance.
    autocov = arma_acovf(ar, ma, nobs)
    return autocov / autocov[0]
def arma_pacf(ar, ma, nobs=10):
    '''partial autocorrelation function of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficients of the autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficients of the moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned pacf

    Returns
    -------
    pacf : array
        partial autocorrelation of the ARMA process given by ar, ma

    Notes
    -----
    solves the Yule-Walker equations for each lag order up to nobs lags

    not tested/checked yet
    '''
    pacf = np.zeros(nobs)
    pacf[0] = 1.0
    acov = arma_acf(ar, ma, nobs=nobs + 1)
    # For each order k, the last Yule-Walker coefficient is the PACF at k.
    for order in range(2, nobs + 1):
        rho = acov[:order]
        yw_coeffs = linalg.solve(linalg.toeplitz(rho[:-1]), rho[1:])
        pacf[order - 1] = yw_coeffs[-1]
    return pacf
def arma_periodogram(ar, ma, worN=None, whole=0):
    '''periodogram for an ARMA process given by lag-polynomials ar and ma

    Parameters
    ----------
    ar : array_like
        autoregressive lag-polynomial with leading 1 and lhs sign
    ma : array_like
        moving average lag-polynomial with leading 1
    worN : {None, int}, optional
        option for scipy.signal.freqz (read "w or N")
        If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN
    whole : {0, 1}, optional
        option for scipy.signal.freqz
        Normally, frequencies are computed from 0 to pi (upper half of the
        unit circle). If whole is non-zero, compute from 0 to 2*pi.

    Returns
    -------
    w : array
        frequencies
    sd : array
        periodogram, spectral density

    Notes
    -----
    Normalization ?

    This uses signal.freqz, which does not use fft. There is a fft version
    somewhere.
    '''
    w, h = signal.freqz(ma, ar, worN=worN, whole=whole)
    sd = np.abs(h) ** 2 / np.sqrt(2 * np.pi)
    # NaNs in the frequency response indicate a (seasonal) unit root.
    if np.isnan(h).sum() > 0:
        print('Warning: nan in frequency response h, maybe a unit root')
    return w, sd
def arma_impulse_response(ar, ma, nobs=100):
    '''get the impulse response function (MA representation) for ARMA process

    Parameters
    ----------
    ma : array_like, 1d
        moving average lag polynomial
    ar : array_like, 1d
        auto regressive lag polynomial
    nobs : int
        number of observations to calculate

    Returns
    -------
    ir : array, 1d
        impulse response function with nobs elements

    Notes
    -----
    This is the same as finding the MA representation of an ARMA(p,q).
    By reversing the role of ar and ma in the function arguments, the
    returned result is the AR representation of an ARMA(p,q), i.e

    ma_representation = arma_impulse_response(ar, ma, nobs=100)
    ar_representation = arma_impulse_response(ma, ar, nobs=100)

    fully tested against matlab

    Examples
    --------
    AR(1)

    >>> arma_impulse_response([1.0, -0.8], [1.], nobs=4)
    array([ 1.   ,  0.8  ,  0.64 ,  0.512])

    this is the same as ``0.8**np.arange(4)``.

    MA(2)

    >>> arma_impulse_response([1.0], [1., 0.5, 0.2], nobs=4)
    array([ 1. ,  0.5,  0.2,  0. ])
    '''
    # Filtering a unit impulse yields exactly the MA(inf) coefficients.
    unit_impulse = np.zeros(nobs)
    unit_impulse[0] = 1.0
    return signal.lfilter(ma, ar, unit_impulse)
#alias, easier to remember
arma2ma = arma_impulse_response


#alias, easier to remember
def arma2ar(ar, ma, nobs=100):
    '''get the AR representation of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        auto regressive lag polynomial
    ma : array_like, 1d
        moving average lag polynomial
    nobs : int
        number of observations to calculate

    Returns
    -------
    ar : array, 1d
        coefficients of AR lag polynomial with nobs elements

    Notes
    -----
    This is just an alias for
    ``ar_representation = arma_impulse_response(ma, ar, nobs=100)`` which has
    been fully tested against MATLAB.
    '''
    # The AR representation is the impulse response with ar/ma swapped.
    return arma_impulse_response(ma, ar, nobs=nobs)
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
    '''find arma approximation to ar process

    This finds the ARMA(p,q) coefficients that minimize the integrated
    squared difference between the impulse_response functions
    (MA representation) of the AR and the ARMA process. This does
    currently not check whether the MA lagpolynomial of the ARMA
    process is invertible, neither does it check the roots of the AR
    lagpolynomial.

    Parameters
    ----------
    ar_des : array_like
        coefficients of original AR lag polynomial, including lag zero
    p, q : int
        length of desired ARMA lag polynomials
    n : int
        number of terms of the impulse_response function to include in the
        objective function for the approximation
    mse : string, 'ar'
        not used yet
    start : array_like, optional
        initial guess for the stacked (p-1) AR and (q-1) MA free parameters

    Returns
    -------
    ar_app, ma_app : arrays
        coefficients of the AR and MA lag polynomials of the approximation
    res : tuple
        result of optimize.leastsq

    Notes
    -----
    Extension is possible if we want to match autocovariance instead
    of impulse response function.

    TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    roots outside the unit interval to ones that are inside. How do we do
    this?
    '''

    def msear_err(arma, ar_des):
        # Split the stacked free parameters into AR and MA polynomials,
        # each with a fixed leading 1.
        ar, ma = np.r_[1, arma[:p - 1]], np.r_[1, arma[p - 1:]]
        ar_approx = arma_impulse_response(ma, ar, n)
        # leastsq minimizes the sum of squares of this residual vector
        return ar_des - ar_approx

    if start is None:
        arma0 = np.r_[-0.9 * np.ones(p - 1), np.zeros(q - 1)]
    else:
        arma0 = start
    res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)
    arma_app = np.atleast_1d(res[0])
    # BUG FIX: a stray trailing comma previously made ar_app a 1-tuple
    # containing the array, instead of the array itself.
    ar_app = np.r_[1, arma_app[:p - 1]]
    ma_app = np.r_[1, arma_app[p - 1:]]
    return ar_app, ma_app, res
def lpol2index(ar):
    '''remove zeros from a lag polynomial: squeezed representation with index

    Parameters
    ----------
    ar : array_like
        coefficients of lag polynomial

    Returns
    -------
    coeffs : array
        non-zero coefficients of lag polynomial
    index : array
        index (lags) of lag polynomial with non-zero elements
    '''
    poly = np.asarray(ar)
    lags = np.nonzero(poly)[0]
    return poly[lags], lags
def index2lpol(coeffs, index):
    '''expand coefficients to a dense lag polynomial (inverse of lpol2index)

    Parameters
    ----------
    coeffs : array
        non-zero coefficients of lag polynomial
    index : array
        index (lags) of lag polynomial with non-zero elements

    Returns
    -------
    ar : array
        coefficients of lag polynomial, length ``max(index) + 1``
    '''
    # BUG FIX: the array must have max(index) + 1 entries so that the
    # largest lag is a valid position; np.zeros(max(index)) previously
    # raised an IndexError whenever lag max(index) was assigned.
    n = max(index) + 1
    ar = np.zeros(n)
    ar[index] = coeffs
    return ar
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
    '''MA representation of fractional integration

    .. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ma : array
        coefficients of lag polynomial
    '''
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    # psi_j = Gamma(d + j) / (Gamma(j + 1) * Gamma(d)), computed in log space
    return np.exp(gammaln(d + lags) - gammaln(lags + 1) - gammaln(d))
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
    '''AR representation of fractional integration

    .. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ar : array
        coefficients of lag polynomial

    Notes
    -----
    The first coefficient is 1; all later terms carry negative signs,
    as in ar(L)*x_t.
    '''
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    # pi_j = Gamma(j - d) / (Gamma(j + 1) * Gamma(-d)); log-space magnitude
    # with the overall sign applied explicitly
    coeffs = -np.exp(gammaln(lags - d) - gammaln(lags + 1) - gammaln(-d))
    coeffs[0] = 1
    return coeffs
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
    '''return coefficients for the seasonal difference (1 - L^s)

    just a trivial convenience function

    Parameters
    ----------
    s : int
        number of periods in season

    Returns
    -------
    sdiff : list, length s+1
    '''
    # 1 at lag 0, -1 at lag s, zeros in between
    interior_zeros = [0] * (s - 1)
    return [1] + interior_zeros + [-1]
def deconvolve(num, den, n=None):
    """Deconvolves divisor out of signal, division of polynomials for n terms

    calculates den^{-1} * num

    Parameters
    ----------
    num : array_like
        signal or lag polynomial
    den : array_like
        coefficients of lag polynomial (linear filter)
    n : None or int
        number of terms of quotient

    Returns
    -------
    quot : array
        quotient or filtered series
    rem : array
        remainder

    Notes
    -----
    If num is a time series, then this applies the linear filter den^{-1}.
    If both num and den are both lag polynomials, then this calculates the
    quotient polynomial for n terms and also returns the remainder.

    This is copied from scipy.signal.signaltools and added n as optional
    parameter.
    """
    num = np.atleast_1d(num)
    den = np.atleast_1d(den)
    if len(den) > len(num) and n is None:
        # denominator degree exceeds numerator: quotient is empty
        quot = []
        rem = num
    else:
        if n is None:
            n = len(num) - len(den) + 1
        # filtering a unit impulse produces the first n quotient terms
        impulse = np.zeros(n, float)
        impulse[0] = 1
        quot = signal.lfilter(num, den, impulse)
        # remainder = num - den * quot, zero-padding num if needed
        num_approx = signal.convolve(den, quot, mode='full')
        if len(num) < len(num_approx):  # 1d only ?
            num = np.concatenate((num, np.zeros(len(num_approx) - len(num))))
        rem = num - num_approx
    return quot, rem
class ArmaProcess(object):
"""
Represent an ARMA process for given lag-polynomials
This is a class to bring together properties of the process.
It does not do any estimation or statistical analysis.
Parameters
----------
ar : array_like, 1d
Coefficient for autoregressive lag polynomial, including zero lag.
See the notes for some information about the sign.
ma : array_like, 1d
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample is
generated. See example.
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
conventions used in signal processing used in signal.lfilter vs.
conventions in statistics for ARMA processes, the AR paramters should
have the opposite sign of what you might expect. See the examples below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
>>> arma_process = sm.tsa.ArmaProcess(ar, ma)
>>> arma_process.isstationary
True
>>> arma_process.isinvertible
True
>>> y = arma_process.generate_sample(250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
# maybe needs special handling for unit roots
def __init__(self, ar, ma, nobs=100):
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.arcoefs = -self.ar[1:]
self.macoefs = self.ma[1:]
self.arpoly = np.polynomial.Polynomial(self.ar)
self.mapoly = np.polynomial.Polynomial(self.ma)
self.nobs = nobs
@classmethod
def from_coeffs(cls, arcoefs, macoefs, nobs=100):
"""
Create ArmaProcess instance from coefficients of the lag-polynomials
Parameters
----------
arcoefs : array-like
Coefficient for autoregressive lag polynomial, not including zero
lag. The sign is inverted to conform to the usual time series
representation of an ARMA process in statistics. See the class
docstring for more information.
macoefs : array-like
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample
is generated.
"""
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
@classmethod
def from_estimation(cls, model_results, nobs=None):
"""
Create ArmaProcess instance from ARMA estimation results
Parameters
----------
model_results : ARMAResults instance
A fitted model
nobs : int, optional
If None, nobs is taken from the results
"""
arcoefs = model_results.arparams
macoefs = model_results.maparams
nobs = nobs or model_results.nobs
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
def __mul__(self, oth):
if isinstance(oth, self.__class__):
ar = (self.arpoly * oth.arpoly).coef
ma = (self.mapoly * oth.mapoly).coef
else:
try:
aroth, maoth = oth
arpolyoth = np.polynomial.Polynomial(aroth)
mapolyoth = np.polynomial.Polynomial(maoth)
ar = (self.arpoly * arpolyoth).coef
ma = (self.mapoly * mapolyoth).coef
except:
print('other is not a valid type')
raise
return self.__class__(ar, ma, nobs=self.nobs)
def __repr__(self):
return 'ArmaProcess(%r, %r, nobs=%d)' % (self.ar.tolist(),
self.ma.tolist(),
self.nobs)
def __str__(self):
return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(),
self.ma.tolist())
def acovf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acovf(self.ar, self.ma, nobs=nobs)
acovf.__doc__ = arma_acovf.__doc__
def acf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acf(self.ar, self.ma, nobs=nobs)
acf.__doc__ = arma_acf.__doc__
def pacf(self, nobs=None):
nobs = nobs or self.nobs
return arma_pacf(self.ar, self.ma, nobs=nobs)
pacf.__doc__ = arma_pacf.__doc__
def periodogram(self, nobs=None):
nobs = nobs or self.nobs
return arma_periodogram(self.ar, self.ma, worN=nobs)
periodogram.__doc__ = arma_periodogram.__doc__
def impulse_response(self, nobs=None):
nobs = nobs or self.nobs
return arma_impulse_response(self.ar, self.ma, worN=nobs)
impulse_response.__doc__ = arma_impulse_response.__doc__
def arma2ma(self, nobs=None):
nobs = nobs or self.nobs
return arma2ma(self.ar, self.ma, nobs=nobs)
arma2ma.__doc__ = arma2ma.__doc__
def arma2ar(self, nobs=None):
nobs = nobs or self.nobs
return arma2ar(self.ar, self.ma, nobs=nobs)
arma2ar.__doc__ = arma2ar.__doc__
@property
def arroots(self):
"""
Roots of autoregressive lag-polynomial
"""
return self.arpoly.roots()
@property
def maroots(self):
"""
Roots of moving average lag-polynomial
"""
return self.mapoly.roots()
@property
def isstationary(self):
'''Arma process is stationary if AR roots are outside unit circle
Returns
-------
isstationary : boolean
True if autoregressive roots are outside unit circle
'''
if np.all(np.abs(self.arroots) > 1):
return True
else:
return False
@property
def isinvertible(self):
'''Arma process is invertible if MA roots are outside unit circle
Returns
-------
isinvertible : boolean
True if moving average roots are outside unit circle
'''
if np.all(np.abs(self.maroots) > 1):
return True
else:
return False
def invertroots(self, retnew=False):
'''make MA polynomial invertible by inverting roots inside unit circle
Parameters
----------
retnew : boolean
If False (default), then return the lag-polynomial as array.
If True, then return a new instance with invertible MA-polynomial
Returns
-------
manew : array
new invertible MA lag-polynomial, returned if retnew is false.
wasinvertible : boolean
True if the MA lag-polynomial was already invertible, returned if
retnew is false.
armaprocess : new instance of class
If retnew is true, then return a new instance with invertible
MA-polynomial
'''
#TODO: variable returns like this?
pr = self.ma_roots()
insideroots = np.abs(pr) < 1
if insideroots.any():
pr[np.abs(pr) < 1] = 1./pr[np.abs(pr) < 1]
pnew = np.polynomial.Polynomial.fromroots(pr)
mainv = pnew.coef/pnew.coef[0]
wasinvertible = False
else:
mainv = self.ma
wasinvertible = True
if retnew:
return self.__class__(self.ar, mainv, nobs=self.nobs)
else:
return mainv, wasinvertible
def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
burnin=0):
'''generate ARMA samples
Parameters
----------
nsample : int or tuple of ints
If nsample is an integer, then this creates a 1d timeseries of
length size. If nsample is a tuple, then the timeseries is along
axis. All other axis have independent arma samples.
scale : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations
at the beginning of the sample are dropped
axis : int
See nsample.
Returns
-------
rvs : ndarray
random sample(s) of arma process
Notes
-----
Should work for n-dimensional with time series along axis, but not
tested yet. Processes are sampled independently.
'''
if distrvs is None:
distrvs = np.random.normal
if np.ndim(nsample) == 0:
nsample = [nsample]
if burnin:
#handle burin time for nd arrays
#maybe there is a better trick in scipy.fft code
newsize = list(nsample)
newsize[axis] += burnin
newsize = tuple(newsize)
fslice = [slice(None)]*len(newsize)
fslice[axis] = slice(burnin, None, None)
fslice = tuple(fslice)
else:
newsize = tuple(nsample)
fslice = tuple([slice(None)]*np.ndim(newsize))
eta = scale * distrvs(size=newsize)
return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
# Public API of this module, re-exported via ``from ... import *``.
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
           'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
           'lpol2index', 'index2lpol']
if __name__ == '__main__':
    # Ad-hoc demo / smoke tests: simulate ARMA data with known parameters
    # and compare the fitted parameters against the truth.
    # NOTE(review): ``ARIMAProcess``, ``ARIMA`` (and ``errfn``/``rhoy``/
    # ``rhoe``) used below are not defined in the visible part of this
    # module; presumably legacy statsmodels names -- confirm before running.

    # Simulate AR(1)
    #--------------
    # ar * y = ma * eta
    ar = [1, -0.8]
    ma = [1.0]

    # generate AR data by filtering white noise through the AR polynomial
    eta = 0.1 * np.random.randn(1000)
    yar1 = signal.lfilter(ar, ma, eta)

    print("\nExample 0")
    arest = ARIMAProcess(yar1)
    rhohat, cov_x, infodict, mesg, ier = arest.fit((1,0,1))
    print(rhohat)
    print(cov_x)

    print("\nExample 1")
    ar = [1.0, -0.8]
    ma = [1.0, 0.5]
    y1 = arest.generate_sample(ar,ma,1000,0.1)
    arest = ARIMAProcess(y1)
    rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1,0,1))
    print(rhohat1)
    print(cov_x1)
    err1 = arest.errfn(x=y1)
    print(np.var(err1))
    import statsmodels.api as sm
    # compare against Yule-Walker estimates as a sanity check
    print(sm.regression.yule_walker(y1, order=2, inv=True))

    print("\nExample 2")
    nsample = 1000
    ar = [1.0, -0.6, -0.1]
    ma = [1.0, 0.3, 0.2]
    y2 = ARIMA.generate_sample(ar,ma,nsample,0.1)
    arest2 = ARIMAProcess(y2)
    rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1,0,2))
    print(rhohat2)
    print(cov_x2)
    err2 = arest.errfn(x=y2)
    print(np.var(err2))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)
    # refit with the correct model order (2,0,2)
    rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,0,2))
    print(rhohat2a)
    print(cov_x2a)
    err2a = arest.errfn(x=y2)
    print(np.var(err2a))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)

    print(sm.regression.yule_walker(y2, order=2, inv=True))

    print("\nExample 20")
    # pure MA(2) process fitted as AR(2) and as MA(2)
    nsample = 1000
    ar = [1.0]#, -0.8, -0.4]
    ma = [1.0, 0.5, 0.2]
    y3 = ARIMA.generate_sample(ar,ma,nsample,0.01)
    arest20 = ARIMAProcess(y3)
    rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2,0,0))
    print(rhohat3)
    print(cov_x3)
    err3 = arest20.errfn(x=y3)
    print(np.var(err3))
    print(np.sqrt(np.dot(err3,err3)/nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)

    rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0,0,2))
    print(rhohat3a)
    print(cov_x3a)
    err3a = arest20.errfn(x=y3)
    print(np.var(err3a))
    print(np.sqrt(np.dot(err3a,err3a)/nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y3, order=2, inv=True))

    print("\nExample 02")
    # pure AR(2) process fitted as AR(2) and as MA(2)
    nsample = 1000
    ar = [1.0, -0.8, 0.4] #-0.8, -0.4]
    ma = [1.0]#, 0.8, 0.4]
    y4 = ARIMA.generate_sample(ar,ma,nsample)
    arest02 = ARIMAProcess(y4)
    rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2,0,0))
    print(rhohat4)
    print(cov_x4)
    err4 = arest02.errfn(x=y4)
    print(np.var(err4))
    # residual standard error, used to scale the parameter covariance
    sige = np.sqrt(np.dot(err4,err4)/nsample)
    print(sige)
    print(sige * np.sqrt(np.diag(cov_x4)))
    print(np.sqrt(np.diag(cov_x4)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)

    rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0,0,2))
    print(rhohat4a)
    print(cov_x4a)
    err4a = arest02.errfn(x=y4)
    print(np.var(err4a))
    sige = np.sqrt(np.dot(err4a,err4a)/nsample)
    print(sige)
    print(sige * np.sqrt(np.diag(cov_x4a)))
    print(np.sqrt(np.diag(cov_x4a)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)
    import statsmodels.api as sm
    print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))

    import matplotlib.pyplot as plt
    plt.plot(arest2.forecast()[-100:])
    #plt.show()

    # compare the module's deconvolve (with optional n) against scipy's
    ar1, ar2 = ([1, -0.4], [1, 0.5])
    ar2 = [1, -1]
    lagpolyproduct = np.convolve(ar1, ar2)
    print(deconvolve(lagpolyproduct, ar2, n=None))
    print(signal.deconvolve(lagpolyproduct, ar2))
    print(deconvolve(lagpolyproduct, ar2, n=10))
| bsd-3-clause |
pklaus/Arduino-Logger | ADC_Simulation.py | 1 | 1447 | #!/usr/bin/env python
### Script for Python
### Helps to size the serial reference resistors when using
### an NTC to measure temperatures with an ADC.
from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
from munch import Munch
import pdb; pdb.set_trace()
# Properties of the NTC:
#NTC = dict(B=3625, R0 = 10000, T0 = 25+273) # Buerklin NTC thermistor 10K, 3625K, 0.75 mW/K, B57550G1103F005
NTC = Munch(B=3977, R0 = 10000, T0 = 25+273) # Reichelt NTC-0,2 10K
# Properties of the measurement circuit:
#Rref = 500; Uvcc = 4.6; ADCbits = 10; # Arduino / Ausheizlogger
#Rref = 10000; Uvcc = 5.; ADCbits = 10; # Arduino / Raumtemperatur-Logger
Rref = 50000; Uvcc = 5.; ADCbits = 10; # Arduino / Cooling Station
# Map the measurement range of the ADC:
#Um = range(0., Uvcc, Uvcc / /(2**ADCbits-1.))
Um = np.linspace(0., Uvcc, num=(2**ADCbits-1))
# remove the first and last element:
Um = Um[1:-1]
# Calculate the distinguishable resistance values of the NTC:
Rt = Rref*(Uvcc / Um - 1) # by measuring the voltage accross the ref res.
#Rt = Rref / (Uvcc / Um - 1) # measuring the voltage across the NTC
# Calculate the temperature values belonging to those NTC resistances:
T = 1./(np.log(Rt/NTC.R0)/NTC.B + 1/NTC.T0 ) - 273.15
# Plot the temperatures versus their differences
plt.scatter(T[:-1], np.diff(T))
plt.show()
# Plot the Temperatures vs. discrete ADC values
#scatter([1:2^ADCbits-2],T)
| gpl-3.0 |
BrandoJS/Paparazzi_vtol | sw/airborne/test/ahrs/ahrs_utils.py | 4 | 5157 | #! /usr/bin/env python
# $Id$
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#import os
#from optparse import OptionParser
#import scipy
#from scipy import optimize
import shlex, subprocess
from pylab import *
from array import array
import numpy
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
    """Build the AHRS-on-synth test binary, run it on one synthetic
    trajectory and return the parsed output as a numpy record array.

    @param ahrs_type: AHRS implementation suffix, appended to AHRS_TYPE_
    @param build_opt: extra make variables (list of strings)
    @param traj_nb: index of the synthetic trajectory to simulate
    """
    print "\nBuilding ahrs"
    args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_"+ahrs_type] + build_opt
    # print args
    p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
    outputlines = p.stdout.readlines()
    p.wait()
    for i in outputlines:
        print " # "+i,
    print

    print "Running simulation"
    print " using traj " + str(traj_nb)
    p = subprocess.Popen(args=["./run_ahrs_on_synth",str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
    outputlines = p.stdout.readlines()
    p.wait()
    # for i in outputlines:
    # print " "+i,
    # print "\n"

    # Record layout of one output sample: time, true state, AHRS estimate.
    ahrs_data_type = [('time', 'float32'),
                      ('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
                      ('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
                      ('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
                      ('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
                      ('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
                      ('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')
                      ]
    # NOTE(review): pos_data_type is defined but never used in this function.
    pos_data_type = [ ('x0_true', 'float32'), ('y0_true', 'float32'), ('z0_true', 'float32'),
                      ('x1_true', 'float32'), ('y1_true', 'float32'), ('z1_true', 'float32'),
                      ('x2_true', 'float32'), ('y2_true', 'float32'), ('z2_true', 'float32'),
                      ('x3_true', 'float32'), ('y3_true', 'float32'), ('z3_true', 'float32'),
                      ]
    mydescr = numpy.dtype(ahrs_data_type)
    # one accumulator list per output column
    data = [[] for dummy in xrange(len(mydescr))]
    # import code; code.interact(local=locals())
    for line in outputlines:
        if line.startswith("#"):
            # comment lines from the simulator are echoed, not parsed
            print " "+line,
        else:
            fields = line.strip().split(' ');
            # print fields
            for i, number in enumerate(fields):
                data[i].append(number)
    print

    # convert each string column to its dtype
    # (``cast`` comes from the ``pylab`` star import above)
    for i in xrange(len(mydescr)):
        data[i] = cast[mydescr[i]](data[i])

    return numpy.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, sim_res):
print "Plotting Results"
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
subplot(3,3,1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty)
ylabel('degres')
title('phi')
subplot(3,3,2)
plot(sim_res.time, sim_res.theta_ahrs, lsty)
title('theta')
subplot(3,3,3)
plot(sim_res.time, sim_res.psi_ahrs, lsty)
title('psi')
subplot(3,3,4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
ylabel('degres/s')
title('p')
subplot(3,3,5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
title('q')
subplot(3,3,6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
title('r')
subplot(3,3,7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
ylabel('degres/s')
xlabel('time in s')
title('bp')
subplot(3,3,8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
xlabel('time in s')
title('bq')
subplot(3,3,9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
xlabel('time in s')
title('br')
if plot_true_state:
subplot(3,3,1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
subplot(3,3,2)
plot(sim_res.time, sim_res.theta_true, 'r--')
subplot(3,3,3)
plot(sim_res.time, sim_res.psi_true, 'r--')
subplot(3,3,4)
plot(sim_res.time, sim_res.p_true, 'r--')
subplot(3,3,5)
plot(sim_res.time, sim_res.q_true, 'r--')
subplot(3,3,6)
plot(sim_res.time, sim_res.r_true, 'r--')
subplot(3,3,7)
plot(sim_res.time, sim_res.bp_true, 'r--')
subplot(3,3,8)
plot(sim_res.time, sim_res.bq_true, 'r--')
subplot(3,3,9)
plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
    """Display all figures created so far (blocks until windows are closed)."""
    plt.show();
| gpl-2.0 |
pranjan77/pyms | Display/Class.py | 7 | 10046 | """
Class to Display Ion Chromatograms and TIC
"""
#############################################################################
# #
# PyMS software for processing of metabolomic mass-spectrometry data #
# Copyright (C) 2005-2012 Vladimir Likic #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #
# #
#############################################################################
import matplotlib.pyplot as plt
import numpy
import sys
sys.path.append('/x/PyMS/')
from pyms.GCMS.Class import IonChromatogram
from pyms.Utils.Error import error
class Display(object):
    """
    @summary: Class to display Ion Chromatograms and Total
        Ion Chromatograms from GCMS.Class.IonChromatogram

        Uses matplotlib module pyplot to do plotting

    @author: Sean O'Callaghan
    @author: Vladimir Likic
    """

    def __init__(self):
        """
        @summary: Initialises an instance of Display class
        """
        # Container to store plots
        self.__tic_ic_plots = []

        # color dictionary for plotting of ics; blue reserved
        # for TIC
        self.__col_ic = {0:'r', 1:'g', 2:'k', 3:'y', 4:'m', 5:'c'}
        self.__col_count = 0  # counter to keep track of colors

        # Peak list container
        self.__peak_list = []

        # Plotting Variables: one shared figure/axes for TIC, ICs and peaks
        self.__fig = plt.figure()
        self.__ax = self.__fig.add_subplot(111)

    def plot_ics(self, ics, labels = None):
        """
        @summary: Adds an Ion Chromatogram or a
            list of Ion Chromatograms to plot list

        @param ics: List of Ion Chromatograms m/z channels
            for plotting
        @type ics: list of pyms.GCMS.Class.IonChromatogram

        @param labels: Labels for plot legend
        @type labels: list of StringType
        """
        # normalise a single IonChromatogram to a one-element list
        if not isinstance(ics, list):
            if isinstance(ics, IonChromatogram):
                ics = [ics]
            else:
                error("ics argument must be an IonChromatogram\
                or a list of Ion Chromatograms")

        if not isinstance(labels, list) and labels != None:
            labels = [labels]
        # TODO: take care of case where one element of ics is
        # not an IonChromatogram

        intensity_list = []
        # all ICs are assumed to share the time axis of the first one
        time_list = ics[0].get_time_list()

        for i in range(len(ics)):
            intensity_list.append(ics[i].get_intensity_array())

        # Case for labels not present
        if labels == None:
            for i in range(len(ics)):
                self.__tic_ic_plots.append(plt.plot(time_list, \
                    intensity_list[i], self.__col_ic[self.__col_count]))
                # cycle through the 6 colours in __col_ic
                if self.__col_count == 5:
                    self.__col_count = 0
                else:
                    self.__col_count += 1
        # Case for labels present
        else:
            for i in range(len(ics)):
                self.__tic_ic_plots.append(plt.plot(time_list, \
                    intensity_list[i], self.__col_ic[self.__col_count]\
                    , label = labels[i]))
                if self.__col_count == 5:
                    self.__col_count = 0
                else:
                    self.__col_count += 1

    def plot_tic(self, tic, label=None):
        """
        @summary: Adds Total Ion Chromatogram to plot list

        @param tic: Total Ion Chromatogram
        @type tic: pyms.GCMS.Class.IonChromatogram

        @param label: label for plot legend
        @type label: StringType
        """
        if not isinstance(tic, IonChromatogram):
            error("TIC is not an Ion Chromatogram object")

        intensity_list = tic.get_intensity_array()
        time_list = tic.get_time_list()

        self.__tic_ic_plots.append(plt.plot(time_list, intensity_list,\
            label=label))

    def plot_peaks(self, peak_list, label = "Peaks"):
        """
        @summary: Plots the locations of peaks as found
            by PyMS.

        @param peak_list: List of peaks
        @type peak_list: list of pyms.Peak.Class.Peak

        @param label: label for plot legend
        @type label: StringType
        """
        if not isinstance(peak_list, list):
            error("peak_list is not a list")

        time_list = []
        height_list=[]

        # Copy to self.__peak_list for onclick event handling
        self.__peak_list = peak_list

        for peak in peak_list:
            time_list.append(peak.get_rt())
            # peak height taken as the summed mass-spectrum intensity
            height_list.append(sum(peak.get_mass_spectrum().mass_spec))

        self.__tic_ic_plots.append(plt.plot(time_list, height_list, 'o',\
            label = label))

    def get_5_largest(self, intensity_list):
        """
        @summary: Computes the indices of the largest 5 ion intensities
            for writing to console

        @param intensity_list: List of Ion intensities
        @type intensity_list: listType
        """
        # NOTE(review): despite the name, ten indices are computed and
        # onclick() prints ten rows -- confirm which count is intended.
        largest = [0,0,0,0,0,0,0,0,0,0]

        # Find out largest value
        for i in range(len(intensity_list)):
            if intensity_list[i] > intensity_list[largest[0]]:
                largest[0] = i

        # Now find next four largest values
        # (selection by "strictly smaller than the previous rank";
        # ties therefore collapse onto a single index)
        for j in [1,2,3,4,5,6,7,8,9]:
            for i in range(len(intensity_list)):
                if intensity_list[i] > intensity_list[largest[j]] and \
                    intensity_list[i] < intensity_list[largest[j-1]]:
                    largest[j] = i

        return largest

    def plot_mass_spec(self, rt, mass_list, intensity_list):
        """
        @summary: Plots the mass spec given a list of masses and intensities

        @param rt: The retention time for labelling of the plot
        @type rt: floatType

        @param mass_list: list of masses of the MassSpectrum object
        @type mass_list: listType

        @param intensity_list: List of intensities of the MassSpectrum object
        @type intensity_list: listType
        """
        # the mass spectrum gets its own figure, separate from the TIC plot
        new_fig = plt.figure()
        new_ax = new_fig.add_subplot(111)

        # to set x axis range find minimum and maximum m/z channels
        max_mz = mass_list[0]
        min_mz = mass_list[0]

        for i in range(len(mass_list)):
            if mass_list[i] > max_mz:
                max_mz = mass_list[i]

        for i in range(len(mass_list)):
            if mass_list[i] < min_mz:
                min_mz = mass_list[i]

        label = "Mass spec for peak at time " + "%5.2f" % rt

        mass_spec_plot = plt.bar(mass_list, intensity_list,\
            label=label, width=0.01)

        x_axis_range = plt.xlim(min_mz, max_mz)
        t = new_ax.set_title(label)

        plt.show()

    def onclick(self, event):
        """
        @summary: Finds the 5 highest intensity m/z channels for the selected peak.
            The peak is selected by clicking on it. If a button other than
            the left one is clicked, a new plot of the mass spectrum is displayed

        @param event: a mouse click by the user
        """
        intensity_list = []
        mass_list = []

        for peak in self.__peak_list:
            # match peaks within a 0.01% tolerance of the clicked x position
            if event.xdata > 0.9999*peak.get_rt() and event.xdata < \
                1.0001*peak.get_rt():
                intensity_list = peak.get_mass_spectrum().mass_spec
                mass_list = peak.get_mass_spectrum().mass_list
                largest = self.get_5_largest(intensity_list)

        if len(intensity_list) != 0:
            print "mass\t intensity"
            for i in range(10):
                print mass_list[largest[i]], "\t", intensity_list[largest[i]]
        else:   # if the selected point is not close enough to peak
            print "No Peak at this point"

        # Check if a button other than left was pressed, if so plot mass spectrum
        # Also check that a peak was selected, not just whitespace
        if event.button != 1 and len(intensity_list) != 0:
            self.plot_mass_spec(event.xdata, mass_list, intensity_list)

    def do_plotting(self, plot_label = None):
        """
        @summary: Plots TIC and IC(s) if they have been created
            by plot_tic() or plot_ics(). Adds detected peaks
            if they have been added by plot_peaks()

        @param plot_label: Optional to supply a label or other
            definition of data origin
        @type plot_label: StringType
        """
        # if no plots have been created advise user
        if len(self.__tic_ic_plots) == 0:
            print 'No plots have been created'
            print 'Please call a plotting function before'
            print 'calling do_plotting()'

        if plot_label != None :
            t = self.__ax.set_title(plot_label)

        l = self.__ax.legend()

        # NOTE(review): ``draw`` is referenced but not called -- missing
        # parentheses? confirm the intended behaviour before changing.
        self.__fig.canvas.draw

        # If no peak list plot, no mouse click event
        if len(self.__peak_list) != 0:
            cid = self.__fig.canvas.mpl_connect('button_press_event', self.onclick)

        plt.show()
| gpl-2.0 |
JoshuaMichaelKing/MyLearning | learn-python2.7/keras/neural_network_demo.py | 1 | 2698 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import pandas as pd
from keras.models import Sequential
from keras.layers.core import Dense, Activation
__version__ = '0.0.1'
__license__ = 'MIT'
__author__ = 'Joshua Guo (1992gq@gmail.com)'
'Python To Try Neural Network through keras(based on theano)!'
def main():
    """Entry point: run the chapter-5 neural-network demo."""
    tipdm_chapter5_nn_test()
def tipdm_chapter5_nn_test():
    """Train a small feed-forward network on the TipDM chapter-5 sales
    data and plot the resulting confusion matrix.
    """
    # Parameter initialisation
    filename = '../../../MyFile/chapter5/data/sales_data.xls'
    data = pd.read_excel(filename, index_col = u'序号')  # load the data

    # The data are categorical labels; convert them to numbers.
    # 'high' / 'good' / 'yes' are encoded as 1, 'low' / 'bad' / 'no' as -1.
    data[data == u'高'] = 1
    data[data == u'是'] = 1
    data[data == u'好'] = 1
    data[data != 1] = -1
    x = data.iloc[:,:3].as_matrix().astype(int)
    y = data.iloc[:,3].as_matrix().astype(int)

    # model and training
    # 3 input nodes, 10 hidden nodes, 1 output node
    model = Sequential()
    model.add(Dense(10, input_dim = 3))
    model.add(Activation('relu'))  # relu activation markedly improves accuracy
    model.add(Dense(1, input_dim = 10))
    model.add(Activation('sigmoid'))  # sigmoid suits the 0-1 output

    # compilation before training : configure the learning process
    # binary classification, hence loss binary_crossentropy and mode binary;
    # other common losses include mean_squared_error, categorical_crossentropy;
    # optimizer adam is used here, sgd and rmsprop are alternatives
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', class_mode = 'binary')

    # training and predict
    model.fit(x, y, nb_epoch = 500, batch_size = 10)  # train the model for 500 epochs
    yp = model.predict_classes(x).reshape(len(y))  # class prediction

    cm_plot(y, yp).show()
def cm_plot(y, yp):
    '''
    Hand-rolled confusion-matrix visualisation helper.

    @param y: true class labels
    @param yp: predicted class labels
    @return: the matplotlib.pyplot module with the figure drawn on it
    '''
    from sklearn.metrics import confusion_matrix  # confusion matrix function

    cm = confusion_matrix(y, yp)  # confusion matrix

    import matplotlib.pyplot as plt  # plotting library
    plt.matshow(cm, cmap = plt.cm.Greens)  # draw the matrix with the Greens colour map
    plt.colorbar()  # colour bar
    for x in range(len(cm)):  # annotate each cell with its count
        for y in range(len(cm)):
            plt.annotate(cm[x,y], xy = (x, y), horizontalalignment = 'center', verticalalignment = 'center')
    plt.ylabel('True label')  # axis label
    plt.xlabel('Predicted label')  # axis label
    return plt
if __name__ == '__main__':
    # run the demo only when executed as a script
    main()
| mit |
felixekn/Team04_MicrobeControllers | Documentation/Calibration/calibration_plot.py | 1 | 2687 | from scipy import stats as st
from matplotlib import pyplot as pp
import matplotlib.patches as mpatches
import matplotlib as mpl
from matplotlib.ticker import Locator
import numpy as np
import operator
import csv
import collections
import math
import copy
# Figure - Rapid Optical Density Calibration Plot
# (earlier measurement run, kept for reference:)
# RODD = [0.01, 0.03, 0.07, 0.1, 0.17, 0.18, 0.26, 0.46, 0.76]
# SPEC = [0.0155, 0.0392, 0.0753, 0.1091, 0.1582, 0.1758, 0.2332, 0.4397, 0.7296]

# Paired measurements: RODD device readings vs. spectrophotometer OD600,
# plus the RODD values after applying the calibration mapping.
RODD = [0.01, 0.05, 0.09, 0.12, 0.14, 0.16, 0.18, 0.34, 0.76]
SPEC = [0.0325, 0.08145, 0.13062, 0.16941, 0.18323, 0.21266, 0.23708, 0.41538, 0.92972]
MAPPED = [0.04, 0.09, 0.13, 0.16, 0.18, 0.21, 0.25, 0.41, 0.94]
linear = [0,0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]

#------------- Scatter Expression plots -------------- #
figWidth = 5
figHight = 5
line_width = 3
marker_size = 10
axis_fontsize = 14
legend_fontsize = 10

# global tick/font styling
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1.1
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1.1
mpl.rcParams['font.weight'] = 'light'
mpl.rcParams['font.sans-serif'] = 'Arial'
mpl.rcParams['text.usetex'] = False

# linear least-squares fit of SPEC against RODD (degree-1 polynomial)
cal_poly = np.polyfit(RODD, SPEC, 1)
cal_func = np.poly1d(cal_poly)
x_vals = np.linspace(0, 1, 50)
print(cal_func)

# --- Calibration Plot --- #
fig = pp.figure(figsize=(figWidth, figHight))
ax = pp.subplot()
p1 = ax.plot(RODD, SPEC, color = '#000000', linestyle = 'none',
    marker = 'o', markersize = 8, markeredgecolor = 'none', label="Raw RODD OD")
# p3 = ax.plot(x_vals, cal_func(x_vals), color = "r", linewidth = 1, linestyle = '--', label="Spectro. Calibration Fit")
p4 = ax.plot(MAPPED, SPEC, color = "r", marker = 'o', markersize = 8, linestyle = 'none', label="Calibrated RODD OD600")
p2 = ax.plot(linear, linear, color = '#000000', linewidth = 1, linestyle ='--', label="1:1 RODD to OD600\n Relationship")
p1[0].set_clip_on(False)

ax.set_ylim([0,1])
xaxis = fig.gca().xaxis

# ticks only on the bottom/left spines
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')

for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(2)

ax.set_xlabel('RODD Values (a.u.)', fontsize = 16)
ax.set_ylabel("Spectro. Absorbance (OD600)", fontsize = 16)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
ax.set_title("RODD Calibration Curve", fontsize = 20)

handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, fontsize = 12, frameon=False, loc="upper left")

pp.tight_layout()
fig.savefig('calibration_curve.pdf')
| mit |
sameersingh/ml-discussions | week5/mltools/datagen.py | 2 | 4366 | import numpy as np
from numpy import loadtxt as loadtxt
from numpy import asarray as arr
from numpy import asmatrix as mat
from numpy import atleast_2d as twod
from scipy.linalg import sqrtm
################################################################################
## Methods for creating / sampling synthetic datasets ##########################
################################################################################
def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):
    """Sample data from a two-component Gaussian mixture model.

    Args:
        N0 (int): Number of data to sample for class -1.
        N1 (int): Number of data to sample for class 1; defaults to N0.
        mu0 (arr): mean of class -1, shape (d,) or (1, d)
        mu1 (arr): mean of class 1
        sig0 (arr): covariance of class -1, shape (d, d)
        sig1 (arr): covariance of class 1

    Returns:
        X (array): (N0+N1, d) array of sampled data
        Y (array): (N0+N1,) array of class values (-1 / +1)

    Raises:
        ValueError: if the dimensions of means / covariances disagree.
    """
    # BUG FIX: was ``if not N1`` which silently replaced an explicit
    # N1 = 0 (zero samples of class +1) with N0 samples
    if N1 is None:
        N1 = N0

    d1, d2 = twod(mu0).shape[1], twod(mu1).shape[1]
    if d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):
        raise ValueError('data_gauss: dimensions should agree')

    # class -1: N0 draws from N(mu0, sig0); standard normals are coloured
    # with the matrix square root of the covariance, then shifted
    X0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))
    X0 += np.ones((N0, 1)) * mu0
    Y0 = -np.ones(N0)

    # class +1: N1 draws from N(mu1, sig1)
    X1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))
    X1 += np.ones((N1, 1)) * mu1
    Y1 = np.ones(N1)

    # np.vstack instead of np.row_stack (deprecated alias in modern NumPy)
    X = np.vstack((X0, X1))
    Y = np.concatenate((Y0, Y1))

    return X, Y
def data_GMM(N, C, D=2, get_Z=False):
    """Sample data from a Gaussian mixture model.

    Builds a random GMM with C components and draws M data x^{(i)} from a mixture
    of Gaussians in D dimensions

    Args:
        N (int): Number of data to be drawn from a mixture of Gaussians.
        C (int): Number of clusters.
        D (int): Number of dimensions.
        get_Z (bool): If True, returns a an array indicating the cluster from which each
            data point was drawn.

    Returns:
        X (arr): N x D array of data.
        Z (arr): 1 x N array of cluster ids; returned also only if get_Z=True

    TODO: test more; N vs M
    """
    # NOTE(review): C is incremented here, so C+1 mixture weights are drawn,
    # while the means below use the leftover loop variable ``c`` (== C-1
    # after the loop) -- looks fragile; confirm against upstream mltools.
    C += 1
    pi = np.zeros(C)
    for c in range(C):
        pi[c] = gamrand(10, 0.5)      # random Gamma-distributed raw weight
    pi = pi / np.sum(pi)              # normalise to a probability vector
    cpi = np.cumsum(pi)               # cumulative weights for assignment below

    # random symmetric, diagonally-dominated "shape" matrix whose square
    # root spreads the cluster means
    rho = np.random.rand(D, D)
    rho = rho + twod(rho).T
    rho = rho + D * np.eye(D)
    rho = sqrtm(rho)

    mu = mat(np.random.randn(c, D)) * mat(rho)

    # one random covariance square-root per component
    ccov = []
    for i in range(C):
        tmp = np.random.rand(D, D)
        tmp = tmp + tmp.T
        tmp = 0.5 * (tmp + D * np.eye(D))
        ccov.append(sqrtm(tmp))

    # assign each of the N points to a cluster via the cumulative weights
    p = np.random.rand(N)
    Z = np.ones(N)

    for c in range(C - 1):
        Z[p > cpi[c]] = c
    Z = Z.astype(int)

    # start from the cluster means and add per-cluster coloured noise
    X = mu[Z,:]

    for c in range(C):
        X[Z == c,:] = X[Z == c,:] + mat(np.random.randn(np.sum(Z == c), D)) * mat(ccov[c])

    if get_Z:
        return (arr(X),Z)
    else:
        return arr(X)
def gamrand(alpha, lmbda):
    """Draw one Gamma(alpha, lmbda) variate.

    Uses the Marsaglia & Tsang squeeze method (algorithm 4.33) for
    alpha > 1, and the boosting identity
    Gamma(alpha) = Gamma(alpha + 1) * U**(1/alpha) otherwise.

    Args:
        alpha (float): shape parameter
        lmbda (float): rate parameter

    Returns:
        (float) : one sample from Gamma(alpha, lmbda)
    """
    if alpha > 1:
        shape_d = alpha - 1 / 3
        scale_c = 1 / np.sqrt(9 * shape_d)
        while True:
            z = np.random.randn()
            if z <= -1 / scale_c:
                continue  # outside the squeeze support; redraw z
            v = (1 + scale_c * z) ** 3
            u = np.random.rand()
            # accept when log(U) falls under the squeeze bound
            if np.log(u) <= 0.5 * z ** 2 + shape_d - shape_d * v + shape_d * np.log(v):
                return shape_d * v / lmbda

    # alpha <= 1: boost from a Gamma(alpha + 1) draw
    x = gamrand(alpha + 1, lmbda)
    return x * np.random.rand() ** (1 / alpha)
def data_mouse():
    """Simple by-hand data generation using the GUI

    Opens a matplotlib plot window, and allows the user to specify points with the mouse.
    Each button is its own class (1,2,3); close the window when done creating data.

    Returns:
        X (arr): Mx2 array of data locations
        Y (arr): Mx1 array of labels (buttons)
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, xlim=(-1,2), ylim=(-1,2))
    X = np.zeros( (0,2) )
    Y = np.zeros( (0,) )
    col = ['bs','gx','ro']  # marker style per mouse button (1, 2, 3)

    def on_click(event):
        # grow X/Y in place and record the clicked point with the mouse
        # button number as its class label
        X.resize( (X.shape[0]+1,X.shape[1]) )
        X[-1,:] = [event.xdata,event.ydata]
        Y.resize( (Y.shape[0]+1,) )
        Y[-1] = event.button
        ax.plot( event.xdata, event.ydata, col[event.button-1])
        fig.canvas.draw()

    fig.canvas.mpl_connect('button_press_event',on_click)
    # NOTE(review): plt.hold()/plt.ishold() were removed in matplotlib >= 3.0;
    # this function only runs on old matplotlib versions -- confirm.
    inter=plt.isinteractive(); hld=plt.ishold();
    plt.ioff(); plt.hold(True); plt.show();
    if inter: plt.ion();
    if not hld: plt.hold(False);
    return X,Y
| apache-2.0 |
manashmndl/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """Return the L1 penalty contour value |w1| = |1 - |w0|| for each w0 in *xs*."""
    out = np.empty(len(xs))
    for i, w0 in enumerate(xs):
        out[i] = np.sqrt((1 - np.sqrt(w0 ** 2.0)) ** 2.0)
    return out
def l2(xs):
    """Return the L2 penalty contour value w1 = sqrt(1 - w0^2) for each w0 in *xs*."""
    values = []
    for w0 in xs:
        values.append(np.sqrt(1.0 - w0 ** 2.0))
    return np.array(values)
def el(xs, z):
    """Return the elastic-net penalty contour value for each w0 in *xs*.

    *z* is the mixing parameter; the closed form divides by (2 - 4*z), so
    z must not equal 0.5.
    """
    def contour_point(x):
        # Discriminant term of the closed-form solution for w1.
        root = (4 * z ** 2
                - 8 * x * z ** 2
                + 8 * x ** 2 * z ** 2
                - 16 * x ** 2 * z ** 3
                + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
        return (2 - 2 * x - 2 * z + 4 * x * z - root - 2 * x * z ** 2) / (2 - 4 * z)

    return np.array([contour_point(x) for x in xs])
def cross(ext):
    """Draw black axis lines through the origin spanning [-ext, ext]."""
    segments = (([-ext, ext], [0, 0]),   # horizontal axis
                ([0, 0], [-ext, ext]))   # vertical axis
    for seg_x, seg_y in segments:
        plt.plot(seg_x, seg_y, "k-")
# w0 values at which each penalty contour is evaluated (first quadrant only).
xs = np.linspace(0, 1, 100)
alpha = 0.501  # slightly off 0.5: the elastic-net formula divides by (2 - 4*z)
cross(1.2)
# Each contour is computed for the first quadrant and mirrored into the other
# three quadrants by negating x and/or y.
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Shade *axis* with *classifier*'s decision surface and scatter the
    module-level training points X/Y, with marker area scaled by
    *sample_weight*."""
    grid = np.linspace(-4, 5, 500)
    xx, yy = np.meshgrid(grid, grid)

    # Evaluate the decision function at every grid node, then restore the
    # 2-D grid shape for contour plotting.
    scores = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    scores = scores.reshape(xx.shape)

    axis.contourf(xx, yy, scores, alpha=0.75, cmap=plt.cm.bone)
    # Marker size encodes the per-sample weight.
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# Create 20 points: 10 shifted to (1, 1) labelled +1, 10 at the origin -1.
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# ...and assign much bigger weights to a few points to act as outliers.
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# Fit one model with the modified weights and, for reference, one without.
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 37 | 12559 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
    """Turn X into a sequence or ndarray, avoiding a copy if possible."""
    if isinstance(X, Mapping):
        # A lone mapping is a single sample: wrap it in a one-element list.
        return [X]
    return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
    """Transforms lists of feature-value mappings to vectors.

    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.

    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".

    However, note that this transformer will only do a binary one-hot encoding
    when feature values are of type string. If categorical features are
    represented as numeric values such as int, the DictVectorizer can be
    followed by OneHotEncoder to complete binary one-hot encoding.

    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.

    Read more in the :ref:`User Guide <dict_feature_extraction>`.

    Parameters
    ----------
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator : string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse : boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort : boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when
        fitting. True by default.

    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").

    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    >>> v.inverse_transform(X) == \
        [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[ 0.,  0.,  4.]])

    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """

    def __init__(self, dtype=np.float64, separator="=", sparse=True,
                 sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort

    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        self
        """
        feature_names = []
        vocab = {}

        for x in X:
            for f, v in six.iteritems(x):
                # String values are one-hot expanded into "name<sep>value"
                # feature names; each distinct value gets its own column.
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                if f not in vocab:
                    feature_names.append(f)
                    vocab[f] = len(vocab)

        if self.sort:
            feature_names.sort()
            # Rebuild the vocabulary so indices follow the sorted name order.
            vocab = dict((f, i) for i, f in enumerate(feature_names))

        self.feature_names_ = feature_names
        self.vocabulary_ = vocab

        return self

    def _transform(self, X, fitting):
        # Shared worker for fit_transform (fitting=True) and transform
        # (fitting=False): builds a CSR matrix sample-by-sample, extending
        # the vocabulary on the fly only when fitting.
        #
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")

        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_

        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X

        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []

        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                    v = 1
                if f in vocab:
                    indices.append(vocab[f])
                    values.append(dtype(v))
                else:
                    # Unseen feature: add it when fitting, silently drop it
                    # when only transforming.
                    if fitting:
                        feature_names.append(f)
                        vocab[f] = len(vocab)
                        indices.append(vocab[f])
                        values.append(dtype(v))

            indptr.append(len(indices))

        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")

        indices = frombuffer_empty(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))

        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)

        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # map_index[new column] = old column; reindexing the matrix and
            # rewriting vocab keeps columns and names consistent.
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]

        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()

        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab

        return result_matrix

    def fit_transform(self, X, y=None):
        """Learn a list of feature name -> indices mappings and transform X.

        Like fit(X) followed by transform(X), but does not require
        materializing X in memory.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X, fitting=True)

    def inverse_transform(self, X, dict_type=dict):
        """Transform array or sparse matrix X back to feature mappings.

        X must have been produced by this DictVectorizer's transform or
        fit_transform method; it may only have passed through transformers
        that preserve the number of features and their order.

        In the case of one-hot/one-of-K coding, the constructed feature
        names and values are returned rather than the original ones.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Sample matrix.
        dict_type : callable, optional
            Constructor for feature mappings. Must conform to the
            collections.Mapping API.

        Returns
        -------
        D : list of dict_type objects, length = n_samples
            Feature mappings for the samples in X.
        """
        # COO matrix is not subscriptable
        X = check_array(X, accept_sparse=['csr', 'csc'])
        n_samples = X.shape[0]

        names = self.feature_names_
        dicts = [dict_type() for _ in xrange(n_samples)]

        if sp.issparse(X):
            # Iterate only the stored (nonzero) entries.
            for i, j in zip(*X.nonzero()):
                dicts[i][names[j]] = X[i, j]
        else:
            for i, d in enumerate(dicts):
                for j, v in enumerate(X[i, :]):
                    if v != 0:
                        d[names[j]] = X[i, j]

        return dicts

    def transform(self, X, y=None):
        """Transform feature->value dicts to array or sparse matrix.

        Named features not encountered during fit or fit_transform will be
        silently ignored.

        Parameters
        ----------
        X : Mapping or iterable over Mappings, length = n_samples
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        if self.sparse:
            return self._transform(X, fitting=False)

        else:
            # Dense fast path: fill a preallocated array directly instead of
            # building CSR and densifying.
            dtype = self.dtype
            vocab = self.vocabulary_
            X = _tosequence(X)
            Xa = np.zeros((len(X), len(vocab)), dtype=dtype)

            for i, x in enumerate(X):
                for f, v in six.iteritems(x):
                    if isinstance(v, six.string_types):
                        f = "%s%s%s" % (f, self.separator, v)
                        v = 1
                    try:
                        Xa[i, vocab[f]] = dtype(v)
                    except KeyError:
                        # Deliberate: unseen features are silently ignored.
                        pass

            return Xa

    def get_feature_names(self):
        """Returns a list of feature names, ordered by their indices.

        If one-of-K coding is applied to categorical features, this will
        include the constructed feature names but not the original ones.
        """
        return self.feature_names_

    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.

        This function modifies the estimator in-place.

        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : boolean, optional
            Whether support is a list of indices.

        Returns
        -------
        self

        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names()
        ['bar', 'baz', 'foo']
        >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
        DictVectorizer(dtype=..., separator='=', sort=True,
                sparse=True)
        >>> v.get_feature_names()
        ['bar', 'foo']
        """
        if not indices:
            # Convert a boolean mask into a list of kept column indices.
            support = np.where(support)[0]

        names = self.feature_names_
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)

        self.vocabulary_ = new_vocab
        self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                    key=itemgetter(1))]

        return self
| bsd-3-clause |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <mdroe@stsci.edu>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
# Characters that must be backslash-escaped inside a fontconfig family name.
family_punc = r'\\\-:,'
# .sub-bound regexes: unescape turns '\x' back into 'x'; escape does the
# reverse for the punctuation characters above.
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub

# Characters that must be backslash-escaped inside a property value.
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
    """A simple pyparsing-based parser for fontconfig-style patterns.

    See the `fontconfig pattern specification
    <http://www.fontconfig.org/fontconfig-user.html>`_ for more
    information.
    """

    # Shorthand constant names fontconfig accepts in place of explicit
    # ``name=value`` properties, mapped to the (name, value) pair they imply.
    _constants = {
        'thin'           : ('weight', 'light'),
        'extralight'     : ('weight', 'light'),
        'ultralight'     : ('weight', 'light'),
        'light'          : ('weight', 'light'),
        'book'           : ('weight', 'book'),
        'regular'        : ('weight', 'regular'),
        'normal'         : ('weight', 'normal'),
        'medium'         : ('weight', 'medium'),
        'demibold'       : ('weight', 'demibold'),
        'semibold'       : ('weight', 'semibold'),
        'bold'           : ('weight', 'bold'),
        'extrabold'      : ('weight', 'extra bold'),
        'black'          : ('weight', 'black'),
        'heavy'          : ('weight', 'heavy'),
        'roman'          : ('slant', 'normal'),
        'italic'         : ('slant', 'italic'),
        'oblique'        : ('slant', 'oblique'),
        'ultracondensed' : ('width', 'ultra-condensed'),
        'extracondensed' : ('width', 'extra-condensed'),
        'condensed'      : ('width', 'condensed'),
        'semicondensed'  : ('width', 'semi-condensed'),
        'expanded'       : ('width', 'expanded'),
        'extraexpanded'  : ('width', 'extra-expanded'),
        'ultraexpanded'  : ('width', 'ultra-expanded')
        }

    def __init__(self):
        # Grammar: families [ "-" point_sizes ] ( ":" property )* end
        family = Regex(r'([^%s]|(\\[%s]))*' %
                       (family_punc, family_punc)) \
                 .setParseAction(self._family)
        size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
               .setParseAction(self._size)
        name = Regex(r'[a-z]+') \
               .setParseAction(self._name)
        value = Regex(r'([^%s]|(\\[%s]))*' %
                      (value_punc, value_punc)) \
                .setParseAction(self._value)

        families = (family
                    + ZeroOrMore(
                        Literal(',')
                        + family)
                    ).setParseAction(self._families)

        point_sizes = (size
                       + ZeroOrMore(
                           Literal(',')
                           + size)
                       ).setParseAction(self._point_sizes)

        # Either "name=value[,value...]" or a bare constant name.
        # (Named ``prop`` rather than ``property`` to avoid shadowing the
        # builtin.)
        prop = ((name
                 + Suppress(Literal('='))
                 + value
                 + ZeroOrMore(
                     Suppress(Literal(','))
                     + value)
                 )
                | name
                ).setParseAction(self._property)

        pattern = (Optional(
                       families)
                   + Optional(
                       Literal('-')
                       + point_sizes)
                   + ZeroOrMore(
                       Literal(':')
                       + prop)
                   + StringEnd()
                   )

        self._parser = pattern
        self.ParseException = ParseException

    def parse(self, pattern):
        """
        Parse the given fontconfig *pattern* and return a dictionary
        of key/value pairs useful for initializing a
        :class:`font_manager.FontProperties` object.
        """
        props = self._properties = {}
        try:
            self._parser.parseString(pattern)
        # "except X, e" is Python-2-only syntax and a syntax error on
        # Python 3; "as" works on Python >= 2.6 as well.
        except self.ParseException as e:
            raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
        self._properties = None
        return props

    # --- parse actions: each receives (string, location, tokens) -----------

    def _family(self, s, loc, tokens):
        # Undo backslash-escaping in the family name.
        return [family_unescape(r'\1', str(tokens[0]))]

    def _size(self, s, loc, tokens):
        return [float(tokens[0])]

    def _name(self, s, loc, tokens):
        return [str(tokens[0])]

    def _value(self, s, loc, tokens):
        # Undo backslash-escaping in a property value.
        return [value_unescape(r'\1', str(tokens[0]))]

    def _families(self, s, loc, tokens):
        self._properties['family'] = [str(x) for x in tokens]
        return []

    def _point_sizes(self, s, loc, tokens):
        self._properties['size'] = [str(x) for x in tokens]
        return []

    def _property(self, s, loc, tokens):
        if len(tokens) == 1:
            # Bare token: expand shorthand constants like "bold" or "italic".
            if tokens[0] in self._constants:
                key, val = self._constants[tokens[0]]
                self._properties.setdefault(key, []).append(val)
        else:
            key = tokens[0]
            val = tokens[1:]
            self._properties.setdefault(key, []).extend(val)
        return []
# Module-level convenience: the bound ``parse`` method of a shared parser.
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
    """Return a fontconfig pattern string describing *d*.

    *d* is a FontProperties-like object exposing ``get_family``,
    ``get_style``, ``get_variant``, ``get_weight``, ``get_stretch``,
    ``get_file`` and ``get_size`` accessors (not a plain dict).  Properties
    whose value is ``None`` or an empty list are omitted from the pattern.
    """
    props = []
    for key in 'family style variant weight stretch file size'.split():
        val = getattr(d, 'get_' + key)()
        if val is None or val == []:
            continue
        if isinstance(val, list):
            # Escape fontconfig metacharacters in each entry; drop Nones.
            val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
            if not val:
                # Nothing left after filtering; omit the property entirely
                # (the old code emitted a bogus ":key=[]" segment here).
                continue
            val = ','.join(val)
        props.append(":%s=%s" % (key, val))
    return ''.join(props)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.