| repo_name (string, 6–67 chars) | path (string, 5–185 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 1.02k–962k chars) | license (one of 15 classes) |
|---|---|---|---|---|---|
WarrenWeckesser/scipy | scipy/spatial/_spherical_voronoi.py | 7 | 13692 | """
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as SciPy.
#
import warnings
import numpy as np
import scipy
from . import _voronoi
from scipy.spatial import cKDTree
__all__ = ['SphericalVoronoi']
def calculate_solid_angles(R):
    """Compute the solid angle of each spherical triangle in `R`.

    Implements the Van Oosterom--Strackee formula, with the numerator of
    the half-angle tangent expressed as a determinant for better numerical
    stability. `R` has shape (n, 3, 3); each triangle's three vertices are
    assumed to have unit norm.
    """
    # tan(Omega / 2) = det([R1 R2 R3]) / (1 + R1.R2 + R2.R3 + R3.R1)
    det = np.linalg.det(R)
    pairwise_dots = (np.einsum('ij,ij->i', R[:, 0], R[:, 1])
                     + np.einsum('ij,ij->i', R[:, 1], R[:, 2])
                     + np.einsum('ij,ij->i', R[:, 2], R[:, 0]))
    return np.abs(2 * np.arctan2(det, 1 + pairwise_dots))
class SphericalVoronoi:
    """ Voronoi diagrams on the surface of a sphere.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    points : ndarray of floats, shape (npoints, ndim)
        Coordinates of points from which to construct a spherical
        Voronoi diagram.
    radius : float, optional
        Radius of the sphere (Default: 1)
    center : ndarray of floats, shape (ndim,)
        Center of sphere (Default: origin)
    threshold : float
        Threshold for detecting duplicate points and
        mismatches between points and sphere parameters.
        (Default: 1e-06)

    Attributes
    ----------
    points : double array of shape (npoints, ndim)
        the points in `ndim` dimensions to generate the Voronoi diagram from
    radius : double
        radius of the sphere
    center : double array of shape (ndim,)
        center of the sphere
    vertices : double array of shape (nvertices, ndim)
        Voronoi vertices corresponding to points
    regions : list of list of integers of shape (npoints, _ )
        the n-th entry is a list consisting of the indices
        of the vertices belonging to the n-th point in points

    Methods
    -------
    calculate_areas
        Calculates the areas of the Voronoi regions. For 2D point sets, the
        regions are circular arcs. The sum of the areas is `2 * pi * radius`.
        For 3D point sets, the regions are spherical polygons. The sum of the
        areas is `4 * pi * radius**2`.

    Raises
    ------
    ValueError
        If there are duplicates in `points`.
        If the provided `radius` is not consistent with `points`.

    Notes
    -----
    The spherical Voronoi diagram algorithm proceeds as follows. The Convex
    Hull of the input points (generators) is calculated, and is equivalent to
    their Delaunay triangulation on the surface of the sphere [Caroli]_.
    The Convex Hull neighbour information is then used to
    order the Voronoi region vertices around each generator. The latter
    approach is substantially less sensitive to floating point issues than
    angle-based methods of Voronoi region vertex sorting.

    Empirical assessment of spherical Voronoi algorithm performance suggests
    quadratic time complexity (loglinear is optimal, but algorithms are more
    challenging to implement).

    References
    ----------
    .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
                points on or close to a sphere. Research Report RR-7004, 2009.
    .. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
                     triangle. IEEE Transactions on Biomedical Engineering,
                     2, 1983, pp 125--126.

    See Also
    --------
    Voronoi : Conventional Voronoi diagrams in N dimensions.

    Examples
    --------
    Do some imports and take some points on a cube:

    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import SphericalVoronoi, geometric_slerp
    >>> from mpl_toolkits.mplot3d import proj3d
    >>> # set input data
    >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
    ...                    [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])

    Calculate the spherical Voronoi diagram:

    >>> radius = 1
    >>> center = np.array([0, 0, 0])
    >>> sv = SphericalVoronoi(points, radius, center)

    Generate plot:

    >>> # sort vertices (optional, helpful for plotting)
    >>> sv.sort_vertices_of_regions()
    >>> t_vals = np.linspace(0, 1, 2000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> # plot the unit sphere for reference (optional)
    >>> u = np.linspace(0, 2 * np.pi, 100)
    >>> v = np.linspace(0, np.pi, 100)
    >>> x = np.outer(np.cos(u), np.sin(v))
    >>> y = np.outer(np.sin(u), np.sin(v))
    >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
    >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
    >>> # plot generator points
    >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
    >>> # plot Voronoi vertices
    >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
    ...            c='g')
    >>> # indicate Voronoi regions (as Euclidean polygons)
    >>> for region in sv.regions:
    ...    n = len(region)
    ...    for i in range(n):
    ...        start = sv.vertices[region][i]
    ...        end = sv.vertices[region][(i + 1) % n]
    ...        result = geometric_slerp(start, end, t_vals)
    ...        ax.plot(result[..., 0],
    ...                result[..., 1],
    ...                result[..., 2],
    ...                c='k')
    >>> ax.azim = 10
    >>> ax.elev = 40
    >>> _ = ax.set_xticks([])
    >>> _ = ax.set_yticks([])
    >>> _ = ax.set_zticks([])
    >>> fig.set_size_inches(4, 4)
    >>> plt.show()
    """
    def __init__(self, points, radius=1, center=None, threshold=1e-06):
        # Deprecated escape hatch: `radius=None` still means a unit sphere,
        # but warns so callers migrate before it becomes an error.
        if radius is None:
            radius = 1.
            warnings.warn('`radius` is `None`. '
                          'This will raise an error in a future version. '
                          'Please provide a floating point number '
                          '(i.e. `radius=1`).',
                          DeprecationWarning)

        self.radius = float(radius)
        self.points = np.array(points).astype(np.double)
        self._dim = len(points[0])
        if center is None:
            self.center = np.zeros(self._dim)
        else:
            self.center = np.array(center, dtype=float)

        # test degenerate input: the generators must span the full ambient
        # space, otherwise the convex hull (and thus the diagram) degenerates
        self._rank = np.linalg.matrix_rank(self.points - self.points[0],
                                           tol=threshold * self.radius)
        if self._rank < self._dim:
            raise ValueError("Rank of input points must be at least {0}".format(self._dim))

        # any pair of generators closer than the threshold is a duplicate
        if cKDTree(self.points).query_pairs(threshold * self.radius):
            raise ValueError("Duplicate generators present.")

        # every generator must actually lie on the stated sphere
        radii = np.linalg.norm(self.points - self.center, axis=1)
        max_discrepancy = np.abs(radii - self.radius).max()
        if max_discrepancy >= threshold * self.radius:
            raise ValueError("Radius inconsistent with generators.")

        self._calc_vertices_regions()

    def _calc_vertices_regions(self):
        """
        Calculates the Voronoi vertices and regions of the generators stored
        in self.points. The vertices will be stored in self.vertices and the
        regions in self.regions.

        This algorithm was discussed at PyData London 2015 by
        Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
        """
        # get Convex Hull
        conv = scipy.spatial.ConvexHull(self.points)
        # get circumcenters of Convex Hull triangles from facet equations
        # for 3D input circumcenters will have shape: (2N-4, 3)
        self.vertices = self.radius * conv.equations[:, :-1] + self.center
        self._simplices = conv.simplices
        # calculate regions from triangulation
        # for 3D input simplex_indices will have shape: (2N-4,)
        simplex_indices = np.arange(len(self._simplices))
        # for 3D input tri_indices will have shape: (6N-12,)
        tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
        # for 3D input point_indices will have shape: (6N-12,)
        point_indices = self._simplices.ravel()
        # for 3D input indices will have shape: (6N-12,)
        # stable sort groups all simplices incident to the same generator
        indices = np.argsort(point_indices, kind='mergesort')
        # for 3D input flattened_groups will have shape: (6N-12,)
        flattened_groups = tri_indices[indices].astype(np.intp)
        # intervals will have shape: (N+1,)
        # interval [i, i+1) delimits generator i's simplices in the sorted run
        intervals = np.cumsum(np.bincount(point_indices + 1))
        # split flattened groups to get nested list of unsorted regions
        groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
                  for i in range(len(intervals) - 1)]
        self.regions = groups

    def sort_vertices_of_regions(self):
        """Sort indices of the vertices to be (counter-)clockwise ordered.

        Raises
        ------
        TypeError
            If the points are not three-dimensional.

        Notes
        -----
        For each region in regions, it sorts the indices of the Voronoi
        vertices such that the resulting points are in a clockwise or
        counterclockwise order around the generator point.

        This is done as follows: Recall that the n-th region in regions
        surrounds the n-th generator in points and that the k-th
        Voronoi vertex in vertices is the circumcenter of the k-th triangle
        in self._simplices. For each region n, we choose the first triangle
        (=Voronoi vertex) in self._simplices and a vertex of that triangle
        not equal to the center n. These determine a unique neighbor of that
        triangle, which is then chosen as the second triangle. The second
        triangle will have a unique vertex not equal to the current vertex or
        the center. This determines a unique neighbor of the second triangle,
        which is then chosen as the third triangle and so forth. We proceed
        through all the triangles (=Voronoi vertices) belonging to the
        generator in points and obtain a sorted version of the vertices
        of its surrounding region.
        """
        if self._dim != 3:
            raise TypeError("Only supported for three-dimensional point sets")
        # the traversal described above runs in the compiled Cython helper
        _voronoi.sort_vertices_of_regions(self._simplices, self.regions)

    def _calculate_areas_3d(self):
        # Each spherical polygon is fanned into triangles (generator plus two
        # adjacent region vertices); areas follow from their solid angles.
        self.sort_vertices_of_regions()
        sizes = [len(region) for region in self.regions]
        csizes = np.cumsum(sizes)
        num_regions = csizes[-1]

        # We create a set of triangles consisting of one point and two Voronoi
        # vertices. The vertices of each triangle are adjacent in the sorted
        # regions list.
        point_indices = [i for i, size in enumerate(sizes)
                         for j in range(size)]
        nbrs1 = np.array([r for region in self.regions for r in region])

        # The calculation of nbrs2 is a vectorized version of:
        # np.array([r for region in self.regions for r in np.roll(region, 1)])
        nbrs2 = np.roll(nbrs1, 1)
        indices = np.roll(csizes, 1)
        indices[0] = 0
        nbrs2[indices] = nbrs1[csizes - 1]

        # Normalize points and vertices.
        pnormalized = (self.points - self.center) / self.radius
        vnormalized = (self.vertices - self.center) / self.radius

        # Create the complete set of triangles and calculate their solid angles
        triangles = np.hstack([pnormalized[point_indices],
                               vnormalized[nbrs1],
                               vnormalized[nbrs2]
                               ]).reshape((num_regions, 3, 3))
        triangle_solid_angles = calculate_solid_angles(triangles)

        # Sum the solid angles of the triangles in each region
        # (segmented sum via cumsum sampled at each region's last triangle)
        solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
        solid_angles[1:] -= solid_angles[:-1]

        # Get polygon areas using A = omega * r**2
        return solid_angles * self.radius**2

    def _calculate_areas_2d(self):
        # Find start and end points of arcs
        arcs = self.points[self._simplices] - self.center

        # Calculate the angle subtended by arcs
        cosine = np.einsum('ij,ij->i', arcs[:, 0], arcs[:, 1])
        sine = np.abs(np.linalg.det(arcs))
        theta = np.arctan2(sine, cosine)

        # Get areas using A = r * theta
        areas = self.radius * theta

        # Correct arcs which go the wrong way (single-hemisphere inputs)
        signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
                                  self.vertices - self.center))
        indices = np.where(signs < 0)
        areas[indices] = 2 * np.pi * self.radius - areas[indices]

        return areas

    def calculate_areas(self):
        """Calculates the areas of the Voronoi regions.

        For 2D point sets, the regions are circular arcs. The sum of the areas
        is `2 * pi * radius`.
        For 3D point sets, the regions are spherical polygons. The sum of the
        areas is `4 * pi * radius**2`.

        .. versionadded:: 1.5.0

        Returns
        -------
        areas : double array of shape (npoints,)
            The areas of the Voronoi regions.
        """
        if self._dim == 2:
            return self._calculate_areas_2d()
        elif self._dim == 3:
            return self._calculate_areas_3d()
        else:
            raise TypeError("Only supported for 2D and 3D point sets")
| bsd-3-clause |
petebachant/MakeWaves | makewaves/wavemakerlimits.py | 1 | 3085 | """This module replicates the logic of the Regular Waves VI to calculate a
safe wave height.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import os
import sys
_thisdir = os.path.dirname(os.path.abspath(__file__))
settings_dir = os.path.join(_thisdir, "settings")
# Physical limits of the wavemaker. Lengths are in meters, periods in
# seconds; ratios are dimensionless.
max_halfstroke = 0.16  # maximum piston half-stroke amplitude (was 0.16)
flap_height = 3.3147   # height of the wave flap
depth = 2.44           # still water depth
min_period = 0.5       # shortest allowed wave period
max_period = 5.0       # longest allowed wave period
max_H_L = 0.1          # steepness limit: wave height / wavelength
max_H_d = 0.65         # breaking limit: wave height / water depth
def dispsolver(rad_frequency, depth, decimals=2):
    """Solve the linear dispersion relation for the surface wavenumber.

    Brute-force minimizes |sigma**2 - g*k*tanh(k*h)| over a grid of
    candidate wavenumbers in [0, 30) rad/m.

    Parameters
    ----------
    rad_frequency : float
        Radian frequency sigma (rad/s).
    depth : float
        Water depth h (m).
    decimals : int, optional
        Resolution of the search grid; spacing is 10**-decimals.

    Returns
    -------
    float
        Wavenumber k (rad/m) minimizing the dispersion residual.
    """
    g = 9.81  # gravitational acceleration, m/s^2
    k = np.arange(0, 30, 10**-(decimals))
    r = np.abs(rad_frequency**2 - g*k*np.tanh(k*depth))
    # argmin gives the first index of the minimum in one pass, identical in
    # result to the original np.where(r == np.min(r))[0][0] two-pass form.
    return k[np.argmin(r)]
def revdispsolver(wavenumber, depth, decimals=2):
    """Return the radian frequency for a given wavenumber and depth.

    Inverse of `dispsolver`: brute-force minimizes
    |sigma**2 - g*k*tanh(k*h)| over a grid of candidate frequencies
    in [0, 10) rad/s.

    Parameters
    ----------
    wavenumber : float
        Wavenumber k (rad/m).
    depth : float
        Water depth h (m).
    decimals : int, optional
        Resolution of the search grid; spacing is 10**-decimals.

    Returns
    -------
    float
        Radian frequency sigma (rad/s) minimizing the dispersion residual.
    """
    g = 9.81  # gravitational acceleration, m/s^2
    k = wavenumber
    sigma = np.arange(0, 10, 10**-(decimals))
    r = np.abs(sigma**2 - g*k*np.tanh(k*depth))
    # Single-pass argmin; same first-minimum result as the original
    # np.where(r == np.min(r))[0][0].
    return sigma[np.argmin(r)]
def height_to_stroke_amp(wave_height, period, flap_height, depth):
    """Convert a target wave height to the required stroke amplitude.

    Uses the flap-type wavemaker transfer function (height-to-stroke ratio)
    evaluated at the wavenumber from the dispersion relation, then scales
    by the flap-height-to-depth geometry.
    """
    sigma = 2*np.pi/period
    h = depth
    k = dispsolver(sigma, h)
    kh = k*h
    # Flap wavemaker transfer function H/S
    transfer = 4*(np.sinh(kh)/kh)*(kh*np.sinh(kh) - np.cosh(kh) + 1) \
        / (np.sinh(2*kh) + 2*kh)
    stroke = wave_height / transfer
    return flap_height/depth * stroke / 2.0
def stroke_amp_to_height(stroke_amp, period, flap_height, depth):
    """Convert a stroke amplitude to the resulting wave height.

    Inverse of `height_to_stroke_amp`: undoes the geometric scaling and
    applies the flap-type wavemaker transfer function directly.
    """
    sigma = 2*np.pi/period
    h = depth
    k = dispsolver(sigma, h)
    kh = k*h
    full_stroke = 2*stroke_amp*depth/flap_height
    # Flap wavemaker transfer function H/S
    transfer = 4*(np.sinh(kh)/kh)*(kh*np.sinh(kh) - np.cosh(kh) + 1) \
        / (np.sinh(2*kh) + 2*kh)
    return full_stroke * transfer
def calc_safe_height(H, T):
    """Return `H` if it can be produced safely at period `T`, otherwise the
    largest wave height permitted by the wavemaker limits.

    Three limits apply: the maximum piston half-stroke, the maximum
    steepness H/L, and the maximum height-to-depth ratio H/d.
    """
    # Stroke amplitude needed to produce the requested height
    requested_sta = height_to_stroke_amp(H, T, flap_height, depth)
    # Candidate height ceilings from each limit
    h_stroke = stroke_amp_to_height(max_halfstroke, T, flap_height, depth)
    h_steepness = max_H_L * 2*np.pi/dispsolver(2*np.pi/T, depth)
    h_depth = max_H_d * depth
    # Stroke amplitudes corresponding to the steepness and depth ceilings
    sta_steepness = height_to_stroke_amp(h_steepness, T, flap_height, depth)
    sta_depth = height_to_stroke_amp(h_depth, T, flap_height, depth)
    if requested_sta <= np.min([max_halfstroke, sta_steepness, sta_depth]):
        # The requested height is within every limit
        return H
    return np.min([h_stroke, h_steepness, h_depth])
def findlimits(plot=False, save=True):
    """Compute the maximum safe wave height over a grid of wave periods.

    Prints a textual progress indicator while iterating. Optionally plots
    the curve and/or saves `periods` and `maxH` as .npy files in the
    settings directory.

    Returns
    -------
    (periods, mh) : tuple of ndarray
        Wave periods (s) and corresponding maximum safe heights.
    """
    periods = np.arange(0.5, 5.01, 0.001)
    mh = np.zeros(len(periods))
    for n in range(len(periods)):
        # one-line progress readout, overwritten in place via '\r'
        progress = np.round(n/len(periods)*100, decimals=2)
        sys.stdout.write("\rComputing wavemaker limits... " \
                + str(progress)+"%")
        sys.stdout.flush()
        # 50 m is far beyond any achievable height, so the limiter always
        # returns the binding constraint for this period
        mh[n] = calc_safe_height(50, periods[n])
    print("")
    if plot:
        import matplotlib.pyplot as plt
        plt.plot(periods, mh)
    if save:
        if not os.path.isdir(settings_dir):
            os.mkdir(settings_dir)
        np.save(os.path.join(settings_dir, "periods"), periods)
        np.save(os.path.join(settings_dir, "maxH"), mh)
    return periods, mh
if __name__ == "__main__":
    # Quick manual run: compute the limits without plotting or writing files.
    findlimits(plot=False, save=False)
| gpl-3.0 |
ageron/tensorflow | tensorflow/contrib/learn/__init__.py | 18 | 2695 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning (DEPRECATED).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@read_keyed_batch_examples
@@read_keyed_batch_examples_shared_queue
@@read_keyed_batch_features
@@read_keyed_batch_features_shared_queue
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
# Names kept public even though they lack @@-style doc markers above;
# everything else undocumented is stripped from the module namespace.
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
                    'monitors', 'NotFittedError', 'ops', 'preprocessing',
                    'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
LCAV/pyroomacoustics | pyroomacoustics/tests/test_build_rir.py | 1 | 4247 | from __future__ import division, print_function
import pyroomacoustics as pra
import numpy as np
# Optional compiled extension: fall back gracefully when it is missing
# (e.g. a pure-Python install without the Cython module built).
try:
    from pyroomacoustics import build_rir
    build_rir_available = True
except ImportError:
    # Narrowed from a bare `except:` so that unrelated errors
    # (KeyboardInterrupt, genuine bugs in the extension) are not
    # silently swallowed.
    print('build_rir not available')
    build_rir_available = False
# tolerance for test success (1%)
tol = 0.01

fdl = 81      # fractional delay filter length (samples)
fs = 16000    # sampling frequency (Hz)

# Arrival times constructed so their fractional parts exercise different
# rounding cases of the fractional-delay placement.
t0 = (2 * fdl + 0.1) / fs
t1 = (3 * fdl - 0.1) / fs
t2 = (4 * fdl + 0.45) / fs
t3 = (5 * fdl + 0.001) / fs
t4 = (6 * fdl + 0.999) / fs

# One row per test case: arrival times, amplitudes, and visibility flags
# of three image sources.
times = np.array(
    [
        [t0, t1 + (1 / 40 / 16000), t2, ],
        [t0, t1 + (10 / fs), 3 * t3, ],
        [t0, t3, t4, ],
    ],
)
alphas = np.array(
    [
        [1., 0.5, -0.1],
        [0.5, 0.3, 0.1],
        [0.3, 2., 0.1],
    ],
)
visibilities = np.array(
    [
        [1, 1, 1, ],
        [1, 1, 1, ],
        [0, 1, 1, ],
    ],
    dtype=np.int32,
)
def build_rir_wrap(time, alpha, visibility, fs, fdl):
    """Build the same room impulse response twice -- once with the Cython
    extension and once with a pure-Python reference loop -- and return both
    arrays (ir_ref, ir_cython) for comparison.

    NOTE(review): the `fdl` argument is immediately overwritten by the
    package constant below, so the passed value is ignored -- presumably
    intentional, but worth confirming.
    """
    # fractional delay length
    fdl = pra.constants.get('frac_delay_length')
    fdl2 = (fdl-1) // 2

    # the number of samples needed
    N = int(np.ceil(time.max() * fs) + fdl)

    ir_ref = np.zeros(N)
    ir_cython = np.zeros(N)

    # Try to use the Cython extension
    build_rir.fast_rir_builder(ir_cython, time, alpha, visibility, fs, fdl)

    # fallback to pure Python implemenation
    for i in range(time.shape[0]):
        if visibility[i] == 1:
            # split each arrival time into integer and fractional samples,
            # then place a scaled fractional-delay filter at that position
            time_ip = int(np.round(fs * time[i]))
            time_fp = (fs * time[i]) - time_ip
            ir_ref[time_ip-fdl2:time_ip+fdl2+1] += alpha[i] * pra.fractional_delay(time_fp)

    return ir_ref, ir_cython
def test_build_rir():
    """Check the Cython RIR builder against the pure-Python reference on
    every prepared test case."""
    if not build_rir_available:
        return

    for t, a, v in zip(times, alphas, visibilities):
        # Bug fix: the loop previously ignored its variables and rebuilt
        # the first case (times[0], alphas[0], visibilities[0]) every
        # iteration, so cases 2 and 3 were never tested.
        ir_ref, ir_cython = build_rir_wrap(t, a, v, fs, fdl)
        assert np.max(np.abs(ir_ref - ir_cython)) < tol
def test_short():
    ''' Tests that an error is raised if a provided time goes below the zero index '''

    if not build_rir_available:
        return

    N = 100
    fs = 16000
    fdl = 81
    rir = np.zeros(N)

    # time 0 places the filter before index 0, which the builder must reject
    time = np.array([0.])
    alpha = np.array([1.])
    visibility = np.array([1], dtype=np.int32)

    # Bug fix: the old `try: ...; assert False / except AssertionError`
    # pattern caught its own `assert False`, so the test could never fail.
    raised = False
    try:
        build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, fdl)
    except AssertionError:
        raised = True
        print('Ok, short times are caught')
    assert raised, 'too-short times were not caught'
def test_long():
    ''' Tests that an error is raised if a time falls outside the rir array '''

    if not build_rir_available:
        return

    N = 100
    fs = 16000
    fdl = 81
    rir = np.zeros(N)

    # the last sample: the filter tail would run past the end of the array
    time = np.array([(N-1) / fs])
    alpha = np.array([1.])
    visibility = np.array([1], dtype=np.int32)

    # Bug fix: the old `try: ...; assert False / except AssertionError`
    # pattern caught its own `assert False`, so the test could never fail.
    raised = False
    try:
        build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, fdl)
    except AssertionError:
        raised = True
        print('Ok, long times are caught')
    assert raised, 'too-long times were not caught'
def test_errors():
    ''' Tests that errors are raised when array lengths differ '''

    if not build_rir_available:
        return

    N = 300
    fs = 16000
    fdl = 81
    rir = np.zeros(N)

    time = np.array([100 / fs, 200 / fs])
    alpha = np.array([1., 1.])
    visibility = np.array([1, 1], dtype=np.int32)

    def expect_failure(label, *args):
        # Helper: the builder call must raise; silent success is a failure.
        # Bug fix: the old bare `except:` around `assert False` caught its
        # own assertion, so these checks could never fail.
        try:
            build_rir.fast_rir_builder(*args)
        except Exception:
            print('Ok, %s error occured' % label)
            return
        raise AssertionError('no error raised for mismatched %s' % label)

    expect_failure('alpha', rir, time, alpha[:1], visibility, fs, fdl)
    expect_failure('visibility', rir, time, alpha, visibility[:1], fs, fdl)
    expect_failure('fdl', rir, time, alpha, visibility, fs, 80)
if __name__ == '__main__':
    # Visual comparison of the reference and Cython RIRs, then the
    # error-handling checks.
    import matplotlib.pyplot as plt

    for t, a, v in zip(times, alphas, visibilities):
        # Bug fix: use the loop variables; previously the first test case
        # was plotted three times (times[0], alphas[0], visibilities[0]).
        ir_ref, ir_cython = build_rir_wrap(t, a, v, fs, fdl)

        print('Error:', np.max(np.abs(ir_ref - ir_cython)))

        plt.figure()
        plt.plot(ir_ref, label='ref')
        plt.plot(ir_cython, label='cython')
        plt.legend()

    test_short()
    test_long()
    test_errors()

    plt.show()
| mit |
prashantvyas/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
    """Background thread that decodes one batch of JPEG-compressed images
    into a pixel matrix plus label vector/matrix.

    The decoded dictionary is appended to `list_out`, from which the main
    thread picks it up after join().
    """
    def __init__(self, dp, batch_num, label_offset, list_out):
        Thread.__init__(self)
        self.list_out = list_out
        self.label_offset = label_offset
        self.dp = dp
        self.batch_num = batch_num

    @staticmethod
    def load_jpeg_batch(rawdics, dp, label_offset):
        # Accept a single raw dictionary as well as a list of them.
        if type(rawdics) != list:
            rawdics = [rawdics]
        nc_total = sum(len(r['data']) for r in rawdics)

        jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
        labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))

        img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
        lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
        # JPEG decoding (plus cropping / multiview expansion) is delegated
        # to the compiled convnet library for speed.
        dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
        # labvec: one randomly chosen label per image (-1 when unlabeled),
        # tiled so each of the data_mult views shares its image's label.
        lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
        # labmat: multi-hot indicator matrix over all classes.
        for c in xrange(nc_total):
            lab_mat[c, [z + label_offset for z in labels[c]]] = 1
        lab_mat = n.tile(lab_mat, (dp.data_mult, 1))

        return {'data': img_mat[:nc_total * dp.data_mult,:],
                'labvec': lab_vec[:nc_total * dp.data_mult,:],
                'labmat': lab_mat[:nc_total * dp.data_mult,:]}

    def run(self):
        # Fetch the raw pickled batch, decode it, and hand it back.
        rawdics = self.dp.get_batch(self.batch_num)
        p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
                                                  self.dp,
                                                  self.label_offset)
        self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
    """Background thread that samples PCA-aligned color noise.

    Draws `num_noise` Gaussian samples, scales them by the per-component
    standard deviations, projects them onto the PCA color axes, and
    appends the resulting (num_noise, 3) array to `list_out`.
    """
    def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
        Thread.__init__(self)
        self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
        self.num_noise = num_noise
        self.list_out = list_out

    def run(self):
        # Standard normal draws, scaled per PCA component...
        gauss = nr.randn(self.num_noise, 3).astype(n.single)
        scaled = gauss * self.pca_stdevs.T
        # ...then rotated into RGB space along the PCA color axes.
        self.list_out.append(n.dot(scaled, self.pca_vecs.T))
class ImageDataProvider(LabeledDataProvider):
    """Data provider for JPEG-encoded image batches.

    Batches are decoded on a background JPEGBatchLoaderThread (double
    buffered, so the next batch is prefetched while the current one is
    consumed). Optionally adds PCA-aligned color noise generated on a
    second background thread during training.
    """
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.data_mean = self.batch_meta['data_mean'].astype(n.single)
        # PCA of the color distribution: eigenvectors and stdevs per axis
        self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
        self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
        self.color_noise_coeff = dp_params['color_noise']
        self.num_colors = 3
        self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
        self.mini = dp_params['minibatch_size']
        # inner_size is the cropped region fed to the net; <= 0 means no crop
        self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
        self.inner_pixels = self.inner_size **2
        self.border_size = (self.img_size - self.inner_size) / 2
        # multiview testing expands each case into 5*2 crops/reflections
        self.multiview = dp_params['multiview_test'] and test
        self.num_views = 5*2
        self.data_mult = self.num_views if self.multiview else 1
        self.batch_size = self.batch_meta['batch_size']
        self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
        self.scalar_mean = dp_params['scalar_mean']
        # Maintain pointers to previously-returned data matrices so they don't get garbage collected.
        self.data = [None, None] # These are pointers to previously-returned data matrices
        self.loader_thread, self.color_noise_thread = None, None
        self.convnet = dp_params['convnet']
        self.num_noise = self.batch_size
        self.batches_generated, self.loaders_started = 0, 0
        # Crop of the mean image matching the inner region
        self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
        # a non-negative scalar mean overrides the per-pixel mean image
        if self.scalar_mean >= 0:
            self.data_mean_crop = self.scalar_mean

    def showimg(self, img):
        # Debug helper: display one flattened (3 * size * size,) image.
        from matplotlib import pylab as pl
        pixels = img.shape[0] / 3
        size = int(sqrt(pixels))
        img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
        pl.imshow(img, interpolation='nearest')
        pl.show()

    def get_data_dims(self, idx=0):
        # idx 0: pixel data, idx 2: one-hot label matrix, else: label vector
        if idx == 0:
            return self.inner_size**2 * 3
        if idx == 2:
            return self.get_num_classes()
        return 1

    def start_loader(self, batch_idx):
        # Kick off asynchronous decoding of the given batch.
        self.load_data = []
        self.loader_thread = JPEGBatchLoaderThread(self,
                                                   self.batch_range[batch_idx],
                                                   self.label_offset,
                                                   self.load_data)
        self.loader_thread.start()

    def start_color_noise_maker(self):
        # Kick off asynchronous generation of the next color-noise matrix.
        color_noise_list = []
        self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
        self.color_noise_thread.start()
        return color_noise_list

    def set_labels(self, datadic):
        pass

    def get_data_from_loader(self):
        if self.loader_thread is None:
            # First call: load synchronously, then prefetch the next batch.
            self.start_loader(self.batch_idx)
            self.loader_thread.join()
            self.data[self.d_idx] = self.load_data[0]
            self.start_loader(self.get_next_batch_idx())
        else:
            # Set the argument to join to 0 to re-enable batch reuse
            self.loader_thread.join()
            if not self.loader_thread.is_alive():
                self.data[self.d_idx] = self.load_data[0]
                self.start_loader(self.get_next_batch_idx())
            #else:
            #    print "Re-using batch"
        self.advance_batch()

    def add_color_noise(self):
        # At this point the data already has 0 mean.
        # So I'm going to add noise to it, but I'm also going to scale down
        # the original data. This is so that the overall scale of the training
        # data doesn't become too different from the test data.

        s = self.data[self.d_idx]['data'].shape
        cropped_size = self.get_data_dims(0) / 3
        ncases = s[0]

        # Fetch the prefetched noise matrix (or generate synchronously on
        # the first call), and immediately start producing the next one.
        if self.color_noise_thread is None:
            self.color_noise_list = self.start_color_noise_maker()
            self.color_noise_thread.join()
            self.color_noise = self.color_noise_list[0]
            self.color_noise_list = self.start_color_noise_maker()
        else:
            self.color_noise_thread.join(0)
            if not self.color_noise_thread.is_alive():
                self.color_noise = self.color_noise_list[0]
                self.color_noise_list = self.start_color_noise_maker()

        # Add one noise value per (case, channel), broadcast across pixels.
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
        self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
        self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
        self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.

    def get_next_batch(self):
        # Alternate between the two data buffers.
        self.d_idx = self.batches_generated % 2
        epoch, batchnum = self.curr_epoch, self.curr_batchnum

        self.get_data_from_loader()

        # Subtract mean
        self.data[self.d_idx]['data'] -= self.data_mean_crop

        if self.color_noise_coeff > 0 and not self.test:
            self.add_color_noise()

        self.batches_generated += 1

        return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]

    # Takes as input an array returned by get_next_batch
    # Returns a (numCases, imgSize, imgSize, 3) array which can be
    # fed to pylab for plotting.
    # This is used by shownet.py to plot test case predictions.
    def get_plottable_data(self, data, add_mean=True):
        # NOTE(review): both branches of this conditional are identical --
        # likely a leftover edit; confirm the intended reshape for the
        # non-contiguous case.
        mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
        return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
    def __trim_borders(self, x, target):
        # Crop the full-size images in `x` down to inner_size x inner_size and
        # write the flattened crops into the preallocated `target` buffer.
        # `x` holds one column per case; each column is a flattened
        # (num_colors, img_size, img_size) image.
        # NOTE(review): Python 2 code (`xrange`, and border_size comes from a
        # `/ 2` division earlier in the file) -- confirm before porting.
        y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
        if self.test: # don't need to loop over cases
            if self.multiview:
                # Multiview testing: 9 fixed crops per case (a 3x3 grid of
                # offsets); all views are written side by side into `target`.
                start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
                                   (self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
                                   (self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
                end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
                for i in xrange(self.num_views):
                    target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
            else:
                pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
                target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
        else:
            # Training: random crop position plus random horizontal flip per
            # case (simple data augmentation).
            for c in xrange(x.shape[1]): # loop over cases
                startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
                endY, endX = startY + self.inner_size, startX + self.inner_size
                pic = y[:,startY:endY,startX:endX, c]
                if nr.randint(2) == 0: # also flip the image with 50% probability
                    pic = pic[:,:,::-1]
                target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
    # Provider built on LabeledDummyDataProvider -- presumably serves
    # synthetic labeled data for smoke-testing; confirm against the base
    # class.  NOTE(review): Python 2 file (print statement below).
    def __init__(self, data_dim):
        LabeledDummyDataProvider.__init__(self, data_dim)
        # assumes 3-channel square images, i.e. data_dim = 3 * img_size**2
        self.img_size = int(sqrt(data_dim/3))
    def get_next_batch(self):
        epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
        # repackage the (data, labels) pair, echo the shapes, and return
        dic = {'data': dic[0], 'labels': dic[1]}
        print dic['data'].shape, dic['labels'].shape
        return epoch, batchnum, [dic['data'], dic['labels']]
    # Returns the dimensionality of the two data matrices returned by get_next_batch
    def get_data_dims(self, idx=0):
        return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
depet/scikit-learn | examples/applications/face_recognition.py | 12 | 5368 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Gerhard_Schroeder 0.91 0.75 0.82 28
Donald_Rumsfeld 0.84 0.82 0.83 33
Tony_Blair 0.65 0.82 0.73 34
Colin_Powell 0.78 0.88 0.83 58
George_W_Bush 0.93 0.86 0.90 129
avg / total 0.86 0.84 0.85 282
"""
from __future__ import print_function
from time import time
import logging
import pylab as pl
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2D data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
# grid-search over the RBF kernel's penalty (C) and bandwidth (gamma)
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Plot an n_row x n_col grid of h-by-w portrait images with their titles."""
    pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    n_cells = n_row * n_col
    for idx in range(n_cells):
        pl.subplot(n_row, n_col, idx + 1)
        # each image arrives flattened; restore its 2D shape for display
        pl.imshow(images[idx].reshape((h, w)), cmap=pl.cm.gray)
        pl.title(titles[idx], size=12)
        pl.xticks(())
        pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
    """Build a 'predicted vs true' label for case `i` using each person's last name."""
    predicted = target_names[y_pred[i]].rsplit(' ', 1)[-1]
    actual = target_names[y_test[i]].rsplit(' ', 1)[-1]
    return 'predicted: %s\ntrue: %s' % (predicted, actual)
# one title string per test case
prediction_titles = [title(y_pred, y_test, target_names, i)
                     for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show()
| bsd-3-clause |
minrk/oauthenticator | docs/source/conf.py | 2 | 4506 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from subprocess import check_call
# `source` is the directory containing this conf.py; the parent directory is
# added to sys.path so the package can be imported below.
source = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'OAuthenticator'
copyright = 'Jupyter Contributors'
author = 'Jupyter Contributors'
master_doc = 'index'
import oauthenticator
# The short X.Y version.
version = '%i.%i' % oauthenticator.version_info[:2]
# The full version, including alpha/beta/rc tags.
release = oauthenticator.__version__
# -- generate autodoc classes from entrypoints
from collections import defaultdict
import entrypoints
import jinja2
def render_autodoc_modules():
    """Generate per-module autodoc stub pages for all Authenticator classes.

    Discovers authenticator classes registered under the
    ``jupyterhub.authenticators`` entrypoint group and renders one
    ``api/gen/<module>.rst`` page per ``oauthenticator.*`` module, plus the
    ``api/index.rst`` module index, from the Jinja templates in ``api/``.
    Writes files as a side effect; returns None.
    """
    authenticator_entrypoints = entrypoints.get_group_named(
        "jupyterhub.authenticators"
    ).values()
    api = os.path.join(source, "api")
    api_gen = os.path.join(api, "gen")
    # modules is a dict of dicts of lists
    # { '$module': { 'classes': [...], 'configurables': [...] } }
    modules = defaultdict(lambda : defaultdict(list))
    # pre-load base classes
    modules['oauthenticator.oauth2'] = {
        'classes': [
            'OAuthLoginHandler',
            'OAuthCallbackHandler',
        ],
        'configurables': [
            'OAuthenticator',
        ],
    }
    # load Authenticator classes from entrypoints
    for ep in authenticator_entrypoints:
        if ep.module_name and ep.module_name.startswith('oauthenticator.'):
            modules[ep.module_name]['configurables'].append(ep.object_name)
    with open(os.path.join(api, "authenticator.rst.tpl")) as f:
        tpl = jinja2.Template(f.read())
    try:
        os.makedirs(os.path.join(api_gen))
    except FileExistsError:
        pass
    for mod, mod_content in modules.items():
        dest = os.path.join(api_gen, mod + ".rst")
        print(
            "Autogenerating module documentation in {} with classes: {}".format(
                dest, mod_content
            )
        )
        with open(dest, "w") as f:
            f.write(tpl.render(module=mod, **mod_content))
    # render the module index
    with open(os.path.join(api, "index.rst.tpl")) as f:
        index_tpl = jinja2.Template(f.read())
    with open(os.path.join(api, "index.rst"), "w") as f:
        f.write(index_tpl.render(modules=modules))
render_autodoc_modules()
# mock heavyweight/optional imports so autodoc can import modules without them
autodoc_mock_imports = ["tornado", "jwt", "mwoauth", "globus_sdk"]
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'autodoc_traits',
    'sphinx_copybutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
import recommonmark
from recommonmark.transform import AutoStructify
def setup(app):
    # Sphinx extension hook: enable recommonmark's eval_rst in Markdown
    # sources, ship the project stylesheet, and auto-structure Markdown docs.
    app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)
    app.add_stylesheet('custom.css')
    app.add_transform(AutoStructify)
# parse Markdown files with recommonmark alongside reStructuredText
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pandas_sphinx_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| bsd-3-clause |
mlyundin/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator (fixed seed for reproducible shuffling below)
np.random.seed(0)
def generate_data(case, sparse=False):
    """Generate regression/classification data.

    Loads Boston housing ('regression') or vectorized 20 Newsgroups
    ('classification'), shuffles, and performs an 80/20 train/test split.
    Returns a dict with keys 'X_train', 'X_test', 'y_train', 'y_test'.
    """
    bunch = None
    if case == 'regression':
        bunch = datasets.load_boston()
    elif case == 'classification':
        bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
    X, y = shuffle(bunch.data, bunch.target)
    split = int(X.shape[0] * 0.8)
    X_train, X_test = X[:split], X[split:]
    y_train, y_test = y[:split], y[split:]
    if sparse:
        # keep the feature matrices in CSR form
        X_train = csr_matrix(X_train)
        X_test = csr_matrix(X_test)
    else:
        X_train = np.array(X_train)
        X_test = np.array(X_test)
        y_test = np.array(y_test)
        y_train = np.array(y_train)
    return {'X_train': X_train, 'X_test': X_test,
            'y_train': y_train, 'y_test': y_test}
def benchmark_influence(conf):
    """
    Benchmark influence of :changing_param: on both MSE and latency.

    For each value in conf['changing_param_values'] the estimator is
    re-fitted, its complexity measured via conf['complexity_computer'],
    and the prediction latency averaged over conf['n_samples'] repeated
    predictions on the test set.
    Returns (prediction_powers, prediction_times, complexities).
    """
    prediction_times = []
    prediction_powers = []
    complexities = []
    for param_value in conf['changing_param_values']:
        conf['tuned_params'][conf['changing_param']] = param_value
        estimator = conf['estimator'](**conf['tuned_params'])
        print("Benchmarking %s" % estimator)
        estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
        conf['postfit_hook'](estimator)
        complexity = conf['complexity_computer'](estimator)
        complexities.append(complexity)
        start_time = time.time()
        # repeat the prediction to get a stable latency estimate; all
        # repetitions produce the same y_pred, so scoring the last one
        # after the loop is intentional
        for _ in range(conf['n_samples']):
            y_pred = estimator.predict(conf['data']['X_test'])
        elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
        prediction_times.append(elapsed_time)
        pred_score = conf['prediction_performance_computer'](
            conf['data']['y_test'], y_pred)
        prediction_powers.append(pred_score)
        print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
            complexity, conf['prediction_performance_label'], pred_score,
            elapsed_time))
    return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
    """
    Plot influence of model complexity on both accuracy and latency.

    Draws prediction error and latency against complexity on a shared
    x-axis with two y-axes, then shows the figure.
    """
    plt.figure(figsize=(12, 6))
    host = host_subplot(111, axes_class=Axes)
    plt.subplots_adjust(right=0.75)
    par1 = host.twinx()  # second y-axis for the latency curve
    host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
    y1_label = conf['prediction_performance_label']
    y2_label = "Time (s)"
    host.set_ylabel(y1_label)
    par1.set_ylabel(y2_label)
    p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
    p2, = par1.plot(complexities, prediction_times, 'r-',
                    label="latency")
    host.legend(loc='upper right')
    # color each axis label to match its curve
    host.axis["left"].label.set_color(p1.get_color())
    par1.axis["right"].label.set_color(p2.get_color())
    plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
    plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
# one configuration per model family: which parameter to vary, how to measure
# the resulting model's complexity, and which score to report
configurations = [
    {'estimator': SGDClassifier,
     'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
                      'modified_huber', 'fit_intercept': True},
     'changing_param': 'l1_ratio',
     'changing_param_values': [0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'non_zero coefficients',
     'complexity_computer': _count_nonzero_coefficients,
     'prediction_performance_computer': hamming_loss,
     'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
     'postfit_hook': lambda x: x.sparsify(),
     'data': classification_data,
     'n_samples': 30},
    {'estimator': NuSVR,
     'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
     'changing_param': 'nu',
     'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'n_support_vectors',
     'complexity_computer': lambda x: len(x.support_vectors_),
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
    {'estimator': GradientBoostingRegressor,
     'tuned_params': {'loss': 'ls'},
     'changing_param': 'n_estimators',
     'changing_param_values': [10, 50, 100, 200, 500],
     'complexity_label': 'n_trees',
     'complexity_computer': lambda x: x.n_estimators,
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
]
for conf in configurations:
    prediction_performances, prediction_times, complexities = \
        benchmark_influence(conf)
    plot_influence(conf, prediction_performances, prediction_times,
                   complexities)
| bsd-3-clause |
yousrabk/mne-python | mne/viz/tests/test_3d.py | 4 | 7511 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_equal
from mne import (make_field_map, pick_channels_evoked, read_evokeds,
read_trans, read_dipole, SourceEstimate)
from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
plot_trans)
from mne.utils import requires_mayavi, requires_pysurfer, run_tests_if_main
from mne.datasets import testing
from mne.source_space import read_source_spaces
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg')  # for testing don't use X server
# paths into the (optionally downloaded) mne testing dataset
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
trans_fname = op.join(data_dir, 'MEG', 'sample',
                      'sample_audvis_trunc-trans.fif')
src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
                    'sample-oct-6-src.fif')
dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
warnings.simplefilter('always')  # enable b/c these tests throw warnings
@testing.requires_testing_data
@requires_pysurfer
@requires_mayavi
def test_plot_sparse_source_estimates():
    """Test plotting of (sparse) source estimates.

    First plots a dense SourceEstimate (checking colormap/clim options),
    then a sparse one containing just two active vertices.
    """
    sample_src = read_source_spaces(src_fname)
    # dense version
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    # integer division: np.random.rand requires integer sizes; true division
    # yields a float and raises a TypeError on Python 3
    stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
        np.random.RandomState(0).rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    colormap = 'mne_analyze'
    plot_source_estimates(stc, 'sample', colormap=colormap,
                          config_opts={'background': (1, 1, 0)},
                          subjects_dir=subjects_dir, colorbar=True,
                          clim='auto')
    assert_raises(TypeError, plot_source_estimates, stc, 'sample',
                  figure='foo', hemi='both', clim='auto')
    # now do sparse version
    vertices = sample_src[0]['vertno']
    inds = [111, 333]
    stc_data = np.zeros((len(inds), n_time))
    stc_data[0, 1] = 1.
    stc_data[1, 4] = 2.
    # use the builtin int: the np.int alias was removed from modern numpy
    vertices = [vertices[inds], np.empty(0, dtype=int)]
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
                                 opacity=0.5, high_resolution=False)
@testing.requires_testing_data
@requires_mayavi
def test_plot_evoked_field():
    """Test plotting evoked field
    """
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # subsample channels to keep the test fast
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
    for t in ['meg', None]:
        maps = make_field_map(evoked, trans_fname, subject='sample',
                              subjects_dir=subjects_dir, n_jobs=1, ch_type=t)
        evoked.plot_field(maps, time=0.1)
@testing.requires_testing_data
@requires_mayavi
def test_plot_trans():
    """Test plotting of -trans.fif files
    """
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    plot_trans(evoked.info, trans_fname, subject='sample',
               subjects_dir=subjects_dir)
    # an unknown channel type must raise
    assert_raises(ValueError, plot_trans, evoked.info, trans_fname,
                  subject='sample', subjects_dir=subjects_dir,
                  ch_type='bad-chtype')
@testing.requires_testing_data
@requires_pysurfer
@requires_mayavi
def test_limits_to_control_points():
    """Test functionality for determing control points
    """
    sample_src = read_source_spaces(src_fname)
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    # NOTE(review): rand((n,)) here is called with a scalar -- the extra
    # parentheses are redundant, not a tuple
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
    # Test for simple use cases
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
                  subjects_dir=subjects_dir)
    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
                  subjects_dir=subjects_dir)
    # Test handling of degenerate data
    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
             subjects_dir=subjects_dir)  # ok
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps: only the all-zero map is expected to warn
        stc._data.fill(1.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data[0].fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data.fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 1)
    mlab.close()
@testing.requires_testing_data
@requires_mayavi
def test_plot_dipole_locations():
    """Test plotting dipole locations
    """
    dipoles = read_dipole(dip_fname)
    trans = read_trans(trans_fname)
    dipoles.plot_locations(trans, 'sample', subjects_dir, fig_name='foo')
    # an unknown plotting mode must raise
    assert_raises(ValueError, dipoles.plot_locations, trans, 'sample',
                  subjects_dir, mode='foo')
run_tests_if_main()
| bsd-3-clause |
achim1/HErmes | HErmes/selection/variables.py | 2 | 26753 | """
Container classes for variables
"""
import numpy as np
import os
import pandas as pd
import tables
import abc
import enum
import array
import numbers
from ..utils import files as f
from ..utils import Logger
from copy import deepcopy as copy
from collections import Sequence
from itertools import chain, count
# presumably the default histogram binning -- confirm at usage sites
DEFAULT_BINS = 70
# file types the harvesters below know how to read; ".root" is appended
# only when the optional uproot dependency is available
REGISTERED_FILEEXTENSIONS = [".h5"]
try:
    import uproot as ur
    import uproot_methods.classes.TVector3 as TVector3
    import uproot_methods.classes.TLorentzVector as TLorentzVector
    import uproot_methods.classes.TH1
    REGISTERED_FILEEXTENSIONS.append(".root")
except ImportError:
    Logger.warning("No uproot found, root support is limited!")
# helper
def _depth(seq):
"""
Infer out the depth of a nested sequence.
"""
for level in count(1):
#print (seq)
if not hasattr(seq,"__iter__"):
return level
else:
if not hasattr(seq[0],"__iter__"):
return level + 1
else:
if len(seq[0]) == 0:
return level + 1
seq = seq[0]
#seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence)))
if level > 5:
raise ValueError("This data has a nesting level > 5. This seems at the edge of being useful")
################################################################
# define a non-member function so that it can be used in a
# multiprocessing approach
def extract_from_root(filename, definitions,
                      nevents=None,
                      dtype=np.float64,
                      transform = None,
                      reduce_dimension=None):
    """
    Use the uproot system to get information from rootfiles. Supports a basic tree of
    primitive datatype like structure.

    Args:
        filename (str): datafile
        definitions (list): tree and branch addresses; each entry is a tuple
                            (treename, branch, [sub-branch, ...]) and the
                            first entry that resolves in the file is used

    Keyword Args:
        nevents (int): number of events to read out
        reduce_dimension (int): If data is vector-type, reduce it by taking the n-th element
        dtype (np.dtype): A numpy datatype, default double (np.float64) - use smaller dtypes to
                          save memory
        transform (func): A function which directly transforms the readout data

    Returns:
        tuple: (data, can_be_concatted) where data is a pd.Series (or an
               array-like for flattened vector data) and can_be_concatted
               says whether the result can be concatenated with others
    """
    can_be_concatted = False
    rootfile = ur.open(filename)
    success = False
    i=0
    branch = None
    # it will most likely work only with TTrees
    # try each definition in turn until one resolves in this file
    while not success:
        try:
            tree = rootfile.get(definitions[i][0])
            branch = tree
            for address in definitions[i][1:]:
                Logger.debug("Searching for address {}".format(address))
                branch = branch.get(address)
            #tree = file[definitions[i][0]]
            #branch = rootfile[definitions[i][0]].get(definitions[i][1])
            success = True
        except KeyError as e:
            Logger.warning(f"Can not find address {definitions[i]}")
            i+=1
        except IndexError:
            Logger.critical(f"None of the provided keys could be found {definitions}")
            break
    Logger.debug(f"Found valid definitions {definitions[i]}")
    ##FiXME make logger.critical end program!
    if nevents is not None:
        data = branch.array(entrystop=nevents)
    else:
        data = branch.array()
    # check for dimensionality: scalar entries raise TypeError on len()
    multidim = False
    try:
        len(data[0])
        multidim = True
    except TypeError:
        Logger.debug(f"Assuming scalar data {definitions[i]}")
    if multidim:
        Logger.debug("Inspecting data...")
        tmp_lengths = set([len(k) for k in data])
        Logger.debug("Found {}".format(tmp_lengths))
        firstlen = list(tmp_lengths)[0]
        if (len(tmp_lengths) == 1) and (firstlen == 1):
            # every entry is a length-1 vector -> effectively scalar data;
            # flatten it into a plain array
            multidim = False
            Logger.debug("Found data containing iterables of size 1... flattening!")
            del tmp_lengths
            if dtype != np.float64:
                tmpdata = array.array("f",[])
            else:
                tmpdata = array.array("d",[])
            if isinstance(data[0][0], numbers.Number):
                [tmpdata.append(dtype(k)) for k in data]
                #tmpdata = np.asarray([k[0] for k in data])
                #multidim = True
                data = tmpdata
                del tmpdata
            else:
                Logger.info("Is multidim data")
                multidim = True
        else:
            del tmp_lengths
            multidim = True
            Logger.debug("Assuming array data {}".format(definitions[i]))
    if reduce_dimension is not None:
        if not multidim:
            raise ValueError("Can not reduce scalar data!")
        if isinstance(reduce_dimension, int):
            # pick a single component per entry -> data becomes scalar
            data = np.array([k[reduce_dimension] for k in data], dtype=dtype)
            multidim = False
        else:
            data = [[k[reduce_dimension[1]] for k in j] for j in data]
    if multidim:
        Logger.debug("Grabbing multidimensional data from root-tree for {}".format(definitions[i]))
        # re-read the branch: `data` may have been altered above
        del data
        if nevents is None:
            data = branch.array() #this will be a jagged array now!
        else:
            data = branch.array(entrystop=nevents)
        del branch
        if (len(data[0])):
            if isinstance(data[0][0], TVector3.TVector3):
                Logger.info("Found TVector3 data, treating appropriatly")
                data = pd.Series([np.array([i.x,i.y,i.z], dtype=dtype) for i in data])
            if isinstance(data[0][0], TLorentzVector.TLorentzVector):
                Logger.info("Found TLorentzVector data, treating appropriatly")
                data = pd.Series([np.array([i.x,i.y,i.z,i.t], dtype=dtype) for i in data])
        else: # probably number then
            data = pd.Series([np.asarray(i,dtype=dtype) for i in data])
        # the below might not be picklable (multiprocessing!)
        #tmpdata = [i for i in data]
        # FIXME: dataframe/series
        # try to convert this to a pandas dataframe
        #data = pd.DataFrame(tmpdata)
        can_be_concatted = True
        # mark the series so downstream code knows it holds vectors per entry
        data.multidim = True
    else:
        Logger.debug("Grabbing scalar data from root-tree for {}".format(definitions[i]))
        # convert in cases of TVector3/TLorentzVector
        if isinstance(data[0], TVector3.TVector3):
            Logger.debug("Found TVector3")
            data = pd.Series([np.array([i.x,i.y,i.z], dtype=dtype) for i in data])
        elif isinstance(data[0], TLorentzVector.TLorentzVector):
            Logger.debug("Found TLorentzVector")
            data = pd.Series([np.array([i.x,i.y,i.z, i.t], dtype=dtype) for i in data])
        else:
            try:
                #FIXME: why is that asarray needed?
                #data = pd.Series(np.asarray(data,dtype=dtype))
                data = pd.Series(data,dtype=dtype)
            except TypeError: # data consist of some object
                data = pd.Series(data)
        Logger.debug("Got {} elements for {}".format(len(data), definitions[i]))
        can_be_concatted = True
    if transform is not None:
        data = transform(data)
    return data, can_be_concatted
################################################################
# define a non-member function so that it can be used in a
# multiprocessing approach
def harvest(filenames, definitions, **kwargs):
    """
    Read variables from files into memory. Will be used by HErmes.selection.variables.Variable.harvest
    This will be run multi-threaded. Keep that in mind, arguments have to be picklable,
    also everything thing which is read out must be picklable. Lambda functions are NOT picklable

    Args:
        filenames (list): the files to extract the variables from.
                          currently supported: hdf
        definitions (list): where to find the data in the files. They usually
                            have some tree-like structure, so this a list
                            of leaf-value pairs. If there is more than one
                            all of them will be tried. (As it might be that
                            in some files a different naming scheme was used)
                            Example: [("hello_reoncstruction", "x"), ("hello_reoncstruction", "y")]

    Keyword Args:
        transformation (func): After the data is read out from the files,
                               transformation will be applied, e.g. the log
                               to the energy.
        fill_empty (bool): Fill empty fields with zeros
        nevents (int): ROOT only - read out only nevents from the files
        reduce_dimension (str): ROOT only - multidimensional data can be reduced by only
                                using the index given by reduce_dimension.
                                E.g. in case of a TVector3, and we want to have only
                                x, that would be 0, y -> 1 and z -> 2.
        dtype (np.dtype): datatype to cast to (default np.float64, but can be used
                          to reduce memory footprint).

    Returns:
        pd.Series or pd.DataFrame
    """
    nevents = kwargs.get("nevents")
    fill_empty = kwargs.get("fill_empty", False)  # NOTE: accepted for API parity, not used here yet
    reduce_dimension = kwargs.get("reduce_dimension")
    transform = kwargs.get("transformation")
    dtype = kwargs.get("dtype", np.float64)
    concattable = True
    data = pd.Series(dtype=dtype)
    for filename in filenames:
        filetype = f.strip_all_endings(filename)[1]
        assert filetype in REGISTERED_FILEEXTENSIONS, "Filetype {} not known!".format(filetype)
        # FIX: the message previously reported the filetype instead of the missing file
        assert os.path.exists(filename), "File {} does not exist!".format(filename)
        if (filetype == ".h5") and (transform is not None):
            Logger.critical("Can not apply direct transformation for h5 files (yet). This is only important for root files and varaibles which are used as VariableRole.PARAMETER")
        Logger.debug("Attempting to harvest {1} file {0}".format(filename, filetype))
        # open the file unless we already got an open pytables table handed in
        if filetype == ".h5" and not isinstance(filename, tables.table.Table):
            hdftable = tables.open_file(filename)
        else:
            hdftable = filename
        tmpdata = pd.Series(dtype=dtype)
        for definition in definitions:
            definition = list(definition)
            if filetype == ".h5":
                # pytables node addresses are absolute paths
                if not definition[0].startswith("/"):
                    definition[0] = "/" + definition[0]
                try:
                    if not definition[1]:
                        tmpdata = hdftable.get_node(definition[0])
                    else:
                        tmpdata = hdftable.get_node(definition[0]).col(definition[1])
                    if tmpdata.ndim == 2:
                        if data.empty:
                            data = pd.DataFrame()
                        tmpdata = pd.DataFrame(tmpdata, dtype=dtype)
                    else:
                        tmpdata = pd.Series(tmpdata, dtype=dtype)
                    Logger.debug("Found {} entries in table for {}{}".format(len(tmpdata), definition[0], definition[1]))
                    break
                except tables.NoSuchNodeError:
                    Logger.debug("Can not find definition {0} in {1}! ".format(definition, filename))
                    continue
            elif filetype == ".root":
                tmpdata, concattable = extract_from_root(filename, definitions,
                                                         nevents=nevents,
                                                         dtype=dtype,
                                                         transform=transform,
                                                         reduce_dimension=reduce_dimension)
        if filetype == ".h5":
            hdftable.close()
        # concat only works on pandas containers
        if not isinstance(tmpdata, (pd.Series, pd.DataFrame)):
            concattable = False
        if not concattable:
            Logger.warning(f"Data {definitions} can not be concatted, keep that in mind!")
            try:
                tmpdata = pd.Series(tmpdata)
            except Exception:
                # FIX: was a bare `except:`; e.g. generators / pytables nodes
                # need to be materialized first
                tmpdata = [k for k in tmpdata]
                tmpdata = pd.Series(tmpdata)
        data = pd.concat([data, tmpdata])
        del tmpdata
    return data
################################################################
def freedman_diaconis_bins(data, leftedge,
                           rightedge, minbins=20,
                           maxbins=70, fallbackbins=DEFAULT_BINS):
    """
    Get a number of bins for a histogram
    following Freedman/Diaconis

    Args:
        data (np.ndarray): the data to be histogrammed
        leftedge (float): left bin edge
        rightedge (float): right bin edge
        minbins (int): the minimum number of bins
        maxbins (int): the maximum number of bins
        fallbackbins (int): a number of bins which is returned
                            if calculation fails

    Returns:
        nbins (int): number of bins, minbins < bins < maxbins
    """
    try:
        finite_data = np.isfinite(data)
        q3 = np.percentile(data[finite_data], 75)
        q1 = np.percentile(data[finite_data], 25)
        n_data = len(data)
        if q3 == q1:
            Logger.warning("Can not calculate bins, falling back... to min max approach")
            # FIX: take the extrema of the finite data, not of the boolean mask
            q3 = max(data[finite_data])
            q1 = min(data[finite_data])
        # Freedman-Diaconis rule: h = 2*IQR / n^(1/3)
        # FIX: was `n_data**1./3`, which is (n_data**1)/3 due to operator
        # precedence, i.e. n/3 instead of the cube root
        h = (2 * (q3 - q1)) / (n_data ** (1. / 3))
        bins = (rightedge - leftedge) / h
        if not np.isfinite(bins):
            Logger.info(f"Got some nan somewhere: q1 : {q1}, q3 : {q3}, n_data : {n_data}, h : {h}")
            Logger.warning("Calculate Freedman-Draconis bins failed, calculated nan bins, returning fallback")
            bins = fallbackbins
        if bins < minbins:
            bins = minbins
        if bins > maxbins:
            bins = maxbins
        # np.linspace (see calculate_fd_bins) requires an integer bin count
        bins = int(bins)
    except Exception as e:
        Logger.warning(f"Calculate Freedman-Draconis bins failed {e}")
        bins = fallbackbins
    return bins
#############################################################
class VariableRole(enum.Enum):
    """
    Define roles for variables. Some variables used in a special context (like weights)
    are easily recognizable by this flag.
    """
    UNKNOWN = 0
    SCALAR = 10           # plain per-event scalar quantity
    ARRAY = 20            # per-event array-like quantity
    GENERATORWEIGHT = 30  # weight coming from the event generator
    RUNID = 40            # run identifier
    EVENTID = 50          # event identifier
    # NOTE(review): likely meant STARTTIME - renaming would break the public API
    STARTIME = 60
    ENDTIME = 70
    FLUXWEIGHT = 80       # weight from a flux model
    PARAMETER = 90 # a single parameter, no array whatsoever
##############################################################
class AbstractBaseVariable(metaclass=abc.ABCMeta):
    """
    Read out tagged numerical data from files.

    The payload lives in ``self._data`` (pandas Series/DataFrame or numpy
    array). Subclasses must implement :meth:`rewire_variables`.
    """
    # class-level defaults, overwritten per instance
    _harvested = False
    _bins = None
    ROLES = VariableRole

    def __hash__(self):
        # hash on the (unique) name so variables can live in sets/dicts
        return hash(self.name)

    def __repr__(self):
        return """<Variable: {}>""".format(self.name)

    def __eq__(self, other):
        return self.name == other.name

    def __lt__(self, other):
        # order variables alphabetically by name
        return sorted([self.name, other.name])[0] == self.name

    def __gt__(self, other):
        return sorted([self.name, other.name])[1] == self.name

    def __le__(self, other):
        return self < other or self == other

    def __ge__(self, other):
        return self > other or self == other

    def declare_harvested(self):
        """Mark this variable as read out."""
        self._harvested = True

    @property
    def harvested(self):
        """bool: True if the data has been read out already."""
        return self._harvested

    @property
    def bins(self):
        """numpy.ndarray: histogram binning (Freedman-Diaconis when unset)."""
        if self._bins is None:
            return self.calculate_fd_bins()
        else:
            return self._bins

    @bins.setter
    def bins(self, value):
        self._bins = value

    def calculate_fd_bins(self, cutmask=None):
        """
        Calculate a reasonable binning

        Keyword Args:
            cutmask (numpy.ndarray) : a boolean mask to cut on, in case
                                      cuts have been applied to the
                                      category this data is part of
        Returns:
            numpy.ndarray: Freedman Diaconis bins
        """
        tmpdata = self.data
        if cutmask is not None:
            if len(cutmask) > 0:
                tmpdata = tmpdata[cutmask]
        try:
            # probe that the data is orderable/non-empty before binning
            min(tmpdata)
        except Exception as e:
            Logger.warning(f"Can not infere minimum of {tmpdata}. Fall back to DEFAULT_BINS. This is a bug!")
            return DEFAULT_BINS
        nbins = freedman_diaconis_bins(tmpdata, min(tmpdata), max(tmpdata))
        bins = np.linspace(min(tmpdata), max(tmpdata), nbins)
        return bins

    def harvest(self, *files):
        """
        Hook to the harvest method. Don't use in case of multiprocessing!

        Args:
            *files: walk through these files and readout
        """
        if self.role == VariableRole.PARAMETER:
            # parameters are single values - read out and unwrap
            self._data = harvest(files, self.definitions, transformation=self.transform)
            self._data = self._data[0]
        else:
            self._data = harvest(files, self.definitions)
        self.declare_harvested()

    @abc.abstractmethod
    def rewire_variables(self, vardict):
        return

    @property
    def ndim(self):
        """
        Infer the nesting depth of the data
        """
        if hasattr(self._data, "multidim"):
            if self._data.multidim == True:
                return 2
        if self._data.ndim == 1:
            # check again - object arrays can hide nested sequences
            level = _depth(self._data)
            if level != self._data.ndim:
                # FIX: was `self._data.ndmin` (typo) which raised an
                # AttributeError whenever this warning triggered
                Logger.warning(f"Discrepancy in dimensionality found {level} VS {self._data.ndim}")
            return level
        return self._data.ndim

    @property
    def data(self):
        """Return the underlying data; DataFrames are unwrapped to arrays."""
        if isinstance(self._data, pd.DataFrame):
            # FIXME: as_matrix is deprecated in favor of values
            return self._data.values
        if not hasattr(self._data, "shape"):
            Logger.warning("Something's wrong, this should be array data!")
            Logger.warning(f"Seeeing {type(self._data)} data")
            Logger.warning("Attempting to fix!")
            self._data = np.asarray(self._data)
        return self._data
############################################
class Variable(AbstractBaseVariable):
    """
    A hook to a single variable read out from a file
    """

    def __init__(self, name, definitions=None,
                 bins=None, label="", transform=None,
                 role=VariableRole.SCALAR,
                 nevents=None,
                 reduce_dimension=None):
        """
        Args:
            name (str): An unique identifier

        Keyword Args:
            definitions (list): table and/or column names in underlying data
            bins (numpy.ndarray): used for histograms
            label (str): used for plotting and as a label in tables
            transform (func): apply to each member of the underlying data at readout
            role (HErmes.selection.variables.VariableRole): The role the variable is playing.
                                                            In most cases the default is the best choice
            nevents (int): number of events to read in (ROOT only right now!)
            reduce_dimension (int): in case of multidimensionality,
                                    take only the given index of the array (ROOT only right now)
        """
        AbstractBaseVariable.__init__(self)
        # number of entries per definition (0 when no definitions given)
        self.defsize = 0 if definitions is None else len(definitions[0])
        self.name = name
        self.role = role
        self._role = role
        self.label = label
        self.bins = bins  # goes through the property setter
        self.transform = transform
        self.definitions = definitions
        self.nevents = nevents
        self.reduce_dimension = reduce_dimension
        self._data = pd.Series(dtype=np.float64)

    def rewire_variables(self, vardict):
        """
        Make sure all the variables are connected properly. This is
        only needed for combined/compound variables

        Returns:
            None
        """
        pass
##########################################################
class CompoundVariable(AbstractBaseVariable):
    """
    Calculate a variable from other variables. This kind of variable will not read any file.
    """

    def __init__(self, name, variables=None, label="",
                 bins=None, operation=lambda x, y: x + y,
                 role=VariableRole.SCALAR,
                 dtype=np.float64):
        """
        A compound variable is a variable which is created from two or more other variables. This variable does not have
        a direct representation in a file, but gets calculated on the fly instead, e.g. a residual of two other variables
        The 'operation' denoted function here defines what operator should be applied to the variables to create the new
        compound variable

        Args:
            name (str) : An unique identifier for the new variable.

        Keyword Args:
            variables (list) : A list of variables used to calculate the new variable.
            label (str) : A label for plotting.
            bins (np.ndarray) : binning for distributions.
            operation (fnc) : The operation which will be applied to variables.
            role (HErmes.selection.variables.VariableRole) : The role the variable is playing.
                                                             In most cases the default is the best choice. Assigning roles
                                                             to variables allows for special magic, e.g. in the case
                                                             of weighting
            dtype (np.dtype) : dtype of the (initially empty) data container.
        """
        AbstractBaseVariable.__init__(self)
        self.name = name
        self.role = role
        self.label = label
        self.bins = bins
        if variables is None:
            variables = []
        self.variables = variables
        self.operation = operation
        # FIX: the `dtype` keyword was accepted but ignored (np.float64 was
        # hard-coded); the dtype also suppresses an empty-Series warning
        self._data = pd.Series(dtype=dtype)
        self.definitions = ((self.__repr__()),)

    def rewire_variables(self, vardict):
        """
        Use to avoid the necessity to read out variables twice
        as the variables are copied over by the categories,
        the refernce is lost. Can be rewired though
        """
        self.variables = [vardict[var.name] for var in self.variables]

    def __repr__(self):
        # FIX: join with a separator - previously the names were mashed together
        return """<CompoundVariable {} created from: {}>""".format(
            self.name, ",".join([x.name for x in self.variables]))

    def harvest(self, *filenames):
        """Compute this variable from its (already harvested) inputs.

        Args:
            *filenames: ignored, present for API compatibility only.
        """
        if self.harvested:
            return
        harvested = [var for var in self.variables if var.harvested]
        if not len(harvested) == len(self.variables):
            Logger.error("Variables have to be harvested for compound variable {0} first!".format(self.variables))
            Logger.error("Only {} is harvested".format(harvested))
            return
        self._data = self.operation(*[var.data for var in self.variables])
        self.declare_harvested()
##########################################################
class VariableList(AbstractBaseVariable):
    """
    A list of variable. Can not be read out from files.
    """

    def __init__(self, name, variables=None, label="", bins=None, role=VariableRole.SCALAR):
        """
        Args:
            name (str): An unique identifier for the new category.

        Keyword Args:
            variables (list): A list of variables used to calculate the new variable.
            label (str): A label for plotting.
            bins (np.ndarray): binning for distributions.
            role (HErmes.selection.variables.VariableRole): The role the variable is playing. In most cases the default is the best choice
        """
        AbstractBaseVariable.__init__(self)
        self.name = name
        self.label = label
        self.bins = bins
        self.variables = [] if variables is None else variables

    def harvest(self, *filenames):
        """Declare harvested once every member variable has been read out."""
        # filenames is ignored - present for API compatibility only.
        # Do not calculate weights yet!
        if self.harvested:
            return
        ready = [var for var in self.variables if var.harvested]
        if len(ready) != len(self.variables):
            Logger.error("Variables have to be harvested for compound variable {} first!".format(self.name))
            return
        self.declare_harvested()

    def rewire_variables(self, vardict):
        """
        Use to avoid the necessity to read out variables twice
        as the variables are copied over by the categories,
        the refernce is lost. Can be rewired though
        """
        self.variables = [vardict[var.name] for var in self.variables]

    @property
    def data(self):
        """list: the data of every member variable."""
        return [x.data for x in self.variables]
| gpl-2.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/core/nanops.py | 7 | 23743 | import itertools
import functools
import operator
import numpy as np
from pandas import compat
from pandas._libs import tslib, algos, lib
from pandas.core.dtypes.common import (
_get_dtype,
is_float, is_scalar,
is_integer, is_complex, is_float_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_numeric_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype,
is_int_or_datetime_dtype, is_any_int_dtype)
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
from pandas.core.dtypes.missing import isnull, notnull
from pandas.core.config import get_option
from pandas.core.common import _values_from_object
try:
import bottleneck as bn
_BOTTLENECK_INSTALLED = True
except ImportError: # pragma: no cover
_BOTTLENECK_INSTALLED = False
_USE_BOTTLENECK = False
def set_use_bottleneck(v=True):
    # set/unset to use bottleneck
    # (no-op when bottleneck could not be imported at module load time)
    global _USE_BOTTLENECK
    if _BOTTLENECK_INSTALLED:
        _USE_BOTTLENECK = v
# honor the user's pandas option at import time
set_use_bottleneck(get_option('compute.use_bottleneck'))
class disallow(object):
    """Decorator factory: reject reduction on the given dtypes.

    The wrapped function raises TypeError when any argument has a dtype in
    ``dtypes`` (e.g. 'M8' to forbid datetime64 input).
    """

    def __init__(self, *dtypes):
        super(disallow, self).__init__()
        # normalize dtype specs to numpy scalar types for issubclass checks
        self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)

    def check(self, obj):
        # True when obj carries one of the disallowed dtypes
        return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
                                                    self.dtypes)

    def __call__(self, f):
        @functools.wraps(f)
        def _f(*args, **kwargs):
            # scan both positional and keyword arguments
            obj_iter = itertools.chain(args, compat.itervalues(kwargs))
            if any(self.check(obj) for obj in obj_iter):
                raise TypeError('reduction operation {0!r} not allowed for '
                                'this dtype'.format(
                                    f.__name__.replace('nan', '')))
            try:
                with np.errstate(invalid='ignore'):
                    return f(*args, **kwargs)
            except ValueError as e:
                # we want to transform an object array
                # ValueError message to the more typical TypeError
                # e.g. this is normally a disallowed function on
                # object arrays that contain strings
                if is_object_dtype(args[0]):
                    raise TypeError(e)
                raise
        return _f
class bottleneck_switch(object):
    """Decorator: dispatch to the same-named bottleneck function when safe.

    Falls back to the decorated (pure-numpy) implementation when bottleneck
    is unavailable, disabled, unreliable for the dtype, or when its result
    contains +/-inf (which pandas prefers to treat as NA).
    """

    def __init__(self, zero_value=None, **kwargs):
        # zero_value: value returned for empty input; kwargs become default
        # keyword arguments of the wrapped function
        self.zero_value = zero_value
        self.kwargs = kwargs

    def __call__(self, alt):
        bn_name = alt.__name__
        try:
            bn_func = getattr(bn, bn_name)
        except (AttributeError, NameError):  # pragma: no cover
            bn_func = None

        @functools.wraps(alt)
        def f(values, axis=None, skipna=True, **kwds):
            # inject defaults captured at decoration time
            if len(self.kwargs) > 0:
                for k, v in compat.iteritems(self.kwargs):
                    if k not in kwds:
                        kwds[k] = v
            try:
                if self.zero_value is not None and values.size == 0:
                    # empty-input short circuit
                    if values.ndim == 1:
                        # wrap the 0's if needed
                        if is_timedelta64_dtype(values):
                            return lib.Timedelta(0)
                        return 0
                    else:
                        # reduce away `axis`, filled with zeros
                        result_shape = (values.shape[:axis] +
                                        values.shape[axis + 1:])
                        result = np.empty(result_shape)
                        result.fill(0)
                        return result

                if (_USE_BOTTLENECK and skipna and
                        _bn_ok_dtype(values.dtype, bn_name)):
                    result = bn_func(values, axis=axis, **kwds)
                    # prefer to treat inf/-inf as NA, but must compute the func
                    # twice :(
                    if _has_infs(result):
                        result = alt(values, axis=axis, skipna=skipna, **kwds)
                else:
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
            except Exception:
                try:
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
                except ValueError as e:
                    # we want to transform an object array
                    # ValueError message to the more typical TypeError
                    # e.g. this is normally a disallowed function on
                    # object arrays that contain strings
                    if is_object_dtype(values):
                        raise TypeError(e)
                    raise
            return result

        return f
def _bn_ok_dtype(dt, name):
    """Return True when bottleneck's `name` can safely handle dtype `dt`."""
    # Bottleneck chokes on datetime64 and object dtypes
    if is_object_dtype(dt) or is_datetime_or_timedelta_dtype(dt):
        return False
    # bottleneck does not properly upcast during the sum
    # so can overflow on narrow integer types
    if name == 'nansum' and dt.itemsize < 8:
        return False
    return True
def _has_infs(result):
    """Return True when `result` contains any +/-inf value."""
    if isinstance(result, np.ndarray):
        # fast cython paths for the common float dtypes
        if result.dtype == 'f8':
            return lib.has_infs_f8(result.ravel())
        if result.dtype == 'f4':
            return lib.has_infs_f4(result.ravel())
    try:
        return np.isinf(result).any()
    except (TypeError, NotImplementedError):
        # if it doesn't support infs, then it can't have infs
        return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
    """ return the correct fill value for the dtype of the values """
    if fill_value is not None:
        return fill_value
    if _na_ok_dtype(dtype):
        # NaN-capable dtype: use NaN unless an explicit +/-inf was requested
        if fill_value_typ is None:
            return np.nan
        return np.inf if fill_value_typ == '+inf' else -np.inf
    # int/datetime-like dtypes cannot hold NaN
    if fill_value_typ == '+inf':
        # need the max int here
        return _int64_max
    return tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
                isfinite=False, copy=True):
    """ utility to get the values view, mask, dtype
    if necessary copy and mask using the specified fill_value
    copy = True will force the copy

    Returns
    -------
    values : ndarray, possibly copied/filled; datetimelike viewed as i8
    mask : boolean ndarray of NA (or non-finite) positions
    dtype : original dtype
    dtype_max : platform-independent accumulator dtype for reductions
    """
    values = _values_from_object(values)
    if isfinite:
        # treat +/-inf like NA as well
        mask = _isfinite(values)
    else:
        mask = isnull(values)

    dtype = values.dtype
    dtype_ok = _na_ok_dtype(dtype)

    # get our fill value (in case we need to provide an alternative
    # dtype for it)
    fill_value = _get_fill_value(dtype, fill_value=fill_value,
                                 fill_value_typ=fill_value_typ)

    if skipna:
        if copy:
            values = values.copy()
        if dtype_ok:
            np.putmask(values, mask, fill_value)
        # promote if needed
        else:
            values, changed = maybe_upcast_putmask(values, mask, fill_value)
    elif copy:
        values = values.copy()

    values = _view_if_needed(values)

    # return a platform independent precision dtype
    dtype_max = dtype
    if is_integer_dtype(dtype) or is_bool_dtype(dtype):
        dtype_max = np.int64
    elif is_float_dtype(dtype):
        dtype_max = np.float64

    return values, mask, dtype, dtype_max
def _isfinite(values):
    """Boolean mask of entries that are NA or non-finite (inf treated as NA)."""
    if is_datetime_or_timedelta_dtype(values):
        return isnull(values)
    numeric = (is_complex_dtype(values) or is_float_dtype(values) or
               is_integer_dtype(values) or is_bool_dtype(values))
    if numeric:
        return ~np.isfinite(values)
    # object or other dtypes: coerce to float first
    return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
    """True when the dtype can represent NaN directly (not int/datetime-like)."""
    if is_int_or_datetime_dtype(dtype):
        return False
    return True
def _view_if_needed(values):
    """View datetimelike values as their underlying int64 representation."""
    if not is_datetime_or_timedelta_dtype(values):
        return values
    return values.view(np.int64)
def _wrap_results(result, dtype):
    """ wrap our results if needed

    Reductions on datetimelike data run on i8 views; convert the raw
    integer result back into Timestamp/Timedelta scalars or arrays of the
    original dtype.
    """
    if is_datetime64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            result = lib.Timestamp(result)
        else:
            result = result.view(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")
            result = lib.Timedelta(result, unit='ns')
        else:
            result = result.astype('i8').view(dtype)
    return result
def nanany(values, axis=None, skipna=True):
    """NaN-aware `any` along `axis`; NA positions are filled with False."""
    values, _mask, _dtype, _ = _get_values(values, skipna, False, copy=skipna)
    return values.any(axis)
def nanall(values, axis=None, skipna=True):
    """NaN-aware `all` along `axis`; NA positions are filled with True."""
    values, _mask, _dtype, _ = _get_values(values, skipna, True, copy=skipna)
    return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
    """NaN-aware sum along `axis`; NAs are treated as zero."""
    values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
    # accumulate in a wide dtype unless the input is already float-like
    if is_float_dtype(dtype):
        dtype_sum = dtype
    elif is_timedelta64_dtype(dtype):
        dtype_sum = np.float64
    else:
        dtype_sum = dtype_max
    the_sum = values.sum(axis, dtype=dtype_sum)
    # all-NA slices yield NA rather than 0
    the_sum = _maybe_null_out(the_sum, axis, mask)
    return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
    """NaN-aware mean along `axis`."""
    values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
    # sum in a wide accumulator; count in a float dtype so division is exact
    dtype_sum = dtype_max
    dtype_count = np.float64
    if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
        dtype_sum = np.float64
    elif is_float_dtype(dtype):
        dtype_sum = dtype
        dtype_count = dtype
    count = _get_counts(mask, axis, dtype=dtype_count)
    the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))

    if axis is not None and getattr(the_sum, 'ndim', False):
        the_mean = the_sum / count
        # slices with zero valid observations have no mean
        ct_mask = count == 0
        if ct_mask.any():
            the_mean[ct_mask] = np.nan
    else:
        the_mean = the_sum / count if count > 0 else np.nan

    return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
    """NaN-aware median along `axis`."""
    values, mask, dtype, dtype_max = _get_values(values, skipna)

    def get_median(x):
        # median over the non-null entries of a 1d array
        mask = notnull(x)
        if not skipna and not mask.all():
            return np.nan
        return algos.median(_values_from_object(x[mask]))

    if not is_float_dtype(values):
        values = values.astype('f8')
        values[mask] = np.nan

    if axis is None:
        values = values.ravel()
    notempty = values.size

    # an array from a frame
    if values.ndim > 1:
        # there's a non-empty array to apply over otherwise numpy raises
        if notempty:
            return _wrap_results(
                np.apply_along_axis(get_median, axis, values), dtype)

        # must return the correct shape, but median is not defined for the
        # empty set so return nans of shape "everything but the passed axis"
        # since "axis" is where the reduction would occur if we had a nonempty
        # array
        shp = np.array(values.shape)
        dims = np.arange(values.ndim)
        ret = np.empty(shp[dims != axis])
        ret.fill(np.nan)
        return _wrap_results(ret, dtype)

    # otherwise return a scalar value
    return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
    """Return (count, degrees of freedom) per slice; NaN where count <= ddof."""
    dtype = _get_dtype(dtype)
    count = _get_counts(mask, axis, dtype=dtype)
    d = count - dtype.type(ddof)

    # always return NaN, never inf
    if is_scalar(count):
        if count <= ddof:
            count = np.nan
            d = np.nan
    else:
        degenerate = count <= ddof
        if degenerate.any():
            np.putmask(d, degenerate, np.nan)
            np.putmask(count, degenerate, np.nan)
    return count, d
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
    """NaN-aware standard deviation: sqrt of the nan-aware variance."""
    variance = nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
    return _wrap_results(np.sqrt(variance), values.dtype)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
    """NaN-aware variance along `axis` with `ddof` delta degrees of freedom."""
    values = _values_from_object(values)
    dtype = values.dtype
    mask = isnull(values)
    if is_any_int_dtype(values):
        # ints cannot hold NaN - move to float first
        values = values.astype('f8')
        values[mask] = np.nan

    if is_float_dtype(values):
        count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
    else:
        count, d = _get_counts_nanvar(mask, axis, ddof)

    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)

    # xref GH10242
    # Compute variance via two-pass algorithm, which is stable against
    # cancellation errors and relatively accurate for small numbers of
    # observations.
    #
    # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
    if axis is not None:
        avg = np.expand_dims(avg, axis)

    sqr = _ensure_numeric((avg - values) ** 2)
    np.putmask(sqr, mask, 0)
    result = sqr.sum(axis=axis, dtype=np.float64) / d

    # Return variance as np.float64 (the datatype used in the accumulator),
    # unless we were dealing with a float array, in which case use the same
    # precision as the original values array.
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    return _wrap_results(result, values.dtype)
@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
    """NaN-aware standard error of the mean: sqrt(var) / sqrt(count)."""
    # NOTE(review): this first nanvar call looks redundant (its result is
    # overwritten below), but it runs on the *original* dtype - presumably so
    # non-numeric input raises before the astype('f8') coercion. TODO confirm.
    var = nanvar(values, axis, skipna, ddof=ddof)

    mask = isnull(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
    count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
    var = nanvar(values, axis, skipna, ddof=ddof)

    return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
    """Build a NaN-aware reduction (nanmin/nanmax) around ndarray.min/max.

    Parameters
    ----------
    meth : str
        'min' or 'max' - the ndarray method to call.
    fill_value_typ : str
        '+inf' or '-inf'; NA positions are filled with this value so they
        can never win the reduction.
    """
    @bottleneck_switch()
    def reduction(values, axis=None, skipna=True):
        values, mask, dtype, dtype_max = _get_values(
            values, skipna, fill_value_typ=fill_value_typ, )

        if ((axis is not None and values.shape[axis] == 0) or
                values.size == 0):
            # empty reduction: shape a NaN result of the right form
            try:
                result = getattr(values, meth)(axis, dtype=dtype_max)
                result.fill(np.nan)
            except (AttributeError, TypeError, ValueError):
                # FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt / SystemExit
                result = np.nan
        else:
            result = getattr(values, meth)(axis)

        result = _wrap_results(result, dtype)
        # all-NA slices become NA
        return _maybe_null_out(result, axis, mask)

    reduction.__name__ = 'nan' + meth
    return reduction


nanmin = _nanminmax('min', fill_value_typ='+inf')
nanmax = _nanminmax('max', fill_value_typ='-inf')
def nanargmax(values, axis=None, skipna=True):
    """
    Returns -1 in the NA case
    """
    # NAs (and infs) are filled with -inf so they can never be the maximum
    values, mask, _dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
                                          isfinite=True)
    return _maybe_arg_null_out(values.argmax(axis), axis, mask, skipna)
def nanargmin(values, axis=None, skipna=True):
    """
    Returns -1 in the NA case
    """
    # NAs (and infs) are filled with +inf so they can never be the minimum
    values, mask, _dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
                                          isfinite=True)
    return _maybe_arg_null_out(values.argmin(axis), axis, mask, skipna)
@disallow('M8', 'm8')
def nanskew(values, axis=None, skipna=True):
    """ Compute the sample skewness.

    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G1. The algorithm computes this coefficient directly
    from the second and third central moment.
    """
    values = _values_from_object(values)
    mask = isnull(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
        count = _get_counts(mask, axis)
    else:
        count = _get_counts(mask, axis, dtype=values.dtype)

    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)

    mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        mean = np.expand_dims(mean, axis)

    # central moments m2 and m3 (NA positions contribute zero)
    adjusted = values - mean
    if skipna:
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted ** 2
    adjusted3 = adjusted2 * adjusted
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m3 = adjusted3.sum(axis, dtype=np.float64)

    # floating point error
    m2 = _zero_out_fperr(m2)
    m3 = _zero_out_fperr(m3)

    with np.errstate(invalid='ignore', divide='ignore'):
        result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)

    dtype = values.dtype
    if is_float_dtype(dtype):
        result = result.astype(dtype)

    # zero-variance slices have zero skew; fewer than 3 observations -> NaN
    if isinstance(result, np.ndarray):
        result = np.where(m2 == 0, 0, result)
        result[count < 3] = np.nan
        return result
    else:
        result = 0 if m2 == 0 else result
        if count < 3:
            return np.nan
        return result
@disallow('M8', 'm8')
def nankurt(values, axis=None, skipna=True):
    """ Compute the sample excess kurtosis.

    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G2, computed directly from the second and fourth
    central moment.
    """
    values = _values_from_object(values)
    mask = isnull(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
        count = _get_counts(mask, axis)
    else:
        count = _get_counts(mask, axis, dtype=values.dtype)

    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)

    mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        mean = np.expand_dims(mean, axis)

    # central moments m2 and m4 (NA positions contribute zero)
    adjusted = values - mean
    if skipna:
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted ** 2
    adjusted4 = adjusted2 ** 2
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m4 = adjusted4.sum(axis, dtype=np.float64)

    with np.errstate(invalid='ignore', divide='ignore'):
        adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
        numer = count * (count + 1) * (count - 1) * m4
        denom = (count - 2) * (count - 3) * m2**2
        result = numer / denom - adj

    # floating point error; the formula is re-evaluated below with the
    # cleaned numerator/denominator (the first evaluation handles the
    # array fast path where no corner cases apply)
    numer = _zero_out_fperr(numer)
    denom = _zero_out_fperr(denom)

    if not isinstance(denom, np.ndarray):
        # if ``denom`` is a scalar, check these corner cases first before
        # doing division
        if count < 4:
            return np.nan
        if denom == 0:
            return 0

    with np.errstate(invalid='ignore', divide='ignore'):
        result = numer / denom - adj

    dtype = values.dtype
    if is_float_dtype(dtype):
        result = result.astype(dtype)

    if isinstance(result, np.ndarray):
        # zero-denominator slices -> 0; fewer than 4 observations -> NaN
        result = np.where(denom == 0, 0, result)
        result[count < 4] = np.nan

    return result
@disallow('M8', 'm8')
def nanprod(values, axis=None, skipna=True):
    """NaN-aware product along `axis`; NAs are treated as 1."""
    mask = isnull(values)
    if skipna and not is_any_int_dtype(values):
        # int dtypes cannot hold NA, so nothing to fill there
        values = values.copy()
        values[mask] = 1
    return _maybe_null_out(values.prod(axis), axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis, dtype=float):
    """Count non-NA entries (total, or per-`axis`) cast to `dtype`."""
    dtype = _get_dtype(dtype)
    if axis is None:
        return dtype.type(mask.size - mask.sum())

    n = mask.shape[axis] - mask.sum(axis)
    if is_scalar(n):
        return dtype.type(n)
    try:
        return n.astype(dtype)
    except AttributeError:
        # not an ndarray (e.g. plain python number sequence)
        return np.array(n, dtype=dtype)
def _maybe_null_out(result, axis, mask):
    """Null out results of all-NA slices (or an all-NA scalar reduction)."""
    if axis is not None and getattr(result, 'ndim', False):
        # per-slice: zero non-null entries means no valid result
        null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
        if np.any(null_mask):
            if is_numeric_dtype(result):
                if np.iscomplexobj(result):
                    result = result.astype('c16')
                else:
                    result = result.astype('f8')
                result[null_mask] = np.nan
            else:
                # GH12941, use None to auto cast null
                result[null_mask] = None
    elif result is not tslib.NaT:
        # scalar result: NaN when every entry was null
        null_mask = mask.size - mask.sum()
        if null_mask == 0:
            result = np.nan

    return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
with np.errstate(invalid='ignore'):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow('M8', 'm8')
def nancorr(a, b, method='pearson', min_periods=None):
    """Correlation of two equally sized 1d arrays, dropping pairwise NAs.

    Returns NaN when fewer than `min_periods` valid pairs remain.
    """
    if len(a) != len(b):
        raise AssertionError('Operands to nancorr must have same size')

    min_periods = 1 if min_periods is None else min_periods

    # keep only positions where both operands are non-null
    valid = notnull(a) & notnull(b)
    if not valid.all():
        a = a[valid]
        b = b[valid]

    if len(a) < min_periods:
        return np.nan
    return get_corr_func(method)(a, b)
def get_corr_func(method):
    """Return a callable computing the requested correlation of two arrays.

    Raises KeyError for an unknown method, mirroring a dict lookup.
    """
    if method == 'pearson':
        def _pearson(a, b):
            return np.corrcoef(a, b)[0, 1]
        return _pearson

    if method in ('kendall', 'spearman'):
        from scipy.stats import kendalltau, spearmanr
        if method == 'kendall':
            def _kendall(a, b):
                rs = kendalltau(a, b)
                # older scipy returned a bare value rather than a named tuple
                if isinstance(rs, tuple):
                    return rs[0]
                return rs
            return _kendall

        def _spearman(a, b):
            return spearmanr(a, b)[0]
        return _spearman

    raise KeyError(method)
@disallow('M8', 'm8')
def nancov(a, b, min_periods=None):
    """Covariance of two equally sized 1d arrays, dropping pairwise NAs."""
    if len(a) != len(b):
        raise AssertionError('Operands to nancov must have same size')

    min_periods = 1 if min_periods is None else min_periods

    # keep only positions where both operands are non-null
    valid = notnull(a) & notnull(b)
    if not valid.all():
        a = a[valid]
        b = b[valid]

    if len(a) < min_periods:
        return np.nan
    return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
return x
# NA-friendly array comparisons
def make_nancomp(op):
    """Wrap a binary comparison `op` so NA positions propagate as NaN."""
    def comparison(x, y):
        na_x = isnull(x)
        na_y = isnull(y)
        combined_na = na_x | na_y

        with np.errstate(all='ignore'):
            result = op(x, y)

        if combined_na.any():
            # bool arrays cannot hold NaN - widen to object first
            if is_bool_dtype(result):
                result = result.astype('O')
            np.putmask(result, combined_na, np.nan)

        return result
    return comparison
# concrete NA-propagating versions of the standard comparison operators
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
| mit |
aabadie/scikit-learn | sklearn/neighbors/classification.py | 15 | 14359 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
                           SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing the k-nearest neighbors vote.
    Read more in the :ref:`User Guide <classification>`.
    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
    weights : str or callable, optional (default = 'uniform')
        weight function used in prediction. Possible values:
        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.
    metric : string or DistanceMetric object (default = 'minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.
    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
        Doesn't affect :meth:`fit` method.
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[ 0.66666667  0.33333333]]
    See also
    --------
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    NearestNeighbors
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    .. warning::
       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
       training data.
    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """
    def __init__(self, n_neighbors=5,
                 weights='uniform', algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, n_jobs=1,
                 **kwargs):
        # Index/metric parameters are stored by the shared NeighborsBase
        # initializer; only the voting weights are specific to this class.
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs, **kwargs)
        # Validates 'uniform' / 'distance' / callable up front.
        self.weights = _check_weights(weights)
    def predict(self, X):
        """Predict the class labels for the provided data
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = check_array(X, accept_sparse='csr')
        neigh_dist, neigh_ind = self.kneighbors(X)
        classes_ = self.classes_
        _y = self._y
        # Normalize the single-output case to the multi-output layout so the
        # per-output loop below can treat both uniformly.
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_outputs = len(classes_)
        n_samples = X.shape[0]
        # None for uniform weighting; otherwise one weight per neighbor.
        weights = _get_weights(neigh_dist, self.weights)
        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # Majority vote (plain or weighted) among each row's neighbors.
            if weights is None:
                mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
            else:
                mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
            # np.intp: platform index type expected by ndarray.take below.
            mode = np.asarray(mode.ravel(), dtype=np.intp)
            y_pred[:, k] = classes_k.take(mode)
        if not self.outputs_2d_:
            y_pred = y_pred.ravel()
        return y_pred
    def predict_proba(self, X):
        """Return probability estimates for the test data X.
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            of such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        X = check_array(X, accept_sparse='csr')
        neigh_dist, neigh_ind = self.kneighbors(X)
        classes_ = self.classes_
        _y = self._y
        # Normalize the single-output case to the multi-output layout.
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_samples = X.shape[0]
        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            # Uniform weighting: every neighbor contributes one vote.
            weights = np.ones_like(neigh_ind)
        all_rows = np.arange(X.shape[0])
        probabilities = []
        for k, classes_k in enumerate(classes_):
            pred_labels = _y[:, k][neigh_ind]
            proba_k = np.zeros((n_samples, classes_k.size))
            # a simple ':' index doesn't work right
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
                # Scatter-add this neighbor column's weight into each row's
                # predicted-class bin.
                proba_k[all_rows, idx] += weights[:, i]
            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            # Guard against division by zero for all-zero vote rows.
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer
            probabilities.append(proba_k)
        if not self.outputs_2d_:
            probabilities = probabilities[0]
        return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
                                SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing a vote among neighbors within a given radius
    Read more in the :ref:`User Guide <classification>`.
    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for
        :meth:`radius_neighbors` queries.
    weights : str or callable
        weight function used in prediction. Possible values:
        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.
        Uniform weights are used by default.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.
    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.
    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    outlier_label : int, optional (default = None)
        Label, which is given for outlier samples (samples with no
        neighbors on given radius).
        If set to None, ValueError is raised, when outlier is detected.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]
    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsRegressor
    KNeighborsRegressor
    NearestNeighbors
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """
    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30, p=2, metric='minkowski',
                 outlier_label=None, metric_params=None, **kwargs):
        # Index/metric parameters are stored by the shared NeighborsBase
        # initializer; weights and the outlier label are specific here.
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          metric=metric, p=p, metric_params=metric_params,
                          **kwargs)
        self.weights = _check_weights(weights)
        self.outlier_label = outlier_label
    def predict(self, X):
        """Predict the class labels for the provided data
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = check_array(X, accept_sparse='csr')
        n_samples = X.shape[0]
        neigh_dist, neigh_ind = self.radius_neighbors(X)
        # Queries with at least one neighbor vs. none within the radius.
        inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
        outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
        classes_ = self.classes_
        _y = self._y
        # Normalize the single-output case to the multi-output layout so the
        # per-output loop below can treat both uniformly.
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_outputs = len(classes_)
        if self.outlier_label is not None:
            # Placeholder distance so weight computation stays finite for
            # outlier rows; their predictions are overwritten below.
            neigh_dist[outliers] = 1e-6
        elif outliers:
            raise ValueError('No neighbors found for test samples %r, '
                             'you can try using larger radius, '
                             'give a label for outliers, '
                             'or consider removing them from your dataset.'
                             % outliers)
        weights = _get_weights(neigh_dist, self.weights)
        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
                                   dtype=object)
            # Majority vote (plain or weighted) among each inlier's
            # in-radius neighbors.  np.intp is the platform index type
            # required by ndarray.take; the former ``np.int`` alias was
            # deprecated in NumPy 1.20 and removed in 1.24 (and matches
            # KNeighborsClassifier.predict).
            if weights is None:
                mode = np.array([stats.mode(pl)[0]
                                 for pl in pred_labels[inliers]],
                                dtype=np.intp)
            else:
                mode = np.array([weighted_mode(pl, w)[0]
                                 for (pl, w)
                                 in zip(pred_labels[inliers], weights[inliers])],
                                dtype=np.intp)
            mode = mode.ravel()
            y_pred[inliers, k] = classes_k.take(mode)
        if outliers:
            y_pred[outliers, :] = self.outlier_label
        if not self.outputs_2d_:
            y_pred = y_pred.ravel()
        return y_pred
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/datasets/tests/test_lfw.py | 42 | 7253 | """This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
# Scratch data homes: one that setup_module populates with a fake LFW
# layout, and one left intentionally empty to exercise the IOError paths.
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
# Synthetic person names; folders and jpegs for them are generated by
# setup_module.
FAKE_NAMES = [
    'Abdelatif_Smith',
    'Abhati_Kepler',
    'Camara_Alvaro',
    'Chen_Dupont',
    'John_Lee',
    'Lin_Bauman',
    'Onur_Lopez',
]
def setup_module():
    """Test fixture run once and common to all tests of this module"""
    if imsave is None:
        raise SkipTest("PIL not installed.")
    if not os.path.exists(LFW_HOME):
        os.makedirs(LFW_HOME)
    # Fixed seeds so the fake dataset (and hence the expected values in the
    # tests below) is reproducible across runs.
    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)
    # generate some random jpeg files for each person
    counts = {}
    for name in FAKE_NAMES:
        folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        # 1 to 4 faces per person (upper bound of randint is exclusive).
        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            try:
                imsave(file_path, uniface)
            except ImportError:
                raise SkipTest("PIL not installed")
    # add some random file pollution to test robustness
    with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
        f.write(six.b('Text file to be ignored by the dataset loader.'))
    # generate some pairing metadata files using the same format as LFW
    with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
        f.write(six.b("10\n"))
        # 5 "same person" pairs need people with at least two images.
        more_than_two = [name for name, count in six.iteritems(counts)
                         if count >= 2]
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
        # 5 "different persons" pairs drawn from two distinct names.
        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = random_state.choice(np.arange(counts[first_name]))
            second_index = random_state.choice(np.arange(counts[second_name]))
            f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
                                                second_name, second_index)))
    with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
    with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    # Remove both scratch data homes created at import time.
    for scratch_dir in (SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA):
        if os.path.isdir(scratch_dir):
            shutil.rmtree(scratch_dir)
@raises(IOError)
def test_load_empty_lfw_people():
    # With download_if_missing=False and an empty data home, the loader must
    # raise IOError rather than attempt a network download.
    fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA,
                     download_if_missing=False)
def test_load_fake_lfw_people():
    """Check shapes, targets and names on the synthetic LFW people data."""
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                  min_faces_per_person=3,
                                  download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_people.images.shape, (10, 62, 47))
    assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
    # names of the persons can be found using the target_names array
    expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
    assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion, and with no limit on the number of pictures per person
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None,
                                  slice_=None, color=True,
                                  download_if_missing=False)
    assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_people.target,
                       [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
    assert_array_equal(lfw_people.target_names,
                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
                        'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
    # No fake person has 100 images, so the filter leaves an empty dataset
    # and the loader must raise ValueError.
    fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100,
                     download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
    # Same as the people variant: an empty data home without download
    # permission must raise IOError.
    fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA,
                    download_if_missing=False)
def test_load_fake_lfw_pairs():
    """Check shapes, targets and names on the synthetic LFW pairs data."""
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
                                      download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    # names of the persons can be found using the target_names array
    expected_classes = ['Different persons', 'Same person']
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None,
                                      slice_=None, color=True,
                                      download_if_missing=False)
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| mit |
devanshdalal/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
    """Exercise the RCV1 loader (skipped when the dataset is not on disk)."""
    try:
        data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
    except IOError as e:
        if e.errno == errno.ENOENT:
            raise SkipTest("Download RCV1 dataset to run this test.")
        # Any other I/O failure is a real error; the original code silently
        # fell through here, leaving ``data1`` unbound and producing a
        # misleading NameError below.
        raise
    X1, Y1 = data1.data, data1.target
    cat_list, s1 = data1.target_names.tolist(), data1.sample_id
    # test sparsity
    assert_true(sp.issparse(X1))
    assert_true(sp.issparse(Y1))
    assert_equal(60915113, X1.data.size)
    assert_equal(2606875, Y1.data.size)
    # test shapes
    assert_equal((804414, 47236), X1.shape)
    assert_equal((804414, 103), Y1.shape)
    assert_equal((804414,), s1.shape)
    assert_equal(103, len(cat_list))
    # test ordering of categories
    first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
    assert_array_equal(first_categories, cat_list[:6])
    # test number of sample for some categories
    some_categories = ('GMIL', 'E143', 'CCAT')
    number_non_zero_in_cat = (5, 1206, 381327)
    for num, cat in zip(number_non_zero_in_cat, some_categories):
        j = cat_list.index(cat)
        assert_equal(num, Y1[:, j].data.size)
    # test shuffling and subset
    data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
                       download_if_missing=False)
    X2, Y2 = data2.data, data2.target
    s2 = data2.sample_id
    # The first 23149 samples are the training samples
    assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
    # test some precise values: the same sample must carry identical
    # features and targets in the shuffled and unshuffled variants.
    some_sample_ids = (2286, 3274, 14042)
    for sample_id in some_sample_ids:
        idx1 = s1.tolist().index(sample_id)
        idx2 = s2.tolist().index(sample_id)
        feature_values_1 = X1[idx1, :].toarray()
        feature_values_2 = X2[idx2, :].toarray()
        assert_almost_equal(feature_values_1, feature_values_2)
        target_values_1 = Y1[idx1, :].toarray()
        target_values_2 = Y2[idx2, :].toarray()
        assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
treycausey/scikit-learn | examples/plot_precision_recall.py | 3 | 6114 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
# NOTE(review): ``pylab`` and ``sklearn.cross_validation`` are legacy module
# paths (modern releases use matplotlib.pyplot and
# sklearn.model_selection) -- confirm the sklearn version this example
# targets before modernizing.
import pylab as pl
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output (one indicator column per class) so a per-class
# precision-recall curve can be drawn.
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=random_state)
# Run classifier; decision_function scores are used to sweep the threshold.
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve (one curve per class)
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
                                                        y_score[:, i])
    average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area by treating every element of
# the label indicator matrix as an individual binary prediction.
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
    y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
                                                     average="micro")
# Plot Precision-Recall curve for class 0 only
pl.clf()
pl.plot(recall[0], precision[0], label='Precision-Recall curve')
pl.xlabel('Recall')
pl.ylabel('Precision')
pl.ylim([0.0, 1.05])
pl.xlim([0.0, 1.0])
pl.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
pl.legend(loc="lower left")
pl.show()
# Plot Precision-Recall curve for each class plus the micro-average
pl.clf()
pl.plot(recall["micro"], precision["micro"],
        label='micro-average Precision-recall curve (area = {0:0.2f})'
              ''.format(average_precision["micro"]))
for i in range(n_classes):
    pl.plot(recall[i], precision[i],
            label='Precision-recall curve of class {0} (area = {1:0.2f})'
                  ''.format(i, average_precision[i]))
pl.xlim([0.0, 1.0])
pl.ylim([0.0, 1.05])
pl.xlabel('Recall')
pl.ylabel('Precision')
pl.title('Extension of Precision-Recall curve to multi-class')
pl.legend(loc="lower right")
pl.show()
| bsd-3-clause |
elenbert/allsky | src/webdatagen/sensors-graphgen.py | 1 | 13361 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import csv
import MySQLdb
import sys
import config
def plot_sky_temp(sensor_data, output_file, output_csv_file, one_day=False):
    """Plot sky temperature over time and dump the same data to CSV.

    sensor_data -- sequence of DB rows; row[0] is a datetime, row[1] the
    temperature in Celsius.  one_day selects HH:MM tick labels instead of
    dates.  (Python 2 source -- print statements.)
    """
    xdata = []
    ydata = []
    print 'Plotting skytemp graph using ' + str(len(sensor_data)) + ' db records'
    for row in sensor_data:
        ydata.append(row[1])
        xdata.append(row[0])
    s = np.array(ydata)
    fig, ax = plt.subplots()
    plt.plot(xdata, s)
    # X label shows the covered date span, first to last record.
    plt.xlabel('Time period: ' + str(xdata[0].date()) \
        + ' - ' + str((xdata[len(xdata)-1]).date()))
    if one_day:
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    else:
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    fig.autofmt_xdate()
    plt.ylabel('Temperature C')
    plt.title('Sky temperature')
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(output_file, dpi=120)
    # Clear the current figure so the next plot starts clean.
    plt.gcf().clear()
    print 'Skytemp graph saved as ' + output_file
    with open(output_csv_file, 'wb') as csvfile:
        wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
        wr.writerow(['date', 'sky temperature'])
        for item in sensor_data:
            wr.writerow([str(item[0]), item[1]])
    print 'Skytemp csv saved as ' + output_csv_file
def plot_ambient_temp(sensor_data, output_file, output_csv_file, one_day=False):
    """Plot air temperature (top) and humidity (bottom) and dump to CSV.

    sensor_data -- sequence of DB rows; row[0] datetime, row[1] temperature,
    row[2] humidity.
    """
    xdata = []
    ydata_temper = []
    ydata_humidity = []
    print 'Plotting ambient temperature/humidity graph using ' + str(len(sensor_data)) + ' db records'
    for row in sensor_data:
        xdata.append(row[0])
        ydata_temper.append(row[1])
        ydata_humidity.append(row[2])
    temper = np.array(ydata_temper)
    humid = np.array(ydata_humidity)
    # Upper subplot: temperature.
    plt.subplot(211)
    plt.title('Air temperature and humidity')
    plt.plot(xdata, temper, label = "Temperature")
    if one_day:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    else:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    plt.gcf().autofmt_xdate()
    plt.legend()
    plt.ylabel('Temperature C')
    plt.grid(True)
    plt.tight_layout()
    # Lower subplot: humidity.
    plt.subplot(212)
    plt.plot(xdata, humid, label = "Humidity", color='green')
    plt.xlabel('Time period: ' + str(xdata[0].date()) \
        + ' - ' + str((xdata[len(xdata)-1]).date()))
    if one_day:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    else:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    plt.gcf().autofmt_xdate()
    plt.grid(True)
    plt.legend()
    plt.ylabel('Humidity %')
    plt.tight_layout()
    plt.savefig(output_file, dpi=120)
    plt.gcf().clear()
    print 'Temperature/humidity graph saved as ' + output_file
    with open(output_csv_file, 'wb') as csvfile:
        wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
        wr.writerow(['date', 'temperature', 'humidity'])
        for item in sensor_data:
            wr.writerow([str(item[0]), item[1], item[2]])
    print 'Temperature/humidity csv saved as ' + output_csv_file
def plot_ambient_light(sensor_data, output_file, output_csv_file, one_day=False):
    """Plot visible light (top) and infrared (bottom) and dump to CSV.

    sensor_data -- sequence of DB rows; row[0] datetime, row[1] visible lux,
    row[2] infrared lux.
    NOTE(review): the local names *_temper/*_humidity are copy-paste
    leftovers from plot_ambient_temp -- they actually hold visible/IR lux
    readings here.
    """
    xdata = []
    ydata_temper = []
    ydata_humidity = []
    print 'Plotting ambient ambient light graph using ' + str(len(sensor_data)) + ' db records'
    for row in sensor_data:
        xdata.append(row[0])
        ydata_temper.append(row[1])
        ydata_humidity.append(row[2])
    temper = np.array(ydata_temper)
    humid = np.array(ydata_humidity)
    # Upper subplot: visible light.
    plt.subplot(211)
    plt.title('Luminosity and infrared radiation')
    plt.plot(xdata, temper, label = "Visible light", color="green")
    if one_day:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    else:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    plt.gcf().autofmt_xdate()
    plt.legend()
    plt.ylabel('Lux')
    plt.grid(True)
    plt.tight_layout()
    # Lower subplot: infrared.
    plt.subplot(212)
    plt.plot(xdata, humid, label = "Infrared", color='red')
    plt.xlabel('Time period: ' + str(xdata[0].date()) \
        + ' - ' + str((xdata[len(xdata)-1]).date()))
    if one_day:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    else:
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    plt.gcf().autofmt_xdate()
    plt.grid(True)
    plt.legend()
    plt.ylabel('Lux')
    plt.tight_layout()
    plt.savefig(output_file, dpi=120)
    plt.gcf().clear()
    print 'Ambient light graph saved as ' + output_file
    with open(output_csv_file, 'wb') as csvfile:
        wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
        wr.writerow(['date', 'visible', 'infrared'])
        for item in sensor_data:
            wr.writerow([str(item[0]), item[1], item[2]])
    print 'Ambient light csv saved as ' + output_csv_file
def generate_graphs_for_day(cur, th_sensor_table, out_sky_img_file, out_sky_csv_file, out_amb_img_file, out_amb_csv_file, \
        out_light_img_file, out_light_csv_file):
    """Render 1-day graphs/CSVs: sky temperature, ambient temp/humidity and
    (when output paths are given) ambient light, from the MySQL tables."""
    print 'Fetching cloud sensor data for 1 day'
    cur.execute("SELECT * from cloud_sensor WHERE time >= NOW() - INTERVAL 1 DAY")
    plot_sky_temp(cur.fetchall(), one_day=True, output_file=out_sky_img_file, \
        output_csv_file=out_sky_csv_file)
    ###
    print '\nFetching external sensor data for 1 day'
    cur.execute("SELECT * from " + th_sensor_table + " WHERE time >= NOW() - INTERVAL 1 DAY")
    plot_ambient_temp(cur.fetchall(), one_day=True, output_file=out_amb_img_file,\
        output_csv_file=out_amb_csv_file)
    ###
    # Ambient light sensor is optional; skip when no output file requested.
    if out_light_img_file is not None:
        print '\nFetching ambient light sensor data for 1 day'
        cur.execute("SELECT * from ambient_sensor WHERE time >= NOW() - INTERVAL 1 DAY")
        plot_ambient_light(cur.fetchall(), one_day=True, output_file=out_light_img_file,\
            output_csv_file=out_light_csv_file)
def generate_graphs_for_week(cur, th_sensor_table, out_sky_img_file, out_sky_csv_file, out_amb_img_file, out_amb_csv_file, \
        out_light_img_file, out_light_csv_file):
    """Same as generate_graphs_for_day but over a 1-week window (date-style
    x-axis labels via one_day=False)."""
    print '\nFetching cloud sensor data for 1 week'
    cur.execute("SELECT * from cloud_sensor WHERE time >= NOW() - INTERVAL 1 WEEK")
    plot_sky_temp(cur.fetchall(), one_day=False, output_file=out_sky_img_file,\
        output_csv_file=out_sky_csv_file)
    ###
    print '\nFetching external sensor data for 1 week'
    cur.execute("SELECT * from " + th_sensor_table + " WHERE time >= NOW() - INTERVAL 1 WEEK")
    plot_ambient_temp(cur.fetchall(), one_day=False, output_file=out_amb_img_file,\
        output_csv_file=out_amb_csv_file)
    ###
    # Ambient light sensor is optional; skip when no output file requested.
    if out_light_img_file is not None:
        print '\nFetching ambient light sensor data for 1 week'
        cur.execute("SELECT * from ambient_sensor WHERE time >= NOW() - INTERVAL 1 WEEK")
        plot_ambient_light(cur.fetchall(), one_day=False, output_file=out_light_img_file,\
            output_csv_file=out_light_csv_file)
def generate_graphs_for_month(cur, th_sensor_table, out_sky_img_file, out_sky_csv_file, out_amb_img_file, out_amb_csv_file, \
                              out_light_img_file, out_light_csv_file):
    """Fetch the last month of sensor data and render the monthly plots/CSVs.

    Same parameters as generate_graphs_for_day; queries use a 1 MONTH
    interval and the plotters are called with one_day=False.
    """
    print '\nFetching cloud sensor data for 1 month'
    cur.execute("SELECT * from cloud_sensor WHERE time >= NOW() - INTERVAL 1 MONTH")
    plot_sky_temp(cur.fetchall(), one_day=False, output_file=out_sky_img_file,\
                  output_csv_file=out_sky_csv_file)
    ###
    print '\nFetching external sensor data for 1 month'
    cur.execute("SELECT * from " + th_sensor_table + " WHERE time >= NOW() - INTERVAL 1 MONTH")
    plot_ambient_temp(cur.fetchall(), one_day=False, output_file=out_amb_img_file,\
                      output_csv_file=out_amb_csv_file)
    ###
    if out_light_img_file is not None:
        print '\nFetching ambient light sensor data for 1 month'
        cur.execute("SELECT * from ambient_sensor WHERE time >= NOW() - INTERVAL 1 MONTH")
        plot_ambient_light(cur.fetchall(), one_day=False, output_file=out_light_img_file,\
                           output_csv_file=out_light_csv_file)
def generate_graphs_for_year(cur, th_sensor_table, out_sky_img_file, out_sky_csv_file, out_amb_img_file, out_amb_csv_file, \
                             out_light_img_file, out_light_csv_file):
    """Fetch the last year of sensor data and render the yearly plots/CSVs.

    Same parameters as generate_graphs_for_day; queries use a 1 YEAR
    interval and the plotters are called with one_day=False.
    """
    print '\nFetching cloud sensor data for 1 year'
    cur.execute("SELECT * from cloud_sensor WHERE time >= NOW() - INTERVAL 1 YEAR")
    plot_sky_temp(cur.fetchall(), one_day=False, output_file=out_sky_img_file,\
                  output_csv_file=out_sky_csv_file)
    ###
    print '\nFetching external dh22 sensor data for 1 year'
    cur.execute("SELECT * from " + th_sensor_table + " WHERE time >= NOW() - INTERVAL 1 YEAR")
    plot_ambient_temp(cur.fetchall(), one_day=False, output_file=out_amb_img_file,\
                      output_csv_file=out_amb_csv_file)
    ###
    if out_light_img_file is not None:
        print '\nFetching ambient light sensor data for 1 year'
        cur.execute("SELECT * from ambient_sensor WHERE time >= NOW() - INTERVAL 1 YEAR")
        plot_ambient_light(cur.fetchall(), one_day=False, output_file=out_light_img_file,\
                           output_csv_file=out_light_csv_file)
def main(args):
db = MySQLdb.connect(host=config.MYSQL_HOST, user=config.MYSQL_USER, \
passwd=config.MYSQL_PASSWORD, db=config.MYSQL_DB, connect_timeout=90)
cur = db.cursor()
th_sensor_table = "external_dh22"
if len(args) == 1:
generate_graphs_for_day(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_DAY, config.CSV_CLOUD_SENSOR_DAY, \
config.PLOT_EXTERNAL_DH22_DAY, config.CSV_EXTERNAL_DH22_DAY, \
config.PLOT_AMBIENT_LIGHT_DAY, config.CSV_AMBIENT_LIGHT_DAY)
generate_graphs_for_week(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_WEEK, config.CSV_CLOUD_SENSOR_WEEK, \
config.PLOT_EXTERNAL_DH22_WEEK, config.CSV_EXTERNAL_DH22_WEEK, \
config.PLOT_AMBIENT_LIGHT_WEEK, config.CSV_AMBIENT_LIGHT_WEEK)
generate_graphs_for_month(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_MONTH, config.CSV_CLOUD_SENSOR_MONTH, \
config.PLOT_EXTERNAL_DH22_MONTH, config.CSV_EXTERNAL_DH22_MONTH, \
config.PLOT_AMBIENT_LIGHT_MONTH, config.CSV_AMBIENT_LIGHT_MONTH)
generate_graphs_for_year(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_YEAR, config.CSV_CLOUD_SENSOR_YEAR, \
config.PLOT_EXTERNAL_DH22_YEAR, config.CSV_EXTERNAL_DH22_YEAR, \
config.PLOT_AMBIENT_LIGHT_YEAR, config.CSV_AMBIENT_LIGHT_YEAR)
else:
if args[1] == 'sensors-day':
generate_graphs_for_day(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_DAY, config.CSV_CLOUD_SENSOR_DAY, \
config.PLOT_EXTERNAL_DH22_DAY, config.CSV_EXTERNAL_DH22_DAY, \
config.PLOT_AMBIENT_LIGHT_DAY, config.CSV_AMBIENT_LIGHT_DAY)
elif args[1] == 'sensors-week':
generate_graphs_for_week(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_WEEK, config.CSV_CLOUD_SENSOR_WEEK, \
config.PLOT_EXTERNAL_DH22_WEEK, config.CSV_EXTERNAL_DH22_WEEK, \
config.PLOT_AMBIENT_LIGHT_WEEK, config.CSV_AMBIENT_LIGHT_WEEK)
elif args[1] == 'sensors-month':
generate_graphs_for_month(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_MONTH, config.CSV_CLOUD_SENSOR_MONTH, \
config.PLOT_EXTERNAL_DH22_MONTH, config.CSV_EXTERNAL_DH22_MONTH, \
config.PLOT_AMBIENT_LIGHT_MONTH, config.CSV_AMBIENT_LIGHT_MONTH)
elif args[1] == 'sensors-year':
generate_graphs_for_year(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_YEAR, config.CSV_CLOUD_SENSOR_YEAR, \
config.PLOT_EXTERNAL_DH22_YEAR, config.CSV_EXTERNAL_DH22_YEAR, \
config.PLOT_AMBIENT_LIGHT_YEAR, config.CSV_AMBIENT_LIGHT_YEAR)
db.close()
### do the same things for Simeiz
db = MySQLdb.connect(host=config.MYSQL_HOST, user=config.MYSQL_USER, \
passwd=config.MYSQL_PASSWORD, db=config.MYSQL_DB_SIMEIZ, connect_timeout=90)
cur = db.cursor()
th_sensor_table = "ambient_sensor"
if len(args) == 1:
generate_graphs_for_day(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_DAY_SIMEIZ, config.CSV_CLOUD_SENSOR_DAY_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_DAY_SIMEIZ, config.CSV_AMBIENT_SENSOR_DAY_SIMEIZ, None, None)
generate_graphs_for_week(cur, cth_sensor_table, onfig.PLOT_CLOUD_SENSOR_WEEK_SIMEIZ, config.CSV_CLOUD_SENSOR_WEEK_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_WEEK_SIMEIZ, config.CSV_AMBIENT_SENSOR_WEEK_SIMEIZ, None, None)
generate_graphs_for_month(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_MONTH_SIMEIZ, config.CSV_CLOUD_SENSOR_MONTH_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_MONTH_SIMEIZ, config.CSV_AMBIENT_SENSOR_MONTH_SIMEIZ, None, None)
generate_graphs_for_year(cur, cth_sensor_table, onfig.PLOT_CLOUD_SENSOR_YEAR_SIMEIZ, config.CSV_CLOUD_SENSOR_YEAR_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_YEAR_SIMEIZ, config.CSV_AMBIENT_SENSOR_YEAR_SIMEIZ, None, None)
else:
if args[1] == 'sensors-day':
generate_graphs_for_day(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_DAY_SIMEIZ, config.CSV_CLOUD_SENSOR_DAY_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_DAY_SIMEIZ, config.CSV_AMBIENT_SENSOR_DAY_SIMEIZ, None, None)
elif args[1] == 'sensors-week':
generate_graphs_for_week(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_WEEK_SIMEIZ, config.CSV_CLOUD_SENSOR_WEEK_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_WEEK_SIMEIZ, config.CSV_AMBIENT_SENSOR_WEEK_SIMEIZ, None, None)
elif args[1] == 'sensors-month':
generate_graphs_for_month(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_MONTH_SIMEIZ, config.CSV_CLOUD_SENSOR_MONTH_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_MONTH_SIMEIZ, config.CSV_AMBIENT_SENSOR_MONTH_SIMEIZ, None, None)
elif args[1] == 'sensors-year':
generate_graphs_for_year(cur, th_sensor_table, config.PLOT_CLOUD_SENSOR_YEAR_SIMEIZ, config.CSV_CLOUD_SENSOR_YEAR_SIMEIZ, \
config.PLOT_AMBIENT_SENSOR_YEAR_SIMEIZ, config.CSV_AMBIENT_SENSOR_YEAR_SIMEIZ, None, None)
db.close()
print 'Done\n'
if __name__ == "__main__":
    # Script entry point: the command line selects which graphs to build.
    main(sys.argv)
| gpl-2.0 |
q1ang/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np

from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline

# Fixed seed so the demo is reproducible run to run.
np.random.seed(42)

X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]

# Clean held-out data used to score each fitted model.
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]

# Corrupt every third sample: in y only or X only, small (3) or large (10).
y_errors = y.copy()
y_errors[::3] = 3

X_errors = X.copy()
X_errors[::3] = 3

y_errors_large = y.copy()
y_errors_large[::3] = 10

X_errors_large = X.copy()
X_errors_large[::3] = 10

estimators = [('OLS', linear_model.LinearRegression()),
              ('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
              ('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]

x_plot = np.linspace(X.min(), X.max())

# One figure per corruption scenario, with all three estimators overlaid.
for title, this_X, this_y in [
        ('Modeling errors only', X, y),
        ('Corrupt X, small deviants', X_errors, y),
        ('Corrupt y, small deviants', X, y_errors),
        ('Corrupt X, large deviants', X_errors_large, y),
        ('Corrupt y, large deviants', X, y_errors_large)]:
    plt.figure(figsize=(5, 4))
    plt.plot(this_X[:, 0], this_y, 'k+')

    for name, estimator in estimators:
        # Degree-3 polynomial features feed each (robust) linear estimator.
        model = make_pipeline(PolynomialFeatures(3), estimator)
        model.fit(this_X, this_y)
        # NOTE(review): despite the legend title below, the reported score
        # is the mean *squared* error on the clean test set — inherited
        # from the upstream example; confirm before relying on the label.
        mse = metrics.mean_squared_error(model.predict(X_test), y_test)
        y_plot = model.predict(x_plot[:, np.newaxis])
        plt.plot(x_plot, y_plot,
                 label='%s: error = %.3f' % (name, mse))

    plt.legend(loc='best', frameon=False,
               title='Error: mean absolute deviation\n to non corrupt data')
    plt.xlim(-4, 10.2)
    plt.ylim(-2, 10.2)
    plt.title(title)
plt.show()
| bsd-3-clause |
mattgiguere/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)

# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
#         Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause

import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl

# Fixed seed so the noisy-case observations are reproducible.
np.random.seed(1)


def f(x):
    """The function to predict."""
    return x * np.sin(x)

#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T

# Observations
y = f(X).ravel()

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE (1.96 sigma on each side of the predicted mean)
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T

# Observations and noise (heteroscedastic: per-point std dev in dy)
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model; nugget encodes the normalized
# per-point noise variance (see the module docstring)
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=(dy / y) ** 2,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

pl.show()
| bsd-3-clause |
porterdf/porterdf.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
# Read the tab-separated publications table; the trailing bare expression
# displays the dataframe when this is run as a notebook cell.
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
# Characters that must be replaced by HTML entities before being embedded
# in YAML front matter.  NOTE: these entity values had been de-escaped to
# the raw characters (making the table a no-op — and the bare double quote
# was not even valid syntax); restored to proper HTML entities.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
}


def html_escape(text):
    """Produce entities within text."""
    return "".join(html_escape_table.get(c, c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation")
# In[5]:
import os

# Build one markdown file per row of the publications table and write it
# into the Jekyll _publications collection.
for row, item in publications.iterrows():
    md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
    html_filename = str(item.pub_date) + "-" + item.url_slug
    year = item.pub_date[:4]  # NOTE(review): currently unused

    ## YAML variables
    md = "---\ntitle: \"" + item.title + '"\n'

    md += """collection: publications"""

    md += """\npermalink: /publication/""" + html_filename

    # A length-5 threshold treats missing cells (rendered as 'nan') as empty.
    if len(str(item.excerpt)) > 5:
        md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"

    md += "\ndate: " + str(item.pub_date)

    md += "\nvenue: '" + html_escape(item.venue) + "'"

    if len(str(item.paper_url)) > 5:
        md += "\npaperurl: '" + item.paper_url + "'"

    md += "\ncitation: '" + html_escape(item.citation) + "'"

    md += "\n---"

    ## Markdown description for individual page
    if len(str(item.paper_url)) > 5:
        md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"

    if len(str(item.excerpt)) > 5:
        md += "\n" + html_escape(item.excerpt) + "\n"

    md += "\nRecommended citation: " + item.citation

    md_filename = os.path.basename(md_filename)

    with open("../_publications/" + md_filename, 'w') as f:
        f.write(md)
| mit |
datapythonista/pandas | pandas/core/arrays/timedeltas.py | 1 | 36958 | from __future__ import annotations
from datetime import timedelta
from typing import TYPE_CHECKING
import numpy as np
from pandas._libs import (
lib,
tslibs,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Period,
Tick,
Timedelta,
Timestamp,
iNaT,
to_offset,
)
from pandas._libs.tslibs.conversion import (
ensure_timedelta64ns,
precision_from_unit,
)
from pandas._libs.tslibs.fields import get_timedelta_field
from pandas._libs.tslibs.timedeltas import (
array_to_timedelta64,
ints_to_pytimedelta,
parse_timedelta_unit,
)
from pandas._typing import (
DtypeObj,
NpDtype,
)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import astype_td64_unit_conversion
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
TD64NS_DTYPE,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import (
ABCCategorical,
ABCMultiIndex,
)
from pandas.core.dtypes.missing import isna
from pandas.core import nanops
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
IntegerArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas.core.arrays import (
DatetimeArray,
PeriodArray,
)
def _field_accessor(name: str, alias: str, docstring: str):
def f(self) -> np.ndarray:
values = self.asi8
result = get_timedelta_field(values, alias)
if self._hasnans:
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = f"\n{docstring}\n"
return property(f)
class TimedeltaArray(dtl.TimelikeOps):
    """
    Pandas ExtensionArray for timedelta data.

    .. versionadded:: 0.24.0

    .. warning::

       TimedeltaArray is currently experimental, and its API may change
       without warning. In particular, :attr:`TimedeltaArray.dtype` is
       expected to change to be an instance of an ``ExtensionDtype``
       subclass.

    Parameters
    ----------
    values : array-like
        The timedelta data.

    dtype : numpy.dtype
        Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted.
    freq : Offset, optional
    copy : bool, default False
        Whether to copy the underlying array of data.

    Attributes
    ----------
    None

    Methods
    -------
    None
    """

    _typ = "timedeltaarray"
    _scalar_type = Timedelta
    # Scalar types accepted wherever a timedelta-like scalar is expected.
    _recognized_scalars = (timedelta, np.timedelta64, Tick)
    _is_recognized_dtype = is_timedelta64_dtype
    _infer_matches = ("timedelta", "timedelta64")

    # Ensure our binary ops win over ndarray's in mixed operations.
    __array_priority__ = 1000
    # define my properties & methods for delegation
    _other_ops: list[str] = []
    _bool_ops: list[str] = []
    _object_ops: list[str] = ["freq"]
    _field_ops: list[str] = ["days", "seconds", "microseconds", "nanoseconds"]
    _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
    _datetimelike_methods: list[str] = [
        "to_pytimedelta",
        "total_seconds",
        "round",
        "floor",
        "ceil",
    ]

    # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
    # operates pointwise.
def _box_func(self, x) -> Timedelta | NaTType:
    # Wrap a raw nanosecond value into a scalar Timedelta (NaT-aware).
    return Timedelta(x, unit="ns")

@property
# error: Return type "dtype" of "dtype" incompatible with return type
# "ExtensionDtype" in supertype "ExtensionArray"
def dtype(self) -> np.dtype:  # type: ignore[override]
    """
    The dtype for the TimedeltaArray.

    .. warning::

       A future version of pandas will change dtype to be an instance
       of a :class:`pandas.api.extensions.ExtensionDtype` subclass,
       not a ``numpy.dtype``.

    Returns
    -------
    numpy.dtype
    """
    return TD64NS_DTYPE

# ----------------------------------------------------------------
# Constructors

# Default frequency; overwritten by the constructors when one is
# supplied or inferred.
_freq = None
def __init__(
    self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy: bool = False
):
    """
    Validate ``values``, ``dtype`` and ``freq`` and store the underlying
    timedelta64[ns] ndarray.  See the class docstring for parameters.
    """
    values = extract_array(values, extract_numpy=True)
    if isinstance(values, IntegerArray):
        # Masked integers: missing entries become the iNaT sentinel.
        values = values.to_numpy("int64", na_value=tslibs.iNaT)

    inferred_freq = getattr(values, "_freq", None)
    # freq=None (explicit) means "no freq", distinct from "not passed".
    explicit_none = freq is None
    freq = freq if freq is not lib.no_default else None

    if isinstance(values, type(self)):
        if explicit_none:
            # dont inherit from values
            pass
        elif freq is None:
            freq = values.freq
        elif freq and values.freq:
            freq = to_offset(freq)
            freq, _ = dtl.validate_inferred_freq(freq, values.freq, False)
        values = values._ndarray

    if not isinstance(values, np.ndarray):
        msg = (
            f"Unexpected type '{type(values).__name__}'. 'values' must be a "
            "TimedeltaArray, ndarray, or Series or Index containing one of those."
        )
        raise ValueError(msg)
    if values.ndim not in [1, 2]:
        raise ValueError("Only 1-dimensional input arrays are supported.")

    if values.dtype == "i8":
        # for compat with datetime/timedelta/period shared methods,
        # we can sometimes get here with int64 values.  These represent
        # nanosecond UTC (or tz-naive) unix timestamps
        values = values.view(TD64NS_DTYPE)

    _validate_td64_dtype(values.dtype)
    dtype = _validate_td64_dtype(dtype)

    if freq == "infer":
        msg = (
            "Frequency inference not allowed in TimedeltaArray.__init__. "
            "Use 'pd.array()' instead."
        )
        raise ValueError(msg)

    if copy:
        values = values.copy()
    if freq:
        freq = to_offset(freq)

    NDArrayBacked.__init__(self, values=values, dtype=dtype)
    self._freq = freq

    if inferred_freq is None and freq is not None:
        # User-supplied freq must actually match the data.
        type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new(  # type: ignore[override]
    cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=TD64NS_DTYPE
) -> TimedeltaArray:
    # Fastpath constructor: trusts that ``values`` is already a validated
    # timedelta64[ns] ndarray, skipping all coercion and validation.
    assert dtype == TD64NS_DTYPE, dtype
    assert isinstance(values, np.ndarray), type(values)
    assert values.dtype == TD64NS_DTYPE

    result = super()._simple_new(values=values, dtype=TD64NS_DTYPE)
    result._freq = freq
    return result

@classmethod
def _from_sequence(
    cls, data, *, dtype=TD64NS_DTYPE, copy: bool = False
) -> TimedeltaArray:
    # ExtensionArray-interface constructor: coerce ``data`` to
    # timedelta64[ns] and keep any frequency inferred during conversion.
    if dtype:
        _validate_td64_dtype(dtype)

    data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=None)
    freq, _ = dtl.validate_inferred_freq(None, inferred_freq, False)

    return cls._simple_new(data, freq=freq)
@classmethod
def _from_sequence_not_strict(
    cls,
    data,
    dtype=TD64NS_DTYPE,
    copy: bool = False,
    freq=lib.no_default,
    unit=None,
) -> TimedeltaArray:
    # Like _from_sequence but additionally accepts ``freq`` (including
    # "infer") and a ``unit`` for interpreting numeric input.
    if dtype:
        _validate_td64_dtype(dtype)

    # Distinguish explicit freq=None ("no freq") from freq not passed.
    explicit_none = freq is None
    freq = freq if freq is not lib.no_default else None

    freq, freq_infer = dtl.maybe_infer_freq(freq)

    data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)
    freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
    if explicit_none:
        freq = None

    result = cls._simple_new(data, freq=freq)

    if inferred_freq is None and freq is not None:
        # this condition precludes `freq_infer`
        cls._validate_frequency(result, freq)

    elif freq_infer:
        # Set _freq directly to bypass duplicative _validate_frequency
        # check.
        result._freq = to_offset(result.inferred_freq)

    return result
@classmethod
def _generate_range(cls, start, end, periods, freq, closed=None):
    """
    Build a TimedeltaArray from a regular range description.  Exactly
    three of ``start``/``end``/``periods``/``freq`` must be given;
    ``closed`` controls endpoint inclusion ('left'/'right'/None=both).
    """
    periods = dtl.validate_periods(periods)
    if freq is None and any(x is None for x in [periods, start, end]):
        raise ValueError("Must provide freq argument if no data is supplied")

    if com.count_not_none(start, end, periods, freq) != 3:
        raise ValueError(
            "Of the four parameters: start, end, periods, "
            "and freq, exactly three must be specified"
        )

    if start is not None:
        start = Timedelta(start)

    if end is not None:
        end = Timedelta(end)

    left_closed, right_closed = dtl.validate_endpoints(closed)

    if freq is not None:
        index = generate_regular_range(start, end, periods, freq)
    else:
        # No freq: spread ``periods`` points evenly between start and end.
        index = np.linspace(start.value, end.value, periods).astype("i8")

    if not left_closed:
        index = index[1:]
    if not right_closed:
        index = index[:-1]

    return cls._simple_new(index.view("m8[ns]"), freq=freq)
# ----------------------------------------------------------------
# DatetimeLike Interface

def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64:
    # Convert a Timedelta/NaT scalar into the raw np.timedelta64 stored
    # in the backing array, rejecting anything else.
    if not isinstance(value, self._scalar_type) and value is not NaT:
        raise ValueError("'value' should be a Timedelta.")
    self._check_compatible_with(value, setitem=setitem)
    return np.timedelta64(value.value, "ns")

def _scalar_from_string(self, value) -> Timedelta | NaTType:
    # Parse a string (e.g. "1 day") into a Timedelta scalar.
    return Timedelta(value)

def _check_compatible_with(self, other, setitem: bool = False) -> None:
    # we don't have anything to validate.
    pass

# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods

def astype(self, dtype, copy: bool = True):
    # We handle
    #   --> timedelta64[ns]
    #   --> timedelta64
    # DatetimeLikeArrayMixin super call handles other cases
    dtype = pandas_dtype(dtype)

    if dtype.kind == "m":
        return astype_td64_unit_conversion(self._ndarray, dtype, copy=copy)

    return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)
def __iter__(self):
    # 2-D arrays iterate row-wise (yielding sub-arrays); 1-D arrays yield
    # scalar Timedeltas, materialized in chunks to bound peak memory.
    if self.ndim > 1:
        for i in range(len(self)):
            yield self[i]
    else:
        # convert in chunks of 10k for efficiency
        data = self.asi8
        length = len(self)
        chunksize = 10000
        chunks = (length // chunksize) + 1
        for i in range(chunks):
            start_i = i * chunksize
            end_i = min((i + 1) * chunksize, length)
            converted = ints_to_pytimedelta(data[start_i:end_i], box=True)
            yield from converted
# ----------------------------------------------------------------
# Reductions

def sum(
    self,
    *,
    axis: int | None = None,
    dtype: NpDtype | None = None,
    out=None,
    keepdims: bool = False,
    initial=None,
    skipna: bool = True,
    min_count: int = 0,
):
    # numpy-compat kwargs (dtype/out/keepdims/initial) are validated and
    # rejected; actual reduction is delegated to nanops.
    nv.validate_sum(
        (), {"dtype": dtype, "out": out, "keepdims": keepdims, "initial": initial}
    )

    result = nanops.nansum(
        self._ndarray, axis=axis, skipna=skipna, min_count=min_count
    )
    return self._wrap_reduction_result(axis, result)

def std(
    self,
    *,
    axis: int | None = None,
    dtype: NpDtype | None = None,
    out=None,
    ddof: int = 1,
    keepdims: bool = False,
    skipna: bool = True,
):
    # Standard deviation of timedeltas; full reductions return a scalar
    # Timedelta, axis reductions return a new array.
    nv.validate_stat_ddof_func(
        (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
    )

    result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
    if axis is None or self.ndim == 1:
        return self._box_func(result)
    return self._from_backing_data(result)
# ----------------------------------------------------------------
# Rendering Methods

def _formatter(self, boxed: bool = False):
    # Element formatter used by the repr machinery.
    from pandas.io.formats.format import get_format_timedelta64

    return get_format_timedelta64(self, box=True)

@dtl.ravel_compat
def _format_native_types(
    self, na_rep="NaT", date_format=None, **kwargs
) -> np.ndarray:
    # Render each element to a string (``na_rep`` for missing values);
    # ``date_format`` is accepted for API symmetry but unused here.
    from pandas.io.formats.format import get_format_timedelta64

    formatter = get_format_timedelta64(self._ndarray, na_rep)
    return np.array([formatter(x) for x in self._ndarray])
# ----------------------------------------------------------------
# Arithmetic Methods

def _add_offset(self, other):
    # Non-Tick DateOffsets cannot be added to timedeltas (Tick offsets
    # are handled elsewhere as timedelta-like scalars).
    assert not isinstance(other, Tick)
    raise TypeError(
        f"cannot add the type {type(other).__name__} to a {type(self).__name__}"
    )

def _add_period(self, other: Period) -> PeriodArray:
    """
    Add a Period object.
    """
    # We will wrap in a PeriodArray and defer to the reversed operation
    from pandas.core.arrays.period import PeriodArray

    i8vals = np.broadcast_to(other.ordinal, self.shape)
    oth = PeriodArray(i8vals, freq=other.freq)
    return oth + self

def _add_datetime_arraylike(self, other):
    """
    Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.
    """
    if isinstance(other, np.ndarray):
        # At this point we have already checked that dtype is datetime64
        from pandas.core.arrays import DatetimeArray

        other = DatetimeArray(other)

    # defer to implementation in DatetimeArray
    return other + self
def _add_datetimelike_scalar(self, other) -> DatetimeArray:
    # adding a timedeltaindex to a datetimelike
    from pandas.core.arrays import DatetimeArray

    assert other is not NaT
    other = Timestamp(other)
    if other is NaT:
        # In this case we specifically interpret NaT as a datetime, not
        # the timedelta interpretation we would get by returning self + NaT
        result = self.asi8.view("m8[ms]") + NaT.to_datetime64()
        return DatetimeArray(result)

    i8 = self.asi8
    # Overflow-checked addition; NaT positions are masked out.
    result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)
    result = self._maybe_mask_results(result)
    dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE
    return DatetimeArray(result, dtype=dtype, freq=self.freq)

def _addsub_object_array(self, other, op):
    # Add or subtract Array-like of objects
    try:
        # TimedeltaIndex can only operate with a subset of DateOffset
        # subclasses.  Incompatible classes will raise AttributeError,
        # which we re-raise as TypeError
        return super()._addsub_object_array(other, op)
    except AttributeError as err:
        raise TypeError(
            f"Cannot add/subtract non-tick DateOffset to {type(self).__name__}"
        ) from err
@unpack_zerodim_and_defer("__mul__")
def __mul__(self, other) -> TimedeltaArray:
    # Multiplication by a numeric scalar or array; timedelta * timedelta
    # is rejected by numpy / the checks below.
    if is_scalar(other):
        # numpy will accept float and int, raise TypeError for others
        result = self._ndarray * other
        freq = None
        if self.freq is not None and not isna(other):
            # Scaling a regular array scales its frequency too.
            freq = self.freq * other
        return type(self)(result, freq=freq)

    if not hasattr(other, "dtype"):
        # list, tuple
        other = np.array(other)
    if len(other) != len(self) and not is_timedelta64_dtype(other.dtype):
        # Exclude timedelta64 here so we correctly raise TypeError
        # for that instead of ValueError
        raise ValueError("Cannot multiply with unequal lengths")

    if is_object_dtype(other.dtype):
        # this multiplication will succeed only if all elements of other
        # are int or float scalars, so we will end up with
        # timedelta64[ns]-dtyped result
        result = [self[n] * other[n] for n in range(len(self))]
        result = np.array(result)
        return type(self)(result)

    # numpy will accept float or int dtype, raise TypeError for others
    result = self._ndarray * other
    return type(self)(result)

__rmul__ = __mul__
@unpack_zerodim_and_defer("__truediv__")
def __truediv__(self, other):
    # timedelta / X is well-defined for timedelta-like or numeric X.
    # timedelta / timedelta yields floats; timedelta / numeric yields
    # a new TimedeltaArray.

    if isinstance(other, self._recognized_scalars):
        other = Timedelta(other)
        if other is NaT:
            # specifically timedelta64-NaT
            result = np.empty(self.shape, dtype=np.float64)
            result.fill(np.nan)
            return result

        # otherwise, dispatch to Timedelta implementation
        return self._ndarray / other

    elif lib.is_scalar(other):
        # assume it is numeric
        result = self._ndarray / other
        freq = None
        if self.freq is not None:
            # Tick division is not implemented, so operate on Timedelta
            freq = self.freq.delta / other
        return type(self)(result, freq=freq)

    if not hasattr(other, "dtype"):
        # e.g. list, tuple
        other = np.array(other)

    if len(other) != len(self):
        raise ValueError("Cannot divide vectors with unequal lengths")

    elif is_timedelta64_dtype(other.dtype):
        # let numpy handle it
        return self._ndarray / other

    elif is_object_dtype(other.dtype):
        # We operate on raveled arrays to avoid problems in inference
        # on NaT
        srav = self.ravel()
        orav = other.ravel()
        result = [srav[n] / orav[n] for n in range(len(srav))]
        result = np.array(result).reshape(self.shape)

        # We need to do dtype inference in order to keep DataFrame ops
        # behavior consistent with Series behavior
        inferred = lib.infer_dtype(result)
        if inferred == "timedelta":
            flat = result.ravel()
            result = type(self)._from_sequence(flat).reshape(result.shape)
        elif inferred == "floating":
            result = result.astype(float)

        return result

    else:
        result = self._ndarray / other
        return type(self)(result)
@unpack_zerodim_and_defer("__rtruediv__")
def __rtruediv__(self, other):
    # X / timedelta is defined only for timedelta-like X (yielding a
    # float ratio); numeric / timedelta is a TypeError.
    if isinstance(other, self._recognized_scalars):
        other = Timedelta(other)
        if other is NaT:
            # specifically timedelta64-NaT
            result = np.empty(self.shape, dtype=np.float64)
            result.fill(np.nan)
            return result

        # otherwise, dispatch to Timedelta implementation
        return other / self._ndarray

    elif lib.is_scalar(other):
        raise TypeError(
            f"Cannot divide {type(other).__name__} by {type(self).__name__}"
        )

    if not hasattr(other, "dtype"):
        # e.g. list, tuple
        other = np.array(other)

    if len(other) != len(self):
        raise ValueError("Cannot divide vectors with unequal lengths")

    elif is_timedelta64_dtype(other.dtype):
        # let numpy handle it
        return other / self._ndarray

    elif is_object_dtype(other.dtype):
        # Note: unlike in __truediv__, we do not _need_ to do type
        # inference on the result.  It does not raise, a numeric array
        # is returned.  GH#23829
        result = [other[n] / self[n] for n in range(len(self))]
        return np.array(result)

    else:
        raise TypeError(
            f"Cannot divide {other.dtype} data by {type(self).__name__}"
        )
    @unpack_zerodim_and_defer("__floordiv__")
    def __floordiv__(self, other):
        # Dispatch for ``timedelta_array // other``:
        #   * timedelta-like scalar -> numeric ndarray (NaT -> all-NaN)
        #   * numeric scalar        -> new TimedeltaArray (freq adjusted)
        #   * timedelta64 ndarray   -> int64 ndarray (float64 + NaN if NaT)
        #   * object ndarray        -> elementwise, re-wrapped if timedelta
        #   * numeric ndarray       -> new TimedeltaArray
        if is_scalar(other):
            if isinstance(other, self._recognized_scalars):
                other = Timedelta(other)
                if other is NaT:
                    # treat this specifically as timedelta-NaT
                    result = np.empty(self.shape, dtype=np.float64)
                    result.fill(np.nan)
                    return result
                # dispatch to Timedelta implementation
                result = other.__rfloordiv__(self._ndarray)
                return result
            # at this point we should only have numeric scalars; anything
            # else will raise
            result = self.asi8 // other
            np.putmask(result, self._isnan, iNaT)
            freq = None
            if self.freq is not None:
                # Note: freq gets division, not floor-division
                freq = self.freq / other
                if freq.nanos == 0 and self.freq.nanos != 0:
                    # e.g. if self.freq is Nano(1) then dividing by 2
                    # rounds down to zero
                    freq = None
            return type(self)(result.view("m8[ns]"), freq=freq)
        if not hasattr(other, "dtype"):
            # list, tuple
            other = np.array(other)
        if len(other) != len(self):
            raise ValueError("Cannot divide with unequal lengths")
        elif is_timedelta64_dtype(other.dtype):
            other = type(self)(other)
            # numpy timedelta64 does not natively support floordiv, so operate
            # on the i8 values
            result = self.asi8 // other.asi8
            mask = self._isnan | other._isnan
            if mask.any():
                # NaT in either operand -> NaN in the result (needs float)
                result = result.astype(np.float64)
                np.putmask(result, mask, np.nan)
            return result
        elif is_object_dtype(other.dtype):
            # error: Incompatible types in assignment (expression has type
            # "List[Any]", variable has type "ndarray")
            result = [  # type: ignore[assignment]
                self[n] // other[n] for n in range(len(self))
            ]
            result = np.array(result)
            if lib.infer_dtype(result, skipna=False) == "timedelta":
                # all-timedelta result: re-wrap as a TimedeltaArray
                result, _ = sequence_to_td64ns(result)
                return type(self)(result)
            return result
        elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype):
            result = self._ndarray // other
            return type(self)(result)
        else:
            dtype = getattr(other, "dtype", type(other).__name__)
            raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")
    @unpack_zerodim_and_defer("__rfloordiv__")
    def __rfloordiv__(self, other):
        # Mirror of __floordiv__ with the operands swapped; numeric
        # numerators are rejected (only timedelta-like // timedelta works).
        if is_scalar(other):
            if isinstance(other, self._recognized_scalars):
                other = Timedelta(other)
                if other is NaT:
                    # treat this specifically as timedelta-NaT
                    result = np.empty(self.shape, dtype=np.float64)
                    result.fill(np.nan)
                    return result
                # dispatch to Timedelta implementation
                result = other.__floordiv__(self._ndarray)
                return result
            raise TypeError(
                f"Cannot divide {type(other).__name__} by {type(self).__name__}"
            )
        if not hasattr(other, "dtype"):
            # list, tuple
            other = np.array(other)
        if len(other) != len(self):
            raise ValueError("Cannot divide with unequal lengths")
        elif is_timedelta64_dtype(other.dtype):
            other = type(self)(other)
            # numpy timedelta64 does not natively support floordiv, so operate
            # on the i8 values
            result = other.asi8 // self.asi8
            mask = self._isnan | other._isnan
            if mask.any():
                # NaT in either operand -> NaN in the result (needs float)
                result = result.astype(np.float64)
                np.putmask(result, mask, np.nan)
            return result
        elif is_object_dtype(other.dtype):
            # error: Incompatible types in assignment (expression has type
            # "List[Any]", variable has type "ndarray")
            result = [  # type: ignore[assignment]
                other[n] // self[n] for n in range(len(self))
            ]
            result = np.array(result)
            return result
        else:
            dtype = getattr(other, "dtype", type(other).__name__)
            raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")
@unpack_zerodim_and_defer("__mod__")
def __mod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
return self - (self // other) * other
@unpack_zerodim_and_defer("__rmod__")
def __rmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
return other - (other // self) * self
@unpack_zerodim_and_defer("__divmod__")
def __divmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
res1 = self // other
res2 = self - res1 * other
return res1, res2
@unpack_zerodim_and_defer("__rdivmod__")
def __rdivmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, self._recognized_scalars):
other = Timedelta(other)
res1 = other // self
res2 = other - res1 * self
return res1, res2
def __neg__(self) -> TimedeltaArray:
if self.freq is not None:
return type(self)(-self._ndarray, freq=-self.freq)
return type(self)(-self._ndarray)
def __pos__(self) -> TimedeltaArray:
return type(self)(self._ndarray, freq=self.freq)
def __abs__(self) -> TimedeltaArray:
# Note: freq is not preserved
return type(self)(np.abs(self._ndarray))
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timedelta methods
    def total_seconds(self) -> np.ndarray:
        """
        Return total duration of each element expressed in seconds.
        This method is available directly on TimedeltaArray, TimedeltaIndex
        and on Series containing timedelta values under the ``.dt`` namespace.
        Returns
        -------
        seconds : [ndarray, Float64Index, Series]
            When the calling object is a TimedeltaArray, the return type
            is ndarray.  When the calling object is a TimedeltaIndex,
            the return type is a Float64Index. When the calling object
            is a Series, the return type is Series of type `float64` whose
            index is the same as the original.
        See Also
        --------
        datetime.timedelta.total_seconds : Standard library version
            of this method.
        TimedeltaIndex.components : Return a DataFrame with components of
            each Timedelta.
        Examples
        --------
        **Series**
        >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
        >>> s
        0   0 days
        1   1 days
        2   2 days
        3   3 days
        4   4 days
        dtype: timedelta64[ns]
        >>> s.dt.total_seconds()
        0         0.0
        1     86400.0
        2    172800.0
        3    259200.0
        4    345600.0
        dtype: float64
        **TimedeltaIndex**
        >>> idx = pd.to_timedelta(np.arange(5), unit='d')
        >>> idx
        TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
                       dtype='timedelta64[ns]', freq=None)
        >>> idx.total_seconds()
        Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],
                     dtype='float64')
        """
        # asi8 is nanoseconds; scale to seconds.  fill_value=None leaves
        # NaN at positions that were NaT.
        return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)
def to_pytimedelta(self) -> np.ndarray:
"""
Return Timedelta Array/Index as object ndarray of datetime.timedelta
objects.
Returns
-------
timedeltas : ndarray[object]
"""
return tslibs.ints_to_pytimedelta(self.asi8)
days = _field_accessor("days", "days", "Number of days for each element.")
seconds = _field_accessor(
"seconds",
"seconds",
"Number of seconds (>= 0 and less than 1 day) for each element.",
)
microseconds = _field_accessor(
"microseconds",
"microseconds",
"Number of microseconds (>= 0 and less than 1 second) for each element.",
)
nanoseconds = _field_accessor(
"nanoseconds",
"nanoseconds",
"Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.",
)
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = [
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"microseconds",
"nanoseconds",
]
hasnans = self._hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self], columns=columns)
if not hasnans:
result = result.astype("int64")
return result
# ---------------------------------------------------------------------
# Constructor Helpers
def sequence_to_td64ns(
    data, copy: bool = False, unit=None, errors="raise"
) -> tuple[np.ndarray, Tick | None]:
    """
    Convert a sequence to a timedelta64[ns]-dtyped ndarray.

    Parameters
    ----------
    data : list-like
    copy : bool, default False
    unit : str, optional
        The timedelta unit to treat integers as multiples of. For numeric
        data this defaults to ``'ns'``.
        Must be un-specified if the data contains a str and ``errors=="raise"``.
    errors : {"raise", "coerce", "ignore"}, default "raise"
        How to handle elements that cannot be converted to timedelta64[ns].
        See ``pandas.to_timedelta`` for details.

    Returns
    -------
    converted : numpy.ndarray
        The sequence converted to a numpy array with dtype ``timedelta64[ns]``.
    inferred_freq : Tick or None
        The inferred frequency of the sequence.

    Raises
    ------
    ValueError : Data cannot be converted to timedelta64[ns].

    Notes
    -----
    Unlike `pandas.to_timedelta`, setting ``errors=ignore`` will not cause
    errors to be ignored; they are caught and subsequently ignored at a
    higher level.
    """
    inferred_freq = None
    if unit is not None:
        unit = parse_timedelta_unit(unit)
    # Unwrap whatever we have into a np.ndarray
    if not hasattr(data, "dtype"):
        # e.g. list, tuple
        if np.ndim(data) == 0:
            # i.e. generator
            data = list(data)
        data = np.array(data, copy=False)
    elif isinstance(data, ABCMultiIndex):
        # BUG FIX: message previously said "DatetimeArray" (copy-paste from
        # the datetime path); this is the timedelta constructor.
        raise TypeError("Cannot create a TimedeltaArray from a MultiIndex.")
    else:
        data = extract_array(data, extract_numpy=True)
    if isinstance(data, IntegerArray):
        # masked integers: missing entries become iNaT
        data = data.to_numpy("int64", na_value=iNaT)
    elif not isinstance(data, (np.ndarray, ExtensionArray)):
        # GH#24539 e.g. xarray, dask object
        data = np.asarray(data)
    elif isinstance(data, ABCCategorical):
        data = data.categories.take(data.codes, fill_value=NaT)._values
        copy = False
    if isinstance(data, TimedeltaArray):
        inferred_freq = data.freq
    # Convert whatever we have into timedelta64[ns] dtype
    if is_object_dtype(data.dtype) or is_string_dtype(data.dtype):
        # no need to make a copy, need to convert if string-dtyped
        data = objects_to_td64ns(data, unit=unit, errors=errors)
        copy = False
    elif is_integer_dtype(data.dtype):
        # treat as multiples of the given unit
        data, copy_made = ints_to_td64ns(data, unit=unit)
        copy = copy and not copy_made
    elif is_float_dtype(data.dtype):
        # cast the unit, multiply base/frac separately
        # to avoid precision issues from float -> int
        mask = np.isnan(data)
        m, p = precision_from_unit(unit or "ns")
        base = data.astype(np.int64)
        frac = data - base
        if p:
            frac = np.round(frac, p)
        data = (base * m + (frac * m).astype(np.int64)).view("timedelta64[ns]")
        data[mask] = iNaT
        copy = False
    elif is_timedelta64_dtype(data.dtype):
        if data.dtype != TD64NS_DTYPE:
            # non-nano unit
            data = ensure_timedelta64ns(data)
            copy = False
    else:
        # This includes datetime64-dtype, see GH#23539, GH#29794
        raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]")
    data = np.array(data, copy=copy)
    assert data.dtype == "m8[ns]", data
    return data, inferred_freq
def ints_to_td64ns(data, unit="ns"):
    """
    Reinterpret an integer-dtype ndarray as timedelta64[ns], treating the
    integers as multiples of ``unit``.

    Parameters
    ----------
    data : numpy.ndarray with integer-dtype
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.

    Returns
    -------
    numpy.ndarray : timedelta64[ns] array converted from data
    bool : whether a copy was made
    """
    copy_made = False
    if unit is None:
        unit = "ns"

    if data.dtype != np.int64:
        # converting to int64 makes a copy, so we can avoid
        # re-copying later
        data = data.astype(np.int64)
        copy_made = True

    if unit == "ns":
        # already in nanoseconds: a zero-copy view suffices
        return data.view("timedelta64[ns]"), copy_made

    # non-nano: view with the requested unit, then normalize to [ns]
    data = data.view(f"timedelta64[{unit}]")
    data = ensure_timedelta64ns(data)
    # the astype conversion makes a copy, so we can avoid re-copying later
    copy_made = True
    return data, copy_made
def objects_to_td64ns(data, unit=None, errors="raise"):
    """
    Convert an object-dtyped or string-dtyped array-like into a
    timedelta64[ns]-dtyped ndarray.

    Parameters
    ----------
    data : ndarray or Index
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.
        Must not be specified if the data contains a str.
    errors : {"raise", "coerce", "ignore"}, default "raise"
        How to handle elements that cannot be converted to timedelta64[ns].
        See ``pandas.to_timedelta`` for details.

    Returns
    -------
    numpy.ndarray : timedelta64[ns] array converted from data

    Raises
    ------
    ValueError : Data cannot be converted to timedelta64[ns].

    Notes
    -----
    Unlike `pandas.to_timedelta`, setting `errors=ignore` will not cause
    errors to be ignored; they are caught and subsequently ignored at a
    higher level.
    """
    # Coerce Index/ndarray to an object ndarray (converts string dtype;
    # no copy when already object-dtyped).
    as_objects = np.array(data, dtype=np.object_, copy=False)
    td64_ints = array_to_timedelta64(as_objects, unit=unit, errors=errors)
    return td64_ints.view("timedelta64[ns]")
def _validate_td64_dtype(dtype) -> DtypeObj:
    """
    Validate that ``dtype`` is exactly timedelta64[ns].

    Returns
    -------
    DtypeObj
        The pandas-normalized dtype.

    Raises
    ------
    ValueError
        If ``dtype`` is unit-less ``timedelta64`` or not timedelta64[ns].
    """
    dtype = pandas_dtype(dtype)
    if is_dtype_equal(dtype, np.dtype("timedelta64")):
        # unit-less timedelta64 is ambiguous; disallowed per GH#24806
        raise ValueError(
            "Passing in 'timedelta' dtype with no precision is not allowed. "
            "Please pass in 'timedelta64[ns]' instead."
        )
    if not is_dtype_equal(dtype, TD64NS_DTYPE):
        raise ValueError(f"dtype {dtype} cannot be converted to timedelta64[ns]")
    return dtype
| bsd-3-clause |
phobson/statsmodels | statsmodels/tsa/ar_model.py | 1 | 33913 | from __future__ import division
from statsmodels.compat.python import iteritems, range, string_types, lmap, long
import numpy as np
from numpy import dot, identity
from numpy.linalg import inv, slogdet
from scipy.stats import norm
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.model as base
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.base.datetools import _index_date
__all__ = ['AR']
def sumofsq(x, axis=0):
    """
    Return the sum of squares of `x` along the given axis.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int, optional
        Axis along which to sum.  Default is 0, matching the previous
        behavior for all existing callers.

    Returns
    -------
    ndarray or scalar
        ``sum(x**2)`` along `axis`.
    """
    # BUG FIX: the `axis` argument was accepted but ignored (axis=0 was
    # hard-coded); honor it.
    return np.sum(x**2, axis=axis)
def _check_ar_start(start, k_ar, method, dynamic):
if (method == 'cmle' or dynamic) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _validate(start, k_ar, dates, method):
    """
    Checks the date and then returns an integer

    Converts a string/datetime `start` into its integer position in `dates`
    and enforces start >= k_ar for non-MLE methods.  Integer input is
    returned unchanged.
    """
    from datetime import datetime
    if isinstance(start, (string_types, datetime)):
        start_date = start
        start = _index_date(start, dates)
        # presample prediction is only available for exact MLE
        if 'mle' not in method and start < k_ar:
            raise ValueError("Start must be >= k_ar for conditional MLE or "
                             "dynamic forecast. Got %s" % start_date)
    return start
def _ar_predict_out_of_sample(y, params, p, k_trend, steps, start=0):
mu = params[:k_trend] or 0 # only have to worry about constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(p + steps) # this is one too big but doesn't matter
if start:
endog[:p] = y[start-p:start]
else:
endog[:p] = y[-p:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i + p] = fcast
return forecast
class AR(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : "Autoregressive AR(p) model",
"params" : """endog : array-like
1-d endogenous response variable. The independent variable.""",
"extra_params" : base._missing_param_doc,
"extra_sections" : ""}
    def __init__(self, endog, dates=None, freq=None, missing='none'):
        # Base class handles date/frequency bookkeeping and missing data.
        super(AR, self).__init__(endog, None, dates, freq, missing=missing)
        endog = self.endog  # original might not have been an ndarray
        if endog.ndim == 1:
            # promote to a column vector so downstream matrix code works
            endog = endog[:, None]
            self.endog = endog  # to get shapes right
        elif endog.ndim > 1 and endog.shape[1] != 1:
            raise ValueError("Only the univariate case is implemented")
def initialize(self):
pass
    def _transparams(self, params):
        """
        Transforms params to induce stationarity/invertability.

        Only the AR coefficients (positions k..k+p) are transformed; any
        trend terms pass through unchanged.

        Reference
        ---------
        Jones(1980)
        """
        p = self.k_ar
        k = self.k_trend
        newparams = params.copy()
        newparams[k:k+p] = _ar_transparams(params[k:k+p].copy())
        return newparams
    def _invtransparams(self, start_params):
        """
        Inverse of the Jones reparameterization

        Maps constrained (stationary) AR coefficients back to the
        unconstrained optimization space; trend terms pass through.
        """
        p = self.k_ar
        k = self.k_trend
        newparams = start_params.copy()
        newparams[k:k+p] = _ar_invtransparams(start_params[k:k+p].copy())
        return newparams
    def _presample_fit(self, params, start, p, end, y, predictedvalues):
        """
        Return the pre-sample predicted values using the Kalman Filter

        Writes results into `predictedvalues` in place (no return value).
        `y` is expected to be the demeaned presample data.

        Notes
        -----
        See predict method for how to use start and p.
        """
        k = self.k_trend
        # build system matrices
        T_mat = KalmanFilter.T(params, p, k, p)
        R_mat = KalmanFilter.R(params, p, k, 0, p)
        # Initial State mean and variance
        alpha = np.zeros((p, 1))
        # stationary initial state covariance: solves the discrete Lyapunov
        # equation via the vec/Kronecker identity
        Q_0 = dot(inv(identity(p**2)-np.kron(T_mat, T_mat)),
                  dot(R_mat, R_mat.T).ravel('F'))
        Q_0 = Q_0.reshape(p, p, order='F')  # TODO: order might need to be p+k
        P = Q_0
        Z_mat = KalmanFilter.Z(p)
        for i in range(end):  # iterate p-1 times to fit presample
            v_mat = y[i] - dot(Z_mat, alpha)  # innovation
            F_mat = dot(dot(Z_mat, P), Z_mat.T)
            Finv = 1./F_mat  # inv. always scalar
            K = dot(dot(dot(T_mat, P), Z_mat.T), Finv)  # Kalman gain
            # update state
            alpha = dot(T_mat, alpha) + dot(K, v_mat)
            L = T_mat - dot(K, Z_mat)
            P = dot(dot(T_mat, P), L.T) + dot(R_mat, R_mat.T)
            #P[0,0] += 1 # for MA part, R_mat.R_mat.T above
            if i >= start - 1:  # only record if we ask for it
                predictedvalues[i + 1 - start] = dot(Z_mat, alpha)
    def _get_predict_start(self, start, dynamic):
        """
        Resolve `start` (None, int, or date-like) to an integer index and
        validate it against the estimation method and `dynamic` flag.
        """
        method = getattr(self, 'method', 'mle')
        k_ar = getattr(self, 'k_ar', 0)
        if start is None:
            if method == 'mle' and not dynamic:
                # exact MLE can produce presample predictions via the
                # Kalman filter, so default to the very beginning
                start = 0
            else:  # can't do presample fit for cmle or dynamic
                start = k_ar
        elif isinstance(start, (int, long)):
            start = super(AR, self)._get_predict_start(start)
        else:  # should be a date
            start = _validate(start, k_ar, self.data.dates, method)
            start = super(AR, self)._get_predict_start(start)
        _check_ar_start(start, k_ar, method, dynamic)
        self._set_predict_start_date(start)
        return start
    def predict(self, params, start=None, end=None, dynamic=False):
        """
        Returns in-sample and out-of-sample prediction.

        Parameters
        ----------
        params : array
            The fitted model parameters.
        start : int, str, or datetime
            Zero-indexed observation number at which to start forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type.
        end : int, str, or datetime
            Zero-indexed observation number at which to end forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type.
        dynamic : bool
            The `dynamic` keyword affects in-sample prediction. If dynamic
            is False, then the in-sample lagged values are used for
            prediction. If `dynamic` is True, then in-sample forecasts are
            used in place of lagged dependent variables. The first forecasted
            value is `start`.

        Returns
        -------
        predicted values : array

        Notes
        -----
        The linear Gaussian Kalman filter is used to return pre-sample fitted
        values. The exact initial Kalman Filter is used. See Durbin and Koopman
        in the references for more information.
        """
        # will return an index of a date
        start = self._get_predict_start(start, dynamic)
        end, out_of_sample = self._get_predict_end(end)
        if start - end > 1:
            raise ValueError("end is before start")
        k_ar = self.k_ar
        k_trend = self.k_trend
        method = self.method
        endog = self.endog.squeeze()
        if dynamic:
            # everything from `start` onward is treated as out-of-sample
            out_of_sample += end - start + 1
            return _ar_predict_out_of_sample(endog, params, k_ar,
                                             k_trend, out_of_sample, start)
        predictedvalues = np.zeros(end + 1 - start)
        # fit pre-sample
        if method == 'mle':  # use Kalman Filter to get initial values
            if k_trend:
                # long-run process mean implied by the constant
                mu = params[0]/(1-np.sum(params[k_trend:]))
            # NOTE(review): if k_trend == 0 and start < k_ar, `mu` is
            # referenced below without being bound -- TODO confirm callers
            # never hit this combination.
            # modifies predictedvalues in place
            if start < k_ar:
                self._presample_fit(params, start, k_ar, min(k_ar-1, end),
                                    endog[:k_ar] - mu, predictedvalues)
                predictedvalues[:k_ar-start] += mu
            if end < k_ar:
                return predictedvalues
        # just do the whole thing and truncate
        fittedvalues = dot(self.X, params)
        pv_start = max(k_ar - start, 0)
        fv_start = max(start - k_ar, 0)
        fv_end = min(len(fittedvalues), end-k_ar+1)
        predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
        if out_of_sample:
            forecastvalues = _ar_predict_out_of_sample(endog, params,
                                                       k_ar, k_trend,
                                                       out_of_sample)
            predictedvalues = np.r_[predictedvalues, forecastvalues]
        return predictedvalues
    def _presample_varcov(self, params):
        """
        Returns the inverse of the presample variance-covariance.

        Builds inv(Vp) for the first p observations from the AR
        coefficients via correlations of the [-1, phi_1, ..., phi_p]
        vector, exploiting symmetry.

        Notes
        -----
        See Hamilton p. 125
        """
        k = self.k_trend
        p = self.k_ar
        p1 = p+1
        # get inv(Vp) Hamilton 5.3.7
        params0 = np.r_[-1, params[k:]]
        Vpinv = np.zeros((p, p), dtype=params.dtype)
        for i in range(1, p1):
            Vpinv[i-1, i-1:] = np.correlate(params0, params0[:i],)[:-1]
            Vpinv[i-1, i-1:] -= np.correlate(params0[-i:], params0,)[:-1]
        # fill the lower triangle by symmetry (diagonal counted once)
        Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
        return Vpinv
def _loglike_css(self, params):
"""
Loglikelihood of AR(p) process using conditional sum of squares
"""
nobs = self.nobs
Y = self.Y
X = self.X
ssr = sumofsq(Y.squeeze() - np.dot(X, params))
sigma2 = ssr/nobs
return (-nobs/2 * (np.log(2 * np.pi) + np.log(sigma2)) -
ssr/(2 * sigma2))
    def _loglike_mle(self, params):
        """
        Loglikelihood of AR(p) process using exact maximum likelihood

        Combines the stationary distribution of the first p observations
        with the conditional likelihood of the rest (Hamilton ch. 5).
        Also stores the concentrated `sigma2` on the instance.
        """
        nobs = self.nobs
        X = self.X
        endog = self.endog
        k_ar = self.k_ar
        k_trend = self.k_trend
        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)
        # get mean and variance for pre-sample lags
        yp = endog[:k_ar].copy()
        if k_trend:
            c = [params[0]] * k_ar
        else:
            c = [0]
        # unconditional process mean for the presample observations
        mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
        diffp = yp - mup[:, None]
        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params)
        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))
        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1./nobs * (diffpVpinv + ssr)
        self.sigma2 = sigma2
        logdet = slogdet(Vpinv)[1]  # TODO: add check for singularity
        loglike = -1/2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) -
                           logdet + diffpVpinv / sigma2 + ssr / sigma2)
        return loglike
    def loglike(self, params):
        """
        The loglikelihood of an AR(p) process

        Parameters
        ----------
        params : array
            The fitted parameters of the AR model

        Returns
        -------
        llf : float
            The loglikelihood evaluated at `params`

        Notes
        -----
        Contains constant term. If the model is fit by OLS then this returns
        the conditonal maximum likelihood.
        .. math:: \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}
        If it is fit by MLE then the (exact) unconditional maximum likelihood
        is returned.
        .. math:: -\\frac{n}{2}log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}
        where
        :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
        mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
        variance-covariance matrix of the first `p` observations.
        """
        #TODO: Math is on Hamilton ~pp 124-5
        # dispatch on how the model was (or will be) estimated
        if self.method == "cmle":
            return self._loglike_css(params)
        else:
            return self._loglike_mle(params)
def score(self, params):
"""
Return the gradient of the loglikelihood at params.
Parameters
----------
params : array-like
The parameter values at which to evaluate the score function.
Notes
-----
Returns numerical gradient.
"""
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-8)
def information(self, params):
"""
Not Implemented Yet
"""
return
def hessian(self, params):
"""
Returns numerical hessian for now.
"""
loglike = self.loglike
return approx_hess(params, loglike)
    def _stackX(self, k_ar, trend):
        """
        Private method to build the RHS matrix for estimation.

        Columns are trend terms then lags.  Side effect: sets
        ``self.k_trend`` from the requested trend specification.
        """
        endog = self.endog
        X = lagmat(endog, maxlag=k_ar, trim='both')
        k_trend = util.get_trendorder(trend)
        if k_trend:
            X = add_trend(X, prepend=True, trend=trend)
        self.k_trend = k_trend
        return X
def select_order(self, maxlag, ic, trend='c', method='mle'):
"""
Select the lag order according to the information criterion.
Parameters
----------
maxlag : int
The highest lag length tried. See `AR.fit`.
ic : str {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
See `AR.fit`.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
Returns
-------
bestlag : int
Best lag according to IC.
"""
endog = self.endog
# make Y and X with same nobs to compare ICs
Y = endog[maxlag:]
self.Y = Y # attach to get correct fit stats
X = self._stackX(maxlag, trend) # sets k_trend
self.X = X
k = self.k_trend # k_trend set in _stackX
k = max(1, k) # handle if startlag is 0
results = {}
if ic != 't-stat':
for lag in range(k, maxlag+1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag-lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=100, disp=0)
results[lag] = eval('fit.'+ic)
bestic, bestlag = min((res, k) for k, res in iteritems(results))
else: # choose by last t-stat.
stop = 1.6448536269514722 # for t-stat, norm.ppf(.95)
for lag in range(maxlag, k - 1, -1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag - lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=35, disp=-1)
if np.abs(fit.tvalues[-1]) >= stop:
bestlag = lag
break
return bestlag
    def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
            transparams=True, start_params=None, solver='lbfgs', maxiter=35,
            full_output=1, disp=1, callback=None, **kwargs):
        """
        Fit the unconditional maximum likelihood of an AR(p) process.

        Parameters
        ----------
        maxlag : int
            If `ic` is None, then maxlag is the lag length used in fit.  If
            `ic` is specified then maxlag is the highest lag order used to
            select the correct lag order.  If maxlag is None, the default is
            round(12*(nobs/100.)**(1/4.))
        method : str {'cmle', 'mle'}, optional
            cmle - Conditional maximum likelihood using OLS
            mle - Unconditional (exact) maximum likelihood.  See `solver`
            and the Notes.
        ic : str {'aic','bic','hic','t-stat'}
            Criterion used for selecting the optimal lag length.
            aic - Akaike Information Criterion
            bic - Bayes Information Criterion
            t-stat - Based on last lag
            hqic - Hannan-Quinn Information Criterion
            If any of the information criteria are selected, the lag length
            which results in the lowest value is selected.  If t-stat, the
            model starts with maxlag and drops a lag until the highest lag
            has a t-stat that is significant at the 95 % level.
        trend : str {'c','nc'}
            Whether to include a constant or not. 'c' - include constant.
            'nc' - no constant.
        The below can be specified if method is 'mle'
        transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
            Uses the transformation suggested in Jones (1980).
        start_params : array-like, optional
            A first guess on the parameters.  Default is cmle estimates.
        solver : str or None, optional
            Solver to be used if method is 'mle'.  The default is 'lbfgs'
            (limited memory Broyden-Fletcher-Goldfarb-Shanno).  Other choices
            are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
            'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
            and 'powell'.
        maxiter : int, optional
            The maximum number of function evaluations. Default is 35.
        tol : float
            The convergence tolerance.  Default is 1e-08.
        full_output : bool, optional
            If True, all output from solver will be available in
            the Results object's mle_retvals attribute.  Output is dependent
            on the solver.  See Notes for more information.
        disp : bool, optional
            If True, convergence information is output.
        callback : function, optional
            Called after each iteration as callback(xk) where xk is the current
            parameter vector.
        kwargs
            See Notes for keyword arguments that can be passed to fit.

        References
        ----------
        Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
            series with missing observations."  `Technometrics`.  22.3.
            389-95.

        See also
        --------
        statsmodels.base.model.LikelihoodModel.fit
        """
        method = method.lower()
        if method not in ['cmle', 'yw', 'mle']:
            raise ValueError("Method %s not recognized" % method)
        self.method = method
        self.trend = trend
        self.transparams = transparams
        nobs = len(self.endog)  # overwritten if method is 'cmle'
        endog = self.endog
        if maxlag is None:
            # Schwert-style rule of thumb for the default maximum lag
            maxlag = int(round(12*(nobs/100.)**(1/4.)))
        k_ar = maxlag  # stays this if ic is None
        # select lag length
        if ic is not None:
            ic = ic.lower()
            if ic not in ['aic', 'bic', 'hqic', 't-stat']:
                raise ValueError("ic option %s not understood" % ic)
            k_ar = self.select_order(k_ar, ic, trend, method)
        self.k_ar = k_ar  # change to what was chosen by ic
        # redo estimation for best lag
        # make LHS
        Y = endog[k_ar:, :]
        # make lagged RHS
        X = self._stackX(k_ar, trend)  # sets self.k_trend
        k_trend = self.k_trend
        self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
        self.Y = Y
        self.X = X
        if method == "cmle":  # do OLS
            arfit = OLS(Y, X).fit()
            params = arfit.params
            self.nobs = nobs - k_ar
            self.sigma2 = arfit.ssr/arfit.nobs  # needed for predict fcasterr
        elif method == "mle":
            solver = solver.lower()
            self.nobs = nobs
            if start_params is None:
                # use the conditional (OLS) estimates as starting values
                start_params = OLS(Y, X).fit().params
            else:
                if len(start_params) != k_trend + k_ar:
                    raise ValueError("Length of start params is %d. There"
                                     " are %d parameters." %
                                     (len(start_params), k_trend + k_ar))
            # map to the unconstrained space expected by the optimizer
            start_params = self._invtransparams(start_params)
            if solver == 'lbfgs':
                kwargs.setdefault('pgtol', 1e-8)
                kwargs.setdefault('factr', 1e2)
                kwargs.setdefault('m', 12)
                kwargs.setdefault('approx_grad', True)
            mlefit = super(AR, self).fit(start_params=start_params,
                                         method=solver, maxiter=maxiter,
                                         full_output=full_output, disp=disp,
                                         callback=callback, **kwargs)
            params = mlefit.params
            if self.transparams:
                params = self._transparams(params)
            self.transparams = False  # turn off now for other results
        # don't use yw, because we can't estimate the constant
        #elif method == "yw":
        #    params, omega = yule_walker(endog, order=maxlag,
        #                                method="mle", demean=False)
            # how to handle inference after Yule-Walker?
        #    self.params = params #TODO: don't attach here
        #    self.omega = omega
        pinv_exog = np.linalg.pinv(X)
        normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
        arfit = ARResults(self, params, normalized_cov_params)
        if method == 'mle' and full_output:
            arfit.mle_retvals = mlefit.mle_retvals
            arfit.mle_settings = mlefit.mle_settings
        return ARResultsWrapper(arfit)
class ARResults(tsbase.TimeSeriesModelResults):
    """
    Class to hold results from fitting an AR model.

    Parameters
    ----------
    model : AR Model instance
        Reference to the model that is fit.
    params : array
        The fitted parameters from the AR Model.
    normalized_cov_params : array
        inv(dot(X.T,X)) where X is the lagged values.
    scale : float, optional
        An estimate of the scale of the model.

    Returns
    -------
    **Attributes**

    aic : float
        Akaike Information Criterion using Lutkepohl's definition.
        :math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`
    bic : float
        Bayes Information Criterion
        :math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`
    bse : array
        The standard errors of the estimated parameters. If `method` is 'cmle',
        then the standard errors that are returned are the OLS standard errors
        of the coefficients. If the `method` is 'mle' then they are computed
        using the numerical Hessian.
    fittedvalues : array
        The in-sample predicted values of the fitted AR model. The `k_ar`
        initial values are computed via the Kalman Filter if the model is
        fit by `mle`.
    fpe : float
        Final prediction error using Lutkepohl's definition
        ((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
    hqic : float
        Hannan-Quinn Information Criterion.
    k_ar : float
        Lag length. Sometimes used as `p` in the docs.
    k_trend : float
        The number of trend terms included. 'nc'=0, 'c'=1.
    llf : float
        The loglikelihood of the model evaluated at `params`. See `AR.loglike`
    model : AR model instance
        A reference to the fitted AR model.
    nobs : float
        The number of available observations `nobs` - `k_ar`
    n_totobs : float
        The number of total observations in `endog`. Sometimes `n` in the docs.
    params : array
        The fitted parameters of the model.
    pvalues : array
        The p values associated with the standard errors.
    resid : array
        The residuals of the model. If the model is fit by 'mle' then the
        pre-sample residuals are calculated using fittedvalues from the Kalman
        Filter.
    roots : array
        The roots of the AR process are the solution to
        (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
        Stability requires that the roots in modulus lie outside the unit
        circle.
    scale : float
        Same as sigma2
    sigma2 : float
        The variance of the innovations (residuals).
    trendorder : int
        The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0,
        'ct' = 1, etc.
    tvalues : array
        The t-values associated with `params`.
    """

    _cache = {} # for scale setter

    def __init__(self, model, params, normalized_cov_params=None, scale=1.):
        super(ARResults, self).__init__(model, params, normalized_cov_params,
                                        scale)
        self._cache = resettable_cache()
        # nobs is the number of usable observations (total minus k_ar
        # pre-sample values); n_totobs is the raw length of endog.
        self.nobs = model.nobs
        n_totobs = len(model.endog)
        self.n_totobs = n_totobs
        self.X = model.X # copy?
        self.Y = model.Y
        k_ar = model.k_ar
        self.k_ar = k_ar
        k_trend = model.k_trend
        self.k_trend = k_trend
        # trendorder: polynomial order of the trend, or None when no trend.
        trendorder = None
        if k_trend > 0:
            trendorder = k_trend - 1
        self.trendorder = trendorder
        #TODO: cmle vs mle?
        self.df_model = k_ar + k_trend
        self.df_resid = self.model.df_resid = n_totobs - self.df_model

    @cache_writable()
    def sigma2(self):
        # Innovation variance: computed from the residuals for 'cmle',
        # otherwise taken from the (MLE-fitted) model itself.
        model = self.model
        if model.method == "cmle": # do DOF correction
            return 1. / self.nobs * sumofsq(self.resid)
        else:
            return self.model.sigma2

    @cache_writable() # for compatibility with RegressionResults
    def scale(self):
        return self.sigma2

    @cache_readonly
    def bse(self): # allow user to specify?
        # 'cmle' uses OLS standard errors with a DOF-corrected scale;
        # 'mle' inverts a numerical Hessian of the loglikelihood.
        if self.model.method == "cmle": # uses different scale/sigma def.
            resid = self.resid
            ssr = np.dot(resid, resid)
            ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)
            return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
        else:
            hess = approx_hess(self.params, self.model.loglike)
            return np.sqrt(np.diag(-np.linalg.inv(hess)))

    @cache_readonly
    def pvalues(self):
        # Two-sided p-values from the normal survival function.
        return norm.sf(np.abs(self.tvalues))*2

    @cache_readonly
    def aic(self):
        #JP: this is based on loglike with dropped constant terms ?
        # Lutkepohl
        #return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
        # Include constant as estimated free parameter and double the loss
        return np.log(self.sigma2) + 2 * (1 + self.df_model)/self.nobs
        # Stata definition
        #nobs = self.nobs
        #return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs

    @cache_readonly
    def hqic(self):
        nobs = self.nobs
        # Lutkepohl
        # return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
        # R uses all estimated parameters rather than just lags
        return (np.log(self.sigma2) + 2 * np.log(np.log(nobs))/nobs *
                (1 + self.df_model))
        # Stata
        #nobs = self.nobs
        #return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
        #        (self.k_ar + self.k_trend)

    @cache_readonly
    def fpe(self):
        nobs = self.nobs
        df_model = self.df_model
        #Lutkepohl
        return ((nobs+df_model)/(nobs-df_model))*self.sigma2

    @cache_readonly
    def bic(self):
        nobs = self.nobs
        # Lutkepohl
        #return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
        # Include constant as est. free parameter
        return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs)/nobs
        # Stata
        # return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \
        #       self.k_trend)

    @cache_readonly
    def resid(self):
        #NOTE: uses fittedvalues because it calculates presample values for mle
        model = self.model
        endog = model.endog.squeeze()
        if model.method == "cmle": # eliminate pre-sample
            return endog[self.k_ar:] - self.fittedvalues
        else:
            return model.endog.squeeze() - self.fittedvalues

    #def ssr(self):
    #    resid = self.resid
    #    return np.dot(resid, resid)

    @cache_readonly
    def roots(self):
        # Inverse roots of the AR characteristic polynomial; the first
        # k_trend entries of params are trend terms and are skipped.
        k = self.k_trend
        return np.roots(np.r_[1, -self.params[k:]]) ** -1

    @cache_readonly
    def fittedvalues(self):
        return self.model.predict(self.params)

    def predict(self, start=None, end=None, dynamic=False):
        # Delegates to the model's predict; docstring is attached below.
        params = self.params
        predictedvalues = self.model.predict(params, start, end, dynamic)
        return predictedvalues

    # Unfinished sketch of forecast-error / confidence-interval computation.
    #start = self.model._get_predict_start(start)
    #end, out_of_sample = self.model._get_predict_end(end)

    ##TODO: return forecast errors and confidence intervals
    #from statsmodels.tsa.arima_process import arma2ma
    #ma_rep = arma2ma(np.r_[1,-params[::-1]], [1], out_of_sample)
    #fcasterr = np.sqrt(self.sigma2 * np.cumsum(ma_rep**2))

    # Splice extra `confint` documentation into the docstring inherited from
    # AR.predict; the slice indices refer to line positions in that docstring.
    preddoc = AR.predict.__doc__.split('\n')
    extra_doc = (""" confint : bool, float
        Whether to return confidence intervals. If `confint` == True,
        95 % confidence intervals are returned. Else if `confint` is a
        float, then it is assumed to be the alpha value of the confidence
        interval. That is confint == .05 returns a 95% confidence
        interval, and .10 would return a 90% confidence interval."""
                 ).split('\n')
    #ret_doc = """
    #        fcasterr : array-like
    #        confint : array-like
    #"""
    predict.__doc__ = '\n'.join(preddoc[:5] + preddoc[7:20] + extra_doc +
                                preddoc[20:])
class ARResultsWrapper(wrap.ResultsWrapper):
    """Results wrapper that re-attaches metadata (e.g. dates) to ARResults
    output via the generic time-series wrapping machinery."""
    # No AR-specific attributes or methods need special wrapping; extend
    # the time-series defaults with empty mappings.
    _attrs = {}
    _methods = {}
    _wrap_attrs = wrap.union_dicts(
        tsbase.TimeSeriesResultsWrapper._wrap_attrs, _attrs)
    _wrap_methods = wrap.union_dicts(
        tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)


wrap.populate_wrapper(ARResultsWrapper, ARResults)
if __name__ == "__main__":
    # Ad-hoc demo: fit AR(9) models to the sunspot data by conditional MLE
    # (OLS) and by exact MLE, then exercise date handling with pandas.
    import statsmodels.api as sm
    sunspots = sm.datasets.sunspots.load()
    # Why does R demean the data by default?
    ar_ols = AR(sunspots.endog)
    res_ols = ar_ols.fit(maxlag=9)
    ar_mle = AR(sunspots.endog)
    res_mle_bfgs = ar_mle.fit(maxlag=9, method="mle", solver="bfgs",
                              maxiter=500, gtol=1e-10)
    # res_mle2 = ar_mle.fit(maxlag=1, method="mle", maxiter=500, penalty=True,
    #        tol=1e-13)

    # ar_yw = AR(sunspots.endog)
    # res_yw = ar_yw.fit(maxlag=4, method="yw")

    # # Timings versus talkbox
    # from timeit import default_timer as timer
    # print "Time AR fit vs. talkbox"
    # # generate a long series of AR(2) data
    #
    # nobs = 1000000
    # y = np.empty(nobs)
    # y[0:2] = 0
    # for i in range(2,nobs):
    #     y[i] = .25 * y[i-1] - .75 * y[i-2] + np.random.rand()
    #
    # mod_sm = AR(y)
    # t = timer()
    # res_sm = mod_sm.fit(method="yw", trend="nc", demean=False, maxlag=2)
    # t_end = timer()
    # print str(t_end - t) + " seconds for sm.AR with yule-walker, 2 lags"
    # try:
    #     import scikits.talkbox as tb
    # except:
    #     raise ImportError("You need scikits.talkbox installed for timings")
    # t = timer()
    # mod_tb = tb.lpc(y, 2)
    # t_end = timer()
    # print str(t_end - t) + " seconds for talkbox.lpc"
    # print """For higher lag lengths ours quickly fills up memory and starts
    #thrashing the swap. Should we include talkbox C code or Cythonize the
    #Levinson recursion algorithm?"""

    ## Try with a pandas series
    import pandas
    # NOTE(review): scikits.timeseries and pandas.DateRange/timeRule are long
    # deprecated; this section only runs with very old package versions.
    import scikits.timeseries as ts
    d1 = ts.Date(year=1700, freq='A')
    #NOTE: have to have yearBegin offset for annual data until parser rewrite
    #should this be up to the user, or should it be done in TSM init?
    #NOTE: not anymore, it's end of year now
    ts_dr = ts.date_array(start_date=d1, length=len(sunspots.endog))
    pandas_dr = pandas.DateRange(start=d1.datetime,
                                 periods=len(sunspots.endog), timeRule='A@DEC')
    #pandas_dr = pandas_dr.shift(-1, pandas.datetools.yearBegin)
    dates = np.arange(1700, 1700 + len(sunspots.endog))
    dates = ts.date_array(dates, freq='A')
    #sunspots = pandas.Series(sunspots.endog, index=dates)

    #NOTE: pandas only does business days for dates it looks like
    import datetime
    # Convert the timeseries dates to datetime objects for the pandas index.
    dt_dates = np.asarray(lmap(datetime.datetime.fromordinal,
                               ts_dr.toordinal().astype(int)))
    sunspots = pandas.Series(sunspots.endog, index=dt_dates)

    #NOTE: pandas can't handle pre-1900 dates
    mod = AR(sunspots, freq='A')
    res = mod.fit(method='mle', maxlag=9)

    # some data for an example in Box Jenkins
    IBM = np.asarray([460, 457, 452, 459, 462, 459, 463, 479, 493, 490.])
    w = np.diff(IBM)
    theta = .5
| bsd-3-clause |
icdishb/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

# import some data to play with
iris = datasets.load_iris()
# Keep only the first two features so the decision surface is 2-D plottable.
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
    """Custom linear kernel k(x, y) = x M y.T with M = diag(2, 1).

    The first feature is weighted twice as heavily as the second one.
    """
    weight = np.array([[2.0, 0.0], [0.0, 1.0]])
    return x.dot(weight).dot(y.T)
h = .02  # step size in the mesh

# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Classify every mesh point (columns stacked as (x, y) pairs).
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
# Bail out early when the dataset location is not configured (see the module
# docstring for setup instructions).
if 'MLCOMP_DATASETS_HOME' not in os.environ:
    print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
    sys.exit(0)

# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))

print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
                                    for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target

print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
# NOTE(review): t0 is reset *after* the test set is loaded, so the elapsed
# time printed on the next line is always ~0s and does not time the load.
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))

# Reuse the fitted vectorizer so train and test share the same vocabulary.
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
    """Fit ``clf_class(**params)`` on the module-level training data, print
    timing and quality reports on the test data, and draw its confusion
    matrix with pylab."""
    print("parameters:", params)
    start = time()
    classifier = clf_class(**params).fit(X_train, y_train)
    print("done in %fs" % (time() - start))

    # Report sparsity of the learned weights (linear models only).
    if hasattr(classifier, 'coef_'):
        nonzero_pct = np.mean(classifier.coef_ != 0) * 100
        print("Percentage of non zeros coef: %f" % nonzero_pct)

    print("Predicting the outcomes of the testing set")
    start = time()
    predictions = classifier.predict(X_test)
    print("done in %fs" % (time() - start))

    print("Classification report on test set for classifier:")
    print(classifier)
    print()
    print(classification_report(y_test, predictions,
                                target_names=news_test.target_names))

    cm = confusion_matrix(y_test, predictions)
    print("Confusion matrix:")
    print(cm)

    # Show confusion matrix
    pl.matshow(cm)
    pl.title('Confusion matrix of the %s classifier' % name)
    pl.colorbar()
print("Testbenching a linear classifier...")
# NOTE(review): `n_iter` was renamed `max_iter` in scikit-learn >= 0.19;
# these settings target the old SGDClassifier API.
parameters = {
    'loss': 'hinge',
    'penalty': 'l2',
    'n_iter': 50,
    'alpha': 0.00001,
    'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')

print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')

# Display all confusion-matrix figures created by the benchmark runs.
pl.show()
| bsd-3-clause |
frank-tancf/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 85 | 5728 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5

# Initialize the range of `n_samples` (log-spaced index sizes).
# NOTE(review): `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
# this line requires an older NumPy.
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(np.int)

# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers, shuffle=True,
                         random_state=0)
# Hold out the first n_queries rows as query points; index the rest.
queries = all_data[:n_queries]
index_data = all_data[n_queries:]

# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []

# Calculate the average query time
for n_samples in n_samples_values:
    X = index_data[:n_samples]
    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []

    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        query = queries[[rng.randint(0, n_queries)]]

        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query, return_distance=False)
        time_exact.append(time.time() - t0)

        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query, return_distance=False)
        time_approx.append(time.time() - t0)

        # Fraction of approximate neighbors that are among the true 10-NN.
        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())

    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    # Per-query speedup distribution (exact time over approximate time).
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx, average_speedup,
           mean_accuracy, std_accuracy))

    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))

# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")

# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")

# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
Bollegala/MLIB | utils/genArtificialData.py | 1 | 4811 | '''
Created on Aug 5, 2011
@author: danu
'''
from matplotlib import pyplot as plt
import numpy as np
def twoGaussians(n):
    """
    Generates 2d n data points from two Gaussians.

    Returns an array of 2D vectors corresponding to the data points
    and a 1d array indicating the true labels of those data points.
    """
    # Draw a 0/1 label for each point with probability 1/2, then sort so
    # that all label-0 points precede all label-1 points.
    labels = (np.random.rand(n) > 0.5).astype('float')
    labels.sort()
    # Shift the first coordinate by -5 or +5 according to the label.
    offsets = np.zeros((2, n), dtype='float')
    offsets[0, :] = 2.0 * labels - 1.0
    x = np.random.randn(2, n) + 5 * offsets
    ytrue = labels
    # Centralization: remove the per-coordinate mean.
    x = x - x.mean(axis=1).reshape(-1, 1)
    # Normalization: scale each coordinate to unit standard deviation.
    x = x / x.std(axis=1).reshape(-1, 1)
    return x, ytrue
def fourGaussians(n):
    """
    Generates 2d n data points from four Gaussians.

    Returns an array of 2D vectors corresponding to the data points
    and a 1d array indicating the true labels of those data points.
    """
    # Two independent coin flips per point pick one of four quadrant centers.
    bit0 = (np.random.rand(n) > 0.5).astype('float')
    bit1 = (np.random.rand(n) > 0.5).astype('float')
    noise = np.random.randn(2, n)
    centers = np.zeros((2, n), dtype='float')
    centers[0, :] = 4.0 * bit0 - 2.0  # x-center at -2 or +2
    centers[1, :] = 4.0 * bit1 - 2.0  # y-center at -2 or +2
    x = 0.5 * noise + centers
    # Labels 0..3 encode the (bit0, bit1) combination.
    ytrue = bit0 + 2.0 * bit1
    # Centralization: remove the per-coordinate mean.
    x = x - x.mean(axis=1).reshape(-1, 1)
    # Normalization: scale each coordinate to unit standard deviation.
    x = x / x.std(axis=1).reshape(-1, 1)
    return x, ytrue
def spiral(n):
    """
    Generates two spirals. The datapoints are given by vectors in
    x and their corresponding class labels in ytrue.
    """
    half = int(n / 2)
    # Radius grows and angle sweeps with the point index; the second spiral
    # is the first one rotated by pi.
    idx = np.arange(half, dtype='float')
    radius = 1 + (4 * (idx - 1)) / float(n)
    angle = (np.pi * (idx - 1) * 3) / float(n)
    x = np.zeros((2, n), dtype='float')
    x[0, :half] = radius * np.cos(angle)
    x[1, :half] = radius * np.sin(angle)
    x[0, half:2 * half] = radius * np.cos(angle + np.pi)
    x[1, half:2 * half] = radius * np.sin(angle + np.pi)
    # Jitter every point with Gaussian noise.
    x = x + 0.1 * np.random.randn(2, n)
    ytrue = np.zeros(n, dtype='float')
    ytrue[half:] = 1.0
    # Centralization: remove the per-coordinate mean.
    x = x - x.mean(axis=1).reshape(-1, 1)
    # Normalization: scale each coordinate to unit standard deviation.
    x = x / x.std(axis=1).reshape(-1, 1)
    return x, ytrue
def highLowDensities(n):
    """
    Creates a dataset with two Gaussians, where one has a high
    density and the other has a low density. Data points are
    arranged in column arrays in x, and their corresponding
    class labels are given in ytrue.
    """
    half = int(n / 2)
    # Both clusters reuse the same draws; the first is the second shrunk
    # by a factor of 10 (hence much denser).
    cloud = np.random.randn(2, half)
    x = np.zeros((2, n), dtype='float')
    x[:, :half] = 0.1 * cloud
    x[:, half:] = cloud
    ytrue = np.zeros(n)
    ytrue[half:] = np.ones(half)
    # Centralization: remove the per-coordinate mean.
    x = x - x.mean(axis=1).reshape(-1, 1)
    # Normalization: scale each coordinate to unit standard deviation.
    x = x / x.std(axis=1).reshape(-1, 1)
    return x, ytrue
def circleAndGaussian(n):
    """
    Generates a circle and a Gaussian.

    Data points are arranged as 2d column vectors in x,
    and their corresponding labels are given in ytrue
    (0 for the circle, 1 for the Gaussian).
    """
    # Fix for Python 3 / modern NumPy: the original used `n / 2` directly,
    # which is a float on Python 3 and therefore invalid as a slice bound
    # or shape argument.  Also draw the Gaussian halves as 1-D vectors:
    # modern NumPy refuses to assign a (1, k)-shaped array into a (k,)
    # slice.  Integer division keeps the original Python 2 semantics.
    half = n // 2
    x = np.zeros((2, n), dtype='float')
    x[0, :half] = 5 * np.cos(np.linspace(0, 2 * np.pi, half))
    x[0, half:] = np.random.randn(n - half)
    x[1, :half] = 5 * np.sin(np.linspace(0, 2 * np.pi, half))
    x[1, half:] = np.random.randn(n - half)
    # Jitter every point with Gaussian noise.
    x = x + 0.1 * np.random.randn(2, n)
    ytrue = np.zeros(n, dtype='float')
    ytrue[half:] = np.ones(n - half, dtype='float')
    # Centralization.
    meanVect = np.array([np.mean(x[0, :]), np.mean(x[1, :])])
    x = x - meanVect.reshape(-1, 1)
    # Normalization.
    stdVect = np.array([np.std(x[0, :]), np.std(x[1, :])])
    x = x / stdVect.reshape(-1, 1)
    return x, ytrue
def writeDataset(fname, x, y, n):
    """Write the first ``n`` points to a feature vector file.

    Each output line has the form ``<label> 1:<x0> 2:<x1>``, where the
    class label is mapped from {0, 1} to {-1, +1}.
    """
    with open(fname, 'w') as out:
        for i in range(n):
            label = 2 * y[i] - 1
            #label = y[i] + 1
            out.write("%d 1:%f 2:%f\n" % (label, x[0, i], x[1, i]))
def main():
    """Generate one of the five synthetic datasets, plot it, and save it."""
    # Fix for Python 3: the original used a Python 2 `print` statement,
    # which is a SyntaxError on Python 3.  `print(...)` with a single
    # argument is valid on both Python 2 and 3.
    dataSet = 5  # 1..5 selects which generator to use
    n = 2000
    if dataSet == 1:
        x, ytrue = twoGaussians(n)
    elif dataSet == 2:
        x, ytrue = fourGaussians(n)
    elif dataSet == 3:
        x, ytrue = spiral(n)
    elif dataSet == 4:
        x, ytrue = highLowDensities(n)
    elif dataSet == 5:
        x, ytrue = circleAndGaussian(n)
    print("Plotting...")
    # One marker/color style per class label.
    symbols = ['bo', 'rx', 'g*', 'ks']
    for i in range(0, n):
        plt.plot(x[:, i][0], x[:, i][1], symbols[int(ytrue[i])])
    plt.show()
    writeDataset("circleAndGuassian", x, ytrue, n)


if __name__ == "__main__":
    main()
| bsd-3-clause |
neilpanchal/iPython-Notebook-Profile | profile_neil/ipython_notebook_config.py | 1 | 19985 | # Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# The cluster manager class to use.
# c.NotebookApp.cluster_manager_class = <class 'IPython.html.services.clusters.clustermanager.ClusterManager'>
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = ''
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'IPython.html.services.contents.filemanager.FileContentsManager'>
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = ''
# The IPython profile to use.
# c.NotebookApp.profile = 'default'
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = ''
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'IPython.html.auth.logout.LogoutHandler'>
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'IPython.html.services.kernels.kernelmanager.MappingKernelManager'>
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'IPython.html.services.config.manager.ConfigManager'>
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'IPython.html.auth.login.LoginHandler'>
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'IPython.html.services.sessions.sessionmanager.SessionManager'>
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
# The kernel spec manager class to use. Should be a subclass of
# `IPython.kernel.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'IPython.kernel.kernelspec.KernelSpecManager'>
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
#
# c.NotebookApp.file_to_run = ''
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from IPython.html.templates.
# c.NotebookApp.extra_template_paths = []
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = ''
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = []
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
#
# c.KernelManager.transport = 'tcp'
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Debug output in the Session
# c.Session.debug = False
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Username for the Session. Default is your system username.
# c.Session.username = 'Neil'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# execution key, for signing messages.
# c.Session.key = b''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# path to file containing execution key.
# c.Session.keyfile = ''
# The UUID identifying this session.
# c.Session.session = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
#
# c.MappingKernelManager.root_dir = ''
# The name of the default kernel to start
# c.MappingKernelManager.default_kernel_name = 'python3'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
#
# c.ContentsManager.checkpoints = None
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'>
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#
# c.ContentsManager.checkpoints_kwargs = {}
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# FileContentsManager will inherit config from: ContentsManager
#
# c.FileContentsManager.root_dir = ''
# The base name used when creating untitled files.
# c.FileContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled directories.
# c.FileContentsManager.untitled_directory = 'Untitled Folder'
#
# c.FileContentsManager.checkpoints = None
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.FileContentsManager.pre_save_hook = None
#
# c.FileContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'>
# Glob patterns to hide in file and directory listings.
# c.FileContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# The base name used when creating untitled notebooks.
# c.FileContentsManager.untitled_notebook = 'Untitled'
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.checkpoints_kwargs = {}
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The sqlite file in which to store notebook signatures. By default, this will
# be in your IPython profile. You can set it to ':memory:' to disable sqlite
# writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = set()
| mit |
Alan-Robertson/python-qinfer | src/qinfer/examples/rebit_plotting_example.py | 1 | 12182 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# rebit_plotting_example.py: rebit tomography illustration module
##
# © 2013 Chris Ferrie (csferrie@gmail.com) and
# Christopher E. Granade (cgranade@gmail.com)
#
# This file is a part of the Qinfer project.
# Licensed under the AGPL version 3.
##
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## DOCUMENTATION ###############################################################
"""
Usage: rebit_tomography_example.py [options]
-h, --help Prints this help and returns.
-n NP, --n_particles=NP Specifies how many particles to use in the SMC
approximation. [default: 5000]
-e NE, --n_exp=NE Specifies how many measurements are to be made.
[default: 100]
-a ALGO, --algorithm=ALGO Specifies which algorithm to use; currently 'SMC'
and 'SMC-ABC' are supported. [default: SMC]
-r ALGO, --resampler=ALGO Specifies which resampling algorithm to use;
currently 'LW', 'DBSCAN-LW' and 'WDBSCAN-LW' are
supported. [default: LW]
--lw-a=A Parameter ``a`` of the LW resampling algorithm.
[default: 0.98]
--dbscan-eps=EPS Epsilon parameter for the DBSCAN-based resamplers.
[default: 0.5]
--dbscan-minparticles=N Minimum number of particles allowed in a cluster by
the DBSCAN-based resamplers. [default: 5]
--wdbscan-pow=POW Power by which the weight is to be raised in the
WDBSCAN weighting step. [default: 0.5]
step. [default: 10000]
--no-plot Suppresses plotting when passed.
-v, --verbose Prints additional debugging information.
"""
## FEATURES ####################################################################
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
## IMPORTS #####################################################################
from builtins import range
import numpy as np
import matplotlib.pyplot as plt
import time
import numpy.linalg as la
from scipy.spatial import Delaunay
import sys
## Imports from within QInfer. ##
from .. import tomography, smc
from ..resamplers import LiuWestResampler, ClusteringResampler
from ..utils import mvee, uniquify
## External libraries bundled with QInfer. ##
from .._lib import docopt
## CLASSES #####################################################################
class HilbertSchmidtUniform(object):
    """
    Prior that samples rebit Bloch-vector components ``[x, y]`` by drawing a
    full qubit density matrix from the Hilbert-Schmidt uniform induced
    measure on state space of dimension ``dim``.
    See e.g. [Mez06]_ and [Mis12]_.
    """
    def __init__(self):
        # Fixed to qubits; the Pauli expectation values taken in ``sample``
        # assume dim == 2.
        self.dim = 2
    def sample(self):
        """Return one sampled rebit state as ``np.array([x, y])``."""
        # Haar-random unitary from the QR decomposition of a complex
        # Ginibre matrix (see e.g. http://arxiv.org/abs/math-ph/0609050v2).
        ginibre = (np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim))/np.sqrt(2.0)
        q, r = la.qr(ginibre)
        phases = np.diag(r)
        phases = phases/np.abs(phases)
        haar_unitary = np.dot(q, np.diag(phases))
        # Random complex matrix; its Wishart part z z^dagger fixes the
        # eigenvalue distribution of the sampled state.
        gaussian = np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim)
        shift = np.identity(self.dim) + haar_unitary
        rho = np.dot(np.dot(shift, np.dot(gaussian, gaussian.conj().transpose())), shift.conj().transpose())
        rho = rho/np.trace(rho)
        # TODO: generalize to Heisenberg-Weyl groups.
        # Pauli expectation values give the rebit coordinates.
        pauli_y = np.array([[0,-1j],[1j,0]])
        pauli_x = np.array([[0,1],[1,0]])
        y = np.real(np.trace(np.dot(rho, pauli_y)))
        x = np.real(np.trace(np.dot(rho, pauli_x)))
        return np.array([x,y])
## SCRIPT ######################################################################
if __name__ == "__main__":
    # Handle command-line arguments using docopt.
    args = docopt.docopt(__doc__, sys.argv[1:])
    N_PARTICLES = int(args['--n_particles'])
    n_exp = int(args['--n_exp'])
    resamp_algo = args['--resampler']
    verbose = bool(args['--verbose'])
    lw_a = float(args['--lw-a'])
    dbscan_eps = float(args['--dbscan-eps'])
    dbscan_min = float(args['--dbscan-minparticles'])
    wdbscan_pow = float(args['--wdbscan-pow'])
    do_plot = not bool(args['--no-plot'])
    # Model and prior initialization: rebit states drawn from the
    # Hilbert-Schmidt uniform prior, measured with Pauli X / Y experiments.
    prior = HilbertSchmidtUniform()
    model = tomography.RebitStatePauliModel()
    expparams = np.array([
        ([1, 0], 1),  # Records are indicated by tuples.
        ([0, 1], 1)
    ], dtype=model.expparams_dtype)
    # Resampler initialization.
    lw_args = {"a": lw_a}
    dbscan_args = {
        "eps": dbscan_eps,
        "min_particles": dbscan_min,
        "w_pow": wdbscan_pow
    }
    if resamp_algo == 'LW':
        resampler = LiuWestResampler(**lw_args)
    elif resamp_algo == 'DBSCAN-LW':
        resampler = ClusteringResampler(
            secondary_resampler=LiuWestResampler(**lw_args),
            weighted=False, quiet=not verbose, **dbscan_args
        )
    elif resamp_algo == 'WDBSCAN-LW':
        print("[WARN] The WDBSCAN-LW resampling algorithm is currently experimental, and may not work properly.")
        resampler = ClusteringResampler(
            secondary_resampler=LiuWestResampler(**lw_args),
            weighted=True, quiet=not verbose, **dbscan_args
        )
    else:
        raise ValueError('Must specify a valid resampler.')
    updater = smc.SMCUpdater(model, N_PARTICLES, prior, resampler=resampler)
    # Sample true set of modelparams.
    truemp = np.array([prior.sample()])
    # Plot true state and prior (particle cloud, true state in red,
    # current estimate in cyan, with the unit circle for reference).
    if do_plot:
        fig = plt.figure()
        ax = plt.gca()
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_ylim(-1,1)
        ax.set_xlim(-1,1)
        ax.set_aspect('equal')
        u = np.linspace(0,2*np.pi,100)
        x = np.cos(u)
        y = np.sin(u)
        plt.plot(x,y)
        particles = updater.particle_locations
        plt.scatter(particles[:, 0], particles[:, 1], s=10)
        plt.scatter(truemp[:, 0], truemp[:, 1], c='red', s=50)
        est_mean = updater.est_mean()
        plt.scatter(est_mean[0], est_mean[1], c='cyan', s=50)
    # Record the start time.
    tic = time.time()
    # Get all Bayesian up in here.
    for idx_exp in range(n_exp):
        # Randomly choose one of the two experiments from expparams and make
        # an array containing just that experiment.
        thisexp = expparams[np.newaxis, np.random.randint(0, 2)]
        # Simulate an experiment according to the chosen expparams.
        outcome = model.simulate_experiment(truemp, thisexp)
        # Feed the data to the SMC particle updater.
        updater.update(outcome, thisexp)
    # Record how long it took us.
    toc = time.time() - tic
    # Print out summary statistics.
    print("True param: {}".format(truemp))
    print("Est. mean: {}".format(updater.est_mean()))
    print("Est. cov: {}".format(updater.est_covariance_mtx()))
    print("Error: {}".format(np.sum(np.abs(truemp[0]-updater.est_mean())**2)))
    print("Trace Cov: {}".format(np.trace(updater.est_covariance_mtx())))
    print("Resample count: {}".format(updater.resample_count))
    print("Elapsed time: {}".format(toc))
    est_mean = updater.est_mean()
    if do_plot:
        # Posterior particle cloud; marker size scales with particle weight.
        fig = plt.figure()
        ax = plt.gca()
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_ylim(-1,1)
        ax.set_xlim(-1,1)
        ax.set_aspect('equal')
        particles = updater.particle_locations
        weights = updater.particle_weights
        maxweight = np.max(weights)
        u = np.linspace(0,2*np.pi,100)
        x = np.cos(u)
        y = np.sin(u)
        plt.plot(x,y)
        plt.scatter(
            particles[:, 0], particles[:, 1],
            s=20 * (1 + (weights - 1 / N_PARTICLES) * N_PARTICLES)
        )
        temp = thisexp['axis'][0]*(-1)**outcome
        #plt.scatter(temp[0], temp[1], c='green', s=50)
        plt.scatter(truemp[:, 0], truemp[:, 1], c='red', s=50)
        plt.scatter(est_mean[0], est_mean[1], c='cyan', s=50)
        # 95% credible region: take the convex hull of the credible
        # particles and order its vertices by angle about the centroid so
        # they can be drawn as a closed polygon.
        points = updater.est_credible_region(level = 0.95)
        tri = Delaunay(points)
        faces = []
        hull = tri.convex_hull
        for ia, ib in hull:
            faces.append(points[[ia, ib]])
        vertices = points[uniquify(hull.flatten())]
        temp = vertices - np.mean(vertices, 0)
        idx_srt = np.argsort(np.arctan2(temp[:, 1], temp[:, 0]))
        # Repeat the first vertex to close the polygon when plotted.
        idx_srt = np.append(idx_srt,idx_srt[0])
        # Posterior cloud with the credible-region hull overlaid.
        fig = plt.figure()
        ax = plt.gca()
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_ylim(-1,1)
        ax.set_xlim(-1,1)
        ax.set_aspect('equal')
        particles = updater.particle_locations
        weights = updater.particle_weights
        maxweight = np.max(weights)
        u = np.linspace(0,2*np.pi,100)
        x = np.cos(u)
        y = np.sin(u)
        plt.plot(x,y)
        plt.scatter(
            particles[:, 0], particles[:, 1],
            s=20 * (1 + (weights - 1 / N_PARTICLES) * N_PARTICLES)
        )
        plt.scatter(truemp[:, 0], truemp[:, 1], c='red', s=50)
        plt.scatter(est_mean[0], est_mean[1], c='cyan', s=25)
        x = vertices[:,0][idx_srt]
        y = vertices[:,1][idx_srt]
        plt.plot(x,y)
        # Same plot again, now adding the MVEE and covariance ellipses.
        fig = plt.figure()
        ax = plt.gca()
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_ylim(-1,1)
        ax.set_xlim(-1,1)
        ax.set_aspect('equal')
        particles = updater.particle_locations
        weights = updater.particle_weights
        maxweight = np.max(weights)
        u = np.linspace(0,2*np.pi,100)
        x = np.cos(u)
        y = np.sin(u)
        plt.plot(x,y)
        plt.scatter(
            particles[:, 0], particles[:, 1],
            s=20 * (1 + (weights - 1 / N_PARTICLES) * N_PARTICLES)
        )
        plt.scatter(truemp[:, 0], truemp[:, 1], c='red', s=50)
        plt.scatter(est_mean[0], est_mean[1], c='cyan', s=25)
        x = vertices[:,0][idx_srt]
        y = vertices[:,1][idx_srt]
        plt.plot(x,y)
        # Minimum-volume enclosing ellipse of the hull vertices; axes come
        # from the SVD of the ellipse matrix A.
        A, centroid = mvee(vertices, 0.001)
        # Plot mvee ellipse.
        U, D, V = la.svd(A)
        rx, ry = [1 / np.sqrt(d) for d in D]
        u = np.linspace(0,(2 * np.pi),100)
        x = rx * np.cos(u)
        y = ry * np.sin(u)
        for idx in range(x.shape[0]):
            x[idx], y[idx] = \
                np.dot(
                    np.transpose(V),
                    np.array([x[idx],y[idx]])
                ) + centroid
        plt.plot(x,y)
        # Plot covariance ellipse.
        U, D, V = la.svd(la.inv(updater.est_covariance_mtx()))
        rx, ry = [np.sqrt(6/d) for d in D]
        u = np.linspace(0,(2 * np.pi),100)
        x = rx * np.cos(u)
        y = ry * np.sin(u)
        for idx in range(x.shape[0]):
            x[idx], y[idx] = \
                np.dot(
                    np.transpose(V),
                    np.array([x[idx],y[idx]])
                ) + updater.est_mean()
        plt.plot(x,y)
        plt.show()
| agpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/exceptions.py | 50 | 5276 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
# Public names exported by ``from sklearn.exceptions import *``.
__all__ = ['NotFittedError',
           'ChangedBehaviorWarning',
           'ConvergenceWarning',
           'DataConversionWarning',
           'DataDimensionalityWarning',
           'EfficiencyWarning',
           'FitFailedWarning',
           'NonBLASDotWarning',
           'SkipTestWarning',
           'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
    """Exception raised when an estimator is used before it has been fitted.

    Derives from both ``ValueError`` and ``AttributeError`` so that existing
    code catching either of those exceptions keeps working (backward
    compatibility).

    Examples
    --------
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import NotFittedError
    >>> try:
    ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
    ... except NotFittedError as e:
    ...     print(repr(e))
    ...                        # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    NotFittedError('This LinearSVC instance is not fitted yet',)

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation.
    """
class ChangedBehaviorWarning(UserWarning):
    """Warning issued to inform the user that some behavior has changed.

    .. versionchanged:: 0.18
       Moved from sklearn.base.
    """
class ConvergenceWarning(UserWarning):
    """Warning used to report convergence problems in iterative estimators.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.
    """
class DataConversionWarning(UserWarning):
    """Warning raised when input data is implicitly converted.

    Emitted when input must be converted or reinterpreted in a way the
    caller may not expect, for instance when:

    - an integer array is passed where float input is expected and gets
      converted;
    - a non-copying operation was requested, but a copy is required to meet
      the implementation's data-type expectations;
    - the shape of the input can be interpreted ambiguously.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation.
    """
class DataDimensionalityWarning(UserWarning):
    """Warning flagging potential issues with data dimensionality.

    For instance, random projection raises this warning when the requested
    number of components (the dimensionality of the target space) exceeds
    the number of features (the dimensionality of the source space), meaning
    the projection will not reduce the dimensionality of the problem.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.
    """
class EfficiencyWarning(UserWarning):
    """Warning notifying the user of an inefficient computation.

    Signals that the computation may be slower than optimal for some reason,
    which can be stated in the warning message. May be subclassed into more
    specific warning classes.

    .. versionadded:: 0.18
    """
class FitFailedWarning(RuntimeWarning):
    """Warning emitted when fitting an estimator raises an error.

    Used by the meta-estimators ``GridSearchCV`` and ``RandomizedSearchCV``,
    and by the cross-validation helper ``cross_val_score``, to report that an
    estimator's ``fit`` raised an error (with the score replaced by
    ``error_score`` rather than the exception propagating).

    Examples
    --------
    >>> from sklearn.model_selection import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import FitFailedWarning
    >>> import warnings
    >>> warnings.simplefilter('always', FitFailedWarning)
    >>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
    >>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
    >>> with warnings.catch_warnings(record=True) as w:
    ...     try:
    ...         gs.fit(X, y)  # This will raise a ValueError since C is < 0
    ...     except ValueError:
    ...         pass
    ...     print(repr(w[-1].message))
    ...     # doctest: +NORMALIZE_WHITESPACE
    FitFailedWarning("Classifier fit failed. The score on this train-test
    partition for these parameters will be set to 0.000000. Details:
    \\nValueError('Penalty term must be positive; got (C=-2)',)",)

    .. versionchanged:: 0.18
       Moved from sklearn.cross_validation.
    """
class NonBLASDotWarning(EfficiencyWarning):
    """Warning raised when a dot product could not be dispatched to BLAS.

    Tells the user that BLAS was not used for the dot operation, so
    efficiency may suffer.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation, extends EfficiencyWarning.
    """
class SkipTestWarning(UserWarning):
    """Warning notifying the user that a test was skipped.

    For example, an estimator check that requires pandas is skipped (rather
    than registered as a failure) when the pandas package cannot be
    imported.
    """
class UndefinedMetricWarning(UserWarning):
    """Warning raised when a requested metric is invalid for the input.

    .. versionchanged:: 0.18
       Moved from sklearn.base.
    """
| mit |
ishanic/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5

###############################################################################
# Compute clustering with no connectivity constraint: merges are driven by
# Ward distance alone, so clusters may span different folds of the roll.
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    # Fixed: use the builtin ``float`` instead of ``np.float``. ``np.float``
    # was a deprecated alias for the builtin (removed in NumPy 1.20+), and
    # the structured plot below already uses the builtin, so this also makes
    # the two plotting loops consistent.
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)

###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)

###############################################################################
# Compute clustering again, restricted to the k-NN graph: hierarchical
# clustering with a structure prior.
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
                               linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 26 | 26904 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
# Public names exported by ``from numpy.lib.twodim_base import *``.
__all__ = [
    'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
    'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
    'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
    """
    Flip array in the left/right direction.

    Reverses the order of the columns (second axis); rows keep their
    positions. A view is returned, so the operation is
    :math:`\\mathcal O(1)`.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.

    Raises
    ------
    ValueError
        If `m` has fewer than two dimensions.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``A[:, ::-1]``.

    Examples
    --------
    >>> np.fliplr(np.diag([1., 2., 3.]))
    array([[ 0.,  0.,  1.],
           [ 0.,  2.,  0.],
           [ 3.,  0.,  0.]])
    """
    m = asanyarray(m)
    if m.ndim < 2:
        raise ValueError("Input must be >= 2-d.")
    # Reversing via a slice produces a view, not a copy.
    return m[:, ::-1]
def flipud(m):
    """
    Flip array in the up/down direction.

    Reverses the order of the rows (the first axis); entries within each
    row keep their positions.

    Parameters
    ----------
    m : array_like
        Input array, at least 1-D.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.  Since only a view is
        created, the operation is O(1).

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``A[::-1, ...]``; the array does not need to be
    two-dimensional.
    """
    arr = asanyarray(m)
    if arr.ndim < 1:
        raise ValueError("Input must be >= 1-d.")
    # Negative-stride view along axis 0; no data is copied.
    return arr[::-1, ...]
def rot90(m, k=1):
    """
    Rotate an array by 90 degrees in the counter-clockwise direction.

    The rotation happens in the plane of the first two axes, so the input
    must be at least 2-D.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of 90-degree counter-clockwise rotations to apply.

    Returns
    -------
    y : ndarray
        Rotated array.

    See Also
    --------
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must >= 2-d.")
    turns = k % 4
    if turns == 0:
        return arr
    if turns == 1:
        # Quarter turn: mirror the columns, then transpose.
        return fliplr(arr).swapaxes(0, 1)
    if turns == 2:
        # Half turn: mirror both axes.
        return fliplr(flipud(arr))
    # Three quarter turns: transpose, then mirror the columns.
    return fliplr(arr.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output.  If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) is the main diagonal, a
        positive value selects an upper diagonal and a negative value a
        lower one.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        All-zero array except for the `k`-th diagonal, which is set to one.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.
    """
    if M is None:
        M = N
    out = zeros((N, M), dtype=dtype)
    if k >= M:
        # Requested diagonal lies entirely outside the array.
        return out
    # Row indices whose diagonal entry (row, row + k) is in bounds.
    rows = arange(max(0, -k), min(N, M - k))
    out[rows, rows + k] = 1
    return out
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    See the more detailed documentation for ``numpy.diagonal`` if you use
    this function to extract a diagonal and wish to write to the resulting
    array; whether it returns a copy or a view depends on what version of
    numpy you are using.

    Parameters
    ----------
    v : array_like
        If `v` is a 2-D array, return a copy of its `k`-th diagonal.
        If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
        diagonal.
    k : int, optional
        Diagonal in question.  The default is 0.  Use `k>0` for diagonals
        above the main diagonal, and `k<0` for diagonals below it.

    Returns
    -------
    out : ndarray
        The extracted diagonal or constructed diagonal array.

    See Also
    --------
    diagonal, diagflat, trace, triu, tril
    """
    v = asanyarray(v)
    if v.ndim == 1:
        # Construction path: embed `v` on the k-th diagonal of a square
        # matrix that is just large enough to hold it.
        size = v.shape[0] + abs(k)
        out = zeros((size, size), v.dtype)
        idx = arange(v.shape[0])
        if k >= 0:
            out[idx, idx + k] = v
        else:
            out[idx - k, idx] = v
        return out
    elif v.ndim == 2:
        # Extraction path: defer to `diagonal`.
        return diagonal(v, k)
    else:
        raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
    """
    Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data, which is flattened and set as the `k`-th diagonal of
        the output.
    k : int, optional
        Diagonal to set; 0, the default, corresponds to the "main"
        diagonal, a positive (negative) `k` giving the number of the
        diagonal above (below) the main.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
    diagonal : Return specified diagonals.
    trace : Sum along diagonals.
    """
    # Remember the subclass wrapper (e.g. np.matrix) before flattening,
    # so the result can be re-wrapped at the end.
    try:
        wrap = v.__array_wrap__
    except AttributeError:
        wrap = None
    v = asarray(v).ravel()
    size = len(v) + abs(k)
    out = zeros((size, size), v.dtype)
    idx = arange(len(v))
    if k >= 0:
        out[idx, idx + k] = v
    else:
        out[idx - k, idx] = v
    return wrap(out) if wrap else out
def tri(N, M=None, k=0, dtype=float):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array.  By default, `M` equals `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.  ``k = 0``
        is the main diagonal, ``k < 0`` is below it and ``k > 0`` is above.
    dtype : dtype, optional
        Data type of the returned array.  The default is float.

    Returns
    -------
    tri : ndarray of shape (N, M)
        Array whose lower triangle is filled with ones and which is zero
        elsewhere; in other words ``T[i,j] == 1`` for ``i <= j + k``.
    """
    if M is None:
        M = N
    # Element (i, j) is set when i >= j - k.  Broadcast the comparison,
    # using the smallest integer dtypes able to hold each range.
    rows = arange(N, dtype=_min_int(0, N))
    cols = arange(-k, M - k, dtype=_min_int(-k, M - k))
    mask = rows[:, None] >= cols
    # The comparison yields bool already; convert only if another dtype
    # was requested (astype with copy=False avoids a redundant copy).
    return mask.astype(dtype, copy=False)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal
    zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements.  ``k = 0`` (the default) is
        the main diagonal, ``k < 0`` is below it and ``k > 0`` is above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    m = asanyarray(m)
    shape = m.shape[-2:]
    nrows = shape[0]
    ncols = shape[-1]
    # Boolean lower-triangle mask; same construction tri(...) would use.
    keep = greater_equal.outer(arange(nrows), arange(-k, ncols - k))
    return where(keep, m, zeros(1, m.dtype))
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th diagonal
    zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array
    """
    m = asanyarray(m)
    shape = m.shape[-2:]
    nrows = shape[0]
    ncols = shape[-1]
    # Entries strictly below the k-th diagonal (i >= j - k + 1) get zeroed.
    drop = greater_equal.outer(arange(nrows),
                               arange(-(k - 1), ncols - (k - 1)))
    return where(drop, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
    """
    Generate a Vandermonde matrix.

    The columns of the output matrix are powers of the input vector.  The
    order of the powers is determined by the `increasing` boolean
    argument: when False (default), the `i`-th output column is the input
    raised element-wise to the power ``N - i - 1``.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns in the output.  If `N` is not specified, a
        square array is returned (``N = len(x)``).
    increasing : bool, optional
        Order of the powers of the columns.  If True, the powers increase
        from left to right, if False (the default) they are reversed.

        .. versionadded:: 1.9.0

    Returns
    -------
    out : ndarray
        Vandermonde matrix.  If `increasing` is False, the first column is
        ``x^(N-1)``; if True, the columns are ``x^0, x^1, ..., x^(N-1)``.

    See Also
    --------
    polynomial.polynomial.polyvander
    """
    x = asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    num_cols = len(x) if N is None else N
    out = empty((len(x), num_cols), dtype=promote_types(x.dtype, int))
    # Work through a view whose columns are ordered by ascending power,
    # so the same filling code handles both orderings.
    powers = out if increasing else out[:, ::-1]
    if num_cols > 0:
        powers[:, 0] = 1
    if num_cols > 1:
        powers[:, 1:] = x[:, None]
        # Row-wise cumulative product turns x, x, x, ... into
        # x, x**2, x**3, ... exactly as repeated multiplication would.
        multiply.accumulate(powers[:, 1:], out=powers[:, 1:], axis=1)
    return out
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x : array_like, shape (N,)
        An array containing the x coordinates of the points to be
        histogrammed.
    y : array_like, shape (N,)
        An array containing the y coordinates of the points to be
        histogrammed.
    bins : int or array_like or [int, int] or [array, array], optional
        The bin specification: a single int (``nx = ny = bins``), a single
        array of edges used for both dimensions, a pair of ints, a pair of
        edge arrays, or a mixed pair.
    range : array_like, shape(2,2), optional
        The leftmost and rightmost edges of the bins along each dimension
        (if not specified explicitly in `bins`):
        ``[[xmin, xmax], [ymin, ymax]]``.  Values outside this range are
        considered outliers and not tallied.
    normed : bool, optional
        If False, returns the number of samples in each bin.  If True,
        returns the bin density ``bin_count / sample_count / bin_area``.
    weights : array_like, shape(N,), optional
        An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.

    Returns
    -------
    H : ndarray, shape(nx, ny)
        The bi-dimensional histogram:  `x` values are histogrammed along
        the first dimension and `y` values along the second.
    xedges : ndarray, shape(nx,)
        The bin edges along the first dimension.
    yedges : ndarray, shape(ny,)
        The bin edges along the second dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram

    Notes
    -----
    Note that `x` is histogrammed along the first (vertical) dimension of
    the array and `y` along the second (horizontal) one, for compatibility
    with `histogramdd` rather than the Cartesian plotting convention.
    """
    from numpy import histogramdd
    # A sequence of length 1 or 2 is already a valid per-dimension bin
    # spec for histogramdd; anything else sequence-like must be a single
    # array of edges, to be shared by both dimensions.
    try:
        n_specs = len(bins)
    except TypeError:
        # A bare int: histogramdd applies it to both dimensions itself.
        n_specs = 1
    if n_specs not in (1, 2):
        xedges = yedges = asarray(bins, float)
        bins = [xedges, yedges]
    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    Assume `mask_func` is a function that, for a square array `a` of size
    ``(n, n)`` with a possible offset argument `k`, when called as
    ``mask_func(a, k)`` returns a new array with zeros in certain
    locations (functions like `triu` or `tril` do precisely this).  This
    function returns the indices where the non-zero values would be.

    Parameters
    ----------
    n : int
        The returned indices will be valid for arrays of shape (n, n).
    mask_func : callable
        A function with a call signature similar to `triu` / `tril`:
        ``mask_func(x, k)`` returns an array shaped like `x`, with `k` an
        optional offset argument.
    k : scalar
        Passed through to `mask_func`.

    Returns
    -------
    indices : tuple of arrays.
        The `n` arrays of indices corresponding to the locations where
        ``mask_func(np.ones((n, n)), k)`` is True.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    # Run the masking function over an all-ones template and report the
    # positions it left non-zero.
    template = ones((n, n), int)
    return where(mask_func(template, k) != 0)
def tril_indices(n, k=0, m=None):
    """
    Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned indices
        will be valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension of the arrays for which the returned arrays
        will be valid.  By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple of arrays
        The indices for the triangle.  The returned tuple contains two
        arrays, each with the indices along one dimension of the array.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    ncols = n if m is None else m
    # Same boolean mask tri(n, ncols, k=k, dtype=bool) would produce.
    mask = greater_equal.outer(arange(n), arange(-k, ncols - k))
    return where(mask)
def tril_indices_from(arr, k=0):
    """
    Return the indices for the lower-triangle of arr.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        The indices will be valid for arrays whose dimensions are the
        same as arr.
    k : int, optional
        Diagonal offset (see `tril` for details).

    See Also
    --------
    tril_indices, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    nrows, ncols = arr.shape
    return tril_indices(nrows, k=k, m=ncols)
def triu_indices(n, k=0, m=None):
    """
    Return the indices for the upper-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The size of the arrays for which the returned indices will be
        valid.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension of the arrays for which the returned arrays
        will be valid.  By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple, shape(2) of ndarrays, shape(`n`)
        The indices for the triangle.  The returned tuple contains two
        arrays, each with the indices along one dimension of the array.
        Can be used to slice a ndarray of shape(`n`, `n`).

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    ncols = n if m is None else m
    # Complement of the mask tri(n, ncols, k=k-1, dtype=bool) produces.
    mask = greater_equal.outer(arange(n), arange(-(k - 1), ncols - (k - 1)))
    return where(~mask)
def triu_indices_from(arr, k=0):
    """
    Return the indices for the upper-triangle of arr.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, N)
        The indices will be valid for square arrays.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
        Indices for the upper-triangle of `arr`.

    See Also
    --------
    triu_indices, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    nrows, ncols = arr.shape
    return triu_indices(nrows, k=k, m=ncols)
| gpl-2.0 |
mantidproject/mantid | qt/python/mantidqt/widgets/plotconfigdialog/test/test_plotconfigdialogpresenter.py | 3 | 12339 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import unittest
from unittest.mock import Mock, call, patch
from matplotlib import use as mpl_use
mpl_use('Agg') # noqa
from matplotlib.pyplot import figure, subplots
from mantidqt.widgets.plotconfigdialog.presenter import PlotConfigDialogPresenter
PRESENTER_REF = 'mantidqt.widgets.plotconfigdialog.presenter.'
class PlotConfigDialogPresenterTest(unittest.TestCase):
    """Tests for PlotConfigDialogPresenter's tab creation and dispatch.

    Every tab-widget presenter class is patched with a Mock, so these
    tests only exercise which tabs the dialog presenter decides to build
    for a given figure, and how it forwards calls to them.
    """
    def setUp(self):
        """Patch each tab-widget presenter class with a fresh Mock."""
        self.axes_patch = patch(PRESENTER_REF + 'AxesTabWidgetPresenter',
                                new=Mock())
        self.axes_mock = self.axes_patch.start()
        self.curves_patch = patch(PRESENTER_REF + 'CurvesTabWidgetPresenter',
                                  new=Mock())
        self.curves_mock = self.curves_patch.start()
        self.images_patch = patch(PRESENTER_REF + 'ImagesTabWidgetPresenter',
                                  new=Mock())
        self.images_mock = self.images_patch.start()
        self.legend_patch = patch(PRESENTER_REF + 'LegendTabWidgetPresenter',
                                  new=Mock())
        self.legend_mock = self.legend_patch.start()
    def tearDown(self):
        """Stop all patches started in setUp."""
        self.axes_patch.stop()
        self.curves_patch.stop()
        self.images_patch.stop()
        self.legend_patch.stop()
    def assert_called_x_times_with(self, x, call_args, mock):
        """Assert `mock` was called `x` times, once per entry in
        `call_args`, comparing only each call's first positional arg."""
        self.assertEqual(x, mock.call_count)
        self.assertEqual(call_args, [arg[0][0] for arg in mock.call_args_list])
    def test_correct_tabs_present_axes_only(self):
        """A bare axes figure gets only the Axes tab."""
        fig = figure()
        fig.add_subplot(111)
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [None, self.axes_mock.return_value, None, None]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        mock_view.add_tab_widget.assert_called_once_with(
            (self.axes_mock.return_value.view, 'Axes'))
    def test_correct_tabs_present_axes_and_curve_no_errors(self):
        """A plain line plot gets Axes and Curves tabs."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot([0], [0])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [None, self.axes_mock.return_value,
                                   self.curves_mock.return_value, None]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        expected_call_args = [(self.axes_mock.return_value.view, 'Axes'),
                              (self.curves_mock.return_value.view, 'Curves')]
        self.assert_called_x_times_with(2, expected_call_args,
                                        mock_view.add_tab_widget)
    def test_correct_tabs_present_axes_and_curve_with_errors(self):
        """An errorbar plot also gets Axes and Curves tabs."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.errorbar([0], [0], yerr=[1])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [None, self.axes_mock.return_value,
                                   self.curves_mock.return_value, None]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        expected_call_args = [(self.axes_mock.return_value.view, 'Axes'),
                              (self.curves_mock.return_value.view, 'Curves')]
        self.assert_called_x_times_with(2, expected_call_args,
                                        mock_view.add_tab_widget)
    def test_correct_tabs_present_axes_and_image_colormesh(self):
        """A pcolormesh plot gets Axes and Images tabs."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.pcolormesh([[0, 1], [1, 0]])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [None, self.axes_mock.return_value,
                                   None, self.images_mock.return_value]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        expected_call_args = [(self.axes_mock.return_value.view, 'Axes'),
                              (self.images_mock.return_value.view, 'Images')]
        self.assert_called_x_times_with(2, expected_call_args,
                                        mock_view.add_tab_widget)
    def test_correct_tabs_present_axes_and_image_imshow(self):
        """An imshow plot gets Axes and Images tabs."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.imshow([[0, 1], [1, 0]])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [None, self.axes_mock.return_value,
                                   None, self.images_mock.return_value]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        expected_call_args = [(self.axes_mock.return_value.view, 'Axes'),
                              (self.images_mock.return_value.view, 'Images')]
        self.assert_called_x_times_with(2, expected_call_args,
                                        mock_view.add_tab_widget)
    def test_correct_tabs_present_axes_curves_and_image(self):
        """A figure mixing an image and a curve gets all three tabs."""
        fig = figure()
        ax = fig.add_subplot(211)
        ax.imshow([[0, 1], [1, 0]])
        ax1 = fig.add_subplot(212)
        ax1.errorbar([0], [0], yerr=[1])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [None, self.axes_mock.return_value,
                                   self.curves_mock.return_value,
                                   self.images_mock.return_value]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        expected_call_args = [(self.axes_mock.return_value.view, 'Axes'),
                              (self.curves_mock.return_value.view, 'Curves'),
                              (self.images_mock.return_value.view, 'Images')]
        self.assert_called_x_times_with(3, expected_call_args,
                                        mock_view.add_tab_widget)
    def test_correct_tabs_present_axes_curves_and_legend(self):
        """A labelled legend adds the Legend tab alongside Axes/Curves."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot([0])
        ax.legend(['Label'])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [self.legend_mock.return_value,
                                   self.axes_mock.return_value,
                                   self.curves_mock.return_value, None]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        expected_call_args = [(self.axes_mock.return_value.view, 'Axes'),
                              (self.curves_mock.return_value.view, 'Curves'),
                              (self.legend_mock.return_value.view, 'Legend')]
        self.assert_called_x_times_with(3, expected_call_args,
                                        mock_view.add_tab_widget)
    def test_correct_tabs_present_axes_and_curve_legend_has_no_text(self):
        """A legend without any label text does not create a Legend tab."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot([0], [0])
        ax.legend()
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        expected_presenter_list = [None, self.axes_mock.return_value,
                                   self.curves_mock.return_value, None]
        self.assertEqual(expected_presenter_list, presenter.tab_widget_presenters)
        expected_call_args = [(self.axes_mock.return_value.view, 'Axes'),
                              (self.curves_mock.return_value.view, 'Curves')]
        self.assert_called_x_times_with(2, expected_call_args,
                                        mock_view.add_tab_widget)
    def test_tabs_present_updated_properties_from_figure_when_apply_clicked(self):
        """apply_properties applies every tab first, then updates views."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot([0], [0])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        # use mock manager to ensure all user properties are applied before view update
        mock_axes_presenter = presenter.tab_widget_presenters[1]
        mock_curves_presenter = presenter.tab_widget_presenters[2]
        mock_manager = Mock()
        mock_manager.attach_mock(mock_axes_presenter, "mock_axes_presenter")
        mock_manager.attach_mock(mock_curves_presenter, "mock_curves_presenter")
        presenter.apply_properties()
        mock_manager.assert_has_calls([
            call.mock_curves_presenter.apply_properties,
            call.mock_axes_presenter.apply_properties,
            call.mock_curves_presenter.update_view,
            call.mock_axes_presenter.update_view
        ])
    def test_forget_tab_from_presenter_sets_presenter_and_view_to_none(self):
        """forget_tab_from_presenter drops the tab's presenter and view."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot([0], [0])
        ax.legend()
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        mock_curves_presenter = presenter.tab_widget_presenters[2]
        mock_curves_view = mock_curves_presenter.view
        self.assertTrue(mock_curves_presenter in presenter.tab_widget_presenters)
        self.assertTrue((mock_curves_view, 'Curves') in presenter.tab_widget_views)
        presenter.forget_tab_from_presenter(mock_curves_presenter)
        self.assertTrue(mock_curves_presenter not in presenter.tab_widget_presenters)
        self.assertTrue((mock_curves_view, 'Curves') not in presenter.tab_widget_views)
    def test_configure_curves_tab_fails_silently_when_curves_tab_not_exists(self):
        """configure_curves_tab is a no-op when no Curves tab was built."""
        fig = figure()
        ax = fig.add_subplot(111)
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        self.assertIsNone(presenter.tab_widget_presenters[2])
        presenter.configure_curves_tab(ax, None)
        mock_view.set_current_tab_widget.assert_not_called()
    def test_configure_curves_tab_fails_silently_when_no_curves_on_axes(self):
        """configure_curves_tab swallows the error for a curve-less axes."""
        fig, (ax0, ax1) = subplots(2, subplot_kw={'projection': 'mantid'})
        ax0.plot([0], [0])  # One axes must have a curve for curves tab to exist
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        mock_curves_presenter = presenter.tab_widget_presenters[2]
        mock_curves_presenter.set_axes_from_object.side_effect = ValueError("Axes object does not exist in curves tab")
        presenter.configure_curves_tab(ax1, None)
        mock_curves_presenter.set_axes_from_object.assert_called()
        mock_view.set_current_tab_widget.assert_not_called()
    def test_configure_curves_tab_fails_silently_when_curve_not_found_in_curves_tab(self):
        """An unknown curve still selects the Curves tab without raising."""
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot([0], [0])  # Must plot curve for curves tab to exist, hence why we dont use this in the call
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        mock_curves_presenter = presenter.tab_widget_presenters[2]
        mock_curves_presenter.set_curve_from_object.side_effect = ValueError("Curve object does not exist in curves tab")
        mock_curves_view, _ = presenter.tab_widget_views[1]
        presenter.configure_curves_tab(ax, Mock())
        mock_curves_presenter.set_axes_from_object.assert_called()
        mock_view.set_current_tab_widget.assert_called_with(mock_curves_view)
        mock_view.set_current_tab_widget.assert_called()
    def test_configure_curves_tab_succeeds_when_curve_and_axes_exist(self):
        """A known axes/curve pair selects and populates the Curves tab."""
        fig = figure()
        ax = fig.add_subplot(111)
        curve = ax.plot([0], [0])
        mock_view = Mock()
        presenter = PlotConfigDialogPresenter(fig, mock_view)
        mock_curves_presenter = presenter.tab_widget_presenters[2]
        mock_curves_view, _ = presenter.tab_widget_views[1]
        presenter.configure_curves_tab(ax, curve)
        mock_curves_presenter.set_axes_from_object.assert_called()
        mock_view.set_current_tab_widget.assert_called_with(mock_curves_view)
        mock_view.set_current_tab_widget.assert_called()
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/tests/json/test_pandas.py | 7 | 42394 | # pylint: disable-msg=W0612,E1101
import nose
from pandas.compat import range, lrange, StringIO, OrderedDict
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
read_json, compat)
from datetime import timedelta
import pandas as pd
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
ensure_clean, assert_index_equal)
import pandas.util.testing as tm
# Shared test fixtures, built once at import time and copied by each
# test's setUp so individual tests cannot corrupt them for each other.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
# Same data as _frame but with every column cast to int64.
_intframe = DataFrame(dict((k, v.astype(np.int64))
                           for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
# Frame carrying a CategoricalIndex (named 'E'), a matching reversed
# category column, and a 'sort' column used to verify ordering round-trips.
_cat_frame = _frame.copy()
cat = ['bah'] * 5 + ['bar'] * 5 + ['baz'] * \
    5 + ['foo'] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')
_mixed_frame = _frame.copy()
class TestPandasContainer(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty_series = Series([], index=[])
self.empty_frame = DataFrame({})
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
def tearDown(self):
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
    def test_frame_double_encoded_labels(self):
        # Labels containing JSON-special characters (quotes, slashes,
        # backslashes) must survive an encode/decode round trip for every
        # orient that serialises them.
        df = DataFrame([['a', 'b'], ['c', 'd']],
                       index=['index " 1', 'index / 2'],
                       columns=['a \\ b', 'y / z'])
        assert_frame_equal(df, read_json(df.to_json(orient='split'),
                                         orient='split'))
        assert_frame_equal(df, read_json(df.to_json(orient='columns'),
                                         orient='columns'))
        assert_frame_equal(df, read_json(df.to_json(orient='index'),
                                         orient='index'))
        # orient='records' drops the index, so compare columns/values only.
        df_unser = read_json(df.to_json(orient='records'), orient='records')
        assert_index_equal(df.columns, df_unser.columns)
        tm.assert_numpy_array_equal(df.values, df_unser.values)
    def test_frame_non_unique_index(self):
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
                       columns=['x', 'y'])
        # Duplicate index labels cannot be represented as JSON object keys,
        # so these orients must refuse to serialise.
        self.assertRaises(ValueError, df.to_json, orient='index')
        self.assertRaises(ValueError, df.to_json, orient='columns')
        assert_frame_equal(df, read_json(df.to_json(orient='split'),
                                         orient='split'))
        # 'records' and 'values' do not serialise the index at all, so only
        # columns/values can be compared after the round trip.
        unser = read_json(df.to_json(orient='records'), orient='records')
        self.assert_index_equal(df.columns, unser.columns)
        tm.assert_almost_equal(df.values, unser.values)
        unser = read_json(df.to_json(orient='values'), orient='values')
        tm.assert_numpy_array_equal(df.values, unser.values)
    def test_frame_non_unique_columns(self):
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
                       columns=['x', 'x'])
        # Duplicate column labels clash with JSON object keys for these
        # orients, so serialisation must fail up front.
        self.assertRaises(ValueError, df.to_json, orient='index')
        self.assertRaises(ValueError, df.to_json, orient='columns')
        self.assertRaises(ValueError, df.to_json, orient='records')
        assert_frame_equal(df, read_json(df.to_json(orient='split'),
                                         orient='split', dtype=False))
        unser = read_json(df.to_json(orient='values'), orient='values')
        tm.assert_numpy_array_equal(df.values, unser.values)
        # GH4377; duplicate columns not processing correctly
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[
                       1, 2], columns=['x', 'y'])
        result = read_json(df.to_json(orient='split'), orient='split')
        assert_frame_equal(result, df)
        def _check(df):
            # Round-trip via 'split' with date conversion requested for the
            # (duplicated) 'x' column.
            result = read_json(df.to_json(orient='split'), orient='split',
                               convert_dates=['x'])
            assert_frame_equal(result, df)
        # Exercise duplicate columns across string, float, mixed-numeric and
        # datetime payloads.
        for o in [[['a', 'b'], ['c', 'd']],
                  [[1.5, 2.5], [3.5, 4.5]],
                  [[1, 2.5], [3, 4.5]],
                  [[Timestamp('20130101'), 3.5],
                   [Timestamp('20130102'), 4.5]]]:
            _check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None, check_index_type=True,
check_column_type=True, check_numpy_dtype=False):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
self.assertRaises(
ValueError, lambda: df.to_json(orient=orient))
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
self.assertRaises(
ValueError, lambda: df.to_json(orient=orient))
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(dfjson, orient=orient, dtype=dtype,
numpy=numpy, convert_axes=convert_axes)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if dtype is False:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(
unser.index.values.astype('i8') * 1e6)
if orient == "records":
# index is not captured in this orientation
assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
self.assert_index_equal(df.columns, unser.columns,
exact=check_column_type)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
else:
if convert_axes:
assert_frame_equal(df, unser, check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type)
else:
assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
def _check_all_orients(df, dtype=None, convert_axes=True,
raise_ok=None, sort=None, check_index_type=True,
check_column_type=True):
# numpy=False
if convert_axes:
_check_orient(df, "columns", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype,
convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
self.assertEqual(self.frame.to_json(),
self.frame.to_json(orient="columns"))
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
dtype=np.float64, convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
# categorical
_check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame, check_index_type=False,
check_column_type=False)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(['a', 'b', 'c', 'd', 'e'])
data = {'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': [True, False, True, False, True]}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
self.assertRaises(ValueError, read_json, StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
self.assertRaises(ValueError, read_json, json,
orient="split")
# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
self.assertRaises(AssertionError, read_json, json,
orient="split")
# bad key
json = StringIO('{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
with tm.assertRaisesRegexp(ValueError, r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
self.assertTrue(np.isnan(unser[2][0]))
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
self.assertTrue(np.isnan(unser[2][0]))
unser = read_json(df.to_json(), dtype=False)
self.assertTrue(unser[2][0] is None)
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
self.assertTrue(unser['2']['0'] is None)
unser = read_json(df.to_json(), numpy=False)
self.assertTrue(np.isnan(unser[2][0]))
unser = read_json(df.to_json(), numpy=False, dtype=False)
self.assertTrue(unser[2][0] is None)
unser = read_json(df.to_json(), numpy=False,
convert_axes=False, dtype=False)
self.assertTrue(unser['2']['0'] is None)
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
self.assertTrue(np.isnan(unser[2][0]))
unser = read_json(df.to_json(), dtype=False)
self.assertTrue(np.isnan(unser[2][0]))
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
self.assertTrue(np.isnan(unser[2][0]))
unser = read_json(df.to_json(), dtype=False)
self.assertTrue(np.isnan(unser[2][0]))
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
self.assertRaises(ValueError, df.to_json, orient="garbage")
    def test_frame_empty(self):
        df = DataFrame(columns=['jim', 'joe'])
        self.assertFalse(df._is_mixed_type)
        # An empty frame must round-trip; pass dtypes explicitly because
        # there are no values from which read_json could infer them.
        assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                           check_index_type=False)
    def test_frame_empty_mixedtype(self):
        # mixed type
        df = DataFrame(columns=['jim', 'joe'])
        df['joe'] = df['joe'].astype('i8')
        self.assertTrue(df._is_mixed_type)
        # Mixed-dtype empty frames must round-trip just like homogeneous
        # ones when the dtypes are supplied explicitly.
        assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                           check_index_type=False)
def test_frame_mixedtype_orient(self): # GH10289
vals = [[10, 1, 'foo', .1, .01],
[20, 2, 'bar', .2, .02],
[30, 3, 'baz', .3, .03],
[40, 4, 'qux', .4, .04]]
df = DataFrame(vals, index=list('abcd'),
columns=['1st', '2nd', '3rd', '4th', '5th'])
self.assertTrue(df._is_mixed_type)
right = df.copy()
for orient in ['split', 'index', 'columns']:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient='records')
left = read_json(inp, orient='records', convert_axes=False)
assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient='values')
left = read_json(inp, orient='values', convert_axes=False)
assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478]],
columns=['A', 'B', 'C', 'D'],
index=pd.date_range('2000-01-03', '2000-01-07'))
df['date'] = pd.Timestamp('19920106 18:21:32.12')
df.ix[3, 'date'] = pd.Timestamp('20130101')
df['modified'] = df['date']
df.ix[1, 'modified'] = pd.NaT
v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
df_unser = pd.read_json(v12_json)
assert_frame_equal(df, df_unser)
df_iso = df.drop(['modified'], axis=1)
v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
df_unser_iso = pd.read_json(v12_iso_json)
assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range('20000101', periods=10, freq='H')
df_mixed = DataFrame(OrderedDict(
float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
-0.60316077, 0.24653374, 0.28668979, -2.51969012,
0.95748401, -1.02970536],
int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
40314334, 21290235, 4991321, 41903419, 16008365],
str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
-0.48217572, 0.86229683, 1.08935819, 0.93898739,
-0.03030452, 1.43366348],
str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
'08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
34193846, 10561746, 24867120, 76131025]
), index=index)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype('unicode')
df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
orient='split')
assert_frame_equal(df_mixed, df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True)
def test_series_non_unique_index(self):
s = Series(['a', 'b'], index=[1, 1])
self.assertRaises(ValueError, s.to_json, orient='index')
assert_series_equal(s, read_json(s.to_json(orient='split'),
orient='split', typ='series'))
unser = read_json(s.to_json(orient='records'),
orient='records', typ='series')
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(series, orient, dtype=None, numpy=False,
check_index_type=True):
series = series.sort_index()
unser = read_json(series.to_json(orient=orient),
typ='series', orient=orient, numpy=numpy,
dtype=dtype)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(series, unser,
check_index_type=check_index_type)
else:
assert_series_equal(series, unser, check_names=False,
check_index_type=check_index_type)
def _check_all_orients(series, dtype=None, check_index_type=True):
_check_orient(series, "columns", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype)
_check_orient(series, "columns", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype, numpy=True,
check_index_type=check_index_type)
# basic
_check_all_orients(self.series)
self.assertEqual(self.series.to_json(),
self.series.to_json(orient="index"))
objSeries = Series([str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name)
_check_all_orients(objSeries, dtype=False)
# empty_series has empty index with object dtype
# which cannot be revert
self.assertEqual(self.empty_series.index.dtype, np.object_)
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
self.assertRaises(ValueError, s.to_json, orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ='series', precise_float=True)
assert_series_equal(result, s, check_index_type=False)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
assert_frame_equal(result, df, check_index_type=False,
check_column_type=False)
def test_typ(self):
s = Series(lrange(6), index=['a', 'b', 'c',
'd', 'e', 'f'], dtype='int64')
result = read_json(s.to_json(), typ=None)
assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
result = read_json(df.to_json())
assert_frame_equal(result, df)
def test_path(self):
with ensure_clean('test.json') as path:
for df in [self.frame, self.frame2, self.intframe, self.tsframe,
self.mixed_frame]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, self.ts, check_names=False)
self.assertTrue(result.name is None)
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df['date'] = Timestamp('20130101')
json = df.to_json()
result = read_json(json)
assert_frame_equal(result, df)
df['foo'] = 1.
json = df.to_json(date_unit='ns')
result = read_json(json, convert_dates=False)
expected = df.copy()
expected['date'] = expected['date'].values.view('i8')
expected['foo'] = expected['foo'].astype('int64')
assert_frame_equal(result, expected)
# series
ts = Series(Timestamp('20130101'), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, ts)
def test_convert_dates_infer(self):
# GH10747
infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
'modified', 'timestamp', 'timestamps']
for infer_word in infer_words:
data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
columns=['id', infer_word])
result = read_json(pd.json.dumps(data))[['id', infer_word]]
assert_frame_equal(result, expected)
def test_date_format_frame(self):
df = self.tsframe.copy()
def test_w_date(date, date_unit=None):
df['date'] = Timestamp(date)
df.ix[1, 'date'] = pd.NaT
df.ix[5, 'date'] = pd.NaT
if date_unit:
json = df.to_json(date_format='iso', date_unit=date_unit)
else:
json = df.to_json(date_format='iso')
result = read_json(json)
assert_frame_equal(result, df)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
self.assertRaises(ValueError, df.to_json, date_format='iso',
date_unit='foo')
def test_date_format_series(self):
def test_w_date(date, date_unit=None):
ts = Series(Timestamp(date), index=self.ts.index)
ts.ix[1] = pd.NaT
ts.ix[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format='iso', date_unit=date_unit)
else:
json = ts.to_json(date_format='iso')
result = read_json(json, typ='series')
assert_series_equal(result, ts)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
self.assertRaises(ValueError, ts.to_json, date_format='iso',
date_unit='foo')
def test_date_unit(self):
df = self.tsframe.copy()
df['date'] = Timestamp('20130101 20:43:42')
df.ix[1, 'date'] = Timestamp('19710101 20:43:42')
df.ix[2, 'date'] = Timestamp('21460101 20:43:42')
df.ix[4, 'date'] = pd.NaT
for unit in ('s', 'ms', 'us', 'ns'):
json = df.to_json(date_format='epoch', date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r'''{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}'''
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
dfj2['date'] = Timestamp('20130101')
dfj2['ints'] = lrange(5)
dfj2['bools'] = True
dfj2.index = pd.date_range('20130101', periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with tm.assertRaisesRegexp(AssertionError, error_msg):
assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
@network
def test_round_trip_exception_(self):
# GH 3867
csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
assert_frame_equal(result.reindex(
index=df.index, columns=df.columns), df)
@network
def test_url(self):
url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa
result = read_json(url, convert_dates=True)
for c in ['created_at', 'closed_at', 'updated_at']:
self.assertEqual(result[c].dtype, 'datetime64[ns]')
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit='ms')
s = Series([timedelta(23), timedelta(seconds=5)])
self.assertEqual(s.dtype, 'timedelta64[ns]')
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)],
index=pd.Index([0, 1]))
self.assertEqual(s.dtype, 'timedelta64[ns]')
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
self.assertEqual(frame[0].dtype, 'timedelta64[ns]')
assert_frame_equal(frame, pd.read_json(frame.to_json())
.apply(converter))
frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
'b': [1, 2],
'c': pd.date_range(start='20130101', periods=2)})
result = pd.read_json(frame.to_json(date_unit='ns'))
result['a'] = pd.to_timedelta(result.a, unit='ns')
result['c'] = pd.to_datetime(result.c)
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
dtype=object)
expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
pd.Timestamp(frame.a[1]).value]})
result = pd.read_json(frame.to_json(date_unit='ns'),
dtype={'a': 'int64'})
assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({'a': [7, value]})
expected = DataFrame({'a': [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [('mathjs', 'Complex'),
('re', obj.real),
('im', obj.imag)]
return str(obj)
df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
'b': [float('nan'), None, 'N/A']},
columns=['a', 'b'])]
expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]')
self.assertEqual(expected, dumps(df_list, default_handler=default,
orient="values"))
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame({'a': [1, 2.3, complex(4, -5)],
'b': [float('nan'), None, complex(1.2, 0)]},
columns=['a', 'b'])
expected = ('[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]')
self.assertEqual(expected, df.to_json(default_handler=str,
orient="values"))
def test_default_handler_raises(self):
def my_handler_raises(obj):
raise TypeError("raisin")
self.assertRaises(TypeError,
DataFrame({'a': [1, 2, object()]}).to_json,
default_handler=my_handler_raises)
self.assertRaises(TypeError,
DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
default_handler=my_handler_raises)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype('category')
self.assertEqual(expected, df.to_json())
s = df["A"]
sc = df["B"]
self.assertEqual(s.to_json(), sc.to_json())
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
tz_naive = tz_range.tz_convert('utc').tz_localize(None)
df = DataFrame({
'A': tz_range,
'B': pd.date_range('20130101', periods=3)})
df_naive = df.copy()
df_naive['A'] = tz_naive
expected = df_naive.to_json()
self.assertEqual(expected, df.to_json())
stz = Series(tz_range)
s_naive = Series(tz_naive)
self.assertEqual(stz.to_json(), s_naive.to_json())
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.ix[:8] = np.nan
sdf = df.to_sparse()
expected = df.to_json()
self.assertEqual(expected, sdf.to_json())
s = pd.Series(np.random.randn(10))
s.ix[:8] = np.nan
ss = s.to_sparse()
expected = s.to_json()
self.assertEqual(expected, ss.to_json())
def test_tz_is_utc(self):
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp('2013-01-10 05:00:00Z')
self.assertEqual(exp, pd.json.dumps(ts, iso_dates=True))
dt = ts.to_pydatetime()
self.assertEqual(exp, pd.json.dumps(dt, iso_dates=True))
ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
self.assertEqual(exp, pd.json.dumps(ts, iso_dates=True))
dt = ts.to_pydatetime()
self.assertEqual(exp, pd.json.dumps(dt, iso_dates=True))
ts = Timestamp('2013-01-10 00:00:00-0500')
self.assertEqual(exp, pd.json.dumps(ts, iso_dates=True))
dt = ts.to_pydatetime()
self.assertEqual(exp, pd.json.dumps(dt, iso_dates=True))
def test_tz_range_is_utc(self):
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = ('{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}')
tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
self.assertEqual(exp, pd.json.dumps(tz_range, iso_dates=True))
dti = pd.DatetimeIndex(tz_range)
self.assertEqual(exp, pd.json.dumps(dti, iso_dates=True))
df = DataFrame({'DT': dti})
self.assertEqual(dfexp, pd.json.dumps(df, iso_dates=True))
tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
tz='US/Eastern')
self.assertEqual(exp, pd.json.dumps(tz_range, iso_dates=True))
dti = pd.DatetimeIndex(tz_range)
self.assertEqual(exp, pd.json.dumps(dti, iso_dates=True))
df = DataFrame({'DT': dti})
self.assertEqual(dfexp, pd.json.dumps(df, iso_dates=True))
tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
self.assertEqual(exp, pd.json.dumps(tz_range, iso_dates=True))
dti = pd.DatetimeIndex(tz_range)
self.assertEqual(exp, pd.json.dumps(dti, iso_dates=True))
df = DataFrame({'DT': dti})
self.assertEqual(dfexp, pd.json.dumps(df, iso_dates=True))
    def test_read_jsonl(self):
        # GH9180
        # lines=True parses newline-delimited JSON: one record per line,
        # regardless of key order within each record.
        result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        assert_frame_equal(result, expected)
    def test_to_jsonl(self):
        # GH9180
        df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
        self.assertEqual(result, expected)
        # Embedded braces and quotes must be escaped inside the values, not
        # treated as record delimiters, and the output must still round-trip.
        df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
        self.assertEqual(result, expected)
        assert_frame_equal(pd.read_json(result, lines=True), df)
    def test_latin_encoding(self):
        if compat.PY2:
            # NOTE(review): called without a callable, assertRaisesRegexp
            # merely *returns* a context manager, so nothing is asserted and
            # this PY2 branch is effectively a no-op.  The message also looks
            # copied from the HDF/pytables tests -- confirm the intent.
            self.assertRaisesRegexp(
                TypeError, r'\[unicode\] is not implemented as a table column')
            return
        # GH 13774
        raise nose.SkipTest("encoding not implemented in .to_json(), "
                            "xref #13774")
        # Everything below is currently unreachable; kept for when encoding
        # support lands: latin-1 strings in object/category Series should
        # survive a to_json/read_json round trip through a file.
        values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
                  [b'E\xc9, 17', b'a', b'b', b'c'],
                  [b'EE, 17', b'', b'a', b'b', b'c'],
                  [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
                  [b'', b'a', b'b', b'c'],
                  [b'\xf8\xfc', b'a', b'b', b'c'],
                  [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
                  [np.nan, b'', b'b', b'c'],
                  [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
        def _try_decode(x, encoding='latin-1'):
            # Decode bytes to text; non-bytes (e.g. np.nan) pass through.
            try:
                return x.decode(encoding)
            except AttributeError:
                return x
        # not sure how to remove latin-1 from code in python 2 and 3
        values = [[_try_decode(x) for x in y] for y in values]
        examples = []
        for dtype in ['category', object]:
            for val in values:
                examples.append(Series(val, dtype=dtype))
        def roundtrip(s, encoding='latin-1'):
            # Write to a temp file with the given encoding and read it back.
            with ensure_clean('test.json') as path:
                s.to_json(path, encoding=encoding)
                retr = read_json(path, encoding=encoding)
                assert_series_equal(s, retr, check_categorical=False)
        for s in examples:
            roundtrip(s)
# Allow running this module directly under nose with verbose output and
# debugger hooks on failure.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',
                         '--pdb-failure', '-s'], exit=False)
| gpl-3.0 |
nborggren/zipline | zipline/pipeline/loaders/blaze/earnings.py | 2 | 4491 | from datashape import istabular
import pandas as pd
from toolz import valmap
from .core import (
TS_FIELD_NAME,
SID_FIELD_NAME,
bind_expression_to_resources,
ffill_query_in_range,
)
from zipline.pipeline.data import EarningsCalendar
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.pipeline.loaders.earnings import EarningsCalendarLoader
from zipline.pipeline.loaders.utils import (
check_data_query_args,
normalize_data_query_bounds,
normalize_timestamp_to_query_time,
)
from zipline.utils.input_validation import ensure_timezone, optionally
from zipline.utils.preprocess import preprocess
# Column name carrying the earnings announcement date in the source data.
ANNOUNCEMENT_FIELD_NAME = 'announcement_date'
class BlazeEarningsCalendarLoader(PipelineLoader):
    """A pipeline loader for the ``EarningsCalendar`` dataset that loads
    data from a blaze expression.

    Parameters
    ----------
    expr : Expr
        The expression representing the data to load.
    resources : dict, optional
        Mapping from the atomic terms of ``expr`` to actual data resources.
    odo_kwargs : dict, optional
        Extra keyword arguments to pass to odo when executing the expression.
    data_query_time : time, optional
        The time to use for the data query cutoff.
    data_query_tz : tzinfo or str
        The timezone to use for the data query cutoff.

    Notes
    -----
    The expression should have a tabular dshape of::

        Dim * {{
            {SID_FIELD_NAME}: int64,
            {TS_FIELD_NAME}: datetime,
            {ANNOUNCEMENT_FIELD_NAME}: ?datetime,
        }}

    Where each row of the table is a record including the sid to identify the
    company, the timestamp where we learned about the announcement, and the
    date when the earnings will be announced.

    If the '{TS_FIELD_NAME}' field is not included it is assumed that we
    start the backtest with knowledge of all announcements.
    """
    # Substitute the actual field-name constants into the class docstring.
    __doc__ = __doc__.format(
        TS_FIELD_NAME=TS_FIELD_NAME,
        SID_FIELD_NAME=SID_FIELD_NAME,
        ANNOUNCEMENT_FIELD_NAME=ANNOUNCEMENT_FIELD_NAME,
    )
    # Exactly the columns the expression is projected down to in __init__.
    _expected_fields = frozenset({
        TS_FIELD_NAME,
        SID_FIELD_NAME,
        ANNOUNCEMENT_FIELD_NAME,
    })
    @preprocess(data_query_tz=optionally(ensure_timezone))
    def __init__(self,
                 expr,
                 resources=None,
                 odo_kwargs=None,
                 data_query_time=None,
                 data_query_tz=None,
                 dataset=EarningsCalendar):
        dshape = expr.dshape
        # Only row-oriented (tabular) expressions can be consumed.
        if not istabular(dshape):
            raise ValueError(
                'expression dshape must be tabular, got: %s' % dshape,
            )
        expected_fields = self._expected_fields
        # Project down to just the fields we need and bind any unbound terms
        # of the expression to the supplied concrete data resources.
        self._expr = bind_expression_to_resources(
            expr[list(expected_fields)],
            resources,
        )
        self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {}
        self._dataset = dataset
        # Validates that time and tz are supplied together (or not at all).
        check_data_query_args(data_query_time, data_query_tz)
        self._data_query_time = data_query_time
        self._data_query_tz = data_query_tz
    def load_adjusted_array(self, columns, dates, assets, mask):
        data_query_time = self._data_query_time
        data_query_tz = self._data_query_tz
        # Compute the [lower, upper] datetime bounds for the query from the
        # simulation dates, applying the optional daily cutoff time/tz.
        lower_dt, upper_dt = normalize_data_query_bounds(
            dates[0],
            dates[-1],
            data_query_time,
            data_query_tz,
        )
        # Materialize the bound expression over the range into a DataFrame
        # (ffill_query_in_range presumably also carries the latest value
        # at/before lower_dt forward -- see core.ffill_query_in_range).
        raw = ffill_query_in_range(
            self._expr,
            lower_dt,
            upper_dt,
            self._odo_kwargs,
        )
        sids = raw.loc[:, SID_FIELD_NAME]
        # Drop rows for sids that are not part of this pipeline run.
        raw.drop(
            sids[~sids.isin(assets)].index,
            inplace=True
        )
        if data_query_time is not None:
            # Re-align knowledge timestamps around the daily query cutoff
            # (records learned after the cutoff count for the next day).
            normalize_timestamp_to_query_time(
                raw,
                data_query_time,
                data_query_tz,
                inplace=True,
                ts_field=TS_FIELD_NAME,
            )
        gb = raw.groupby(SID_FIELD_NAME)
        def mkseries(idx, raw_loc=raw.loc):
            # For one sid's group of row labels, build a Series mapping
            # knowledge timestamp -> announcement date.  ``raw_loc`` is
            # bound as a default to avoid re-evaluating raw.loc per call.
            vs = raw_loc[
                idx, [TS_FIELD_NAME, ANNOUNCEMENT_FIELD_NAME]
            ].values
            return pd.Series(
                index=pd.DatetimeIndex(vs[:, 0]),
                data=vs[:, 1],
            )
        # Delegate AdjustedArray construction to the in-memory loader,
        # feeding it the per-sid announcement series.
        return EarningsCalendarLoader(
            dates,
            valmap(mkseries, gb.groups),
            dataset=self._dataset,
        ).load_adjusted_array(columns, dates, assets, mask)
| apache-2.0 |
camptocamp/QGIS | python/plugins/processing/algs/BarPlot.py | 1 | 2901 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from PyQt4.QtCore import *
from qgis.core import *
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterTableField import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.outputs.OutputHTML import OutputHTML
from processing.tools import *
from processing.tools import dataobjects
class BarPlot(GeoAlgorithm):
    """Render a bar plot of a table attribute and write an HTML report
    that embeds the generated PNG image."""

    INPUT = "INPUT"
    OUTPUT = "OUTPUT"
    NAME_FIELD = "NAME_FIELD"
    VALUE_FIELD = "VALUE_FIELD"

    def processAlgorithm(self, progress):
        """Create the bar plot for the configured layer and fields, saving
        ``<output>.png`` plus an HTML wrapper at ``<output>``."""
        uri = self.getParameterValue(self.INPUT)
        layer = dataobjects.getObjectFromUri(uri)
        namefieldname = self.getParameterValue(self.NAME_FIELD)
        valuefieldname = self.getParameterValue(self.VALUE_FIELD)
        output = self.getOutputValue(self.OUTPUT)
        values = vector.getAttributeValues(layer, namefieldname, valuefieldname)
        plt.close()  # discard any figure left over from a previous run
        ind = np.arange(len(values[namefieldname]))
        width = 0.8
        plt.bar(ind, values[valuefieldname], width, color='r')
        plt.xticks(ind, values[namefieldname], rotation = 45)
        plotFilename = output + ".png"
        lab.savefig(plotFilename)
        # FIX: use a context manager so the handle is closed even if the
        # write raises (the original open/write/close leaked on error).
        with open(output, "w") as f:
            f.write("<img src=\"" + plotFilename + "\"/>")

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name = "Bar plot"
        self.group = "Graphics"
        self.addParameter(ParameterTable(self.INPUT, "Input table"))
        self.addParameter(ParameterTableField(self.NAME_FIELD, "Category name field", self.INPUT))
        self.addParameter(ParameterTableField(self.VALUE_FIELD, "Value field", self.INPUT))
        self.addOutput(OutputHTML(self.OUTPUT, "Output"))
| gpl-2.0 |
MostafaGazar/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 4 | 6151 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
  """Abstract parent class for NumpySource and PandasSource.

  Wraps an in-memory structure (numpy array, ordered dict of arrays, or
  pandas DataFrame) in a queue-based feeding transform.
  """

  def __init__(self,
               data,
               num_threads=None,
               enqueue_size=None,
               batch_size=None,
               queue_capacity=None,
               shuffle=False,
               min_after_dequeue=None,
               seed=None,
               data_name="in_memory_data"):
    super(BaseInMemorySource, self).__init__()
    self._data = data
    self._num_threads = 1 if num_threads is None else num_threads
    self._batch_size = (32 if batch_size is None else batch_size)
    # Spread each batch's enqueueing across the worker threads.
    self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
                            ) if enqueue_size is None else enqueue_size
    self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
                            queue_capacity)
    self._shuffle = shuffle
    # BUG FIX: default from the *resolved* batch size, not the raw
    # ``batch_size`` argument. Previously, when ``batch_size`` was None
    # (so the batch size defaulted to 32), ``min_after_dequeue`` silently
    # ended up None as well instead of matching the batch size.
    self._min_after_dequeue = (self._batch_size if min_after_dequeue is None
                               else min_after_dequeue)
    self._seed = seed
    self._data_name = data_name

  @transform.parameter
  def data(self):
    return self._data

  @transform.parameter
  def num_threads(self):
    return self._num_threads

  @transform.parameter
  def enqueue_size(self):
    return self._enqueue_size

  @transform.parameter
  def batch_size(self):
    return self._batch_size

  @transform.parameter
  def queue_capacity(self):
    return self._queue_capacity

  @transform.parameter
  def shuffle(self):
    return self._shuffle

  @transform.parameter
  def min_after_dequeue(self):
    return self._min_after_dequeue

  @transform.parameter
  def seed(self):
    return self._seed

  @transform.parameter
  def data_name(self):
    return self._data_name

  @property
  def input_valency(self):
    # Zero-input transform: the data comes from memory, not upstream series.
    return 0

  def _apply_transform(self, transform_input, **kwargs):
    """Enqueue the in-memory data and dequeue batches as output tensors."""
    queue = feeding_functions.enqueue_data(self.data,
                                           self.queue_capacity,
                                           self.shuffle,
                                           self.min_after_dequeue,
                                           num_threads=self.num_threads,
                                           seed=self.seed,
                                           name=self.data_name,
                                           enqueue_size=self.enqueue_size,
                                           num_epochs=kwargs.get("num_epochs"))
    dequeued = queue.dequeue_many(self.batch_size)
    # TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
    # of the number of enqueued tensors. Remove the following once that change
    # is in place.
    if not isinstance(dequeued, (tuple, list)):
      dequeued = (dequeued,)
    # pylint: disable=not-callable
    return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
  """A zero-input Transform that produces a single column from a numpy array."""
  @property
  def name(self):
    # Human-readable transform name used in graph op naming/repr.
    return "NumpySource"
  @property
  def _output_names(self):
    # One implicit "index" column plus the single array "value" column.
    return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
  """A zero-input Transform that produces Series from a dict of numpy arrays."""

  def __init__(self,
               ordered_dict_of_arrays,
               num_threads=None,
               enqueue_size=None,
               batch_size=None,
               queue_capacity=None,
               shuffle=False,
               min_after_dequeue=None,
               seed=None,
               data_name="pandas_data"):
    # "index" is produced implicitly as the first output column, so a
    # user-supplied column of that name would collide with it.
    if "index" in ordered_dict_of_arrays.keys():
      raise ValueError("Column name `index` is reserved.")
    super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
                                                 num_threads, enqueue_size,
                                                 batch_size, queue_capacity,
                                                 shuffle, min_after_dequeue,
                                                 seed, data_name)

  @property
  def name(self):
    return "OrderedDictNumpySource"

  @property
  def _output_names(self):
    # BUG FIX: wrap keys() in list(). The file imports __future__ for 3.x
    # compatibility, but under Python 3 ``dict.keys()`` is a view and
    # ``["index"] + dict_keys`` raises TypeError.
    return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
  """A zero-input Transform that produces Series from a DataFrame."""

  def __init__(self,
               dataframe,
               num_threads=None,
               enqueue_size=None,
               batch_size=None,
               queue_capacity=None,
               shuffle=False,
               min_after_dequeue=None,
               seed=None,
               data_name="pandas_data"):
    # "index" is emitted implicitly as the first output column, so a real
    # DataFrame column of that name would collide with it.
    if "index" in dataframe.columns:
      raise ValueError("Column name `index` is reserved.")
    super(PandasSource, self).__init__(dataframe,
                                       num_threads=num_threads,
                                       enqueue_size=enqueue_size,
                                       batch_size=batch_size,
                                       queue_capacity=queue_capacity,
                                       shuffle=shuffle,
                                       min_after_dequeue=min_after_dequeue,
                                       seed=seed,
                                       data_name=data_name)

  @property
  def name(self):
    return "PandasSource"

  @property
  def _output_names(self):
    # The implicit index column followed by the DataFrame's columns.
    return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
harshaneelhg/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
    """Shared checks on predict_proba/predict_log_proba of a fitted dummy
    classifier: shapes, rows summing to one, and log consistency."""
    proba = clf.predict_proba(X)
    # We know that we can have division by zero
    log_proba = clf.predict_log_proba(X)
    # Normalize y to 2d (n_samples, n_outputs) so single- and multi-output
    # targets share the same code path below.
    y = np.atleast_1d(y)
    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))
    n_outputs = y.shape[1]
    n_samples = len(X)
    if n_outputs == 1:
        # Single-output proba is one array; wrap in a list to match the
        # multi-output layout (one array per output).
        proba = [proba]
        log_proba = [log_proba]
    for k in range(n_outputs):
        assert_equal(proba[k].shape[0], n_samples)
        assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
        # Each row of class probabilities must sum to one.
        assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
        # We know that we can have division by zero
        assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
    """Fit clones of *clf* on 1d and 2d targets and check that ``predict``
    preserves the target shape in both cases."""
    X = np.array([[0], [0], [0], [0]])  # ignored
    targets = (
        np.array([1, 2, 1, 1]),          # 1d case
        np.array([[1, 0],                # 2d case
                  [2, 0],
                  [1, 0],
                  [1, 3]]),
    )
    for y in targets:
        est = clone(clf)
        est.fit(X, y)
        y_pred = est.predict(X)
        assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
    """Check that ``predict`` preserves a wide 2d target shape for the
    constant strategy (2d case only)."""
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([[1, 0, 5, 4, 3],
                  [2, 0, 1, 2, 5],
                  [1, 0, 4, 5, 2],
                  [1, 3, 3, 2, 0]])
    est = clone(clf)
    est.fit(X, y)
    assert_equal(y.shape, est.predict(X).shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
                              y_test, y_pred_test):
    """Both prediction arrays must equal *statistic* tiled to each target's
    number of rows."""
    for y, y_pred in ((y_learn, y_pred_learn), (y_test, y_pred_test)):
        assert_array_equal(np.tile(statistic, (y.shape[0], 1)), y_pred)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
    # The most-frequent strategy must also work with string class labels.
    features = [[0]] * 5
    labels = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
    clf = DummyClassifier(strategy="most_frequent")
    clf.fit(features, labels)
    assert_array_equal(clf.predict(features), ["paris"] * 5)
def test_classifier_exceptions():
    clf = DummyClassifier(strategy="unknown")
    # Fitting with a bogus strategy must raise.
    assert_raises(ValueError, clf.fit, [], [])
    # Prediction methods on an unfitted classifier must raise too.
    for method in (clf.predict, clf.predict_proba):
        assert_raises(ValueError, method, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
    X = [[0]] * 5  # ignored
    y = [0] * 5  # ignored
    # A missing quantile is rejected at fit time.
    est = DummyRegressor(strategy="quantile")
    assert_raises(ValueError, est.fit, X, y)
    # Invalid quantile values: wrong value or wrong type.
    for bad_quantile, err in ((None, ValueError),
                              ([0], ValueError),
                              (-0.1, ValueError),
                              (1.1, ValueError),
                              ('abc', TypeError)):
        est = DummyRegressor(strategy="quantile", quantile=bad_quantile)
        assert_raises(err, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
stanmoore1/lammps | examples/SPIN/test_problems/validation_damped_exchange/plot_precession.py | 9 | 1111 | #!/usr/bin/env python3
import numpy as np, pylab, tkinter
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from decimal import *
import sys, string, os
# Compare a LAMMPS damped spin-dynamics run against a reference LLG
# integration by plotting spin components and energy on shared time axes.
argv = sys.argv
if len(argv) != 3:
    print("Syntax: ./plot_precession.py res_lammps.dat res_llg.dat")
    sys.exit()
lammps_file = sys.argv[1]
llg_file = sys.argv[2]
# LAMMPS file columns 1-4,7: time, Sx, Sy, Sz, energy
t_lmp,Sx_lmp,Sy_lmp,Sz_lmp,e_lmp = np.loadtxt(lammps_file,skiprows=0, usecols=(1,2,3,4,7),unpack=True)
# LLG reference file columns 0-4: time, Sx, Sy, Sz, energy
t_llg,Sx_llg,Sy_llg,Sz_llg,e_llg = np.loadtxt(llg_file,skiprows=0, usecols=(0,1,2,3,4),unpack=True)
plt.figure()
# Four stacked subplots: Sx, Sy, Sz, energy; LAMMPS solid blue, LLG
# dashed red in each panel.
plt.subplot(411)
plt.ylabel('Sx')
plt.plot(t_lmp, Sx_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, Sx_llg, 'r--', label='LLG')
plt.subplot(412)
plt.ylabel('Sy')
plt.plot(t_lmp, Sy_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, Sy_llg, 'r--', label='LLG')
plt.subplot(413)
plt.ylabel('Sz')
plt.plot(t_lmp, Sz_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, Sz_llg, 'r--', label='LLG')
plt.subplot(414)
plt.ylabel('E (eV)')
plt.plot(t_lmp, e_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, e_llg, 'r--', label='LLG')
plt.xlabel('time (in ps)')
plt.legend()
plt.show()
| gpl-2.0 |
rhyolight/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
# NOTE(review): mutating ``cnames`` while iterating is only safe because
# this is Python 2, where ``dict.items()`` returns a list snapshot; under
# Python 3 this loop would raise RuntimeError.
for k, v in cnames.items():
    if k.find('gray')>=0:
        k = k.replace('gray', 'grey')
        cnames[k] = v
def is_color_like(c):
    """Return True if *c* can be converted to an *RGB* color."""
    try:
        colorConverter.to_rgb(c)
    except ValueError:
        return False
    return True
def rgb2hex(rgb):
    """Convert a length-3 rgb tuple of 0-1 floats to a '#rrggbb' string."""
    channels = [round(val * 255) for val in rgb]
    return '#%02x%02x%02x' % tuple(channels)
# Matches exactly '#' followed by six hex digits. Use a raw string so the
# \A / \Z anchors are not treated as (invalid) string escape sequences,
# which raise DeprecationWarning/SyntaxWarning on modern Pythons.
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
    """
    Take a hex string *s* and return the corresponding rgb 3-tuple
    Example: #efefef -> (0.93725, 0.93725, 0.93725)
    """
    if not isinstance(s, basestring):
        raise TypeError('hex2color requires a string argument')
    if hexColorPattern.match(s) is None:
        raise ValueError('invalid hex color string "%s"' % s)
    # Split into the rr / gg / bb digit pairs and scale each to [0, 1].
    channels = (s[1:3], s[3:5], s[5:7])
    return tuple(int(pair, 16) / 255.0 for pair in channels)
class ColorConverter:
    """
    Provides methods for converting color specifications to *RGB* or *RGBA*
    Caching is used for more efficient conversion upon repeated calls
    with the same argument.
    Ordinarily only the single instance instantiated in this module,
    *colorConverter*, is needed.
    """
    # builtin single-letter color abbreviations -> RGB tuples
    colors = {
        'b' : (0.0, 0.0, 1.0),
        'g' : (0.0, 0.5, 0.0),
        'r' : (1.0, 0.0, 0.0),
        'c' : (0.0, 0.75, 0.75),
        'm' : (0.75, 0, 0.75),
        'y' : (0.75, 0.75, 0),
        'k' : (0.0, 0.0, 0.0),
        'w' : (1.0, 1.0, 1.0),
        }
    # class-level cache mapping hashable specs to RGB tuples, shared by all
    # instances (only to_rgb writes to it)
    cache = {}
    def to_rgb(self, arg):
        """
        Returns an *RGB* tuple of three floats from 0-1.
        *arg* can be an *RGB* or *RGBA* sequence or a string in any of
        several forms:
            1) a letter from the set 'rgbcmykw'
            2) a hex color string, like '#00FFFF'
            3) a standard name, like 'aqua'
            4) a float, like '0.4', indicating gray on a 0-1 scale
        if *arg* is *RGBA*, the *A* will simply be discarded.
        """
        try: return self.cache[arg]
        except KeyError: pass
        except TypeError: # could be unhashable rgb seq
            arg = tuple(arg)
            try: return self.cache[arg]
            except KeyError: pass
            except TypeError:
                raise ValueError(
                    'to_rgb: arg "%s" is unhashable even inside a tuple'
                    % (str(arg),))
        try:
            if cbook.is_string_like(arg):
                color = self.colors.get(arg, None)
                if color is None:
                    str1 = cnames.get(arg, arg)
                    if str1.startswith('#'):
                        color = hex2color(str1)
                    else:
                        fl = float(arg)
                        if fl < 0 or fl > 1:
                            raise ValueError(
                                'gray (string) must be in range 0-1')
                        color = tuple([fl]*3)
            elif cbook.iterable(arg):
                if len(arg) > 4 or len(arg) < 3:
                    raise ValueError(
                        'sequence length is %d; must be 3 or 4'%len(arg))
                color = tuple(arg[:3])
                if [x for x in color if (float(x) < 0) or (x > 1)]:
                    # This will raise TypeError if x is not a number.
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                raise ValueError('cannot convert argument to rgb sequence')
            self.cache[arg] = color
        # BUG FIX: "except (...), exc" is Python-2-only syntax (SyntaxError
        # on Python 3); "as exc" works on Python 2.6+ and 3.
        except (KeyError, ValueError, TypeError) as exc:
            raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
        # Error messages could be improved by handling TypeError
        # separately; but this should be rare and not too hard
        # for the user to figure out as-is.
        return color
    def to_rgba(self, arg, alpha=None):
        """
        Returns an *RGBA* tuple of four floats from 0-1.
        For acceptable values of *arg*, see :meth:`to_rgb`.
        If *arg* is an *RGBA* sequence and *alpha* is not *None*,
        *alpha* will replace the original *A*.
        """
        try:
            if not cbook.is_string_like(arg) and cbook.iterable(arg):
                if len(arg) == 4:
                    if [x for x in arg if (float(x) < 0) or (x > 1)]:
                        # This will raise TypeError if x is not a number.
                        raise ValueError('number in rbga sequence outside 0-1 range')
                    if alpha is None:
                        return tuple(arg)
                    if alpha < 0.0 or alpha > 1.0:
                        raise ValueError("alpha must be in range 0-1")
                    return arg[0], arg[1], arg[2], arg[3] * alpha
                r,g,b = arg[:3]
                if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                r,g,b = self.to_rgb(arg)
            if alpha is None:
                alpha = 1.0
            return r,g,b,alpha
        # BUG FIX: py3-compatible "as" exception-binding syntax.
        except (TypeError, ValueError) as exc:
            raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
    def to_rgba_array(self, c, alpha=None):
        """
        Returns a numpy array of *RGBA* tuples.
        Accepts a single mpl color spec or a sequence of specs.
        Special case to handle "no color": if *c* is "none" (case-insensitive),
        then an empty array will be returned. Same for an empty list.
        """
        try:
            if c.lower() == 'none':
                # plain float is the same float64 dtype as the removed
                # np.float_ alias
                return np.zeros((0,4), dtype=float)
        except AttributeError:
            pass
        if len(c) == 0:
            return np.zeros((0,4), dtype=float)
        try:
            result = np.array([self.to_rgba(c, alpha)], dtype=float)
        except ValueError:
            if isinstance(c, np.ndarray):
                # NOTE(review): 'and' here lets a 1-d numeric array through;
                # it looks like it should be 'or' -- confirm against upstream
                # before changing.
                if c.ndim != 2 and c.dtype.kind not in 'SU':
                    raise ValueError("Color array must be two-dimensional")
            result = np.zeros((len(c), 4))
            for i, cc in enumerate(c):
                result[i] = self.to_rgba(cc, alpha)  # change in place
        return np.asarray(result, float)
colorConverter = ColorConverter()
def makeMappingArray(N, data):
    """Create an *N* -element 1-d lookup table
    *data* represented by a list of x,y0,y1 mapping correspondences.
    Each element in this list represents how a value between 0 and 1
    (inclusive) represented by x is mapped to a corresponding value
    between 0 and 1 (inclusive). The two values of y are to allow
    for discontinuous mapping functions (say as might be found in a
    sawtooth) where y0 represents the value of y for values of x
    <= to that given, and y1 is the value to be used for x > than
    that given). The list must start with x=0, end with x=1, and
    all values of x must be in increasing order. Values between
    the given mapping points are determined by simple linear interpolation.
    The function returns an array "result" where ``result[x*(N-1)]``
    gives the closest value for values of x between 0 and 1.
    """
    try:
        adata = np.array(data)
    except Exception:
        raise TypeError("data must be convertable to an array")
    shape = adata.shape
    # BUG FIX: this must be 'or', not 'and' -- with 'and', a 1-d input made
    # shape[1] raise IndexError and an (n, 2) input slipped through entirely.
    # Checking the length first also keeps shape[1] safe via short-circuit.
    if len(shape) != 2 or shape[1] != 3:
        raise ValueError("data must be nx3 format")
    x = adata[:, 0]
    y0 = adata[:, 1]
    y1 = adata[:, 2]
    if x[0] != 0. or x[-1] != 1.0:
        raise ValueError(
            "data mapping points must start with x=0. and end with x=1")
    # np.any replaces the np.sometrue alias removed from modern NumPy
    if np.any(np.sort(x) - x):
        raise ValueError(
            "data mapping points must have x in increasing order")
    # begin generation of lookup table
    x = x * (N - 1)
    lut = np.zeros((N,), float)
    xind = np.arange(float(N))
    ind = np.searchsorted(x, xind)[1:-1]
    lut[1:-1] = (((xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1]))
                 * (y0[ind] - y1[ind - 1]) + y1[ind - 1])
    lut[0] = y1[0]
    lut[-1] = y0[-1]
    # ensure that the lut is confined to values between 0 and 1 by clipping it
    # BUG FIX: np.clip is not in-place -- without out= (or reassignment) the
    # previous code silently discarded the clipped result.
    np.clip(lut, 0.0, 1.0, out=lut)
    return lut
class Colormap:
    """Base class for all scalar to rgb mappings
    Important methods:
        * :meth:`set_bad`
        * :meth:`set_under`
        * :meth:`set_over`
    """
    def __init__(self, name, N=256):
        """
        Public class attributes:
            :attr:`N` : number of rgb quantization levels
            :attr:`name` : name of colormap
        """
        self.name = name
        self.N = N
        self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
        self._rgba_under = None
        self._rgba_over = None
        # the lookup table holds N color rows followed by three special
        # rows: under-range, over-range and bad (masked) values
        self._i_under = N
        self._i_over = N+1
        self._i_bad = N+2
        self._isinit = False
    def __call__(self, X, alpha=1.0, bytes=False):
        """
        *X* is either a scalar or an array (of any dimension).
        If scalar, a tuple of rgba values is returned, otherwise
        an array with the new shape = oldshape+(4,). If the X-values
        are integers, then they are used as indices into the array.
        If they are floating point, then they must be in the
        interval (0.0, 1.0).
        Alpha must be a scalar.
        If bytes is False, the rgba values will be floats on a
        0-1 scale; if True, they will be uint8, 0-255.
        """
        if not self._isinit: self._init()
        alpha = min(alpha, 1.0) # alpha must be between 0 and 1
        alpha = max(alpha, 0.0)
        self._lut[:-3, -1] = alpha
        mask_bad = None
        if not cbook.iterable(X):
            vtype = 'scalar'
            xa = np.array([X])
        else:
            vtype = 'array'
            xma = ma.asarray(X)
            xa = xma.filled(0)
            mask_bad = ma.getmask(xma)
        if xa.dtype.char in np.typecodes['Float']:
            np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
            # The following clip is fast, and prevents possible
            # conversion of large positive values to negative integers.
            # NP_CLIP_OUT is a module-level flag set elsewhere in this file --
            # presumably whether this numpy's clip supports out=; verify.
            if NP_CLIP_OUT:
                np.clip(xa * self.N, -1, self.N, out=xa)
            else:
                xa = np.clip(xa * self.N, -1, self.N)
            xa = xa.astype(int)
        # Set the over-range indices before the under-range;
        # otherwise the under-range values get converted to over-range.
        np.putmask(xa, xa>self.N-1, self._i_over)
        np.putmask(xa, xa<0, self._i_under)
        if mask_bad is not None and mask_bad.shape == xa.shape:
            np.putmask(xa, mask_bad, self._i_bad)
        if bytes:
            lut = (self._lut * 255).astype(np.uint8)
        else:
            lut = self._lut
        rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
        lut.take(xa, axis=0, mode='clip', out=rgba)
        # twice as fast as lut[xa];
        # using the clip or wrap mode and providing an
        # output array speeds it up a little more.
        if vtype == 'scalar':
            rgba = tuple(rgba[0,:])
        return rgba
    def set_bad(self, color = 'k', alpha = 1.0):
        '''Set color to be used for masked values.
        '''
        self._rgba_bad = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()
    def set_under(self, color = 'k', alpha = 1.0):
        '''Set color to be used for low out-of-range values.
        Requires norm.clip = False
        '''
        self._rgba_under = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()
    def set_over(self, color = 'k', alpha = 1.0):
        '''Set color to be used for high out-of-range values.
        Requires norm.clip = False
        '''
        self._rgba_over = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()
    def _set_extremes(self):
        # fill the three special LUT rows; under/over default to the first
        # and last regular colors when not explicitly set
        if self._rgba_under:
            self._lut[self._i_under] = self._rgba_under
        else:
            self._lut[self._i_under] = self._lut[0]
        if self._rgba_over:
            self._lut[self._i_over] = self._rgba_over
        else:
            self._lut[self._i_over] = self._lut[self.N-1]
        self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
        '''Generate the lookup table, self._lut'''
        # BUG FIX: this was declared as "def _init():" without *self*, so any
        # instance call raised TypeError instead of NotImplementedError.
        raise NotImplementedError("Abstract class only")
    def is_gray(self):
        # a colormap is gray iff all three channels are identical everywhere
        # (np.all replaces the np.alltrue alias removed from modern NumPy)
        if not self._isinit: self._init()
        return (np.all(self._lut[:,0] == self._lut[:,1])
                and np.all(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
    """Colormap objects based on lookup tables using linear segments.
    The lookup table is generated using linear interpolation for each
    primary color, with the 0-1 domain divided into any number of
    segments.
    """
    def __init__(self, name, segmentdata, N=256):
        """Create color map from linear mapping segments
        segmentdata argument is a dictionary with a red, green and blue
        entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
        forming rows in a table.
        Example: suppose you want red to increase from 0 to 1 over
        the bottom half, green to do the same over the middle half,
        and blue over the top half. Then you would use::
            cdict = {'red':   [(0.0,  0.0, 0.0),
                               (0.5,  1.0, 1.0),
                               (1.0,  1.0, 1.0)],
                     'green': [(0.0,  0.0, 0.0),
                               (0.25, 0.0, 0.0),
                               (0.75, 1.0, 1.0),
                               (1.0,  1.0, 1.0)],
                     'blue':  [(0.0,  0.0, 0.0),
                               (0.5,  0.0, 0.0),
                               (1.0,  1.0, 1.0)]}
        Each row in the table for a given color is a sequence of
        *x*, *y0*, *y1* tuples. In each sequence, *x* must increase
        monotonically from 0 to 1. For any input value *z* falling
        between *x[i]* and *x[i+1]*, the output value of a given color
        will be linearly interpolated between *y1[i]* and *y0[i+1]*::
            row i:   x  y0  y1
                           /
                          /
            row i+1: x  y0  y1
        Hence y0 in the first row and y1 in the last row are never used.
        .. seealso::
            :func:`makeMappingArray`
        """
        self.monochrome = False # True only if all colors in map are identical;
                                # needed for contouring.
        Colormap.__init__(self, name, N)
        self._segmentdata = segmentdata
    def _init(self):
        # N color rows plus 3 special rows (under/over/bad); alpha column
        # starts at 1.  Plain float replaces the np.float alias removed
        # from modern NumPy (same float64 dtype).
        self._lut = np.ones((self.N + 3, 4), float)
        self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
        self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
        self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
        self._isinit = True
        self._set_extremes()
class ListedColormap(Colormap):
    """Colormap object generated from a list of colors.
    This may be most useful when indexing directly into a colormap,
    but it can also be used to generate special colormaps for ordinary
    mapping.
    """
    def __init__(self, colors, name = 'from_list', N = None):
        """
        Make a colormap from a list of colors.
        *colors*
            a list of matplotlib color specifications,
            or an equivalent Nx3 floating point array (*N* rgb values)
        *name*
            a string to identify the colormap
        *N*
            the number of entries in the map. The default is *None*,
            in which case there is one colormap entry for each
            element in the list of colors. If::
                N < len(colors)
            the list will be truncated at *N*. If::
                N > len(colors)
            the list will be extended by repetition.
        """
        self.colors = colors
        self.monochrome = False # True only if all colors in map are identical;
                                # needed for contouring.
        if N is None:
            N = len(self.colors)
        else:
            # a single string spec is repeated N times; a sequence is
            # truncated or cyclically extended to exactly N entries
            if cbook.is_string_like(self.colors):
                self.colors = [self.colors] * N
                self.monochrome = True
            elif cbook.iterable(self.colors):
                self.colors = list(self.colors) # in case it was a tuple
                if len(self.colors) == 1:
                    self.monochrome = True
                if len(self.colors) < N:
                    self.colors = list(self.colors) * N
                del self.colors[N:]
            else:
                # a bare number is interpreted as a gray level
                try: gray = float(self.colors)
                except TypeError: pass
                else: self.colors = [gray] * N
                self.monochrome = True
        Colormap.__init__(self, name, N)
    def _init(self):
        # plain float replaces the np.float alias removed from modern NumPy
        rgb = np.array([colorConverter.to_rgb(c)
                        for c in self.colors], float)
        self._lut = np.zeros((self.N + 3, 4), float)
        self._lut[:-3, :-1] = rgb
        self._lut[:-3, -1] = 1  # fully opaque by default
        self._isinit = True
        self._set_extremes()
class Normalize:
    """
    Normalize a given value to the 0-1 range
    """
    def __init__(self, vmin=None, vmax=None, clip=False):
        """
        If *vmin* or *vmax* is not given, they are taken from the input's
        minimum and maximum value respectively. If *clip* is *True* and
        the given value falls outside the range, the returned value
        will be 0 or 1, whichever is closer. Returns 0 if::
            vmin==vmax
        Works with scalars or arrays, including masked arrays. If
        *clip* is *True*, masked values are set to 1; otherwise they
        remain masked. Clipping silently defeats the purpose of setting
        the over, under, and masked colors in the colormap, so it is
        likely to lead to surprises; therefore the default is
        *clip* = *False*.
        """
        self.vmin = vmin
        self.vmax = vmax
        self.clip = clip
    def __call__(self, value, clip=None):
        """Map *value* (scalar or array-like, possibly masked) linearly from
        [vmin, vmax] onto [0, 1], autoscaling any unset limit from the data."""
        if clip is None:
            clip = self.clip
        # plain float replaces the np.float alias removed from modern NumPy
        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)
        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin==vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                                mask=mask)
            result = (val-vmin) * (1.0/(vmax-vmin))
        if vtype == 'scalar':
            result = result[0]
        return result
    def inverse(self, value):
        """Map normalized values in [0, 1] back onto [vmin, vmax]."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin + val * (vmax - vmin)
        else:
            return vmin + value * (vmax - vmin)
    def autoscale(self, A):
        '''
        Set *vmin*, *vmax* to min, max of *A*.
        '''
        # NOTE(review): the one-argument ma.minimum/ma.maximum reduction was
        # removed in modern NumPy; new code should use A.min()/A.max().
        self.vmin = ma.minimum(A)
        self.vmax = ma.maximum(A)
    def autoscale_None(self, A):
        ' autoscale only None-valued vmin or vmax'
        if self.vmin is None: self.vmin = ma.minimum(A)
        if self.vmax is None: self.vmax = ma.maximum(A)
    def scaled(self):
        'return true if vmin and vmax set'
        return (self.vmin is not None and self.vmax is not None)
class LogNorm(Normalize):
    """
    Normalize a given value to the 0-1 range on a log scale
    """
    def __call__(self, value, clip=None):
        """Map strictly-positive *value* logarithmically from [vmin, vmax]
        onto [0, 1]; raises ValueError if vmin <= 0."""
        if clip is None:
            clip = self.clip
        # plain float replaces the np.float alias removed from modern NumPy
        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)
        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin<=0:
            raise ValueError("values must all be positive")
        elif vmin==vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                                mask=mask)
            # ma.log keeps masked entries masked (log of masked data)
            result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
        if vtype == 'scalar':
            result = result[0]
        return result
    def inverse(self, value):
        """Invert the log mapping: value in [0, 1] -> vmin*(vmax/vmin)**value."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin * ma.power((vmax/vmin), val)
        else:
            return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
    '''
    Generate a colormap index based on discrete intervals.
    Unlike :class:`Normalize` or :class:`LogNorm`,
    :class:`BoundaryNorm` maps values to integers instead of to the
    interval 0-1.
    Mapping to the 0-1 interval could have been done via
    piece-wise linear interpolation, but using integers seems
    simpler, and reduces the number of conversions back and forth
    between integer and floating point.
    '''
    def __init__(self, boundaries, ncolors, clip=False):
        '''
        *boundaries*
            a monotonically increasing sequence
        *ncolors*
            number of colors in the colormap to be used
        If::
            b[i] <= v < b[i+1]
        then v is mapped to color j;
        as i varies from 0 to len(boundaries)-2,
        j goes from 0 to ncolors-1.
        Out-of-range values are mapped to -1 if low and ncolors
        if high; these are converted to valid indices by
        :meth:`Colormap.__call__` .
        '''
        self.clip = clip
        self.vmin = boundaries[0]
        self.vmax = boundaries[-1]
        self.boundaries = np.asarray(boundaries)
        self.N = len(self.boundaries)
        self.Ncmap = ncolors
        # interpolation of indices is only needed when the number of
        # intervals differs from the number of colormap entries
        if self.N-1 == self.Ncmap:
            self._interp = False
        else:
            self._interp = True
    def __call__(self, x, clip=None):
        """Return the integer colormap index (or masked array of indices)
        for *x*; -1 below vmin, Ncmap at or above vmax."""
        if clip is None:
            clip = self.clip
        x = ma.asarray(x)
        mask = ma.getmaskarray(x)
        # fill masked entries with an out-of-range value so they land in the
        # top bin before the mask is re-applied below
        xx = x.filled(self.vmax+1)
        if clip:
            # BUG FIX: np.clip is not in-place -- the previous code discarded
            # the clipped result, silently disabling clipping.
            xx = np.clip(xx, self.vmin, self.vmax)
        iret = np.zeros(x.shape, dtype=np.int16)
        for i, b in enumerate(self.boundaries):
            iret[xx>=b] = i
        if self._interp:
            iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
        iret[xx<self.vmin] = -1
        iret[xx>=self.vmax] = self.Ncmap
        ret = ma.array(iret, mask=mask)
        if ret.shape == () and not mask:
            ret = int(ret) # assume python scalar
        return ret
    def inverse(self, value):
        # BUG FIX: the error must be raised, not returned -- returning an
        # exception instance silently handed callers a bogus "value".
        raise ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
    '''
    Dummy replacement for Normalize, for the case where we
    want to use indices directly in a
    :class:`~matplotlib.cm.ScalarMappable` .
    '''
    def __call__(self, value, clip=None):
        # identity: the input is already a colormap index; *clip* is ignored
        return value
    def inverse(self, value):
        # identity inverse of the identity mapping
        return value
# compatibility with earlier class names that violated convention:
# (legacy lowercase aliases -- prefer Normalize / NoNorm in new code)
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
marcocaccin/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
#         Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
# noisy observations of an increasing (logarithmic) trend
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y)  # x needs to be 2d for LinearRegression
###############################################################################
# plot result
# one vertical segment per sample, joining each observation to its isotonic fit
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
kmather73/content | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
# (eight RGB triples as 0-1 floats)
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
                (0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
                (0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
                (0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
                (0.4, 0.6509803921568628, 0.11764705882352941),
                (0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
                (0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
                (0.4, 0.4, 0.4)]
def customize_mpl():
    """Tweak matplotlib visual style"""
    print("Setting custom matplotlib visual style")
    # apply all rc overrides in one batch
    rcParams.update({
        'figure.figsize': (10, 6),
        'figure.dpi': 150,
        'axes.color_cycle': dark2_colors,
        'lines.linewidth': 2,
        'axes.grid': True,
        'axes.facecolor': '#eeeeee',
        'font.size': 14,
        'patch.edgecolor': 'none',
    })
def customize_css():
    """Load custom.css from the working directory and return it wrapped in
    an IPython HTML display object."""
    print("Setting custom CSS for the IPython Notebook")
    # BUG FIX: the file handle was never closed; the context manager closes
    # it deterministically.
    with open('custom.css', 'r') as f:
        styles = f.read()
    return HTML(styles)
| mit |
ChanChiChoi/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
# NOTE(review): np.float was removed in NumPy >= 1.20; plain float is the
# equivalent dtype here.
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
# (symmetrized, with a zero diagonal, so the matrix stays a dissimilarity)
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
# warm-start the non-metric solver from the metric embedding
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
#    linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
jpautom/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
    from mpl_toolkits.basemap import Basemap
    basemap = True
except ImportError:
    basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
                    data['train']['dd long']]).T
# label is 1 for Microryzomys minutus, 0 for Bradypus variegatus
ytrain = np.array([d.decode('ascii').startswith('micro')
                   for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180.  # Convert lat/long to radians
# Set up the data grid for the contour plot
# (every 5th grid point, to keep the KDE evaluation cheap)
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
# -9999 marks ocean cells in the coverage grid
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
    plt.subplot(1, 2, i + 1)
    # construct a kernel density estimate of the distribution
    print(" - computing KDE in spherical coordinates")
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(Xtrain[ytrain == i])
    # evaluate only on the land: -9999 indicates ocean
    Z = -9999 + np.zeros(land_mask.shape[0])
    Z[land_mask] = np.exp(kde.score_samples(xy))
    Z = Z.reshape(X.shape)
    # plot contours of the density
    levels = np.linspace(0, Z.max(), 25)
    plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
    if basemap:
        print(" - plot coastlines using basemap")
        m = Basemap(projection='cyl', llcrnrlat=Y.min(),
                    urcrnrlat=Y.max(), llcrnrlon=X.min(),
                    urcrnrlon=X.max(), resolution='c')
        m.drawcoastlines()
        m.drawcountries()
    else:
        print(" - plot coastlines from coverage")
        plt.contour(X, Y, land_reference,
                    levels=[-9999], colors="k",
                    linestyles="solid")
    plt.xticks([])
    plt.yticks([])
    plt.title(species_names[i])
plt.show()
| bsd-3-clause |
flyingbanana1024102/transmission-line-simulator | src/views/sourceeditor.py | 1 | 5620 | #
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: Jul-14-2017
#
from popupeditor import PopupEditor
from kivy.properties import *
from materialbutton import MaterialButton
from util.constants import *
from kivy.animation import Animation
from models.powersource import *
from kivy.metrics import *
import util.signal as signal
import matplotlib.pyplot as plt
import numpy as np
from kivy.uix.boxlayout import BoxLayout
class SourceEditor(PopupEditor):
    """
    Popup editor for a power source: edits impedance, amplitude (voltage),
    pulse width and wave shape, and navigates to neighboring circuit
    elements.  (The original docstring said "wire editing", which does not
    match the fields this class edits.)
    """
    # widget references bound from the kv layout
    impedanceTextField = ObjectProperty(None)
    widthTextField = ObjectProperty(None)
    voltageTextField = ObjectProperty(None)
    prevButton = ObjectProperty(None)
    nextButton = ObjectProperty(None)
    gaussButton = ObjectProperty(None)
    squareButton = ObjectProperty(None)
    # BUG FIX: this property was declared twice; the duplicate is removed.
    triangleButton = ObjectProperty(None)
    selection = ObjectProperty(None)
    def __init__(self, source, **kwargs):
        """Wire up the buttons for the given PowerSource model *source*."""
        super(SourceEditor, self).__init__(**kwargs)
        self._source = source
        self.gaussButton.changeStyle('flat')
        self.squareButton.changeStyle('flat')
        self.triangleButton.changeStyle('flat')
        self.prevButton.changeStyle('flat')
        self.nextButton.changeStyle('flat')
        self.prevButton.iconLabel.color = PRIMARY
        self.nextButton.iconLabel.color = PRIMARY
        self.prevButton.on_press = self.showPrev
        self.nextButton.on_press = self.showNext
        self.gaussButton.on_press = lambda: self.onWaveShapeClicked(WaveShape.Gaussian)
        self.squareButton.on_press = lambda: self.onWaveShapeClicked(WaveShape.Square)
        self.triangleButton.on_press = lambda: self.onWaveShapeClicked(WaveShape.Triangle)
        # NOTE(review): animateSwitch reads self._anim, which is only
        # assigned on the following line -- presumably PopupEditor
        # initializes it; verify the ordering.
        self.animateSwitch(source.shape, False)
        self._anim = None
        self._setupIcons()
    def _setupIcons(self):
        """
        Add icons to buttons.
        """
        x = np.linspace(0, 10, 50)
        y = signal.gaussian(50, 7)
        self.gaussButton.container.add_widget(self._generateIcon(x, y))
        y0 = [0] * 10
        y = [1] * 30
        self.squareButton.container.add_widget(self._generateIcon(x, y0 + y + y0))
        y = []
        for i in range(15):
            y.append(i / 15.0)
        for i in range(15):
            y.append(1 - i / 15.0)
        self.triangleButton.container.add_widget(self._generateIcon(x, y0 + y + y0))
    def _generateIcon(self, x, y):
        """Render the x/y curve on a bare, tightly-laid-out matplotlib
        figure and return its canvas for use as a button icon widget."""
        fig, ax = plt.subplots()
        fig.set_tight_layout({"pad": 0})
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.tick_params(axis = 'both', length = 0)
        ax.set_frame_on(False)
        ax.set_ylim([-0.1, 1.1])
        ax.plot(x, y, linewidth = dp(2), color = TEXT_BLACK)[0]
        return fig.canvas
    def on_focus(self, instance, focus):
        """Commit a text field's value to the source model when the field
        loses focus; an empty field writes 0."""
        if instance == self.impedanceTextField.inputText and not focus:
            # Update impedance.
            if len(self.impedanceTextField.text) == 0:
                self._source.impedance = 0
            else:
                self._source.impedance = float(self.impedanceTextField.text)
        if instance == self.voltageTextField.inputText and not focus:
            # Update voltage.
            if len(self.voltageTextField.text) == 0:
                self._source.amplitude = 0
            else:
                self._source.amplitude = float(self.voltageTextField.text)
        if instance == self.widthTextField.inputText and not focus:
            # Update pulse width.
            if len(self.widthTextField.text) == 0:
                self._source.width = 0
            else:
                self._source.width = float(self.widthTextField.text)
    def onWaveShapeClicked(self, shape):
        """Store the chosen wave shape and slide the selection marker."""
        self._source.shape = shape
        self.animateSwitch(shape, True)
    def updateValues(self):
        """Refresh every widget from the current source model."""
        self.prevButton.disabled = self._source.prev == None
        self.nextButton.disabled = self._source.next == None
        self.impedanceTextField.text = '{:g}'.format(self._source.impedance)
        self.impedanceTextField.inputText.input_filter = 'float'
        self.impedanceTextField.inputText.bind(focus = self.on_focus)
        self.voltageTextField.text = '{:g}'.format(self._source.amplitude)
        self.voltageTextField.inputText.input_filter = 'float'
        self.voltageTextField.inputText.bind(focus = self.on_focus)
        self.widthTextField.text = '{:g}'.format(self._source.width)
        self.widthTextField.inputText.input_filter = 'float'
        self.widthTextField.inputText.bind(focus = self.on_focus)
        self.impedanceTextField.animateLabel(False)
        self.voltageTextField.animateLabel(False)
        self.widthTextField.animateLabel(False)
        # Animation is true here to make sure the selection ends up in the correct position on top
        # of the popup show animation.
        self.animateSwitch(self._source.shape, True)
    def animateSwitch(self, mode, animated):
        """Move the selection marker onto the button for *mode*, optionally
        animated (0.3 s ease-in-out)."""
        if self._anim != None:
            self._anim.cancel(self.selection)
        t = 0.3 if animated else 0
        if mode == WaveShape.Gaussian:
            self._anim = Animation(center = self.gaussButton.center, d = t, t = 'in_out_quad')
        elif mode == WaveShape.Square:
            self._anim = Animation(center = self.squareButton.center, d = t, t = 'in_out_quad')
        else:
            self._anim = Animation(center = self.triangleButton.center, d = t, t = 'in_out_quad')
        self._anim.start(self.selection)
    def showPrev(self):
        # delegate to the navigation callback (defined on PopupEditor,
        # not visible here)
        self.onPrev()
    def showNext(self):
        self.onNext()
| mit |
dtiarks/ThesisPlot | Chap5/Giovanni/post_selected/giovanni.py | 1 | 8588 | import numpy as np
from matplotlib import pyplot as plt
from datetime import datetime
import re
from scipy.optimize import curve_fit
from matplotlib import rc
#curve=np.loadtxt("curve.tsv")
# Accumulators for per-run histograms; only used by the commented-out
# multi-file loops below, kept for reference.
histoLst=[]
histoPostLst=[]
histoRefLst=[]
# Descending scan axis (-8 .. -12); not used in the code below --
# presumably a detuning scan axis, confirm against the measurement log.
xLst=np.arange(-8,-12.5,-1)
#print xLst
# Run identifiers: the data set evaluated and the data set used as reference.
name="ref001"
nameRef="ref001"
##for n in ["histo_%03d.tsv"%i for i in range(2,5)]:
## a=np.loadtxt(n)
## #print a
## histoLst.append(np.asarray(a))
##
##
##for n in ["histoPost_%03d.tsv"%i for i in range(2,5)]:
## histoPostLst.append(np.loadtxt(n))
##
##for n in ["histoRef_%03d.tsv"%i for i in range(2,5)]:
## histoRefLst.append(np.loadtxt(n))
# Load the main data set: raw, post-selected and reference histograms, plus
# the per-step mean photon numbers and event counts.
histo=np.loadtxt("histo%s.tsv"%name)
histoPost=np.loadtxt("histoPost%s.tsv"%name)
histoRef=np.loadtxt("histoRef%s.tsv"%name)
meanRef=np.loadtxt("meanRef%s.tsv"%name)
numEvents=np.loadtxt("numEvents%s.tsv"%name)
# Extract the regions of interest (converted to microseconds) from the header
# comment of the histogram file.
ifile=open("histo%s.tsv"%name, "r")
s=ifile.read()
RoiTarget=tuple(float(x)*1e6 for x in re.search("'RoiTarget', \((.*?),(.*?)\)", s).groups())
RoiRetrieval=tuple(float(x)*1e6 for x in re.search("'RoiRetrieval', \((.*?),(.*?)\)", s).groups())
ifile.close()
# Same loading/parsing for the reference data set.
histoRef2=np.loadtxt("histo%s.tsv"%nameRef)
histoPostRef=np.loadtxt("histoPost%s.tsv"%nameRef)
histoRefRef=np.loadtxt("histoRef%s.tsv"%nameRef)
meanRefRef=np.loadtxt("meanRef%s.tsv"%nameRef)
ifileRef=open("histo%s.tsv"%nameRef, "r")
sRef=ifileRef.read()
RoiTargetRef=tuple(float(x)*1e6 for x in re.search("'RoiTarget', \((.*?),(.*?)\)", sRef).groups())
RoiRetrievalRef=tuple(float(x)*1e6 for x in re.search("'RoiRetrieval', \((.*?),(.*?)\)", sRef).groups())
ifileRef.close()
# Evaluation parameters: histogram bin width (s), fit window (us), reference
# scaling, initial guesses [A, phi, C] for the cosine fits, plot x-range (us)
# and the time offset subtracted from all time axes (us).
fitParameters={
    "binningHisto":0.05e-7,
    "histoInterval":(11.25,11.75),
    "refScale":1/0.0002,
    "initRef":[25, 2,25],
    "initPost":[25, 2,25],
    "xscale":[11.05,11.7],
    "delay":9.94
}
# Unpack the frequently used entries into module-level names.
binningHisto=fitParameters["binningHisto"]
histoInterval=fitParameters["histoInterval"]
delay=fitParameters["delay"]
A=fitParameters["refScale"]
def fitFunc(t, A, phi, C, freq=20.0, t0=11.25):
    """Offset cosine model used for the interference fringe fits.

    Parameters
    ----------
    t : array_like
        Time axis (same units as ``t0``; microseconds in this script).
    A, phi, C : float
        Amplitude, phase offset (rad) and constant offset -- the three
        parameters determined by ``curve_fit``.
    freq : float, optional
        Modulation frequency in cycles per time unit. Defaults to 20, the
        value previously hard-coded, so all existing ``curve_fit`` calls
        (which pass a 3-element ``p0``) behave exactly as before.
    t0 : float, optional
        Time origin of the phase; defaults to 11.25, the start of the fit
        window previously hard-coded.
    """
    return A * np.cos(2 * np.pi * freq * (t - t0) + phi) + C
def resadjust(ax, xres=None, yres=None):
    """Force a fixed tick spacing on the given matplotlib axis.

    If `xres` (`yres`) is given and non-zero, the x (y) ticks are replaced
    by an evenly spaced grid with that resolution spanning the current
    axis limits.
    """
    for res, get_lim, set_ticks in (
            (xres, ax.get_xlim, ax.set_xticks),
            (yres, ax.get_ylim, ax.set_yticks)):
        if res:
            lo, hi = get_lim()
            set_ticks(np.arange(lo, hi + res, res))
# Accumulators for the fitted phase differences and their uncertainties,
# one entry per evaluated step.
pLst=[]
pErrLst=[]
pLstRef=[]
pErrLstRef=[]
# Global matplotlib font settings for all figures.
font = {'family' : 'normal',
        'weight' : 'normal',
        'size' : 13}
rc('font', **font)
#for step in range(0,5):
# Evaluate one step of the scan (only step 0 is analysed here): plot the
# transmission histograms, fit the cosine model to the gate-on / gate-off /
# no-atoms traces, and derive phase shifts and optical depths from the fits.
for step in [0]:
    # Figure 0: three stacked fit panels; figure 1: transmission trace;
    # figure 2: reference trace.
    fig=plt.figure(0)
    fig2=plt.figure(1)
    fig3=plt.figure(2)
    fig.clear()
    ax1=fig2.add_subplot(111)
#    ax12=fig2.add_subplot(212)
    ax2=fig.add_subplot(311)
    ax3=fig.add_subplot(312)
    ax4=fig.add_subplot(313)
    fig2.subplots_adjust(left=None, bottom=0.2, right=None, top=None, wspace=None, hspace=0.15)
    fig3.subplots_adjust(left=None, bottom=0.2, right=None, top=None, wspace=None, hspace=0.15)
    axR=fig3.add_subplot(111)
#    ax1.set_title("Step %d:" % step+ " Evaluated " +datetime.now().strftime("%c\n")
#                  + "Incoming: $N_g$=%.2f " % meanRef[step,0]
#                  + "$N_t$=%.2f " % meanRef[step,4]
#                  )
    # Full-trace transmission plots (time axis shifted by the fixed delay).
    ax1.plot(histo[:,step*6]-delay, histo[:,step*6+2], color='r',label='Gate on')
    axR.plot(histoRef[:,step*6]-delay, histoRef[:,step*6+2], color='g', ls="-")
    axR.set_xlim([0,12.55-delay])
    axR.set_ylim([0,0.11])
#    axR.set_ylabel("transmitted photons\n in 50ns",labelpad=-0.05)
#    ax12.plot(histo[:,step*6]-delay, histo[:,step*6+4], color='b',label='Gate off')
#    ax12.plot(histoRef[:,step*6]-delay, histoRef[:,step*6+4], color='b', ls="--")
    resadjust(ax1,yres=0.02,xres=0.3)
    resadjust(axR,yres=0.04,xres=0.3)
#    resadjust(ax12,yres=0.02,xres=0.3)
    ax1.set_xlim([0,12.55-delay])
    ax1.set_ylim([0,0.054])
    ax1.set_ylabel("transmitted photons\n in 50ns",labelpad=-0.05)
    ax1.set_xlabel("Time ($\mu s$)",labelpad=0)
#    ax1.legend()
    trans = ax1.get_xaxis_transform()
#    ax1.annotate('(b)', xy=(0.1,.85 ), xycoords=trans)
#    trans = ax12.get_xaxis_transform()
#    ax12.annotate('(b)', xy=(0.1,.85 ), xycoords=trans)
    axR.tick_params(axis="x",which="both",labelbottom="off")
#    ax12.set_xlim([0,12.55-delay])
#    ax12.set_ylim([0,0.08])
#    ax12.set_ylabel("transmitted photons\n in 50ns",labelpad=-0.05)
#    ax12.legend()
    # Crop the histograms to the fit window (converted from us to bins) and
    # fit the cosine model to the post-selected and gate-off traces.
    b1 = int(histoInterval[0]/(binningHisto*10**6))
    b2 = int(histoInterval[1]/(binningHisto*10**6))
    hT1=histoPost[b1:b2,step*9]
    hT2=histo[b1:b2,step*6]
    hCrop1 = histoPost[b1:b2,step*9+2]/histoPost[b1:b2,step*9+4]*3.7
    #hCrop1 = histoPost[b1:b2,step*9+2]
    hCrop2 = histo[b1:b2,step*6+4]
    popt0=fitParameters["initPost"]
    popt1=fitParameters["initRef"]
    popt0, pcov0=curve_fit(fitFunc, hT1, hCrop1, popt0)
    popt1, pcov1=curve_fit(fitFunc, hT2, hCrop2, popt1)
    #popt0=fitParameters["initPost"]
    #popt1=fitParameters["initRef"]
    #print popt0
    #print popt1
    # Fit the reference (no atoms) traces on the same window.
    hCropRef1 = histoRef[b1:b2,step*6+2]
    hCropRef2 = histoRef[b1:b2,step*6+4]
    poptRef0=fitParameters["initRef"]
    poptRef1=fitParameters["initRef"]
    poptRef0, pcovRef0=curve_fit(fitFunc, hT2, hCropRef1, poptRef0)
    poptRef1, pcovRef1=curve_fit(fitFunc, hT2, hCropRef2, poptRef1)
    # Dense time axes for plotting the fitted curves.
    fT1=np.linspace(hT1[0],hT1[-1],1e3)
    fT2=np.linspace(hT2[0],hT2[-1],1e3)
    # Panel (c): post-selected (gate on) data and fit.
    ax4.errorbar(histoPost[:,step*9]-delay, (histoPost[:,step*9+2]/histoPost[:,step*9+4])*3.7, 0*np.sqrt(histoPost[:,step*9+2])/histoPost[:,step*9+4] *3.7 , color='r', ls='', marker='o',label='Gate on')
    #ax2.errorbar(histoPost[:,step*9], np.asarray(histoPost[:,step*9+2])/4500*3.7, np.sqrt(np.asarray(histoPost[:,step*9+2]))/numEvents[step,3] , color='r', ls='', marker='o',label='Postselected (on)')
    ## ax2.errorbar(histoPost[:,step*9], histoPost[:,step*9+2], np.sqrt(histoPost[:,step*9+2]), color='r', ls='', marker='o',label='Postselected (on)')
    ax4.plot(fT1-delay,fitFunc(fT1,*popt0),'r-')
    ax4.set_xlim([fitParameters["xscale"][0]-delay,fitParameters["xscale"][1]-delay])
    ax4.set_ylim([0,0.04])
    ax2.tick_params(axis="x",which="both",labelbottom="off")
    trans = ax4.get_xaxis_transform()
    ax4.text(0.02,0.8,'(c)', transform=ax4.transAxes)
    ax4.plot([1.5878,1.5878],[0,0.08],"k--")
#    ax2.grid(True)
#    ax2.legend()
    resadjust(ax4,yres=0.01)
    # Panel (b): gate-off (full ensemble) data and fit.
    ax3.plot(histo[:,step*6]-delay, histo[:,step*6+4], color='b',ls='', marker='o',label='No gate')
    ax3.plot(fT2-delay,fitFunc(fT2,*popt1),'b-')
    ax3.set_xlim([fitParameters["xscale"][0]-delay,fitParameters["xscale"][1]-delay])
    ax3.set_ylim([0,0.04])
    ax3.plot([1.5878,1.5878],[0,0.08],"k--")
    ax3.text(0.02,0.8,'(b)', transform=ax3.transAxes)
#    ax3.grid(True)
#    ax3.set_xlabel("Time ($\mu s$)")
    ax3.tick_params(axis="x",which="both",labelbottom="off")
#    ax3.tick_params(axis="x",which="both",labelbottom="off")
#    ax3.legend()
    resadjust(ax3,yres=0.01)
    # Panel (a): reference (no atoms) data and fit.
    ax2.plot(histoRef[:,step*6]-delay, histoRef[:,step*6+2], color='g',ls='', marker='o',label='w/o Atoms')
    ax2.plot(fT2-delay,fitFunc(fT2,*poptRef0),'g-')
    ax2.set_xlim([fitParameters["xscale"][0]-delay,fitParameters["xscale"][1]-delay])
    ax2.set_ylim([0,0.08])
    ax2.plot([1.5878,1.5878],[0,0.08],"k--")
    ax2.text(0.02,0.8,'(a)', transform=ax2.transAxes)
#    ax4.grid(True)
    ax4.set_xlabel("Time ($\mu s$)")
#    ax4.legend()
#    ax2.set_ylabel("transmitted\n photons\n in 50ns",labelpad=-0.05)
    ax3.set_ylabel("transmitted photons in 50ns",labelpad=-0.00, fontsize=14)
#    ax4.set_ylabel("transmitted photons\n in 50ns",labelpad=-0.05)
    resadjust(ax2,yres=0.02)
    # Phase shifts relative to the no-atoms reference fit (index 1 = phi),
    # and optical depths from the amplitude ratios (index 0 = A), with
    # errors propagated from the fit covariances.
    p1=popt0[1]-poptRef0[1]
    p1Err=np.sqrt(pcov0[1,1]+pcovRef0[1,1])
    p2=popt1[1]-poptRef0[1]
    p2Err=np.sqrt(pcov1[1,1]+pcovRef0[1,1])
    dp = (p2-p1)
    dpErr = np.sqrt(p1Err**2+p2Err**2)
    #dpErr = np.sqrt(p1Err**2+p2Err**2)
    OD1=2*np.log(poptRef0[0]/popt0[0])
    OD2=2*np.log(poptRef0[0]/popt1[0])
    ODer1=np.sqrt((2./poptRef0[0])**2*pcovRef0[0][0]+(2./popt0[0])**2*pcov0[0][0])
    ODer2=np.sqrt((2./poptRef0[0])**2*pcovRef0[0][0]+(2./popt1[0])**2*pcov1[0][0])
    pLst.append(dp)
    pErrLst.append(dpErr)
    # NOTE: Python 2 print statements -- this script is Python 2 only.
    print "Postselected phase shift in pi"
    print "phase on "+str(p1)+" +- "+str(p1Err)
    print "phase off "+str(p2)+" +- "+str(p2Err)
    print "phase difference "+ str(dp) +" +- "+str(dpErr)
    print "Od on "+str(OD1)+" +- "+str(ODer1)
    print "OD off "+str(OD2)+" +- "+str(ODer2)
    print "w/o atoms"
    print poptRef1
    print "Gate off (full ensemble)"
    print popt1
    print "Postselected (gate on)"
    print popt0
    plt.show()
    fig.set_size_inches(10,6)
    fig2.set_size_inches(6.3,2.5)
    fig3.set_size_inches(6.3,2.5)
    #fig2.savefig("transmitted0.pdf")
    #fig.savefig("GateOnOff.pdf")
#fig3.savefig("reference.pdf") | mit |
FRESNA/PyPSA | pypsa/pf.py | 1 | 51617 | ## Copyright 2015-2018 Tom Brown (FIAS), Jonas Hoersch (FIAS)
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Power flow functionality.
"""
# make the code as Python 3 compatible as possible
from __future__ import division, absolute_import
from six.moves import range
from six import iterkeys
from six.moves.collections_abc import Sequence
__author__ = "Tom Brown (FIAS), Jonas Hoersch (FIAS), Fabian Neumann (KIT)"
__copyright__ = "Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS), Copyright 2019 Fabian Neumann (KIT), GNU GPL 3"
import logging
logger = logging.getLogger(__name__)
from scipy.sparse import issparse, csr_matrix, csc_matrix, hstack as shstack, vstack as svstack, dok_matrix
from numpy import r_, ones
from scipy.sparse.linalg import spsolve
from numpy.linalg import norm
import numpy as np
import pandas as pd
import networkx as nx
import six
from operator import itemgetter
import time
from .descriptors import get_switchable_as_dense, allocate_series_dataframes, Dict, zsum, degree
pd.Series.zsum = zsum
def normed(s): return s/s.sum()
def real(X): return np.real(X.to_numpy())
def imag(X): return np.imag(X.to_numpy())
def _as_snapshots(network, snapshots):
    """Coerce `snapshots` to a pandas Index, defaulting to network.snapshots.

    A single snapshot (a string or any non-sequence scalar) is wrapped into
    a one-element Index.
    """
    chosen = network.snapshots if snapshots is None else snapshots
    is_scalar = (isinstance(chosen, six.string_types)
                 or not isinstance(chosen, (Sequence, pd.Index)))
    return pd.Index([chosen]) if is_scalar else pd.Index(chosen)
def _allocate_pf_outputs(network, linear=False):
    """Ensure the time-series output dataframes for a power flow exist.

    For the full (non-linear) power flow, reactive-power series are
    allocated alongside the active-power ones; links only ever carry
    active power.
    """
    link_p_cols = ["p" + col[3:] for col in network.links.columns
                   if col[:3] == "bus"]
    to_allocate = {
        'Generator': ['p'],
        'Load': ['p'],
        'StorageUnit': ['p'],
        'Store': ['p'],
        'ShuntImpedance': ['p'],
        'Bus': ['p', 'v_ang', 'v_mag_pu'],
        'Line': ['p0', 'p1'],
        'Transformer': ['p0', 'p1'],
        'Link': link_p_cols,
    }
    if not linear:
        for component, attrs in to_allocate.items():
            if "p" in attrs:
                attrs.append("q")
            if "p0" in attrs and component != 'Link':
                attrs.extend(["q0", "q1"])
    allocate_series_dataframes(network, to_allocate)
def _calculate_controllable_nodal_power_balance(sub_network, network, snapshots, buses_o):
    """Fill in the nodal p/q injections from all controllable components.

    For both reactive ("q") and active ("p") power: dispatch every
    controllable one-port at its set-point, then aggregate the signed
    injections per bus; for active power additionally subtract the power
    withdrawn by controllable branches (links) at each of their buses.
    """
    for n in ("q", "p"):
        # allow all one ports to dispatch as set
        for c in sub_network.iterate_components(network.controllable_one_port_components):
            c_n_set = get_switchable_as_dense(network, c.name, n + '_set', snapshots, c.ind)
            c.pnl[n].loc[snapshots, c.ind] = c_n_set

        # set the power injection at each node from controllable components
        # (sign convention of each component applied, grouped by bus)
        network.buses_t[n].loc[snapshots, buses_o] = \
            sum([((c.pnl[n].loc[snapshots, c.ind] * c.df.loc[c.ind, 'sign'])
                  .groupby(c.df.loc[c.ind, 'bus'], axis=1).sum()
                  .reindex(columns=buses_o, fill_value=0.))
                 for c in sub_network.iterate_components(network.controllable_one_port_components)])

        if n == "p":
            # subtract active power taken off the buses by controllable
            # branches (one term per "bus{i}" column of each component)
            network.buses_t[n].loc[snapshots, buses_o] += sum(
                [(- c.pnl[n+str(i)].loc[snapshots].groupby(c.df["bus"+str(i)], axis=1).sum()
                  .reindex(columns=buses_o, fill_value=0))
                 for c in network.iterate_components(network.controllable_branch_components)
                 for i in [int(col[3:]) for col in c.df.columns if col[:3] == "bus"]])
def _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=False,
                                distribute_slack=False, slack_weights='p_set', **kwargs):
    """Shared driver for network_pf/network_lpf: prepare the network and run
    the chosen power-flow routine on every sub-network.

    Returns a Dict of n_iter/error/converged dataframes for the non-linear
    flow; the linear flow returns nothing.
    """
    # Select the per-sub-network solver and the matrix-preparation routine.
    if linear:
        sub_network_pf_fun = sub_network_lpf
        sub_network_prepare_fun = calculate_B_H
    else:
        sub_network_pf_fun = sub_network_pf
        sub_network_prepare_fun = calculate_Y

    if not skip_pre:
        network.determine_network_topology()
        calculate_dependent_values(network)
        _allocate_pf_outputs(network, linear)

    snapshots = _as_snapshots(network, snapshots)

    #deal with links
    if not network.links.empty:
        p_set = get_switchable_as_dense(network, 'Link', 'p_set', snapshots)
        network.links_t.p0.loc[snapshots] = p_set.loc[snapshots]
        # Output at every further bus is the (negated) input scaled by the
        # link's efficiency for that bus.
        for i in [int(col[3:]) for col in network.links.columns if col[:3] == "bus" and col != "bus0"]:
            eff_name = "efficiency" if i == 1 else "efficiency{}".format(i)
            efficiency = get_switchable_as_dense(network, 'Link', eff_name, snapshots)
            links = network.links.index[network.links["bus{}".format(i)] != ""]
            network.links_t['p{}'.format(i)].loc[snapshots, links] = -network.links_t.p0.loc[snapshots, links]*efficiency.loc[snapshots, links]

    # Per-snapshot, per-sub-network convergence bookkeeping.
    itdf = pd.DataFrame(index=snapshots, columns=network.sub_networks.index, dtype=int)
    difdf = pd.DataFrame(index=snapshots, columns=network.sub_networks.index)
    cnvdf = pd.DataFrame(index=snapshots, columns=network.sub_networks.index, dtype=bool)

    for sub_network in network.sub_networks.obj:
        if not skip_pre:
            find_bus_controls(sub_network)

            branches_i = sub_network.branches_i()
            if len(branches_i) > 0:
                sub_network_prepare_fun(sub_network, skip_pre=True)

        # Resolve per-sub-network slack weights from a dict keyed by
        # sub-network name, if given.
        if type(slack_weights) == dict:
            sn_slack_weights = slack_weights[sub_network.name]
        else:
            sn_slack_weights = slack_weights

        if type(sn_slack_weights) == dict:
            sn_slack_weights = pd.Series(sn_slack_weights)

        if not linear:
            # escape for single-bus sub-network
            if len(sub_network.buses()) <= 1:
                itdf[sub_network.name],\
                difdf[sub_network.name],\
                cnvdf[sub_network.name] = sub_network_pf_singlebus(sub_network, snapshots=snapshots, skip_pre=True,
                                                                   distribute_slack=distribute_slack,
                                                                   slack_weights=sn_slack_weights)
            else:
                itdf[sub_network.name],\
                difdf[sub_network.name],\
                cnvdf[sub_network.name] = sub_network_pf_fun(sub_network, snapshots=snapshots,
                                                             skip_pre=True, distribute_slack=distribute_slack,
                                                             slack_weights=sn_slack_weights, **kwargs)
        else:
            sub_network_pf_fun(sub_network, snapshots=snapshots, skip_pre=True, **kwargs)
    if not linear:
        return Dict({ 'n_iter': itdf, 'error': difdf, 'converged': cnvdf })
def network_pf(network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False,
               distribute_slack=False, slack_weights='p_set'):
    """
    Full non-linear power flow for generic network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run the
        power flow; defaults to network.snapshots.
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.
    x_tol : float
        Tolerance for the Newton-Raphson power flow.
    use_seed : bool, default False
        Use a seed for the initial guess of the Newton-Raphson algorithm.
    distribute_slack : bool, default False
        If ``True``, distribute the slack power across generators
        proportional to generator dispatch by default, or according to the
        scheme given in ``slack_weights``; if ``False`` only the slack
        generator takes up the slack.
    slack_weights : dict|str, default 'p_set'
        How the total slack power of each sub-network is shared among its
        buses: proportional to generator dispatch ('p_set') or to
        (optimised) nominal capacity ('p_nom' or 'p_nom_opt'). Custom
        weights are given as a dictionary keyed by sub-network index whose
        values are pandas.Series/dicts indexed by the sub-network's buses
        or generators; bus-indexed weights are split among a bus's
        generators in proportion to ``p_nom`` if given, otherwise evenly.

    Returns
    -------
    dict
        Dictionary with keys 'n_iter', 'converged', 'error' and dataframe
        values indicating the number of iterations, convergence status and
        iteration error for each snapshot (rows) and sub_network (columns).
    """
    # Pure delegation: the shared driver handles preparation and dispatch.
    return _network_prepare_and_run_pf(
        network, snapshots, skip_pre, linear=False,
        x_tol=x_tol, use_seed=use_seed,
        distribute_slack=distribute_slack, slack_weights=slack_weights)
def newton_raphson_sparse(f, guess, dfdx, x_tol=1e-10, lim_iter=100,
                          distribute_slack=False, slack_weights=None):
    """Solve f(x) = 0 by Newton's method with a sparse Jacobian.

    Parameters
    ----------
    f : callable
        Residual; called as ``f(x, distribute_slack=..., slack_weights=...)``
        and returning a 1-D array.
    guess : numpy.ndarray
        Initial guess for the solution.
    dfdx : callable
        Returns the sparse Jacobian of ``f`` at ``x``; same keyword interface.
    x_tol : float
        Terminate when the infinity norm of ``f(x)`` drops below this.
    lim_iter : int
        Maximum number of iterations.
    distribute_slack : bool
    slack_weights : pandas.Series|None
        Passed through unchanged to ``f`` and ``dfdx``.

    Returns
    -------
    tuple
        ``(solution, n_iter, error, converged)``.
    """
    slack_args = {"distribute_slack": distribute_slack,
                  "slack_weights": slack_weights}
    converged = False
    n_iter = 0
    F = f(guess, **slack_args)
    # np.inf, not the np.Inf alias (removed in NumPy 2.0).
    diff = norm(F, np.inf)
    logger.debug("Error at iteration %d: %f", n_iter, diff)
    while diff > x_tol and n_iter < lim_iter:
        n_iter += 1
        guess = guess - spsolve(dfdx(guess, **slack_args), F)
        F = f(guess, **slack_args)
        diff = norm(F, np.inf)
        logger.debug("Error at iteration %d: %f", n_iter, diff)
    if diff > x_tol:
        logger.warning("Warning, we didn't reach the required tolerance within %d iterations, error is at %f. See the section \"Troubleshooting\" in the documentation for tips to fix this. ", n_iter, diff)
    elif not np.isnan(diff):
        converged = True
    return guess, n_iter, diff, converged
def sub_network_pf_singlebus(sub_network, snapshots=None, skip_pre=False,
                             distribute_slack=False, slack_weights='p_set', linear=False):
    """
    Non-linear power flow for a sub-network consisting of a single bus.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an elements of network.snapshots on which to run
        the power flow, defaults to network.snapshots
    skip_pre: bool, default False
        Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls.
    distribute_slack : bool, default False
        If ``True``, distribute the slack power across generators proportional to generator dispatch by default
        or according to the distribution scheme provided in ``slack_weights``.
        If ``False`` only the slack generator takes up the slack.
    slack_weights : pandas.Series|str, default 'p_set'
        Distribution scheme describing how to determine the fraction of the total slack power
        a bus of the subnetwork takes up. Default is to distribute proportional to generator dispatch
        ('p_set'). Another option is to distribute proportional to (optimised) nominal capacity ('p_nom' or 'p_nom_opt').
        Custom weights can be provided via a pandas.Series/dict
        that has the generators of the single bus as index/keys.
    """
    snapshots = _as_snapshots(sub_network.network, snapshots)
    network = sub_network.network
    logger.info("Balancing power on single-bus sub-network {} for snapshots {}".format(sub_network, snapshots))

    if not skip_pre:
        find_bus_controls(sub_network)
        _allocate_pf_outputs(network, linear=False)

    if type(slack_weights) == dict:
        slack_weights = pd.Series(slack_weights)

    buses_o = sub_network.buses_o
    sn_buses = sub_network.buses().index

    # Nodal balance from the controllable components' set-points.
    _calculate_controllable_nodal_power_balance(sub_network, network, snapshots, buses_o)

    # With a single bus there is nothing to solve: pin the slack bus voltage.
    v_mag_pu_set = get_switchable_as_dense(network, 'Bus', 'v_mag_pu_set', snapshots)
    network.buses_t.v_mag_pu.loc[snapshots,sub_network.slack_bus] = v_mag_pu_set.loc[:,sub_network.slack_bus]
    network.buses_t.v_ang.loc[snapshots,sub_network.slack_bus] = 0.

    if distribute_slack:
        # Share the active-power imbalance among the bus's generators
        # according to the chosen weighting scheme.
        for bus, group in sub_network.generators().groupby('bus'):
            if slack_weights in ['p_nom', 'p_nom_opt']:
                assert not all(network.generators[slack_weights]) == 0, "Invalid slack weights! Generator attribute {} is always zero.".format(slack_weights)
                bus_generator_shares = network.generators[slack_weights].loc[group.index].pipe(normed).fillna(0)
            elif slack_weights == 'p_set':
                generators_t_p_choice = get_switchable_as_dense(network, 'Generator', slack_weights, snapshots)
                assert not generators_t_p_choice.isna().all().all(), "Invalid slack weights! Generator attribute {} is always NaN.".format(slack_weights)
                assert not (generators_t_p_choice == 0).all().all(), "Invalid slack weights! Generator attribute {} is always zero.".format(slack_weights)
                bus_generator_shares = generators_t_p_choice.loc[snapshots,group.index].apply(normed, axis=1).fillna(0)
            else:
                bus_generator_shares = slack_weights.pipe(normed).fillna(0)
            network.generators_t.p.loc[snapshots,group.index] += bus_generator_shares.multiply(-network.buses_t.p.loc[snapshots,bus], axis=0)
    else:
        # The single slack generator absorbs the whole active imbalance.
        network.generators_t.p.loc[snapshots,sub_network.slack_generator] -= network.buses_t.p.loc[snapshots,sub_network.slack_bus]

    # Reactive imbalance always goes to the slack generator.
    network.generators_t.q.loc[snapshots,sub_network.slack_generator] -= network.buses_t.q.loc[snapshots,sub_network.slack_bus]

    network.buses_t.p.loc[snapshots,sub_network.slack_bus] = 0.
    network.buses_t.q.loc[snapshots,sub_network.slack_bus] = 0.

    return 0, 0., True # dummy substitute for newton raphson output
def sub_network_pf(sub_network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False,
                   distribute_slack=False, slack_weights='p_set'):
    """
    Non-linear power flow for connected sub-network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an elements of network.snapshots on which to run
        the power flow, defaults to network.snapshots
    skip_pre: bool, default False
        Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls.
    x_tol: float
        Tolerance for Newton-Raphson power flow.
    use_seed : bool, default False
        Use a seed for the initial guess for the Newton-Raphson algorithm.
    distribute_slack : bool, default False
        If ``True``, distribute the slack power across generators proportional to generator dispatch by default
        or according to the distribution scheme provided in ``slack_weights``.
        If ``False`` only the slack generator takes up the slack.
    slack_weights : pandas.Series|str, default 'p_set'
        Distribution scheme describing how to determine the fraction of the total slack power
        a bus of the subnetwork takes up. Default is to distribute proportional to generator dispatch
        ('p_set'). Another option is to distribute proportional to (optimised) nominal capacity ('p_nom' or 'p_nom_opt').
        Custom weights can be provided via a pandas.Series/dict
        that has the buses or the generators of the subnetwork as index/keys.
        When using custom weights with buses as index/keys the slack power of a bus is distributed
        among its generators in proportion to their nominal capacity (``p_nom``) if given, otherwise evenly.

    Returns
    -------
    Tuple of three pandas.Series indicating number of iterations,
    remaining error, and convergence status for each snapshot
    """
    assert type(slack_weights) in [str, pd.Series, dict], "Type of 'slack_weights' must be string, pd.Series or dict. Is {}.".format(type(slack_weights))

    if type(slack_weights) == dict:
        slack_weights = pd.Series(slack_weights)
    elif type(slack_weights) == str:
        valid_strings = ['p_nom', 'p_nom_opt', 'p_set']
        assert slack_weights in valid_strings, "String value for 'slack_weights' must be one of {}. Is {}.".format(valid_strings, slack_weights)

    snapshots = _as_snapshots(sub_network.network, snapshots)
    logger.info("Performing non-linear load-flow on {} sub-network {} for snapshots {}".format(sub_network.network.sub_networks.at[sub_network.name,"carrier"], sub_network, snapshots))

    # _sub_network_prepare_pf(sub_network, snapshots, skip_pre, calculate_Y)
    network = sub_network.network

    if not skip_pre:
        calculate_dependent_values(network)
        find_bus_controls(sub_network)
        _allocate_pf_outputs(network, linear=False)

    # get indices for the components on this subnetwork
    branches_i = sub_network.branches_i()
    buses_o = sub_network.buses_o
    sn_buses = sub_network.buses().index
    sn_generators = sub_network.generators().index

    # Classify custom slack weights as generator- or bus-indexed.
    generator_slack_weights_b = False
    bus_slack_weights_b = False
    if type(slack_weights) == pd.Series:
        if all(i in sn_generators for i in slack_weights.index):
            generator_slack_weights_b = True
        elif all(i in sn_buses for i in slack_weights.index):
            bus_slack_weights_b = True
        else:
            raise AssertionError("Custom slack weights pd.Series/dict must only have the",
                                 "generators or buses of the subnetwork as index/keys.")

    if not skip_pre and len(branches_i) > 0:
        calculate_Y(sub_network, skip_pre=True)

    _calculate_controllable_nodal_power_balance(sub_network, network, snapshots, buses_o)

    def f(guess, distribute_slack=False, slack_weights=None):
        # Residual of the power balance; with distributed slack the last
        # entry of `guess` is the total slack power.
        last_pq = -1 if distribute_slack else None
        network.buses_t.v_ang.loc[now,sub_network.pvpqs] = guess[:len(sub_network.pvpqs)]
        network.buses_t.v_mag_pu.loc[now,sub_network.pqs] = guess[len(sub_network.pvpqs):last_pq]

        v_mag_pu = network.buses_t.v_mag_pu.loc[now,buses_o]
        v_ang = network.buses_t.v_ang.loc[now,buses_o]
        V = v_mag_pu*np.exp(1j*v_ang)

        if distribute_slack:
            slack_power = slack_weights*guess[-1]
            mismatch = V*np.conj(sub_network.Y*V) - s + slack_power
        else:
            mismatch = V*np.conj(sub_network.Y*V) - s

        if distribute_slack:
            F = r_[real(mismatch)[:],imag(mismatch)[1+len(sub_network.pvs):]]
        else:
            F = r_[real(mismatch)[1:],imag(mismatch)[1+len(sub_network.pvs):]]

        return F

    def dfdx(guess, distribute_slack=False, slack_weights=None):
        # Sparse Jacobian of `f` with respect to the voltage angles and
        # magnitudes (and, if distributing, the total slack power).
        last_pq = -1 if distribute_slack else None
        network.buses_t.v_ang.loc[now,sub_network.pvpqs] = guess[:len(sub_network.pvpqs)]
        network.buses_t.v_mag_pu.loc[now,sub_network.pqs] = guess[len(sub_network.pvpqs):last_pq]

        v_mag_pu = network.buses_t.v_mag_pu.loc[now,buses_o]
        v_ang = network.buses_t.v_ang.loc[now,buses_o]
        V = v_mag_pu*np.exp(1j*v_ang)

        index = r_[:len(buses_o)]

        #make sparse diagonal matrices
        V_diag = csr_matrix((V,(index,index)))
        V_norm_diag = csr_matrix((V/abs(V),(index,index)))
        I_diag = csr_matrix((sub_network.Y*V,(index,index)))

        dS_dVa = 1j*V_diag*np.conj(I_diag - sub_network.Y*V_diag)
        dS_dVm = V_norm_diag*np.conj(I_diag) + V_diag * np.conj(sub_network.Y*V_norm_diag)

        J10 = dS_dVa[1+len(sub_network.pvs):,1:].imag
        J11 = dS_dVm[1+len(sub_network.pvs):,1+len(sub_network.pvs):].imag

        if distribute_slack:
            J00 = dS_dVa[:,1:].real
            J01 = dS_dVm[:,1+len(sub_network.pvs):].real
            J02 = csr_matrix(slack_weights,(1,1+len(sub_network.pvpqs))).T
            J12 = csr_matrix((1,len(sub_network.pqs))).T
            J_P_blocks = [J00, J01, J02]
            J_Q_blocks = [J10, J11, J12]
        else:
            J00 = dS_dVa[1:,1:].real
            J01 = dS_dVm[1:,1+len(sub_network.pvs):].real
            J_P_blocks = [J00, J01]
            J_Q_blocks = [J10, J11]

        J = svstack([
            shstack(J_P_blocks),
            shstack(J_Q_blocks)
        ], format="csr")

        return J

    #Set what we know: slack V and v_mag_pu for PV buses
    v_mag_pu_set = get_switchable_as_dense(network, 'Bus', 'v_mag_pu_set', snapshots)
    network.buses_t.v_mag_pu.loc[snapshots,sub_network.pvs] = v_mag_pu_set.loc[:,sub_network.pvs]
    network.buses_t.v_mag_pu.loc[snapshots,sub_network.slack_bus] = v_mag_pu_set.loc[:,sub_network.slack_bus]
    network.buses_t.v_ang.loc[snapshots,sub_network.slack_bus] = 0.

    if not use_seed:
        network.buses_t.v_mag_pu.loc[snapshots,sub_network.pqs] = 1.
        network.buses_t.v_ang.loc[snapshots,sub_network.pvpqs] = 0.

    slack_args = {'distribute_slack': distribute_slack}
    slack_variable_b = 1 if distribute_slack else 0

    if distribute_slack:
        # Translate the chosen weighting scheme into per-bus weights.
        if type(slack_weights) == str and slack_weights == 'p_set':
            generators_t_p_choice = get_switchable_as_dense(network, 'Generator', slack_weights, snapshots)
            bus_generation = generators_t_p_choice.rename(columns=network.generators.bus)
            slack_weights_calc = pd.DataFrame(bus_generation.groupby(bus_generation.columns, axis=1).sum(), columns=buses_o).apply(normed, axis=1).fillna(0)
        elif type(slack_weights) == str and slack_weights in ['p_nom', 'p_nom_opt']:
            assert not all(network.generators[slack_weights]) == 0, "Invalid slack weights! Generator attribute {} is always zero.".format(slack_weights)
            slack_weights_calc = network.generators.groupby('bus').sum()[slack_weights].reindex(buses_o).pipe(normed).fillna(0)
        elif generator_slack_weights_b:
            # convert generator-based slack weights to bus-based slack weights
            slack_weights_calc = slack_weights.rename(network.generators.bus).groupby(slack_weights.index.name).sum().reindex(buses_o).pipe(normed).fillna(0)
        elif bus_slack_weights_b:
            # take bus-based slack weights
            slack_weights_calc = slack_weights.reindex(buses_o).pipe(normed).fillna(0)

    # builtin `complex` instead of the np.complex alias (removed in NumPy 1.24)
    ss = np.empty((len(snapshots), len(buses_o)), dtype=complex)
    roots = np.empty((len(snapshots), len(sub_network.pvpqs) + len(sub_network.pqs) + slack_variable_b))
    iters = pd.Series(0, index=snapshots)
    diffs = pd.Series(index=snapshots)
    convs = pd.Series(False, index=snapshots)
    for i, now in enumerate(snapshots):
        p = network.buses_t.p.loc[now,buses_o]
        q = network.buses_t.q.loc[now,buses_o]
        ss[i] = s = p + 1j*q

        #Make a guess for what we don't know: V_ang for PV and PQs and v_mag_pu for PQ buses
        guess = r_[network.buses_t.v_ang.loc[now,sub_network.pvpqs],network.buses_t.v_mag_pu.loc[now,sub_network.pqs]]

        if distribute_slack:
            guess = np.append(guess, [0]) # for total slack power
            if type(slack_weights) is str and slack_weights == 'p_set':
                # snapshot-dependent slack weights
                slack_args["slack_weights"] = slack_weights_calc.loc[now]
            else:
                slack_args["slack_weights"] = slack_weights_calc

        #Now try and solve
        start = time.time()
        roots[i], n_iter, diff, converged = newton_raphson_sparse(f, guess, dfdx, x_tol=x_tol, **slack_args)
        logger.info("Newton-Raphson solved in %d iterations with error of %f in %f seconds", n_iter,diff,time.time()-start)
        iters[now] = n_iter
        diffs[now] = diff
        convs[now] = converged

    #now set everything
    if distribute_slack:
        last_pq = -1
        slack_power = roots[:,-1]
    else:
        last_pq = None
    network.buses_t.v_ang.loc[snapshots,sub_network.pvpqs] = roots[:,:len(sub_network.pvpqs)]
    network.buses_t.v_mag_pu.loc[snapshots,sub_network.pqs] = roots[:,len(sub_network.pvpqs):last_pq]

    v_mag_pu = network.buses_t.v_mag_pu.loc[snapshots,buses_o].values
    v_ang = network.buses_t.v_ang.loc[snapshots,buses_o].values

    V = v_mag_pu*np.exp(1j*v_ang)

    #add voltages to branches
    buses_indexer = buses_o.get_indexer
    branch_bus0 = []; branch_bus1 = []
    for c in sub_network.iterate_components(network.passive_branch_components):
        branch_bus0 += list(c.df.loc[c.ind, 'bus0'])
        branch_bus1 += list(c.df.loc[c.ind, 'bus1'])
    v0 = V[:,buses_indexer(branch_bus0)]
    v1 = V[:,buses_indexer(branch_bus1)]

    i0 = np.empty((len(snapshots), sub_network.Y0.shape[0]), dtype=complex)
    i1 = np.empty((len(snapshots), sub_network.Y1.shape[0]), dtype=complex)
    for i, now in enumerate(snapshots):
        i0[i] = sub_network.Y0*V[i]
        i1[i] = sub_network.Y1*V[i]

    s0 = pd.DataFrame(v0*np.conj(i0), columns=branches_i, index=snapshots)
    s1 = pd.DataFrame(v1*np.conj(i1), columns=branches_i, index=snapshots)
    for c in sub_network.iterate_components(network.passive_branch_components):
        s0t = s0.loc[:,c.name]
        s1t = s1.loc[:,c.name]
        c.pnl.p0.loc[snapshots,s0t.columns] = s0t.values.real
        c.pnl.q0.loc[snapshots,s0t.columns] = s0t.values.imag
        c.pnl.p1.loc[snapshots,s1t.columns] = s1t.values.real
        c.pnl.q1.loc[snapshots,s1t.columns] = s1t.values.imag

    s_calc = np.empty((len(snapshots), len(buses_o)), dtype=complex)
    for i in np.arange(len(snapshots)):
        s_calc[i] = V[i]*np.conj(sub_network.Y*V[i])
    slack_index = buses_o.get_loc(sub_network.slack_bus)
    if distribute_slack:
        network.buses_t.p.loc[snapshots,sn_buses] = s_calc.real[:,buses_indexer(sn_buses)]
    else:
        network.buses_t.p.loc[snapshots,sub_network.slack_bus] = s_calc[:,slack_index].real
    network.buses_t.q.loc[snapshots,sub_network.slack_bus] = s_calc[:,slack_index].imag
    network.buses_t.q.loc[snapshots,sub_network.pvs] = s_calc[:,buses_indexer(sub_network.pvs)].imag

    #set shunt impedance powers
    shunt_impedances_i = sub_network.shunt_impedances_i()
    if len(shunt_impedances_i):
        #add voltages
        shunt_impedances_v_mag_pu = v_mag_pu[:,buses_indexer(network.shunt_impedances.loc[shunt_impedances_i, 'bus'])]
        network.shunt_impedances_t.p.loc[snapshots,shunt_impedances_i] = (shunt_impedances_v_mag_pu**2)*network.shunt_impedances.loc[shunt_impedances_i, 'g_pu'].values
        network.shunt_impedances_t.q.loc[snapshots,shunt_impedances_i] = (shunt_impedances_v_mag_pu**2)*network.shunt_impedances.loc[shunt_impedances_i, 'b_pu'].values

    #let slack generator take up the slack
    if distribute_slack:
        distributed_slack_power = network.buses_t.p.loc[snapshots,sn_buses] - ss[:,buses_indexer(sn_buses)].real
        for bus, group in sub_network.generators().groupby('bus'):
            if type(slack_weights) == str and slack_weights == 'p_set':
                generators_t_p_choice = get_switchable_as_dense(network, 'Generator', slack_weights, snapshots)
                bus_generator_shares = generators_t_p_choice.loc[snapshots,group.index].apply(normed, axis=1).fillna(0)
                network.generators_t.p.loc[snapshots,group.index] += bus_generator_shares.multiply(distributed_slack_power.loc[snapshots,bus], axis=0)
            else:
                if generator_slack_weights_b:
                    bus_generator_shares = slack_weights.loc[group.index].pipe(normed).fillna(0)
                else:
                    bus_generators_p_nom = network.generators.p_nom.loc[group.index]
                    # distribute evenly if no p_nom given
                    # NOTE(review): `all(...) == 0` triggers even distribution
                    # whenever ANY p_nom is zero, not only when all are --
                    # kept as-is for behaviour compatibility; confirm intent.
                    if all(bus_generators_p_nom) == 0:
                        bus_generators_p_nom = 1
                    bus_generator_shares = bus_generators_p_nom.pipe(normed).fillna(0)
                network.generators_t.p.loc[snapshots,group.index] += distributed_slack_power.loc[snapshots,bus].apply(lambda row: row*bus_generator_shares)
    else:
        network.generators_t.p.loc[snapshots,sub_network.slack_generator] += network.buses_t.p.loc[snapshots,sub_network.slack_bus] - ss[:,slack_index].real

    #set the Q of the slack and PV generators
    network.generators_t.q.loc[snapshots,sub_network.slack_generator] += network.buses_t.q.loc[snapshots,sub_network.slack_bus] - ss[:,slack_index].imag
    network.generators_t.q.loc[snapshots,network.buses.loc[sub_network.pvs, "generator"]] += np.asarray(network.buses_t.q.loc[snapshots,sub_network.pvs] - ss[:,buses_indexer(sub_network.pvs)].imag)

    return iters, diffs, convs
def network_lpf(network, snapshots=None, skip_pre=False):
    """
    Run a linear power flow over a whole network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        Subset (or a single element) of network.snapshots on which to
        run the power flow; defaults to network.snapshots.
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.

    Returns
    -------
    None
    """
    # All of the work is done by the shared pf driver, in linear mode.
    _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)
def apply_line_types(network):
    """Calculate line electrical parameters x, r, b, g from standard
    types.

    Lines whose ``type`` is the empty string are left untouched; all
    others must name an entry of ``network.line_types``.
    """
    lines_with_types_b = network.lines.type != ""
    # bugfix: a boolean pandas Series has no .zsum(); .sum() counts the
    # True entries, i.e. the number of lines with a standard type set
    if lines_with_types_b.sum() == 0:
        return

    missing_types = (pd.Index(network.lines.loc[lines_with_types_b, 'type'].unique())
                     .difference(network.line_types.index))
    assert missing_types.empty, ("The type(s) {} do(es) not exist in network.line_types"
                                 .format(", ".join(missing_types)))

    # Get a copy of the lines data joined with their standard-type parameters
    l = (network.lines.loc[lines_with_types_b, ["type", "length", "num_parallel"]]
         .join(network.line_types, on='type'))

    for attr in ["r", "x"]:
        # series impedance grows with length, shrinks with parallel circuits
        l[attr] = l[attr + "_per_length"] * l["length"] / l["num_parallel"]
    # shunt susceptance b = 2*pi*f*C (c_per_length given in nF/km, hence 1e-9)
    l["b"] = 2*np.pi*1e-9*l["f_nom"] * l["c_per_length"] * l["length"] * l["num_parallel"]

    # now set calculated values on live lines
    for attr in ["r", "x", "b"]:
        network.lines.loc[lines_with_types_b, attr] = l[attr]
def apply_transformer_types(network):
    """Calculate transformer electrical parameters x, r, b, g from
    standard types.

    Transformers whose ``type`` is the empty string are left untouched;
    all others must name an entry of ``network.transformer_types``.
    """
    trafos_with_types_b = network.transformers.type != ""
    # bugfix: a boolean pandas Series has no .zsum(); .sum() counts the
    # True entries, i.e. the number of transformers with a type set
    if trafos_with_types_b.sum() == 0:
        return

    missing_types = (pd.Index(network.transformers.loc[trafos_with_types_b, 'type'].unique())
                     .difference(network.transformer_types.index))
    assert missing_types.empty, ("The type(s) {} do(es) not exist in network.transformer_types"
                                 .format(", ".join(missing_types)))

    # Get a copy of the transformers data
    # (joining pulls in "phase_shift", "s_nom", "tap_side" from TransformerType)
    t = (network.transformers.loc[trafos_with_types_b, ["type", "tap_position", "num_parallel"]]
         .join(network.transformer_types, on='type'))

    t["r"] = t["vscr"] / 100.
    # total short-circuit impedance vsc minus its resistive part gives x
    t["x"] = np.sqrt((t["vsc"]/100.)**2 - t["r"]**2)

    # NB: b and g are per unit of s_nom
    t["g"] = t["pfe"]/(1000. * t["s_nom"])

    # for some bizarre reason, some of the standard types in pandapower have i0^2 < g^2
    t["b"] = - np.sqrt(((t["i0"]/100.)**2 - t["g"]**2).clip(lower=0))

    # parallel transformers: impedance halves, admittance doubles (per unit added)
    for attr in ["r", "x"]:
        t[attr] /= t["num_parallel"]
    for attr in ["b", "g"]:
        t[attr] *= t["num_parallel"]

    # deal with tap positions
    t["tap_ratio"] = 1. + (t["tap_position"] - t["tap_neutral"]) * (t["tap_step"]/100.)

    # now set calculated values on live transformers
    for attr in ["r", "x", "g", "b", "phase_shift", "s_nom", "tap_side", "tap_ratio"]:
        network.transformers.loc[trafos_with_types_b, attr] = t[attr]

    # TODO: status, rate_A
def wye_to_delta(z1, z2, z3):
    """Convert three wye (star) impedances to the equivalent delta triple.

    Follows http://home.earthlink.net/~w6rmk/math/wyedelta.htm
    """
    s = z1 * z2 + z2 * z3 + z3 * z1
    return s / z2, s / z1, s / z3
def apply_transformer_t_model(network):
    """Convert given T-model parameters to PI-model parameters using wye-delta transformation."""
    z_series = network.transformers.r_pu + 1j*network.transformers.x_pu
    y_shunt = network.transformers.g_pu + 1j*network.transformers.b_pu

    # only T-model transformers with a non-trivial shunt branch need converting
    ts_b = (network.transformers.model == "t") & (y_shunt != 0.)

    # bugfix: a boolean pandas Series has no .zsum(); .sum() counts True entries
    if ts_b.sum() == 0:
        return

    # split the series impedance between the two legs, shunt in the middle
    za, zb, zc = wye_to_delta(z_series.loc[ts_b]/2, z_series.loc[ts_b]/2, 1/y_shunt.loc[ts_b])

    # np.real/np.imag used explicitly for consistency with the rest of the file
    network.transformers.loc[ts_b, "r_pu"] = np.real(zc)
    network.transformers.loc[ts_b, "x_pu"] = np.imag(zc)
    network.transformers.loc[ts_b, "g_pu"] = np.real(2/za)
    network.transformers.loc[ts_b, "b_pu"] = np.imag(2/za)
def calculate_dependent_values(network):
    """Calculate per unit impedances and append voltages to lines and shunt impedances."""
    apply_line_types(network)
    apply_transformer_types(network)

    # lines: nominal voltage is taken from the bus0 end
    network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom)
    for attr in ("x", "r"):
        # series impedances scale with 1/v_nom^2 ...
        network.lines[attr + "_pu"] = network.lines[attr] / (network.lines.v_nom**2)
    for attr in ("b", "g"):
        # ... while shunt admittances scale with v_nom^2
        network.lines[attr + "_pu"] = network.lines[attr] * network.lines.v_nom**2
    for attr in ("x", "r"):
        # lines have no taps: effective values equal the nominal ones
        network.lines[attr + "_pu_eff"] = network.lines[attr + "_pu"]

    # convert transformer impedances from base power s_nom to base = 1 MVA
    for attr in ("x", "r"):
        network.transformers[attr + "_pu"] = network.transformers[attr] / network.transformers.s_nom
    for attr in ("b", "g"):
        network.transformers[attr + "_pu"] = network.transformers[attr] * network.transformers.s_nom
    for attr in ("x", "r"):
        # tap ratio corrects the effective series impedance
        network.transformers[attr + "_pu_eff"] = network.transformers[attr + "_pu"] * network.transformers["tap_ratio"]

    apply_transformer_t_model(network)

    # shunt impedances: admittances scale with v_nom^2 of their bus
    network.shunt_impedances["v_nom"] = network.shunt_impedances["bus"].map(network.buses.v_nom)
    for attr in ("b", "g"):
        network.shunt_impedances[attr + "_pu"] = network.shunt_impedances[attr] * network.shunt_impedances.v_nom**2
def find_slack_bus(sub_network):
    """Find the slack bus in a connected sub-network.

    Sets ``sub_network.slack_generator`` (may be None) and
    ``sub_network.slack_bus``, and records the slack bus in
    ``network.sub_networks``.  May rewrite generator ``control`` values
    in the parent network to enforce exactly one slack generator.
    """
    gens = sub_network.generators()

    if len(gens) == 0:
        # no generators at all: nothing can be promoted, fall back to the
        # first bus of the sub-network as a nominal slack bus
        # logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name))
        sub_network.slack_generator = None
        sub_network.slack_bus = sub_network.buses_i()[0]
    else:
        slacks = gens[gens.control == "Slack"].index

        if len(slacks) == 0:
            # no generator marked Slack: promote the first one
            sub_network.slack_generator = gens.index[0]
            sub_network.network.generators.loc[sub_network.slack_generator, "control"] = "Slack"
            logger.debug("No slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))
        elif len(slacks) == 1:
            sub_network.slack_generator = slacks[0]
        else:
            # several marked Slack: keep the first, demote the rest to PV
            sub_network.slack_generator = slacks[0]
            sub_network.network.generators.loc[slacks[1:], "control"] = "PV"
            logger.debug("More than one slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))

        sub_network.slack_bus = gens.bus[sub_network.slack_generator]

    # also put it into the dataframe
    sub_network.network.sub_networks.at[sub_network.name, "slack_bus"] = sub_network.slack_bus

    logger.debug("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus))
def find_bus_controls(sub_network):
    """Find slack and all PV and PQ buses for a sub_network.

    This function also sets sub_network.buses_o, a pandas Index of the
    sub-network's buses ordered by control type: slack first, then PV,
    then PQ buses.
    """
    network = sub_network.network

    find_slack_bus(sub_network)

    gens = sub_network.generators()
    buses_i = sub_network.buses_i()

    # default bus control is PQ
    network.buses.loc[buses_i, "control"] = "PQ"

    # find all buses with one or more gens with PV
    pvs = gens[gens.control == 'PV'].index.to_series()
    if len(pvs) > 0:
        # keep one representative PV generator per bus (the first one)
        pvs = pvs.groupby(gens.bus).first()
        network.buses.loc[pvs.index, "control"] = "PV"
        network.buses.loc[pvs.index, "generator"] = pvs

    # the slack bus overrides any PV setting on the same bus
    network.buses.loc[sub_network.slack_bus, "control"] = "Slack"
    network.buses.loc[sub_network.slack_bus, "generator"] = sub_network.slack_generator

    buses_control = network.buses.loc[buses_i, "control"]
    sub_network.pvs = buses_control.index[buses_control == "PV"]
    sub_network.pqs = buses_control.index[buses_control == "PQ"]

    sub_network.pvpqs = sub_network.pvs.append(sub_network.pqs)

    # order buses: slack bus first, then the PV/PQ buses
    sub_network.buses_o = sub_network.pvpqs.insert(0, sub_network.slack_bus)
def calculate_B_H(sub_network, skip_pre=False):
    """Calculate B and H matrices for AC or DC sub-networks.

    H maps voltage angle differences to branch flows, and B = K*H (with
    K the bus-branch incidence matrix) is the weighted Laplacian used in
    the linear power flow.  Also computes constant branch/bus power
    shifts arising from transformer phase shifts.
    """
    network = sub_network.network

    if not skip_pre:
        calculate_dependent_values(network)
        find_bus_controls(sub_network)

    # DC networks use effective resistance, AC networks effective reactance
    if network.sub_networks.at[sub_network.name, "carrier"] == "DC":
        attribute = "r_pu_eff"
    else:
        attribute = "x_pu_eff"

    # following leans heavily on pypower.makeBdc

    # susceptances, one entry per passive branch
    b = 1./np.concatenate([(c.df.loc[c.ind, attribute]).values \
        for c in sub_network.iterate_components(network.passive_branch_components)])

    # zero impedance gives inf/nan here, which would make B singular
    if np.isnan(b).any():
        logger.warning("Warning! Some series impedances are zero - this will cause a singularity in LPF!")
    b_diag = csr_matrix((b, (r_[:len(b)], r_[:len(b)])))

    # incidence matrix in the slack-first bus ordering
    sub_network.K = sub_network.incidence_matrix(busorder=sub_network.buses_o)

    sub_network.H = b_diag*sub_network.K.T

    # weighted Laplacian
    sub_network.B = sub_network.K * sub_network.H

    # transformer phase shifts (degrees -> radians) enter as constant flows;
    # all other passive branches contribute zero shift
    sub_network.p_branch_shift = -b*np.concatenate([(c.df.loc[c.ind, "phase_shift"]).values*np.pi/180. if c.name == "Transformer"
                                                    else np.zeros((len(c.ind),))
                                                    for c in sub_network.iterate_components(network.passive_branch_components)])

    sub_network.p_bus_shift = sub_network.K * sub_network.p_branch_shift
def calculate_PTDF(sub_network, skip_pre=False):
    """
    Calculate the Power Transfer Distribution Factor (PTDF) for
    sub_network.

    Sets sub_network.PTDF as a (dense) numpy array.

    Parameters
    ----------
    sub_network : pypsa.SubNetwork
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating dependent values,
        finding bus controls and computing B and H.
    """
    if not skip_pre:
        calculate_B_H(sub_network)

    # calculate inverse of B with slack removed (first row/column dropped,
    # since the slack bus angle is the reference)
    n_pvpq = len(sub_network.pvpqs)
    index = np.r_[:n_pvpq]

    # sparse identity used as the right-hand side of the solve
    I = csc_matrix((np.ones(n_pvpq), (index, index)))

    B_inverse = spsolve(sub_network.B[1:, 1:], I)

    # exception for two-node networks, where B_inverse is a 1d array
    if issparse(B_inverse):
        B_inverse = B_inverse.toarray()
    elif B_inverse.shape == (1,):
        B_inverse = B_inverse.reshape((1, 1))

    # add back in zeroes for slack (column, then row)
    B_inverse = np.hstack((np.zeros((n_pvpq, 1)), B_inverse))
    B_inverse = np.vstack((np.zeros(n_pvpq+1), B_inverse))

    sub_network.PTDF = sub_network.H*B_inverse
def calculate_Y(sub_network, skip_pre=False):
    """Calculate bus admittance matrices for AC sub-networks.

    Sets sub_network.Y (the bus admittance matrix) as well as
    sub_network.Y0 and sub_network.Y1, which map the bus voltage vector
    to the complex branch currents at the bus0 and bus1 ends.
    """
    if not skip_pre:
        calculate_dependent_values(sub_network.network)

    if sub_network.network.sub_networks.at[sub_network.name, "carrier"] != "AC":
        logger.warning("Non-AC networks not supported for Y!")
        return

    branches = sub_network.branches()
    buses_o = sub_network.buses_o

    network = sub_network.network

    # following leans heavily on pypower.makeYbus
    # Copyright Richard Lincoln, Ray Zimmerman, BSD-style licence

    num_branches = len(branches)
    num_buses = len(buses_o)

    y_se = 1/(branches["r_pu"] + 1.j*branches["x_pu"])   # series admittance
    y_sh = branches["g_pu"] + 1.j*branches["b_pu"]       # total shunt admittance

    tau = branches["tap_ratio"].fillna(1.)

    # catch some transformers falsely set with tau = 0 by pypower
    tau[tau == 0] = 1.

    # define the HV tap ratios
    tau_hv = pd.Series(1., branches.index)
    tau_hv[branches.tap_side == 0] = tau[branches.tap_side == 0]

    # define the LV tap ratios
    tau_lv = pd.Series(1., branches.index)
    tau_lv[branches.tap_side == 1] = tau[branches.tap_side == 1]

    # phase shift as a complex rotation factor (degrees -> radians)
    phase_shift = np.exp(1.j*branches["phase_shift"].fillna(0.)*np.pi/180.)

    # build the admittance matrix elements for each branch
    Y11 = (y_se + 0.5*y_sh)/tau_lv**2
    Y10 = -y_se/tau_lv/tau_hv/phase_shift
    Y01 = -y_se/tau_lv/tau_hv/np.conj(phase_shift)
    Y00 = (y_se + 0.5*y_sh)/tau_hv**2

    # bus shunt admittances, summed per bus, zero where none attached
    b_sh = network.shunt_impedances.b_pu.groupby(network.shunt_impedances.bus).sum().reindex(buses_o, fill_value = 0.)
    g_sh = network.shunt_impedances.g_pu.groupby(network.shunt_impedances.bus).sum().reindex(buses_o, fill_value = 0.)
    Y_sh = g_sh + 1.j*b_sh

    # get bus indices
    bus0 = buses_o.get_indexer(branches.bus0)
    bus1 = buses_o.get_indexer(branches.bus1)

    # connection matrices: branch x bus incidence, one per branch end
    C0 = csr_matrix((ones(num_branches), (np.arange(num_branches), bus0)), (num_branches, num_buses))
    C1 = csr_matrix((ones(num_branches), (np.arange(num_branches), bus1)), (num_branches, num_buses))

    # build Y{0,1} such that Y{0,1} * V is the vector complex branch currents
    i = r_[np.arange(num_branches), np.arange(num_branches)]

    sub_network.Y0 = csr_matrix((r_[Y00,Y01],(i,r_[bus0,bus1])), (num_branches,num_buses))
    sub_network.Y1 = csr_matrix((r_[Y10,Y11],(i,r_[bus0,bus1])), (num_branches,num_buses))

    # now build bus admittance matrix
    sub_network.Y = C0.T * sub_network.Y0 + C1.T * sub_network.Y1 + \
                    csr_matrix((Y_sh, (np.arange(num_buses), np.arange(num_buses))))
def aggregate_multi_graph(sub_network):
    """Aggregate branches between same buses and replace with a single
    branch with aggregated properties (e.g. s_nom is summed, length is
    averaged).
    """
    network = sub_network.network

    count = 0
    seen = []
    graph = sub_network.graph()
    for u, v in graph.edges():
        if (u, v) in seen:
            continue
        # all parallel branches between buses u and v (multigraph edge keys)
        line_objs = list(graph.adj[u][v].keys())
        if len(line_objs) > 1:
            # edge keys are indexable; element [1] is the line name
            lines = network.lines.loc[[l[1] for l in line_objs]]
            aggregated = {}

            # impedances combine in parallel, ratings add up, geometry averages
            attr_inv = ["x", "r"]
            attr_sum = ["s_nom", "b", "g", "s_nom_max", "s_nom_min"]
            attr_mean = ["capital_cost", "length", "terrain_factor"]

            for attr in attr_inv:
                aggregated[attr] = 1./(1./lines[attr]).sum()

            for attr in attr_sum:
                aggregated[attr] = lines[attr].sum()

            for attr in attr_mean:
                aggregated[attr] = lines[attr].mean()

            count += len(line_objs) - 1

            # remove all but first line
            for line in line_objs[1:]:
                network.remove("Line", line[1])

            rep = line_objs[0]

            # NOTE(review): setattr assumes the edge key `rep` is an object
            # with mutable attributes; if the graph's edge keys are plain
            # ("Line", name) tuples this would raise - confirm against the
            # graph construction used by sub_network.graph().
            for key, value in aggregated.items():
                setattr(rep, key, value)

            seen.append((u, v))

    logger.info("Removed %d excess lines from sub-network %s and replaced with aggregated lines", count, sub_network.name)
def find_tree(sub_network, weight='x_pu'):
    """Get the spanning tree of the graph, choose the node with the
    highest degree as a central "tree slack" and then see for each
    branch which paths from the slack to each node go through the
    branch.

    The result is stored in the sparse matrix ``sub_network.T``
    (branches x buses) with entries +/-1 encoding branch orientation
    along the slack-to-bus path.
    """
    branches_bus0 = sub_network.branches()["bus0"]
    branches_i = branches_bus0.index
    buses_i = sub_network.buses_i()

    graph = sub_network.graph(weight=weight, inf_weight=1.)
    sub_network.tree = nx.minimum_spanning_tree(graph)

    # find bus with highest degree to use as slack
    tree_slack_bus, slack_degree = max(degree(sub_network.tree), key=itemgetter(1))
    logger.debug("Tree slack bus is %s with degree %d.", tree_slack_bus, slack_degree)

    # determine which buses are supplied in tree through branch from slack

    # matrix to store tree structure
    sub_network.T = dok_matrix((len(branches_i), len(buses_i)))

    for j, bus in enumerate(buses_i):
        path = nx.shortest_path(sub_network.tree, bus, tree_slack_bus)
        for i in range(len(path)-1):
            # pick any one branch between the two adjacent path nodes
            # (the graph is a multigraph, so there may be several)
            branch = next(iterkeys(graph[path[i]][path[i+1]]))
            branch_i = branches_i.get_loc(branch)
            # sign records whether the branch points along or against the path
            sign = +1 if branches_bus0.iat[branch_i] == path[i] else -1
            sub_network.T[branch_i, j] = sign
def find_cycles(sub_network, weight='x_pu'):
    """
    Find all cycles in the sub_network and record them in sub_network.C.

    networkx collects the cycles with more than 2 edges; then the 2-edge cycles
    from the MultiGraph must be collected separately (for cases where there
    are multiple lines between the same pairs of buses).

    Cycles with infinite impedance are skipped.
    """
    branches_bus0 = sub_network.branches()["bus0"]
    branches_i = branches_bus0.index

    # reduce to a non-multi-graph for cycles with > 2 edges
    mgraph = sub_network.graph(weight=weight, inf_weight=False)
    graph = nx.OrderedGraph(mgraph)

    cycles = nx.cycle_basis(graph)

    # number of 2-edge cycles (each extra parallel edge collapsed by OrderedGraph)
    num_multi = len(mgraph.edges()) - len(graph.edges())

    # each column of C is one cycle; entries are +/-1 by branch orientation
    sub_network.C = dok_matrix((len(branches_bus0), len(cycles)+num_multi))

    for j, cycle in enumerate(cycles):
        for i in range(len(cycle)):
            # pick one branch between consecutive cycle nodes (wraps around)
            branch = next(iterkeys(mgraph[cycle[i]][cycle[(i+1)%len(cycle)]]))
            branch_i = branches_i.get_loc(branch)
            sign = +1 if branches_bus0.iat[branch_i] == cycle[i] else -1
            sub_network.C[branch_i, j] += sign

    # counter for multis
    c = len(cycles)

    # add multi-graph 2-edge cycles for multiple branches between same pairs of buses
    for u, v in graph.edges():
        bs = list(mgraph[u][v].keys())
        if len(bs) > 1:
            first = bs[0]
            first_i = branches_i.get_loc(first)
            for b in bs[1:]:
                b_i = branches_i.get_loc(b)
                # opposite sign if both branches are oriented the same way
                sign = -1 if branches_bus0.iat[b_i] == branches_bus0.iat[first_i] else +1

                sub_network.C[first_i, c] = 1
                sub_network.C[b_i, c] = sign
                c += 1
def sub_network_lpf(sub_network, snapshots=None, skip_pre=False):
    """
    Linear power flow for connected sub-network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an elements of network.snapshots on which to run
        the power flow, defaults to network.snapshots
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.

    Returns
    -------
    None
    """
    snapshots = _as_snapshots(sub_network.network, snapshots)
    logger.info("Performing linear load-flow on %s sub-network %s for snapshot(s) %s",
                sub_network.network.sub_networks.at[sub_network.name,"carrier"], sub_network, snapshots)

    network = sub_network.network

    if not skip_pre:
        calculate_dependent_values(network)
        find_bus_controls(sub_network)
        _allocate_pf_outputs(network, linear=True)

    # get indices for the components on this subnetwork
    buses_o = sub_network.buses_o
    branches_i = sub_network.branches_i()

    # allow all shunt impedances to dispatch as set
    shunt_impedances_i = sub_network.shunt_impedances_i()
    network.shunt_impedances_t.p.loc[snapshots, shunt_impedances_i] = \
        network.shunt_impedances.g_pu.loc[shunt_impedances_i].values

    # allow all one ports to dispatch as set
    for c in sub_network.iterate_components(network.controllable_one_port_components):
        c_p_set = get_switchable_as_dense(network, c.name, 'p_set', snapshots, c.ind)
        c.pnl.p.loc[snapshots, c.ind] = c_p_set

    # set the power injection at each node: sign-weighted one-port dispatch
    # plus the per-end withdrawals of controllable branches
    network.buses_t.p.loc[snapshots, buses_o] = \
        sum([((c.pnl.p.loc[snapshots, c.ind] * c.df.loc[c.ind, 'sign'])
              .groupby(c.df.loc[c.ind, 'bus'], axis=1).sum()
              .reindex(columns=buses_o, fill_value=0.))
             for c in sub_network.iterate_components(network.one_port_components)]
            +
            [(- c.pnl["p"+str(i)].loc[snapshots].groupby(c.df["bus"+str(i)], axis=1).sum()
              .reindex(columns=buses_o, fill_value=0))
             for c in network.iterate_components(network.controllable_branch_components)
             for i in [int(col[3:]) for col in c.df.columns if col[:3] == "bus"]])

    if not skip_pre and len(branches_i) > 0:
        calculate_B_H(sub_network, skip_pre=True)

    # solve for angle differences; the slack bus (first in buses_o) stays 0
    v_diff = np.zeros((len(snapshots), len(buses_o)))
    if len(branches_i) > 0:
        p = network.buses_t['p'].loc[snapshots, buses_o].values - sub_network.p_bus_shift
        v_diff[:,1:] = spsolve(sub_network.B[1:, 1:], p[:,1:].T).T

        # branch flows from angle differences plus transformer phase shifts
        flows = pd.DataFrame(v_diff * sub_network.H.T,
                             columns=branches_i, index=snapshots) + sub_network.p_branch_shift

        for c in sub_network.iterate_components(network.passive_branch_components):
            f = flows.loc[:, c.name]
            c.pnl.p0.loc[snapshots, f.columns] = f
            c.pnl.p1.loc[snapshots, f.columns] = -f

    # for DC, v_diff is a per-unit magnitude deviation; for AC a voltage angle
    if network.sub_networks.at[sub_network.name,"carrier"] == "DC":
        network.buses_t.v_mag_pu.loc[snapshots, buses_o] = 1 + v_diff
        network.buses_t.v_ang.loc[snapshots, buses_o] = 0.
    else:
        network.buses_t.v_ang.loc[snapshots, buses_o] = v_diff
        network.buses_t.v_mag_pu.loc[snapshots, buses_o] = 1.

    # set slack bus power to pick up remained
    slack_adjustment = (- network.buses_t.p.loc[snapshots, buses_o[1:]].sum(axis=1).fillna(0.)
                        - network.buses_t.p.loc[snapshots, buses_o[0]])
    network.buses_t.p.loc[snapshots, buses_o[0]] += slack_adjustment

    # let slack generator take up the slack
    if sub_network.slack_generator is not None:
        network.generators_t.p.loc[snapshots, sub_network.slack_generator] += slack_adjustment
def network_batch_lpf(network, snapshots=None):
    """Placeholder for a batched linear power flow (numpy.dot over several snapshots)."""
    # Not implemented yet - fail loudly rather than silently doing nothing.
    raise NotImplementedError("Batch linear power flow not supported yet.")
| gpl-3.0 |
hannorein/rebound | python_examples/megno/problem.py | 1 | 3476 | #!/usr/bin/python
# This example integrates Jupiter and Saturn in the Solar system for a variety of initial conditions.
# Alongside the normal equations of motions, IAS15 is used to integrate the variational equations.
# These can be used to measure the Mean Exponential Growth of Nearby Orbits (MEGNO), a chaos indicator.
# This example script runs 12^2 simulations and plots the MEGNO value. Values close to <Y>=2 correspond
# to regular quasi-periodic orbits. Higher values of <Y> correspond to chaotic orbits.
# Import matplotlib
import matplotlib; matplotlib.use("pdf")
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# Import the rebound module
import rebound
# Import other modules
import numpy as np
import multiprocessing
import warnings
# Runs one simulation.
def simulation(par):
    """Integrate one Sun-Jupiter-Saturn system and return its chaos indicators.

    Parameters
    ----------
    par : tuple(float, float)
        Saturn's semi-major axis [AU] and eccentricity.

    Returns
    -------
    list [megno, lyapunov_timescale_years]
    """
    saturn_a, saturn_e = par
    sim = rebound.Simulation()
    sim.integrator = "whfast"
    sim.min_dt = 5.
    sim.dt = 1.

    # These parameters are only approximately those of Jupiter and Saturn.
    sun = rebound.Particle(m=1.)
    sim.add(sun)
    jupiter = sim.add(primary=sun,m=0.000954, a=5.204, M=0.600, omega=0.257, e=0.048)
    saturn = sim.add(primary=sun,m=0.000285, a=saturn_a, M=0.871, omega=1.616, e=saturn_e)

    sim.move_to_com()
    # enable the variational equations needed for the MEGNO chaos indicator
    sim.init_megno()
    # Hide warning messages (WHFast timestep too large)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        # integrate for 1000 orbits of the inner unit (time in units of 2*pi)
        sim.integrate(1e3*2.*np.pi)

    return [sim.calculate_megno(),1./(sim.calculate_lyapunov()*2.*np.pi)] # returns MEGNO and Lyapunov timescale in years
### Setup grid and run many simulations in parallel
N = 100                      # Grid size, increase this number to see more detail
a = np.linspace(7.,10.,N)    # range of saturn semi-major axis in AU
e = np.linspace(0.,0.5,N)    # range of saturn eccentricity
parameters = []
for _e in e:
    for _a in a:
        parameters.append([_a,_e])

# single run in the main process (presumably a warm-up/sanity check;
# the result is discarded - TODO confirm intent)
simulation((8,0.))

# Run simulations in parallel
pool = rebound.InterruptiblePool()    # Number of threads default to the number of CPUs on the system
print("Running %d simulations on %d threads..." % (len(parameters), pool._processes))
res = np.nan_to_num(np.array(pool.map(simulation,parameters)))
# column 0: MEGNO, column 1: Lyapunov timescale; clip arrays to plot saturated
megno = np.clip(res[:,0].reshape((N,N)),1.8,4.)
lyaptimescale = np.clip(np.absolute(res[:,1].reshape((N,N))),1e1,1e5)

### Create plot and save as pdf

# Setup plots
f, axarr = plt.subplots(2,figsize=(10,10))
extent = [a.min(), a.max(), e.min(), e.max()]
for ax in axarr:
    ax.set_xlim(extent[0],extent[1])
    ax.set_ylim(extent[2],extent[3])
    ax.set_xlabel("$a_{\mathrm{Saturn}}$ [AU]")
    ax.set_ylabel("$e_{\mathrm{Saturn}}$")

# Plot MEGNO
im1 = axarr[0].imshow(megno, vmin=1.8, vmax=4., aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb1 = plt.colorbar(im1, ax=axarr[0])
cb1.solids.set_rasterized(True)
cb1.set_label("MEGNO $\\langle Y \\rangle$")

# Plot Lyapunov timescale
im2 = axarr[1].imshow(lyaptimescale, norm=LogNorm(vmin=1e1, vmax=1e5), aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn", extent=extent)
cb2 = plt.colorbar(im2, ax=axarr[1])
cb2.solids.set_rasterized(True)
cb2.set_label("Lyapunov timescale [years]")

plt.savefig("megno.pdf")

### Automatically open plot (OSX only)
from sys import platform as _platform
if _platform == "darwin":
    import os
    os.system("open megno.pdf")
| gpl-3.0 |
evanbiederstedt/RRBSfun | epiphen/cll_tests/total_CLL_chr04.py | 1 | 8306 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # print all rows
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
# Gather the per-sample binary methylation-position files for the
# cw154 and trito_pool sample groups.
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))

# Read each file and keep only positions on chromosome 4.
df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    df = df.drop("Unnamed: 0", axis=1)
    # first five characters of the position string, e.g. "chr4_"
    df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
    df = df[df["chromosome"] == "chr4_"]
    df = df.drop("chromosome", axis=1)
    df_list.append(df)
print(len(df_list))

# Align all samples on "position" (outer join on the index); one column per sample.
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
# Assign sample names (one per input file, in concatenation order: cw154
# CutSmart digest, cw154 Tris digests, then the two trito pools).  These
# become the taxon labels in the PHYLIP output below.
total_matrix.columns = ["RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GACACG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC",  # fixed typo: was "RBS_..."
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG",
                        "RRBS_trito_pool_1_TAAGGCGA.ACAACC",
                        "RRBS_trito_pool_1_TAAGGCGA.ACGTGG",
                        "RRBS_trito_pool_1_TAAGGCGA.ACTCAC",
                        "RRBS_trito_pool_1_TAAGGCGA.ATAGCG",
                        "RRBS_trito_pool_1_TAAGGCGA.ATCGAC",
                        "RRBS_trito_pool_1_TAAGGCGA.CAAGAG",
                        "RRBS_trito_pool_1_TAAGGCGA.CATGAC",
                        "RRBS_trito_pool_1_TAAGGCGA.CCTTCG",
                        "RRBS_trito_pool_1_TAAGGCGA.CGGTAG",
                        "RRBS_trito_pool_1_TAAGGCGA.CTATTG",
                        "RRBS_trito_pool_1_TAAGGCGA.GACACG",
                        "RRBS_trito_pool_1_TAAGGCGA.GCATTC",
                        "RRBS_trito_pool_1_TAAGGCGA.GCTGCC",
                        "RRBS_trito_pool_1_TAAGGCGA.GGCATC",
                        "RRBS_trito_pool_1_TAAGGCGA.GTGAGG",
                        "RRBS_trito_pool_1_TAAGGCGA.GTTGAG",
                        "RRBS_trito_pool_1_TAAGGCGA.TAGCGG",
                        "RRBS_trito_pool_1_TAAGGCGA.TATCTC",
                        "RRBS_trito_pool_1_TAAGGCGA.TCTCTG",
                        "RRBS_trito_pool_1_TAAGGCGA.TGACAG",
                        "RRBS_trito_pool_1_TAAGGCGA.TGCTGC",
                        "RRBS_trito_pool_2_CGTACTAG.ACAACC",
                        "RRBS_trito_pool_2_CGTACTAG.ACGTGG",
                        "RRBS_trito_pool_2_CGTACTAG.ACTCAC",
                        "RRBS_trito_pool_2_CGTACTAG.AGGATG",
                        "RRBS_trito_pool_2_CGTACTAG.ATAGCG",
                        "RRBS_trito_pool_2_CGTACTAG.ATCGAC",
                        "RRBS_trito_pool_2_CGTACTAG.CAAGAG",
                        "RRBS_trito_pool_2_CGTACTAG.CATGAC",
                        "RRBS_trito_pool_2_CGTACTAG.CCTTCG",
                        "RRBS_trito_pool_2_CGTACTAG.CGGTAG",
                        "RRBS_trito_pool_2_CGTACTAG.CTATTG",
                        "RRBS_trito_pool_2_CGTACTAG.GACACG",
                        "RRBS_trito_pool_2_CGTACTAG.GCATTC",
                        "RRBS_trito_pool_2_CGTACTAG.GCTGCC",
                        "RRBS_trito_pool_2_CGTACTAG.GGCATC",
                        "RRBS_trito_pool_2_CGTACTAG.GTGAGG",
                        "RRBS_trito_pool_2_CGTACTAG.GTTGAG",
                        "RRBS_trito_pool_2_CGTACTAG.TAGCGG",
                        "RRBS_trito_pool_2_CGTACTAG.TATCTC",
                        "RRBS_trito_pool_2_CGTACTAG.TCTCTG",
                        "RRBS_trito_pool_2_CGTACTAG.TGACAG"]
print(total_matrix.shape)

# Encode states for PHYLIP: 0/1 stay as ints, missing values become "?"
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
# Collapse each sample's column into one long character string.
total_matrix = total_matrix.astype(str).apply(''.join)
# Build "name sequence" lines, one per sample.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))

os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
tott.to_csv("total_CLL_chrom04.phy", header=None, index=None)
print(tott.shape)
| mit |
macks22/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
# Shared fixture for all tests below: a small, deterministic random data
# matrix (10 samples x 8 features).
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
    """The fitted dictionary has shape (n_components, n_features)."""
    n_components = 5
    model = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(model.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
    """Shapes also hold when n_components exceeds n_features (overcomplete)."""
    n_components = 12
    model = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(model.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
    """Codes from omp and lasso_lars must reconstruct X from the dictionary."""
    n_components = 12
    learner = DictionaryLearning(n_components, transform_algorithm='omp',
                                 transform_alpha=0.001, random_state=0)
    learner.fit(X)
    code = learner.transform(X)
    assert_array_almost_equal(np.dot(code, learner.components_), X)

    learner.set_params(transform_algorithm='lasso_lars')
    code = learner.transform(X)
    assert_array_almost_equal(np.dot(code, learner.components_), X, decimal=2)

    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
    # regression test that parallel reconstruction works with n_jobs=-1
    n_atoms = 12
    model = DictionaryLearning(n_atoms, transform_algorithm='omp',
                               transform_alpha=0.001, random_state=0,
                               n_jobs=-1)
    codes = model.fit(X).transform(X)
    assert_array_almost_equal(np.dot(codes, model.components_), X)

    model.set_params(transform_algorithm='lasso_lars')
    codes = model.transform(X)
    assert_array_almost_equal(np.dot(codes, model.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
    # lasso_cd must cope with a read-only (memory-mapped) input array.
    n_atoms = 12
    with TempMemmap(X) as X_read_only:
        model = DictionaryLearning(n_atoms, transform_algorithm='lasso_cd',
                                   transform_alpha=0.001, random_state=0,
                                   n_jobs=-1)
        codes = model.fit(X_read_only).transform(X_read_only)
        assert_array_almost_equal(np.dot(codes, model.components_),
                                  X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
    # transform_n_nonzero_coefs must bound the sparsity of a single code.
    n_atoms = 4
    model = DictionaryLearning(n_atoms, transform_algorithm='lars',
                               transform_n_nonzero_coefs=3, random_state=0)
    codes = model.fit(X).transform(X[1])
    assert_true(len(np.flatnonzero(codes)) == 3)

    model.set_params(transform_algorithm='omp')
    codes = model.transform(X[1])
    assert_equal(len(np.flatnonzero(codes)), 3)
def test_dict_learning_unknown_fit_algorithm():
    # An unrecognised fit_algorithm must raise ValueError at fit time.
    n_atoms = 5
    model = DictionaryLearning(n_atoms, fit_algorithm='<unknown>')
    assert_raises(ValueError, model.fit, X)
def test_dict_learning_split():
    # With split_sign, positive and negative parts are stored in separate
    # columns; their difference must recover the unsplit code.
    n_atoms = 5
    model = DictionaryLearning(n_atoms, transform_algorithm='threshold',
                               random_state=0)
    codes = model.fit(X).transform(X)
    model.split_sign = True
    split_codes = model.transform(X)
    assert_array_equal(split_codes[:, :n_atoms] - split_codes[:, n_atoms:],
                       codes)
def test_dict_learning_online_shapes():
    # dict_learning_online returns (code, dictionary) with matching shapes.
    rng = np.random.RandomState(0)
    n_atoms = 8
    codes, dictionary = dict_learning_online(X, n_components=n_atoms,
                                             alpha=1, random_state=rng)
    assert_equal(codes.shape, (n_samples, n_atoms))
    assert_equal(dictionary.shape, (n_atoms, n_features))
    assert_equal(np.dot(codes, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
    # Smoke-test both verbosity levels while silencing their output.
    n_atoms = 5
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys

    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        model = MiniBatchDictionaryLearning(n_atoms, n_iter=20, verbose=1,
                                            random_state=0)
        model.fit(X)
        model = MiniBatchDictionaryLearning(n_atoms, n_iter=20, verbose=2,
                                            random_state=0)
        model.fit(X)
        dict_learning_online(X, n_components=n_atoms, alpha=1, verbose=1,
                             random_state=0)
        dict_learning_online(X, n_components=n_atoms, alpha=1, verbose=2,
                             random_state=0)
    finally:
        sys.stdout = old_stdout

    assert_true(model.components_.shape == (n_atoms, n_features))
def test_dict_learning_online_estimator_shapes():
    # The estimator API must learn a dictionary of the requested size.
    n_atoms = 5
    model = MiniBatchDictionaryLearning(n_atoms, n_iter=20, random_state=0)
    model.fit(X)
    assert_true(model.components_.shape == (n_atoms, n_features))
def test_dict_learning_online_overcomplete():
    # Overcomplete dictionaries must also work in the minibatch setting.
    n_atoms = 12
    model = MiniBatchDictionaryLearning(n_atoms, n_iter=20,
                                        random_state=0).fit(X)
    assert_true(model.components_.shape == (n_atoms, n_features))
def test_dict_learning_online_initialization():
    # With zero iterations the fitted dictionary is exactly dict_init.
    n_atoms = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_atoms, n_features)
    model = MiniBatchDictionaryLearning(n_atoms, n_iter=0,
                                        dict_init=V, random_state=0).fit(X)
    assert_array_equal(model.components_, V)
def test_dict_learning_online_partial_fit():
    # Ten epochs of per-sample partial_fit must closely match a single
    # fit() run for an equivalent number of batch_size=1 iterations.
    n_atoms = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_atoms, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    batch_model = MiniBatchDictionaryLearning(n_atoms, n_iter=10 * len(X),
                                              batch_size=1,
                                              alpha=1, shuffle=False,
                                              dict_init=V,
                                              random_state=0).fit(X)
    online_model = MiniBatchDictionaryLearning(n_atoms, alpha=1,
                                               n_iter=1, dict_init=V,
                                               random_state=0)
    for _ in range(10):
        for sample in X:
            online_model.partial_fit(sample)

    codes = sparse_encode(X, batch_model.components_, alpha=1)
    assert_true(not np.all(codes == 0))
    assert_array_almost_equal(batch_model.components_,
                              online_model.components_, decimal=2)
def test_sparse_encode_shapes():
    # Every coding algorithm returns an (n_samples, n_components) array.
    n_atoms = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_atoms, n_features)  # random init
    dictionary /= np.sum(dictionary ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        codes = sparse_encode(X, dictionary, algorithm=algo)
        assert_equal(codes.shape, (n_samples, n_atoms))
def test_sparse_encode_error():
    # A small alpha must give nonzero codes with low reconstruction error.
    n_atoms = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_atoms, n_features)  # random init
    dictionary /= np.sum(dictionary ** 2, axis=1)[:, np.newaxis]
    codes = sparse_encode(X, dictionary, alpha=0.001)
    assert_true(not np.all(codes == 0))
    reconstruction_error = np.sqrt(np.sum((np.dot(codes, dictionary) - X) ** 2))
    assert_less(reconstruction_error, 0.1)
def test_sparse_encode_error_default_sparsity():
    # OMP with n_nonzero_coefs=None falls back to a sensible default and
    # still returns one code row per sample.
    rng = np.random.RandomState(0)
    data = rng.randn(100, 64)
    dictionary = rng.randn(2, 64)
    codes = ignore_warnings(sparse_encode)(data, dictionary, algorithm='omp',
                                           n_nonzero_coefs=None)
    assert_equal(codes.shape, (100, 2))
def test_unknown_method():
    # sparse_encode must reject algorithms it does not know about.
    n_atoms = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_atoms, n_features)  # random init
    assert_raises(ValueError, sparse_encode, X, dictionary,
                  algorithm="<unknown>")
def test_sparse_coder_estimator():
    """SparseCoder with a fixed dictionary yields nonzero, accurate codes."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    # Normalize atoms (by squared norm, matching the other tests here).
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    # SparseCoder skips fitting entirely; transform() encodes against V.
    code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
                       transform_alpha=0.001).transform(X)
    assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
wlamond/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 95 | 6971 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
# Fix the global RNG so the forests (and blob noise) are reproducible.
np.random.seed(0)

# Generate data: three heavily-overlapping Gaussian blobs (cluster_std=5),
# split into train (600) / calibration (200) / test (200) folds.
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
                  cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]

# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)

# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
# cv="prefit": clf is already fitted; only the sigmoid maps are learned
# on the held-out calibration fold.
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 168 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
    """Ground-truth function (x * sin x) to be approximated by polynomials."""
    return np.sin(x) * x
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
aspiringfastlaner/spx_options_backtesting | Python Code/black_scholes.py | 1 | 1821 | # Importing modules
import smtplib
import pandas as pd
import datetime as dt
import pandas.stats.moments as st
import matplotlib.pyplot as plt
import os
import quandl as qd
import seaborn as sns
from scipy.stats import norm
from math import *
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
""" # The Black Scholes Formula
# CallPutFlag - This is set to 'c' for call option, anything else for put
# S - Stock price
# K - Strike price
# T - Time to maturity (in days)
# r - Riskfree interest rate
# d - Dividend yield
# v - Volatility (in days)
"""
# Function for black scholes option price
# Set for Puts
def BlackScholes(CallPutFlag, S, K, T, r, d, v):
    """Black-Scholes price of a European option on a dividend-paying asset.

    'c' prices a call; any other flag prices a put.  S is spot, K strike,
    T time to expiry, r the risk-free rate, d a continuous dividend yield
    and v the volatility (T, r, d, v in mutually consistent units).
    """
    vol_sqrt_t = v * sqrt(T)
    d1 = (log(float(S) / K) + ((r - d) + v * v / 2.) * T) / vol_sqrt_t
    d2 = d1 - vol_sqrt_t
    fwd_spot = S * exp(-d * T)      # spot discounted by the dividend yield
    disc_strike = K * exp(-r * T)   # strike discounted at the risk-free rate
    if CallPutFlag == 'c':
        return fwd_spot * norm.cdf(d1) - disc_strike * norm.cdf(d2)
    return disc_strike * norm.cdf(-d2) - fwd_spot * norm.cdf(-d1)
# Function for black scholes greeks
# Set for Puts
def BlackScholes_Greeks(CallPutFlag, S, K, r, v, T, d):
    """Black-Scholes greeks: returns (Delta, Gamma, Theta, Vega, Rho).

    'c' gives call greeks, anything else put greeks.  NOTE the argument
    order differs from BlackScholes(): (flag, S, K, r, v, T, d).

    Bug fix: the put branch previously dropped the dividend yield `d`
    from d1/d2 (it used log(S/K) + r*T), so call and put greeks were
    inconsistent whenever d != 0.  Both branches now share the same
    d1/d2 as the pricing function, restoring e.g. Delta_call - Delta_put
    == 1 for this parameterization.
    """
    T_sqrt = sqrt(T)
    # d1/d2 are identical for calls and puts, including the dividend yield.
    d1 = (log(float(S) / K) + ((r - d) + v * v / 2.) * T) / (v * T_sqrt)
    d2 = d1 - v * T_sqrt
    if CallPutFlag == 'c':
        Delta = norm.cdf(d1)
        # NOTE(review): as in the original, Theta/Rho omit the dividend
        # carry terms (exp(-d*T) factors) — confirm intended convention.
        Theta = -(S * v * norm.pdf(d1)) / (2 * T_sqrt) \
            - r * K * exp(-r * T) * norm.cdf(d2)
        Rho = K * T * exp(-r * T) * norm.cdf(d2)
    else:
        Delta = -norm.cdf(-d1)
        Theta = -(S * v * norm.pdf(d1)) / (2 * T_sqrt) \
            + r * K * exp(-r * T) * norm.cdf(-d2)
        Rho = -K * T * exp(-r * T) * norm.cdf(-d2)
    # Gamma and Vega are identical for calls and puts.
    Gamma = norm.pdf(d1) / (S * v * T_sqrt)
    Vega = S * T_sqrt * norm.pdf(d1)
    return Delta, Gamma, Theta, Vega, Rho
| apache-2.0 |
Capstone2017/Machine-Learning-NLP | notebook/2nd_RNN.py | 1 | 3452 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 8 23:07:21 2016
@author: Yu Zhipeng
"""
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding
from keras.layers import LSTM, SimpleRNN, GRU
from keras.constraints import maxnorm
import pandas as pd
# read data from csv and generate raw train and test data
pinitial_train=pd.read_csv('pgpub_claims_fulltext.csv',delimiter='\t',nrows=3000,encoding='utf-8')
pfinal_train=pd.read_csv('patent_claims_fulltext.csv',delimiter='\t',nrows=3000,encoding='utf-8')
pinitial_test=pd.read_csv('pgpub_claims_fulltext.csv',delimiter='\t',nrows=3000,skiprows=range(1,3000),encoding='utf-8')
pfinal_test=pd.read_csv('patent_claims_fulltext.csv',delimiter='\t',nrows=3000,skiprows=range(1,3000),encoding='utf-8')
import numpy as np
X_train = pinitial_train['pub_no,appl_id,claim_no,claim_txt,dependencies,ind_flg'].tolist()+pfinal_train['pat_no,claim_no,claim_txt,dependencies,ind_flg,appl_id'].tolist()#.astype(str)
y_train = np.append(np.zeros(len(pinitial_train)),np.ones(len(pfinal_train)))
X_test = pinitial_test['pub_no,appl_id,claim_no,claim_txt,dependencies,ind_flg'].tolist()+pfinal_test['pat_no,claim_no,claim_txt,dependencies,ind_flg,appl_id'].tolist()
y_test = np.append(np.zeros(len(pinitial_test)),np.ones(len(pfinal_test)))
max_features = 20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 64
'''
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train_one_hot, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test_one_hot, maxlen=maxlen)
'''
from collections import Counter

# Build the vocabulary: rank every whitespace-separated token from the
# combined train+test corpus by frequency and keep the max_features-1
# most common ones.  Ids start at 1 so that 0 is free for padding.
max_features = 20000
all_words = []
for text in X_train + X_test:
    all_words.extend(text.split())
unique_words_ordered = [x[0] for x in Counter(all_words).most_common()]
word_ids = {}
rev_word_ids = {}  # id -> word, kept for decoding/inspection
for i, x in enumerate(unique_words_ordered[:max_features-1]):
    word_ids[x] = i + 1  # so we can pad with 0s
    rev_word_ids[i + 1] = x

# Encode each document as a sequence of word ids, silently dropping
# out-of-vocabulary tokens.  (Despite the names, these are id sequences
# fed to an Embedding layer, not one-hot vectors.)
X_train_one_hot = []
for text in X_train:
    t_ids = [word_ids[x] for x in text.split() if x in word_ids]
    X_train_one_hot.append(t_ids)
X_test_one_hot = []
for text in X_test:
    t_ids = [word_ids[x] for x in text.split() if x in word_ids]
    X_test_one_hot.append(t_ids)

# Pad/truncate every sequence to exactly maxlen timesteps for the RNN.
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train_one_hot, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test_one_hot, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen,dropout=0.2))
model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) # try using a GRU instead, for fun
model.add(Dense(1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=5,
validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test,
batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
| mit |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/tests/test_expressions.py | 4 | 16414 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
import nose
import re
from numpy.random import randn
import operator
import numpy as np
from numpy.testing import assert_array_equal
from pandas.core.api import DataFrame, Panel
from pandas.computation import expressions as expr
from pandas import compat
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_panel4d_equal)
import pandas.util.testing as tm
from numpy.testing.decorators import slow
if not expr._USE_NUMEXPR:
try:
import numexpr
except ImportError:
msg = "don't have"
else:
msg = "not using"
raise nose.SkipTest("{0} numexpr".format(msg))
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
_mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') })
_integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
columns=list('ABCD'), dtype='int64')
_frame_panel = Panel(dict(ItemA=_frame.copy(), ItemB=(_frame.copy() + 3), ItemC=_frame.copy(), ItemD=_frame.copy()))
_frame2_panel = Panel(dict(ItemA=_frame2.copy(), ItemB=(_frame2.copy() + 3),
ItemC=_frame2.copy(), ItemD=_frame2.copy()))
_integer_panel = Panel(dict(ItemA=_integer,
ItemB=(_integer + 34).astype('int64')))
_integer2_panel = Panel(dict(ItemA=_integer2,
ItemB=(_integer2 + 34).astype('int64')))
_mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
class TestExpressions(tm.TestCase):
_multiprocess_can_split_ = False
def setUp(self):
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
self.integer = _integer.copy()
self._MIN_ELEMENTS = expr._MIN_ELEMENTS
def tearDown(self):
expr._MIN_ELEMENTS = self._MIN_ELEMENTS
@nose.tools.nottest
def run_arithmetic_test(self, df, other, assert_func, check_dtype=False,
                        test_flex=True):
    """Check each arithmetic op gives the same result with numexpr on/off.

    Parameters: df/other are the operands, assert_func compares the two
    results, check_dtype additionally asserts truediv yields floats, and
    test_flex switches between flex methods (df.add(...)) and the plain
    operator module functions.

    Bug fix: the failure path called com.pprint_thing, but `com`
    (pandas.core.common) is never imported in this module, so a failing
    comparison raised NameError and masked the real error; plain print
    (print_function is imported at the top of the file) is used instead.
    """
    expr._MIN_ELEMENTS = 0  # force numexpr even on these small frames
    operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow']
    if not compat.PY3:
        operations.append('div')
    for arith in operations:
        # On PY2, 'div' maps to operator.truediv for the non-flex path.
        operator_name = 'truediv' if arith == 'div' else arith
        if test_flex:
            op = lambda x, y: getattr(df, arith)(y)
            op.__name__ = arith
        else:
            op = getattr(operator, operator_name)
        expr.set_use_numexpr(False)
        expected = op(df, other)
        expr.set_use_numexpr(True)
        result = op(df, other)
        try:
            if check_dtype:
                if arith == 'truediv':
                    assert expected.dtype.kind == 'f'
            assert_func(expected, result)
        except Exception:
            print("Failed test with operator %r" % op.__name__)
            raise
def test_integer_arithmetic(self):
self.run_arithmetic_test(self.integer, self.integer,
assert_frame_equal)
self.run_arithmetic_test(self.integer.icol(0), self.integer.icol(0),
assert_series_equal, check_dtype=True)
@nose.tools.nottest
def run_binary_test(self, df, other, assert_func,
                    test_flex=False, numexpr_ops=set(['gt', 'lt', 'ge',
                                                      'le', 'eq', 'ne'])):
    """
    tests solely that the result is the same whether or not numexpr is
    enabled.  Need to test whether the function does the correct thing
    elsewhere.

    numexpr_ops lists the comparisons that are expected to be dispatched
    to numexpr; every other op must bypass it.  (The set default is never
    mutated, so the mutable-default is harmless here.)

    Bug fix: the failure path called com.pprint_thing, but `com`
    (pandas.core.common) is never imported in this module, so a failure
    raised NameError and masked the real error; plain print is used
    instead.
    """
    expr._MIN_ELEMENTS = 0  # force numexpr even on these small frames
    expr.set_test_mode(True)  # record whether numexpr was actually used
    operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
    for arith in operations:
        if test_flex:
            op = lambda x, y: getattr(df, arith)(y)
            op.__name__ = arith
        else:
            op = getattr(operator, arith)
        expr.set_use_numexpr(False)
        expected = op(df, other)
        expr.set_use_numexpr(True)
        expr.get_test_result()  # flush any previously recorded result
        result = op(df, other)
        used_numexpr = expr.get_test_result()
        try:
            if arith in numexpr_ops:
                assert used_numexpr, "Did not use numexpr as expected."
            else:
                assert not used_numexpr, "Used numexpr unexpectedly."
            assert_func(expected, result)
        except Exception:
            print("Failed test with operation %r" % arith)
            print("test_flex was %r" % test_flex)
            raise
def run_frame(self, df, other, binary_comp=None, run_binary=True,
**kwargs):
self.run_arithmetic_test(df, other, assert_frame_equal,
test_flex=False, **kwargs)
self.run_arithmetic_test(df, other, assert_frame_equal, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
expr.set_use_numexpr(False)
binary_comp = other + 1
expr.set_use_numexpr(True)
self.run_binary_test(df, binary_comp, assert_frame_equal,
test_flex=False, **kwargs)
self.run_binary_test(df, binary_comp, assert_frame_equal,
test_flex=True, **kwargs)
def run_series(self, ser, other, binary_comp=None, **kwargs):
self.run_arithmetic_test(ser, other, assert_series_equal,
test_flex=False, **kwargs)
self.run_arithmetic_test(ser, other, assert_almost_equal,
test_flex=True, **kwargs)
# series doesn't uses vec_compare instead of numexpr...
# if binary_comp is None:
# binary_comp = other + 1
# self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=False,
# **kwargs)
# self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=True,
# **kwargs)
def run_panel(self, panel, other, binary_comp=None, run_binary=True,
assert_func=assert_panel_equal, **kwargs):
self.run_arithmetic_test(panel, other, assert_func, test_flex=False,
**kwargs)
self.run_arithmetic_test(panel, other, assert_func, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
binary_comp = other + 1
self.run_binary_test(panel, binary_comp, assert_func,
test_flex=False, **kwargs)
self.run_binary_test(panel, binary_comp, assert_func,
test_flex=True, **kwargs)
def test_integer_arithmetic_frame(self):
self.run_frame(self.integer, self.integer)
def test_integer_arithmetic_series(self):
self.run_series(self.integer.icol(0), self.integer.icol(0))
@slow
def test_integer_panel(self):
self.run_panel(_integer2_panel, np.random.randint(1, 100))
def test_float_arithemtic_frame(self):
self.run_frame(self.frame2, self.frame2)
def test_float_arithmetic_series(self):
self.run_series(self.frame2.icol(0), self.frame2.icol(0))
@slow
def test_float_panel(self):
self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)
@slow
def test_panel4d(self):
self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,
assert_func=assert_panel4d_equal, binary_comp=3)
def test_mixed_arithmetic_frame(self):
# TODO: FIGURE OUT HOW TO GET IT TO WORK...
# can't do arithmetic because comparison methods try to do *entire*
# frame instead of by-column
self.run_frame(self.mixed2, self.mixed2, run_binary=False)
def test_mixed_arithmetic_series(self):
for col in self.mixed2.columns:
self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)
@slow
def test_mixed_panel(self):
self.run_panel(_mixed2_panel, np.random.randint(1, 100),
binary_comp=-2)
def test_float_arithemtic(self):
self.run_arithmetic_test(self.frame, self.frame, assert_frame_equal)
self.run_arithmetic_test(self.frame.icol(0), self.frame.icol(0),
assert_series_equal, check_dtype=True)
def test_mixed_arithmetic(self):
self.run_arithmetic_test(self.mixed, self.mixed, assert_frame_equal)
for col in self.mixed.columns:
self.run_arithmetic_test(self.mixed[col], self.mixed[col],
assert_series_equal)
def test_integer_with_zeros(self):
self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
self.run_arithmetic_test(self.integer, self.integer, assert_frame_equal)
self.run_arithmetic_test(self.integer.icol(0), self.integer.icol(0),
assert_series_equal)
def test_invalid(self):
    """Cases where _can_use_numexpr must refuse (or accept) dispatch."""
    # no op: without an op string there is nothing to hand to numexpr
    result = expr._can_use_numexpr(operator.add, None, self.frame, self.frame, 'evaluate')
    self.assertFalse(result)

    # mixed: mixed-dtype operand blocks numexpr evaluation
    result = expr._can_use_numexpr(operator.add, '+', self.mixed, self.frame, 'evaluate')
    self.assertFalse(result)

    # min elements: operands smaller than _MIN_ELEMENTS stay in pure pandas
    result = expr._can_use_numexpr(operator.add, '+', self.frame2, self.frame2, 'evaluate')
    self.assertFalse(result)

    # ok, we only check on first part of expression
    result = expr._can_use_numexpr(operator.add, '+', self.frame, self.frame2, 'evaluate')
    self.assertTrue(result)
def test_binary_ops(self):
def testit():
for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]:
for op, op_str in [('add','+'),('sub','-'),('mul','*'),('div','/'),('pow','**')]:
if op == 'div':
op = getattr(operator, 'truediv', None)
else:
op = getattr(operator, op, None)
if op is not None:
result = expr._can_use_numexpr(op, op_str, f, f, 'evaluate')
self.assertNotEqual(result, f._is_mixed_type)
result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
assert_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate')
self.assertFalse(result)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_boolean_ops(self):
def testit():
for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]:
f11 = f
f12 = f + 1
f21 = f2
f22 = f2 + 1
for op, op_str in [('gt','>'),('lt','<'),('ge','>='),('le','<='),('eq','=='),('ne','!=')]:
op = getattr(operator,op)
result = expr._can_use_numexpr(op, op_str, f11, f12, 'evaluate')
self.assertNotEqual(result, f11._is_mixed_type)
result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
assert_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate')
self.assertFalse(result)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_where(self):
def testit():
for f in [ self.frame, self.frame2, self.mixed, self.mixed2 ]:
for cond in [ True, False ]:
c = np.empty(f.shape,dtype=np.bool_)
c.fill(cond)
result = expr.where(c, f.values, f.values+1)
expected = np.where(c, f.values, f.values+1)
assert_array_equal(result,expected)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_bool_ops_raise_on_arithmetic(self):
df = DataFrame({'a': np.random.rand(10) > 0.5,
'b': np.random.rand(10) > 0.5})
names = 'div', 'truediv', 'floordiv', 'pow'
ops = '/', '/', '//', '**'
msg = 'operator %r not implemented for bool dtypes'
for op, name in zip(ops, names):
if not compat.PY3 or name != 'div':
f = getattr(operator, name)
err_msg = re.escape(msg % op)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df, df)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df.a, df.b)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df.a, True)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(False, df.a)
with tm.assertRaisesRegexp(TypeError, err_msg):
f(False, df)
with tm.assertRaisesRegexp(TypeError, err_msg):
f(df, True)
def test_bool_ops_warn_on_arithmetic(self):
n = 10
df = DataFrame({'a': np.random.rand(n) > 0.5,
'b': np.random.rand(n) > 0.5})
names = 'add', 'mul', 'sub'
ops = '+', '*', '-'
subs = {'+': '|', '*': '&', '-': '^'}
sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}
for op, name in zip(ops, names):
f = getattr(operator, name)
fe = getattr(operator, sub_funcs[subs[op]])
with tm.use_numexpr(True, min_elements=5):
with tm.assert_produces_warning():
r = f(df, df)
e = fe(df, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning():
r = f(df.a, df.b)
e = fe(df.a, df.b)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning():
r = f(df.a, True)
e = fe(df.a, True)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning():
r = f(False, df.a)
e = fe(False, df.a)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning():
r = f(False, df)
e = fe(False, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning():
r = f(df, True)
e = fe(df, True)
tm.assert_frame_equal(r, e)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_toolkits/axes_grid/figures/demo_colorbar_of_inset_axes.py | 5 | 1150 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.inset_locator import inset_axes, zoomed_inset_axes
from mpl_toolkits.axes_grid.colorbar import colorbar
def get_demo_image():
    """Load matplotlib's bundled 15x15 demo array and its plot extent.

    Returns (z, extent): z is the 'bivariate_normal' sample array and
    extent = (xmin, xmax, ymin, ymax) suitable for imshow().
    """
    from matplotlib.cbook import get_sample_data
    import numpy as np
    # asfileobj=False returns the on-disk path rather than an open file.
    f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
    z = np.load(f)
    # z is a numpy array of 15x15
    return z, (-3,4,-4,3)
# Build the main axes plus a zoomed inset view that carries its own colorbar.
fig = plt.figure(1, [5,4])
ax = fig.add_subplot(111)
Z, extent = get_demo_image()
ax.set(aspect=1,
       xlim=(-15, 15),
       ylim=(-20, 5))
axins = zoomed_inset_axes(ax, 2, loc=2)  # zoom factor = 2, placed upper-left
im = axins.imshow(Z, extent=extent, interpolation="nearest",
                  origin="lower")
# hide tick labels of the inset so it reads as a thumbnail
plt.xticks(visible=False)
plt.yticks(visible=False)
# colorbar: a thin axes anchored just right of the inset
cax = inset_axes(axins,
                 width="5%",  # width = 5% of parent_bbox width
                 height="100%",  # height = 100% of parent_bbox height
                 loc=3,
                 bbox_to_anchor=(1.05, 0., 1, 1),
                 bbox_transform=axins.transAxes,
                 borderpad=0,
                 )
colorbar(im, cax=cax)  #, ticks=[1,2,3])
plt.draw()
plt.show()
| gpl-2.0 |
mhue/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data: three Gaussian blobs
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The bandwidth can be estimated from the data itself; quantile=0.2 uses the
# pairwise-distance quantile over a 500-sample subset.
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
# Mean shift infers the number of clusters rather than taking it as input.
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    # members as small dots, the converged mode as a big circle
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
stuart-knock/bokeh | bokeh/charts/builder/step_builder.py | 43 | 5445 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Step class which lets you build your Step charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from six import string_types
from ..utils import cycle_colors
from .._builder import create_and_build, Builder
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Step(values, index=None, **kws):
    """ Create a step chart using :class:`StepBuilder <bokeh.charts.builder.step_builder.StepBuilder>`
    render the geometry from values and index.

    Args:
        values (iterable): iterable 2d representing the data series
            values matrix.
        index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as an **1d iterable** of any sort that will be used as
            series common index or a **string** that corresponds to the key of the
            mapping to be used as index (and not as data series) if
            area.values is a mapping (like a dict, an OrderedDict
            or a pandas DataFrame)

    In addition to the parameters specific to this chart,
    :ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.

    Returns:
        a new :class:`Chart <bokeh.charts.Chart>`

    Examples:
        .. bokeh-plot::
            :source-position: above

            from collections import OrderedDict
            from bokeh.charts import Step, output_file, show

            # (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
            xyvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
            step = Step(xyvalues, title="Steps", legend="top_left", ylabel='Languages')

            output_file('step.html')
            show(step)
    """
    return create_and_build(StepBuilder, values, index=index, **kws)
class StepBuilder(Builder):
    """Builder in charge of plotting Step charts in an easy and
    intuitive way.

    Essentially, we provide a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the ranges.
    And finally add the needed lines taking the references from the
    source.
    """
    index = Any(help="""
    An index to be used for all data series as follows:
    - A 1d iterable of any sort that will be used as
      series common index
    - As a string that corresponds to the key of the
      mapping to be used as index (and not as data
      series) if area.values is a mapping (like a dict,
      an OrderedDict or a pandas DataFrame)
    """)

    def _process_data(self):
        """Calculate the chart properties from ``Step.values``.

        Build the ``_data`` dict with references to all the points to be
        used by the line glyphs inside the ``_yield_renderers`` method.
        """
        self._data = dict()
        self._groups = []
        orig_xs = self._values_index
        # Duplicate interior x values: [x0, x1, x1, x2, x2, ...] so every
        # horizontal "tread" of the step ends where the next one starts.
        # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``int`` is the equivalent dtype spelling.
        xs = np.empty(2*len(orig_xs)-1, dtype=int)
        xs[::2] = orig_xs[:]
        xs[1::2] = orig_xs[1:]
        self._data['x'] = xs
        for i, col in enumerate(self._values.keys()):
            if isinstance(self.index, string_types) and col == self.index:
                # the index column is not a data series
                continue
            # save every new group we find
            self._groups.append(col)
            # y values are duplicated the other way round: the previous
            # point's y is held until the next x (step-after geometry).
            orig_ys = np.array([self._values[col][x] for x in orig_xs])
            ys = np.empty(2*len(orig_ys)-1)
            ys[::2] = orig_ys[:]
            ys[1::2] = orig_ys[:-1]
            self._data['y_%s' % col] = ys

    def _set_sources(self):
        """Push the Step data into the ColumnDataSource and calculate
        the proper ranges.
        """
        self._source = ColumnDataSource(self._data)
        self.x_range = DataRange1d()
        self.y_range = DataRange1d()

    def _yield_renderers(self):
        """Use the line glyphs to connect the xy points in the Step.

        Takes reference points from the data loaded at the ColumnDataSource.
        """
        colors = cycle_colors(self._groups, self.palette)
        for i, name in enumerate(self._groups):
            # draw the step horizontal segment
            glyph = Line(x="x", y="y_%s" % name, line_color=colors[i], line_width=2)
            renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
            self._legends.append((self._groups[i], [renderer]))
            yield renderer
| bsd-3-clause |
jdmcbr/geopandas | geopandas/io/tests/generate_legacy_storage_files.py | 2 | 2736 | """
Script to create the data and write legacy storage (pickle) files.
Based on pandas' generate_legacy_storage_files.py script.
To use this script, create an environment for which you want to
generate pickles, activate the environment, and run this script as:
$ python geopandas/geopandas/io/tests/generate_legacy_storage_files.py \
geopandas/geopandas/io/tests/data/pickle/ pickle
This script generates a storage file for the current arch, system,
The idea here is you are using the *current* version of the
generate_legacy_storage_files with an *older* version of geopandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles and test versus the current data that is generated
(with master). These are then compared.
"""
import os
import pickle
import platform
import sys
import pandas as pd
import geopandas
from shapely.geometry import Point
def create_pickle_data():
    """Build the dict of GeoDataFrames that gets written to the pickle."""
    points = [Point(1, 1), Point(2, 2), Point(3, 3)]
    # GeoDataFrame using a non-default geometry column name
    gdf_the_geom = geopandas.GeoDataFrame(
        {"a": [1, 2, 3], "the_geom": points},
        geometry="the_geom",
    )
    # GeoDataFrame carrying a CRS
    gdf_crs = geopandas.GeoDataFrame(
        {"a": [0.1, 0.2, 0.3], "geometry": [Point(1, 1), Point(2, 2), Point(3, 3)]},
        crs="EPSG:4326",
    )
    return {"gdf_the_geom": gdf_the_geom, "gdf_crs": gdf_crs}
def platform_name():
    """Return a file-name stem identifying the versions/arch/OS of this run."""
    parts = [
        str(geopandas.__version__),
        "pd-" + str(pd.__version__),
        "py-" + str(platform.python_version()),
        str(platform.machine()),
        str(platform.system().lower()),
    ]
    return "_".join(parts)
def write_legacy_pickles(output_dir):
    """Pickle the test data for the current environment into *output_dir*.

    The file name encodes the geopandas/pandas/python versions plus the
    platform, so pickles from several environments can coexist.
    """
    print(
        "This script generates a storage file for the current arch, system, "
        "and python version"
    )
    # BUG FIX: this line previously read
    #   print("geopandas version: {}").format(...)
    # calling .format() on print()'s return value (None), which raised
    # AttributeError at runtime.
    print("geopandas version: {}".format(geopandas.__version__))
    print(" output dir : {}".format(output_dir))
    print(" storage format: pickle")
    pth = "{}.pickle".format(platform_name())
    # context manager guarantees the file is closed even if pickling fails
    with open(os.path.join(output_dir, pth), "wb") as fh:
        pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL)
    print("created pickle file: {}".format(pth))
def main():
    """CLI entry point: validate argv and write the legacy pickle file.

    Usage: generate_legacy_storage_files.py <output_dir> <storage_type>
    """
    if len(sys.argv) != 3:
        # 'exit' is a site-module convenience helper and may be missing
        # (python -S, frozen apps); sys.exit is the reliable spelling.
        sys.exit(
            "Specify output directory and storage type: generate_legacy_"
            "storage_files.py <output_dir> <storage_type> "
        )
    output_dir = str(sys.argv[1])
    storage_type = str(sys.argv[2])
    if storage_type == "pickle":
        write_legacy_pickles(output_dir=output_dir)
    else:
        sys.exit("storage_type must be one of {'pickle'}")
if __name__ == "__main__":
main()
| bsd-3-clause |
vybstat/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
# Public API of sklearn.datasets: dataset loaders, fetchers and
# synthetic sample generators re-exported from the submodules above.
__all__ = ['clear_data_home',
           'dump_svmlight_file',
           'fetch_20newsgroups',
           'fetch_20newsgroups_vectorized',
           'fetch_lfw_pairs',
           'fetch_lfw_people',
           'fetch_mldata',
           'fetch_olivetti_faces',
           'fetch_species_distributions',
           'fetch_california_housing',
           'fetch_covtype',
           'fetch_rcv1',
           'get_data_home',
           'load_boston',
           'load_diabetes',
           'load_digits',
           'load_files',
           'load_iris',
           'load_lfw_pairs',
           'load_lfw_people',
           'load_linnerud',
           'load_mlcomp',
           'load_sample_image',
           'load_sample_images',
           'load_svmlight_file',
           'load_svmlight_files',
           'make_biclusters',
           'make_blobs',
           'make_circles',
           'make_classification',
           'make_checkerboard',
           'make_friedman1',
           'make_friedman2',
           'make_friedman3',
           'make_gaussian_quantiles',
           'make_hastie_10_2',
           'make_low_rank_matrix',
           'make_moons',
           'make_multilabel_classification',
           'make_regression',
           'make_s_curve',
           'make_sparse_coded_signal',
           'make_sparse_spd_matrix',
           'make_sparse_uncorrelated',
           'make_spd_matrix',
           'make_swiss_roll',
           'mldata_filename']
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/datasets/tests/test_openml.py | 5 | 51973 | """Test the openml loader.
"""
import gzip
import json
import numpy as np
import os
import re
import scipy.sparse
import sklearn
import pytest
from sklearn import config_context
from sklearn.datasets import fetch_openml
from sklearn.datasets._openml import (_open_openml_url,
_arff,
_DATA_FILE,
_convert_arff_data,
_convert_arff_data_dataframe,
_get_data_description_by_id,
_get_local_path,
_retry_with_clean_cache,
_feature_to_dtype)
from sklearn.utils._testing import (assert_warns_message,
assert_raise_message)
from sklearn.utils import is_scalar_nan
from sklearn.utils._testing import assert_allclose, assert_array_equal
from urllib.error import HTTPError
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.externals._arff import ArffContainerType
from functools import partial
from sklearn.utils._testing import fails_if_pypy
currdir = os.path.dirname(os.path.abspath(__file__))
# if True, urlopen will be monkey patched to only use local files
test_offline = True
def _test_features_list(data_id):
    """Check column-by-column that values decoded by ``fetch_openml`` for
    dataset ``data_id`` round-trip back to the raw (undecoded) ARFF values.
    """
    # XXX Test is intended to verify/ensure correct decoding behavior
    # Not usable with sparse data or datasets that have columns marked as
    # {row_identifier, ignore}
    def decode_column(data_bunch, col_idx):
        # Map encoded category indices back to their string labels;
        # missing values (NaN) decode to None.
        col_name = data_bunch.feature_names[col_idx]
        if col_name in data_bunch.categories:
            # XXX: This would be faster with np.take, although it does not
            # handle missing values fast (also not with mode='wrap')
            cat = data_bunch.categories[col_name]
            result = [None if is_scalar_nan(idx) else cat[int(idx)]
                      for idx in data_bunch.data[:, col_idx]]
            return np.array(result, dtype='O')
        else:
            # non-nominal attribute
            return data_bunch.data[:, col_idx]
    data_bunch = fetch_openml(data_id=data_id, cache=False,
                              target_column=None, as_frame=False)
    # also obtain decoded arff
    data_description = _get_data_description_by_id(data_id, None)
    sparse = data_description['format'].lower() == 'sparse_arff'
    if sparse is True:
        raise ValueError('This test is not intended for sparse data, to keep '
                         'code relatively simple')
    url = _DATA_FILE.format(data_description['file_id'])
    with _open_openml_url(url, data_home=None) as f:
        data_arff = _arff.load((line.decode('utf-8') for line in f),
                               return_type=(_arff.COO if sparse
                                            else _arff.DENSE_GEN),
                               encode_nominal=False)
    data_downloaded = np.array(list(data_arff['data']), dtype='O')
    for i in range(len(data_bunch.feature_names)):
        # XXX: Test per column, as this makes it easier to avoid problems with
        # missing values
        np.testing.assert_array_equal(data_downloaded[:, i],
                                      decode_column(data_bunch, i))
def _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               expected_data_dtype, expected_target_dtype,
                               expect_sparse, compare_default_target):
    """Fetch one (mocked) OpenML dataset by name and by id and verify shapes,
    dtypes, target handling, sparsity and the return_X_y option.

    Returns the Bunch obtained from the id-based fetch.
    """
    # fetches a dataset in three various ways from OpenML, using the
    # fetch_openml function, and does various checks on the validity of the
    # result. Note that this function can be mocked (by invoking
    # _monkey_patch_webbased_functions before invoking this function)
    data_by_name_id = fetch_openml(name=data_name, version=data_version,
                                   cache=False, as_frame=False)
    assert int(data_by_name_id.details['id']) == data_id
    # Please note that cache=False is crucial, as the monkey patched files are
    # not consistent with reality
    fetch_openml(name=data_name, cache=False, as_frame=False)
    # without specifying the version, there is no guarantee that the data id
    # will be the same
    # fetch with dataset id
    data_by_id = fetch_openml(data_id=data_id, cache=False,
                              target_column=target_column, as_frame=False)
    assert data_by_id.details['name'] == data_name
    assert data_by_id.data.shape == (expected_observations, expected_features)
    if isinstance(target_column, str):
        # single target, so target is vector
        assert data_by_id.target.shape == (expected_observations, )
        assert data_by_id.target_names == [target_column]
    elif isinstance(target_column, list):
        # multi target, so target is array
        assert data_by_id.target.shape == (expected_observations,
                                           len(target_column))
        assert data_by_id.target_names == target_column
    assert data_by_id.data.dtype == expected_data_dtype
    assert data_by_id.target.dtype == expected_target_dtype
    assert len(data_by_id.feature_names) == expected_features
    for feature in data_by_id.feature_names:
        assert isinstance(feature, str)
    # TODO: pass in a list of expected nominal features
    for feature, categories in data_by_id.categories.items():
        feature_idx = data_by_id.feature_names.index(feature)
        values = np.unique(data_by_id.data[:, feature_idx])
        values = values[np.isfinite(values)]
        assert set(values) <= set(range(len(categories)))
    if compare_default_target:
        # check whether the data by id and data by id target are equal
        data_by_id_default = fetch_openml(data_id=data_id, cache=False,
                                          as_frame=False)
        np.testing.assert_allclose(data_by_id.data, data_by_id_default.data)
        if data_by_id.target.dtype == np.float64:
            np.testing.assert_allclose(data_by_id.target,
                                       data_by_id_default.target)
        else:
            assert np.array_equal(data_by_id.target, data_by_id_default.target)
    if expect_sparse:
        assert isinstance(data_by_id.data, scipy.sparse.csr_matrix)
    else:
        assert isinstance(data_by_id.data, np.ndarray)
        # np.isnan doesn't work on CSR matrix
        assert (np.count_nonzero(np.isnan(data_by_id.data)) ==
                expected_missing)
    # test return_X_y option
    fetch_func = partial(fetch_openml, data_id=data_id, cache=False,
                         target_column=target_column, as_frame=False)
    check_return_X_y(data_by_id, fetch_func)
    return data_by_id
class _MockHTTPResponse:
    """Minimal stand-in for urllib's HTTP response object.

    Wraps a file-like ``data`` stream; when ``is_gzip`` is set, :meth:`info`
    advertises a gzip ``Content-Encoding`` header like a real response would.
    """
    def __init__(self, data, is_gzip):
        self.data = data
        self.is_gzip = is_gzip

    def read(self, amt=-1):
        # delegate straight to the wrapped stream
        return self.data.read(amt)

    def close(self):
        self.data.close()

    def info(self):
        # mimic the headers mapping of a real HTTP response
        return {'Content-Encoding': 'gzip'} if self.is_gzip else {}

    def __iter__(self):
        return iter(self.data)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # never suppress exceptions
        return False
def _monkey_patch_webbased_functions(context,
                                     data_id,
                                     gzip_response):
    """Redirect ``sklearn.datasets._openml.urlopen`` to local fixture files.

    ``context`` is a pytest ``monkeypatch`` fixture, ``data_id`` selects the
    directory of cached responses under ``data/openml/<data_id>``, and
    ``gzip_response`` controls whether the mocked server honours a gzip
    Accept-Encoding header.
    """
    # monkey patches the urlopen function. Important note: Do NOT use this
    # in combination with a regular cache directory, as the files that are
    # stored as cache should not be mixed up with real openml datasets
    url_prefix_data_description = "https://openml.org/api/v1/json/data/"
    url_prefix_data_features = "https://openml.org/api/v1/json/data/features/"
    url_prefix_download_data = "https://openml.org/data/v1/"
    url_prefix_data_list = "https://openml.org/api/v1/json/data/list/"
    path_suffix = '.gz'
    read_fn = gzip.open
    def _file_name(url, suffix):
        # map a URL to the on-disk fixture file name
        return (re.sub(r'\W', '-', url[len("https://openml.org/"):])
                + suffix + path_suffix)
    def _mock_urlopen_data_description(url, has_gzip_header):
        assert url.startswith(url_prefix_data_description)
        path = os.path.join(currdir, 'data', 'openml', str(data_id),
                            _file_name(url, '.json'))
        if has_gzip_header and gzip_response:
            # serve the stored gzip bytes as-is, with a gzip header
            fp = open(path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            # decompress on the fly so the client sees plain bytes
            fp = read_fn(path, 'rb')
            return _MockHTTPResponse(fp, False)
    def _mock_urlopen_data_features(url, has_gzip_header):
        assert url.startswith(url_prefix_data_features)
        path = os.path.join(currdir, 'data', 'openml', str(data_id),
                            _file_name(url, '.json'))
        if has_gzip_header and gzip_response:
            fp = open(path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            fp = read_fn(path, 'rb')
            return _MockHTTPResponse(fp, False)
    def _mock_urlopen_download_data(url, has_gzip_header):
        assert (url.startswith(url_prefix_download_data))
        path = os.path.join(currdir, 'data', 'openml', str(data_id),
                            _file_name(url, '.arff'))
        if has_gzip_header and gzip_response:
            fp = open(path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            fp = read_fn(path, 'rb')
            return _MockHTTPResponse(fp, False)
    def _mock_urlopen_data_list(url, has_gzip_header):
        assert url.startswith(url_prefix_data_list)
        json_file_path = os.path.join(currdir, 'data', 'openml',
                                      str(data_id), _file_name(url, '.json'))
        # load the file itself, to simulate a http error
        json_data = json.loads(read_fn(json_file_path, 'rb').
                               read().decode('utf-8'))
        if 'error' in json_data:
            raise HTTPError(url=None, code=412,
                            msg='Simulated mock error',
                            hdrs=None, fp=None)
        if has_gzip_header:
            fp = open(json_file_path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            fp = read_fn(json_file_path, 'rb')
            return _MockHTTPResponse(fp, False)
    def _mock_urlopen(request):
        # dispatch on URL prefix to the appropriate fixture handler
        url = request.get_full_url()
        has_gzip_header = request.get_header('Accept-encoding') == "gzip"
        if url.startswith(url_prefix_data_list):
            return _mock_urlopen_data_list(url, has_gzip_header)
        elif url.startswith(url_prefix_data_features):
            return _mock_urlopen_data_features(url, has_gzip_header)
        elif url.startswith(url_prefix_download_data):
            return _mock_urlopen_download_data(url, has_gzip_header)
        elif url.startswith(url_prefix_data_description):
            return _mock_urlopen_data_description(url, has_gzip_header)
        else:
            raise ValueError('Unknown mocking URL pattern: %s' % url)
    # XXX: Global variable
    if test_offline:
        context.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
@pytest.mark.parametrize('feature, expected_dtype', [
    ({'data_type': 'string', 'number_of_missing_values': '0'}, object),
    ({'data_type': 'string', 'number_of_missing_values': '1'}, object),
    ({'data_type': 'numeric', 'number_of_missing_values': '0'}, np.float64),
    ({'data_type': 'numeric', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'real', 'number_of_missing_values': '0'}, np.float64),
    ({'data_type': 'real', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'integer', 'number_of_missing_values': '0'}, np.int64),
    # integer columns with missing values need a float dtype to hold NaN
    ({'data_type': 'integer', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'nominal', 'number_of_missing_values': '0'}, 'category'),
    ({'data_type': 'nominal', 'number_of_missing_values': '1'}, 'category'),
    ])
def test_feature_to_dtype(feature, expected_dtype):
    """Each OpenML feature description maps to the expected dtype."""
    assert _feature_to_dtype(feature) == expected_dtype
@pytest.mark.parametrize('feature', [
    {'data_type': 'datatime', 'number_of_missing_values': '0'}
    ])
def test_feature_to_dtype_error(feature):
    """Unknown data_type values raise a descriptive ValueError."""
    msg = 'Unsupported feature: {}'.format(feature)
    with pytest.raises(ValueError, match=msg):
        _feature_to_dtype(feature)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas(monkeypatch):
    """as_frame=True on iris yields a numeric DataFrame, a categorical
    target Series and a combined frame with consistent shapes."""
    # classification dataset with numeric only columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 61
    data_shape = (150, 4)
    target_shape = (150, )
    frame_shape = (150, 5)
    target_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
                                     'Iris-virginica'])
    data_dtypes = [np.float64] * 4
    data_names = ['sepallength', 'sepalwidth', 'petallength', 'petalwidth']
    target_name = 'class'
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == [target_name]
    assert isinstance(target, pd.Series)
    assert target.dtype == target_dtype
    assert target.shape == target_shape
    assert target.name == target_name
    assert target.index.is_unique
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == data_dtypes + [target_dtype])
    assert frame.index.is_unique
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas_equal_to_no_frame(monkeypatch):
    """as_frame=True must expose the same values as as_frame=False."""
    # as_frame = True returns the same underlying data as as_frame = False
    pytest.importorskip('pandas')
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    frame_bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    frame_data = frame_bunch.data
    frame_target = frame_bunch.target
    norm_bunch = fetch_openml(data_id=data_id, as_frame=False, cache=False)
    norm_data = norm_bunch.data
    norm_target = norm_bunch.target
    assert_allclose(norm_data, frame_data)
    assert_array_equal(norm_target, frame_target)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_multitarget_pandas(monkeypatch):
    """A multi-column target_column yields a DataFrame target; the remaining
    columns (including the categorical one) stay in data."""
    # classification dataset with numeric only columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 61
    data_shape = (150, 3)
    target_shape = (150, 2)
    frame_shape = (150, 5)
    target_column = ['petalwidth', 'petallength']
    cat_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
                                  'Iris-virginica'])
    data_dtypes = [np.float64, np.float64] + [cat_dtype]
    data_names = ['sepallength', 'sepalwidth', 'class']
    target_dtypes = [np.float64, np.float64]
    target_names = ['petalwidth', 'petallength']
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                         target_column=target_column)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == target_names
    assert isinstance(target, pd.DataFrame)
    assert np.all(target.dtypes == target_dtypes)
    assert target.shape == target_shape
    assert np.all(target.columns == target_names)
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == [np.float64] * 4 + [cat_dtype])
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_anneal_pandas(monkeypatch):
    """Mixed numeric/categorical dataset: check dtype counts and shapes."""
    # classification dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 2
    target_column = 'class'
    data_shape = (11, 38)
    target_shape = (11,)
    frame_shape = (11, 39)
    expected_data_categories = 32
    expected_data_floats = 6
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True,
                         target_column=target_column, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    n_categories = len([dtype for dtype in data.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in data.dtypes if dtype.kind == 'f'])
    assert expected_data_categories == n_categories
    assert expected_data_floats == n_floats
    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert isinstance(target.dtype, CategoricalDtype)
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_cpu_pandas(monkeypatch):
    """Regression dataset: the vendor column becomes a Categorical carrying
    the full level set, and the target is float."""
    # regression dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 561
    data_shape = (209, 7)
    target_shape = (209, )
    frame_shape = (209, 8)
    cat_dtype = CategoricalDtype(['adviser', 'amdahl', 'apollo', 'basf',
                                  'bti', 'burroughs', 'c.r.d', 'cdc',
                                  'cambex', 'dec', 'dg', 'formation',
                                  'four-phase', 'gould', 'hp', 'harris',
                                  'honeywell', 'ibm', 'ipl', 'magnuson',
                                  'microdata', 'nas', 'ncr', 'nixdorf',
                                  'perkin-elmer', 'prime', 'siemens',
                                  'sperry', 'sratus', 'wang'])
    data_dtypes = [cat_dtype] + [np.float64] * 6
    feature_names = ['vendor', 'MYCT', 'MMIN', 'MMAX', 'CACH',
                     'CHMIN', 'CHMAX']
    target_name = 'class'
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    assert np.all(data.dtypes == data_dtypes)
    assert np.all(data.columns == feature_names)
    assert np.all(bunch.feature_names == feature_names)
    assert bunch.target_names == [target_name]
    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert target.dtype == np.float64
    assert target.name == target_name
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
def test_fetch_openml_australian_pandas_error_sparse(monkeypatch):
    """Sparse ARFF data cannot be returned as a DataFrame."""
    data_id = 292
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    msg = 'Cannot return dataframe with sparse data'
    with pytest.raises(ValueError, match=msg):
        fetch_openml(data_id=data_id, as_frame=True, cache=False)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_as_frame_auto(monkeypatch):
    """as_frame='auto' picks DataFrame for dense and CSR for sparse data."""
    pd = pytest.importorskip('pandas')
    data_id = 61  # iris dataset version 1 (dense)
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    data = fetch_openml(data_id=data_id, as_frame='auto')
    assert isinstance(data.data, pd.DataFrame)
    data_id = 292  # Australian dataset version 1 (sparse ARFF)
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    data = fetch_openml(data_id=data_id, as_frame='auto')
    assert isinstance(data.data, scipy.sparse.csr_matrix)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
    """A tiny working_memory budget makes the ARFF-to-DataFrame conversion
    warn that it could not honour the configuration."""
    pytest.importorskip('pandas')
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    msg = 'Could not adhere to working_memory config.'
    with pytest.warns(UserWarning, match=msg):
        with config_context(working_memory=1e-6):
            fetch_openml(data_id=data_id, as_frame=True, cache=False)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas_return_X_y(monkeypatch):
    """return_X_y=True with as_frame=True yields (DataFrame, Series)."""
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 1119
    data_shape = (10, 14)
    target_shape = (10, )
    expected_data_categories = 8
    expected_data_floats = 6
    target_column = 'class'
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    X, y = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                        return_X_y=True)
    assert isinstance(X, pd.DataFrame)
    assert X.shape == data_shape
    n_categories = len([dtype for dtype in X.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in X.dtypes if dtype.kind == 'f'])
    assert expected_data_categories == n_categories
    assert expected_data_floats == n_floats
    assert isinstance(y, pd.Series)
    assert y.shape == target_shape
    assert y.name == target_column
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas(monkeypatch):
    """Dataset with a numeric row attribute (issue #12329): check dtype
    counts, shapes and the combined frame."""
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    # Check because of the numeric row attribute (issue #12329)
    data_id = 1119
    data_shape = (10, 14)
    target_shape = (10, )
    frame_shape = (10, 15)
    expected_data_categories = 8
    expected_data_floats = 6
    target_column = 'class'
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    n_categories = len([dtype for dtype in data.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in data.dtypes if dtype.kind == 'f'])
    assert expected_data_categories == n_categories
    assert expected_data_floats == n_floats
    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert target.name == target_column
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_miceprotein_pandas(monkeypatch):
    """MiceProtein as DataFrame: row-id/ignore attributes must be dropped."""
    # JvR: very important check, as this dataset defined several row ids
    # and ignore attributes. Note that data_features json has 82 attributes,
    # and row id (1), ignore attributes (3) have been removed.
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 40966
    # 82 attributes - 1 row id - 3 ignored - 1 target = 77 features.
    data_shape = (7, 77)
    target_shape = (7, )
    frame_shape = (7, 78)
    target_column = 'class'
    frame_n_categories = 1
    frame_n_floats = 77
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    # All feature columns are numeric; only the target is categorical.
    assert np.all(data.dtypes == np.float64)
    assert isinstance(target, pd.Series)
    assert isinstance(target.dtype, CategoricalDtype)
    assert target.shape == target_shape
    assert target.name == target_column
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    n_categories = len([dtype for dtype in frame.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == 'f'])
    assert frame_n_categories == n_categories
    assert frame_n_floats == n_floats
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_emotions_pandas(monkeypatch):
    """Multi-target dataset: target must come back as a DataFrame."""
    # classification dataset with multiple targets (natively)
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 40589
    # Note: the misspellings below match the actual OpenML column names.
    target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
                     'quiet.still', 'sad.lonely', 'angry.aggresive']
    data_shape = (13, 72)
    target_shape = (13, 6)
    frame_shape = (13, 78)
    expected_frame_categories = 6
    expected_frame_floats = 72
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                         target_column=target_column)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    assert isinstance(target, pd.DataFrame)
    assert target.shape == target_shape
    assert np.all(target.columns == target_column)
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    n_categories = len([dtype for dtype in frame.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == 'f'])
    assert expected_frame_categories == n_categories
    assert expected_frame_floats == n_floats
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_titanic_pandas(monkeypatch):
    """Titanic dataset: STRING columns map to object dtype in the frame."""
    # dataset with strings
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 40945
    data_shape = (1309, 13)
    target_shape = (1309, )
    frame_shape = (1309, 14)
    # Expected dtype per column: numeric -> float64, nominal -> categorical,
    # free-text STRING attributes -> object.
    name_to_dtype = {
        'pclass': np.float64,
        'name': object,
        'sex': CategoricalDtype(['female', 'male']),
        'age': np.float64,
        'sibsp': np.float64,
        'parch': np.float64,
        'ticket': object,
        'fare': np.float64,
        'cabin': object,
        'embarked': CategoricalDtype(['C', 'Q', 'S']),
        'boat': object,
        'body': np.float64,
        'home.dest': object,
        'survived': CategoricalDtype(['0', '1'])
    }
    frame_columns = ['pclass', 'survived', 'name', 'sex', 'age', 'sibsp',
                     'parch', 'ticket', 'fare', 'cabin', 'embarked',
                     'boat', 'body', 'home.dest']
    frame_dtypes = [name_to_dtype[col] for col in frame_columns]
    feature_names = ['pclass', 'name', 'sex', 'age', 'sibsp',
                     'parch', 'ticket', 'fare', 'cabin', 'embarked',
                     'boat', 'body', 'home.dest']
    target_name = 'survived'
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    assert np.all(data.columns == feature_names)
    assert bunch.target_names == [target_name]
    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert target.name == target_name
    assert target.dtype == name_to_dtype[target_name]
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == frame_dtypes)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris(monkeypatch, gzip_response):
    """Fetching iris by name warns about multiple active versions."""
    # classification dataset with numeric only columns
    data_id = 61
    data_name = 'iris'
    data_version = 1
    target_column = 'class'
    expected_observations = 150
    expected_features = 4
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Several active 'iris' versions exist on OpenML, so version 1 should be
    # picked with a warning while the dataset itself still loads fine.
    assert_warns_message(
        UserWarning,
        "Multiple active versions of the dataset matching the name"
        " iris exist. Versions may be fundamentally different, "
        "returning version 1.",
        _fetch_dataset_from_openml,
        **{'data_id': data_id, 'data_name': data_name,
           'data_version': data_version,
           'target_column': target_column,
           'expected_observations': expected_observations,
           'expected_features': expected_features,
           'expected_missing': expected_missing,
           'expect_sparse': False,
           'expected_data_dtype': np.float64,
           'expected_target_dtype': object,
           'compare_default_target': True}
    )
def test_decode_iris(monkeypatch):
    """Verify the decoded ARFF feature list for the iris dataset (id 61)."""
    iris_id = 61
    _monkey_patch_webbased_functions(monkeypatch, iris_id, False)
    _test_features_list(iris_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris_multitarget(monkeypatch, gzip_response):
    """Iris with two numeric target columns: both targets become float64."""
    # classification dataset with numeric only columns
    data_id = 61
    data_name = 'iris'
    data_version = 1
    target_column = ['sepallength', 'sepalwidth']
    expected_observations = 150
    # Two of the five columns serve as targets, leaving three features.
    expected_features = 3
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, np.float64, expect_sparse=False,
                               compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal(monkeypatch, gzip_response):
    """Anneal dataset: mixed numeric/categorical columns with missing values."""
    # classification dataset with numeric and categorical columns
    data_id = 2
    data_name = 'anneal'
    data_version = 1
    target_column = 'class'
    # Not all original instances included for space reasons
    expected_observations = 11
    expected_features = 38
    expected_missing = 267
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)
def test_decode_anneal(monkeypatch):
    """Verify the decoded ARFF feature list for the anneal dataset (id 2)."""
    anneal_id = 2
    _monkey_patch_webbased_functions(monkeypatch, anneal_id, False)
    _test_features_list(anneal_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal_multitarget(monkeypatch, gzip_response):
    """Anneal with three categorical target columns."""
    # classification dataset with numeric and categorical columns
    data_id = 2
    data_name = 'anneal'
    data_version = 1
    target_column = ['class', 'product-type', 'shape']
    # Not all original instances included for space reasons
    expected_observations = 11
    # Three columns are consumed as targets (38 total - 2 extra = 36).
    expected_features = 36
    expected_missing = 267
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cpu(monkeypatch, gzip_response):
    """CPU dataset: regression target comes back as float64."""
    # regression dataset with numeric and categorical columns
    data_id = 561
    data_name = 'cpu'
    data_version = 1
    target_column = 'class'
    expected_observations = 209
    expected_features = 7
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, np.float64, expect_sparse=False,
                               compare_default_target=True)
def test_decode_cpu(monkeypatch):
    """Verify the decoded ARFF feature list for the cpu dataset (id 561)."""
    cpu_id = 561
    _monkey_patch_webbased_functions(monkeypatch, cpu_id, False)
    _test_features_list(cpu_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_australian(monkeypatch, gzip_response):
    """Sparse (ARFF sparse format) dataset loads and warns about inactivity."""
    # sparse dataset
    # Australian is the only sparse dataset that is reasonably small
    # as it is inactive, we need to catch the warning. Due to mocking
    # framework, it is not deactivated in our tests
    data_id = 292
    data_name = 'Australian'
    data_version = 1
    target_column = 'Y'
    # Not all original instances included for space reasons
    expected_observations = 85
    expected_features = 14
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    assert_warns_message(
        UserWarning,
        "Version 1 of dataset Australian is inactive,",
        _fetch_dataset_from_openml,
        **{'data_id': data_id, 'data_name': data_name,
           'data_version': data_version,
           'target_column': target_column,
           'expected_observations': expected_observations,
           'expected_features': expected_features,
           'expected_missing': expected_missing,
           'expect_sparse': True,
           'expected_data_dtype': np.float64,
           'expected_target_dtype': object,
           'compare_default_target': False}  # numpy specific check
    )
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_adultcensus(monkeypatch, gzip_response):
    """Adult-census subset loads despite its numeric row attribute."""
    # Check because of the numeric row attribute (issue #12329)
    data_id = 1119
    data_name = 'adult-census'
    data_version = 1
    target_column = 'class'
    # Not all original instances included for space reasons
    expected_observations = 10
    expected_features = 14
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_miceprotein(monkeypatch, gzip_response):
    """MiceProtein as arrays: row-id and ignore attributes are removed."""
    # JvR: very important check, as this dataset defined several row ids
    # and ignore attributes. Note that data_features json has 82 attributes,
    # and row id (1), ignore attributes (3) have been removed (and target is
    # stored in data.target)
    data_id = 40966
    data_name = 'MiceProtein'
    data_version = 4
    target_column = 'class'
    # Not all original instances included for space reasons
    expected_observations = 7
    expected_features = 77
    expected_missing = 7
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_emotions(monkeypatch, gzip_response):
    """Emotions dataset: natively multi-target classification loads as arrays."""
    # classification dataset with multiple targets (natively)
    data_id = 40589
    data_name = 'emotions'
    data_version = 3
    # Note: the misspellings below match the actual OpenML column names.
    target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
                     'quiet.still', 'sad.lonely', 'angry.aggresive']
    expected_observations = 13
    expected_features = 72
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)
def test_decode_emotions(monkeypatch):
    """Verify the decoded ARFF feature list for the emotions dataset (id 40589)."""
    emotions_id = 40589
    _monkey_patch_webbased_functions(monkeypatch, emotions_id, False)
    _test_features_list(emotions_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
    """_open_openml_url caches the download and serves identical bytes later.

    Fix: the two file-like responses were never closed, leaking file
    handles (and raising ResourceWarning under strict warning filters);
    they are now closed in ``finally`` blocks.
    """
    data_id = 61
    _monkey_patch_webbased_functions(
        monkeypatch, data_id, gzip_response)
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    # first fill the cache
    response1 = _open_openml_url(openml_path, cache_directory)
    try:
        # assert the cached file exists at the expected local path
        location = _get_local_path(openml_path, cache_directory)
        assert os.path.isfile(location)
        # redownload, to utilize cache; content must match the first read
        response2 = _open_openml_url(openml_path, cache_directory)
        try:
            assert response1.read() == response2.read()
        finally:
            response2.close()
    finally:
        response1.close()
@pytest.mark.parametrize('gzip_response', [True, False])
@pytest.mark.parametrize('write_to_disk', [True, False])
def test_open_openml_url_unlinks_local_path(
        monkeypatch, gzip_response, tmpdir, write_to_disk):
    """A failed download must not leave a (possibly partial) cache file."""
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    location = _get_local_path(openml_path, cache_directory)

    def _mock_urlopen(request):
        # Optionally create the cache file before failing, to check that
        # the cleanup removes it in both scenarios.
        if write_to_disk:
            with open(location, "w") as f:
                f.write("")
        raise ValueError("Invalid request")

    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
    with pytest.raises(ValueError, match="Invalid request"):
        _open_openml_url(openml_path, cache_directory)
    assert not os.path.exists(location)
def test_retry_with_clean_cache(tmpdir):
    """The retry decorator clears a corrupt cache file and retries once."""
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    location = _get_local_path(openml_path, cache_directory)
    os.makedirs(os.path.dirname(location))
    # Create an (empty) stale cache file that the decorated function
    # treats as corrupt on the first attempt.
    with open(location, 'w') as f:
        f.write("")

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        # The first call will raise an error since location exists
        if os.path.exists(location):
            raise Exception("File exist!")
        return 1

    # The decorator should warn, wipe the cache, retry and succeed.
    warn_msg = "Invalid cache, redownloading file"
    with pytest.warns(RuntimeWarning, match=warn_msg):
        result = _load_data()
    assert result == 1
def test_retry_with_clean_cache_http_error(tmpdir):
    """HTTP errors must propagate unchanged; no cache-cleanup retry occurs."""
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        raise HTTPError(url=None, code=412,
                        msg='Simulated mock error',
                        hdrs=None, fp=None)

    error_msg = "Simulated mock error"
    with pytest.raises(HTTPError, match=error_msg):
        _load_data()
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
    """Second fetch is served entirely from the cache (no network access)."""
    def _mock_urlopen_raise(request):
        raise ValueError('This mechanism intends to test correct cache'
                         'handling. As such, urlopen should never be '
                         'accessed. URL: %s' % request.get_full_url())
    data_id = 2
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    _monkey_patch_webbased_functions(
        monkeypatch, data_id, gzip_response)
    # First fetch populates the cache through the mocked downloads.
    X_fetched, y_fetched = fetch_openml(data_id=data_id, cache=True,
                                        data_home=cache_directory,
                                        return_X_y=True, as_frame=False)
    # From here on, any urlopen call is an error: results must come
    # from the cache only.
    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen',
                        _mock_urlopen_raise)
    X_cached, y_cached = fetch_openml(data_id=data_id, cache=True,
                                      data_home=cache_directory,
                                      return_X_y=True, as_frame=False)
    np.testing.assert_array_equal(X_fetched, X_cached)
    np.testing.assert_array_equal(y_fetched, y_cached)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_notarget(monkeypatch, gzip_response):
    """target_column=None keeps all columns in data and sets target to None."""
    data_id = 61
    target_column = None
    expected_observations = 150
    # With no target, all five iris columns stay in data.
    expected_features = 5
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    data = fetch_openml(data_id=data_id, target_column=target_column,
                        cache=False, as_frame=False)
    assert data.data.shape == (expected_observations, expected_features)
    assert data.target is None
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_inactive(monkeypatch, gzip_response):
    """Inactive datasets load (by id or by name+version) with a warning."""
    # fetch inactive dataset by id
    data_id = 40675
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    glas2 = assert_warns_message(
        UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
        data_id=data_id, cache=False, as_frame=False)
    # fetch inactive dataset by name and version
    assert glas2.data.shape == (163, 9)
    glas2_by_version = assert_warns_message(
        UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
        data_id=None, name="glass2", version=1, cache=False, as_frame=False)
    # Both access paths should resolve to the same OpenML dataset.
    assert int(glas2_by_version.details['id']) == data_id
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_nonexiting(monkeypatch, gzip_response):
    """Searching by name for a dataset with no active version raises."""
    # there is no active version of glass2
    data_id = 40675
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Note that we only want to search by name (not data id)
    assert_raise_message(ValueError, "No active dataset glass2 found",
                         fetch_openml, name='glass2', cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_raises_illegal_multitarget(monkeypatch, gzip_response):
    """Mixing numeric and nominal target columns is rejected."""
    data_id = 61
    # 'sepalwidth' is numeric while 'class' is nominal: not homogeneous.
    targets = ['sepalwidth', 'class']
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Note that we only want to search by name (not data id)
    assert_raise_message(ValueError,
                         "Can only handle homogeneous multi-target datasets,",
                         fetch_openml, data_id=data_id,
                         target_column=targets, cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_warn_ignore_attribute(monkeypatch, gzip_response):
    """Using a row-identifier or ignored attribute as target emits a warning."""
    data_id = 40966
    expected_row_id_msg = "target_column={} has flag is_row_identifier."
    expected_ignore_msg = "target_column={} has flag is_ignore."
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # single column test
    assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'),
                         fetch_openml, data_id=data_id,
                         target_column='MouseID',
                         cache=False, as_frame=False)
    assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'),
                         fetch_openml, data_id=data_id,
                         target_column='Genotype',
                         cache=False, as_frame=False)
    # multi column test
    assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'),
                         fetch_openml, data_id=data_id,
                         target_column=['MouseID', 'class'],
                         cache=False, as_frame=False)
    assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'),
                         fetch_openml, data_id=data_id,
                         target_column=['Genotype', 'class'],
                         cache=False, as_frame=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_string_attribute_without_dataframe(monkeypatch, gzip_response):
    """Datasets with STRING attributes require as_frame=True."""
    data_id = 40945
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # single column test
    assert_raise_message(ValueError,
                         ('STRING attributes are not supported for '
                          'array representation. Try as_frame=True'),
                         fetch_openml, data_id=data_id, cache=False,
                         as_frame=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_dataset_with_openml_error(monkeypatch, gzip_response):
    """A dataset flagged with an error on OpenML still loads, with a warning."""
    data_id = 1
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    assert_warns_message(
        UserWarning,
        "OpenML registered a problem with the dataset. It might be unusable. "
        "Error:",
        fetch_openml, data_id=data_id, cache=False, as_frame=False
    )
@pytest.mark.parametrize('gzip_response', [True, False])
def test_dataset_with_openml_warning(monkeypatch, gzip_response):
    """A dataset flagged with a warning on OpenML still loads, with a warning."""
    data_id = 3
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    assert_warns_message(
        UserWarning,
        "OpenML raised a warning on the dataset. It might be unusable. "
        "Warning:",
        fetch_openml, data_id=data_id, cache=False, as_frame=False
    )
@pytest.mark.parametrize('gzip_response', [True, False])
def test_illegal_column(monkeypatch, gzip_response):
    """Requesting an unknown target column raises KeyError."""
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Single unknown column.
    assert_raise_message(KeyError, "Could not find target_column=",
                         fetch_openml, data_id=data_id,
                         target_column='undefined', cache=False)
    # One unknown column in a list of targets is also rejected.
    assert_raise_message(KeyError, "Could not find target_column=",
                         fetch_openml, data_id=data_id,
                         target_column=['undefined', 'class'],
                         cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_raises_missing_values_target(monkeypatch, gzip_response):
    """A target column containing missing values is rejected."""
    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # 'family' has missing entries in the anneal subset.
    assert_raise_message(ValueError, "Target column ",
                         fetch_openml, data_id=data_id, target_column='family')
def test_fetch_openml_raises_illegal_argument():
    """Conflicting or missing identification arguments raise ValueError."""
    # data_id together with name and/or version is ambiguous.
    assert_raise_message(ValueError, "Dataset data_id=",
                         fetch_openml, data_id=-1, name="name")
    assert_raise_message(ValueError, "Dataset data_id=",
                         fetch_openml, data_id=-1, name=None,
                         version="version")
    assert_raise_message(ValueError, "Dataset data_id=",
                         fetch_openml, data_id=-1, name="name",
                         version="version")
    # Neither data_id nor name is also invalid.
    assert_raise_message(ValueError, "Neither name nor data_id are provided. "
                         "Please provide name or data_id.", fetch_openml)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response):
    """Features flagged is_ignore on OpenML are excluded from the result."""
    # Regression test for #14340
    # 62 is the ID of the ZOO dataset
    data_id = 62
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    dataset = sklearn.datasets.fetch_openml(data_id=data_id, cache=False,
                                            as_frame=False)
    assert dataset is not None
    # The dataset has 17 features, including 1 ignored (animal),
    # so we assert that we don't have the ignored feature in the final Bunch
    assert dataset['data'].shape == (101, 16)
    assert 'animal' not in dataset['feature_names']
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize('as_frame', [True, False])
def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir):
    """A corrupted download must fail the md5 checksum validation."""
    if as_frame:
        pytest.importorskip('pandas')
    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    # create a temporary modified arff file: flip the last byte of the
    # gzipped payload so its checksum no longer matches the metadata.
    dataset_dir = os.path.join(currdir, 'data', 'openml', str(data_id))
    original_data_path = os.path.join(dataset_dir,
                                      'data-v1-download-1666876.arff.gz')
    corrupt_copy = os.path.join(tmpdir, "test_invalid_checksum.arff")
    with gzip.GzipFile(original_data_path, "rb") as orig_gzip, \
            gzip.GzipFile(corrupt_copy, "wb") as modified_gzip:
        data = bytearray(orig_gzip.read())
        data[len(data)-1] = 37
        modified_gzip.write(data)
    # Requests are already mocked by monkey_patch_webbased_functions.
    # We want to re-use that mock for all requests except file download,
    # hence creating a thin mock over the original mock
    mocked_openml_url = sklearn.datasets._openml.urlopen

    def swap_file_mock(request):
        url = request.get_full_url()
        if url.endswith('data/v1/download/1666876'):
            return _MockHTTPResponse(open(corrupt_copy, "rb"), is_gzip=True)
        else:
            return mocked_openml_url(request)

    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', swap_file_mock)
    # validate failed checksum
    with pytest.raises(ValueError) as exc:
        sklearn.datasets.fetch_openml(data_id=data_id, cache=False,
                                      as_frame=as_frame)
    # exception message should have file-path
    assert exc.match("1666876")
def test_convert_arff_data_type():
    """Input-validation errors of the low-level ARFF conversion helpers."""
    pytest.importorskip('pandas')
    # A generator payload requires an explicit shape for array conversion.
    arff: ArffContainerType = {
        'data': (el for el in range(2)),
        'description': '',
        'relation': '',
        'attributes': []
    }
    msg = r"shape must be provided when arr\['data'\] is a Generator"
    with pytest.raises(ValueError, match=msg):
        _convert_arff_data(arff, [0], [0], shape=None)

    # Conversely, DataFrame conversion insists on a generator payload.
    arff = {
        'data': list(range(2)),
        'description': '',
        'relation': '',
        'attributes': []
    }
    msg = r"arff\['data'\] must be a generator when converting to pd.DataFrame"
    with pytest.raises(ValueError, match=msg):
        _convert_arff_data_dataframe(arff, ['a'], {})
| bsd-3-clause |
khkaminska/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=10,
                                    random_state=42)
# Hold out 99% of the samples: only 1.000 points are used for model
# fitting; the large remainder serves to evaluate the calibration curves.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
                                                    random_state=42)
def plot_calibration_curve(est, name, fig_index):
    """Plot calibration curve for est w/o and with calibration.

    Parameters
    ----------
    est : estimator
        The base classifier to calibrate.
    name : str
        Label used for `est` in the legend and console output.
    fig_index : int
        Matplotlib figure number to draw into.
    """
    # Calibrated with isotonic calibration
    isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
    # Calibrated with sigmoid calibration
    sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
    # Logistic regression with no calibration as baseline
    lr = LogisticRegression(C=1., solver='lbfgs')
    fig = plt.figure(fig_index, figsize=(10, 10))
    ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    ax2 = plt.subplot2grid((3, 1), (2, 0))
    # Diagonal reference: a perfectly calibrated classifier.
    ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
    # NOTE: the loop variable deliberately reuses (shadows) `name` to
    # relabel each variant; the original parameter is consumed above.
    for clf, name in [(lr, 'Logistic'),
                      (est, name),
                      (isotonic, name + ' + Isotonic'),
                      (sigmoid, name + ' + Sigmoid')]:
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        if hasattr(clf, "predict_proba"):
            prob_pos = clf.predict_proba(X_test)[:, 1]
        else:  # use decision function
            prob_pos = clf.decision_function(X_test)
            # Min-max scale decision values into [0, 1] so they are
            # comparable to probabilities.
            prob_pos = \
                (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
        # Brier score: lower is better calibrated.
        clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
        print("%s:" % name)
        print("\tBrier: %1.3f" % (clf_score))
        print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
        print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
        print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
        fraction_of_positives, mean_predicted_value = \
            calibration_curve(y_test, prob_pos, n_bins=10)
        ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
                 label="%s (%1.3f)" % (name, clf_score))
        # Histogram of predicted values to show each model's confidence.
        ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
                 histtype="step", lw=2)
    ax1.set_ylabel("Fraction of positives")
    ax1.set_ylim([-0.05, 1.05])
    ax1.legend(loc="lower right")
    ax1.set_title('Calibration plots  (reliability curve)')
    ax2.set_xlabel("Mean predicted value")
    ax2.set_ylabel("Count")
    ax2.legend(loc="upper center", ncol=2)
    plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
petebachant/seaborn | seaborn/tests/test_utils.py | 11 | 11537 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
from ..utils import get_dataset_names, load_dataset
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from .. import utils, rcmod
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
    """pmf_hist returns an (x, heights, width) triple of matching lengths."""
    result = utils.pmf_hist(a_norm)
    assert_equal(len(result), 3)
    bin_x, bin_h, bin_w = result
    assert_equal(len(bin_x), len(bin_h))
    # A uniform input should produce bins of identical height.
    uniform = np.arange(10)
    bin_x, bin_h, bin_w = utils.pmf_hist(uniform, 10)
    nose.tools.assert_true(np.all(bin_h == bin_h[0]))
def test_pmf_hist_widths():
    """The returned bar width equals the spacing between bin positions."""
    positions, heights, width = utils.pmf_hist(a_norm)
    assert_equal(positions[1] - positions[0], width)
def test_pmf_hist_normalization():
    """Bin heights behave like a PMF: they sum to 1 and never exceed 1."""
    _, heights, _ = utils.pmf_hist(a_norm)
    nose.tools.assert_almost_equal(sum(heights), 1)
    nose.tools.assert_less_equal(heights.max(), 1)
def test_pmf_hist_bins():
    """Requesting 20 bins yields exactly 20 bar positions."""
    positions, _, _ = utils.pmf_hist(a_norm, 20)
    assert_equal(len(positions), 20)
def test_ci_to_errsize():
    """ci_to_errsize converts CI bounds into error-bar sizes around heights."""
    conf_ints = [[.5, .5],
                 [1.25, 1.5]]
    bar_heights = [1, 1.5]
    expected = np.array([[.5, 1],
                         [.25, 0]])
    computed = utils.ci_to_errsize(conf_ints, bar_heights)
    npt.assert_array_equal(expected, computed)
def test_desaturate():
    """Desaturating by half pulls each RGB channel toward the midpoint,
    regardless of how the input color is specified."""
    cases = [("red", (.75, .25, .25)),
             ("#00FF00", (.25, .75, .25)),
             ((0, 0, 1), (.25, .25, .75)),
             ("red", (.75, .25, .25))]
    for color, expected in cases:
        assert_equal(utils.desaturate(color, .5), expected)
@raises(ValueError)
def test_desaturation_prop():
    """Test that pct outside of [0, 1] raises exception."""
    # 50 is outside the valid [0, 1] proportion range, so this must raise
    utils.desaturate("blue", 50)
def test_saturate():
    """Test performance of saturation function."""
    # A washed-out red should saturate to a pure red
    fully_saturated = utils.saturate((.75, .25, .25))
    assert_equal(fully_saturated, (1, 0, 0))
def test_iqr():
    """Test the IQR function."""
    # For 0..4 the 25th-75th percentile spread is 2
    data = np.arange(5)
    assert_equal(utils.iqr(data), 2)
class TestSpineUtils(object):
    """Tests for the spine-manipulation helpers (despine / offset_spines)."""
    sides = ["left", "right", "bottom", "top"]
    outer_sides = ["top", "right"]
    inner_sides = ["left", "bottom"]
    offset = 10
    original_position = ("outward", 0)
    offset_position = ("outward", offset)
    def test_despine(self):
        # All four spines are visible on a fresh Axes
        f, ax = plt.subplots()
        for side in self.sides:
            nt.assert_true(ax.spines[side].get_visible())
        # Default despine removes only the top/right spines
        utils.despine()
        for side in self.outer_sides:
            nt.assert_true(~ax.spines[side].get_visible())
        for side in self.inner_sides:
            nt.assert_true(ax.spines[side].get_visible())
        # Passing every side as True removes all four spines
        utils.despine(**dict(zip(self.sides, [True] * 4)))
        for side in self.sides:
            nt.assert_true(~ax.spines[side].get_visible())
        plt.close("all")
    def test_despine_specific_axes(self):
        # Only the Axes passed via ax= should be modified
        f, (ax1, ax2) = plt.subplots(2, 1)
        utils.despine(ax=ax2)
        for side in self.sides:
            nt.assert_true(ax1.spines[side].get_visible())
        for side in self.outer_sides:
            nt.assert_true(~ax2.spines[side].get_visible())
        for side in self.inner_sides:
            nt.assert_true(ax2.spines[side].get_visible())
        plt.close("all")
    def test_despine_with_offset(self):
        f, ax = plt.subplots()
        for side in self.sides:
            nt.assert_equal(ax.spines[side].get_position(),
                            self.original_position)
        utils.despine(ax=ax, offset=self.offset)
        for side in self.sides:
            is_visible = ax.spines[side].get_visible()
            new_position = ax.spines[side].get_position()
            # Only spines left visible by despine are moved outward
            if is_visible:
                nt.assert_equal(new_position, self.offset_position)
            else:
                nt.assert_equal(new_position, self.original_position)
        plt.close("all")
    def test_despine_with_offset_specific_axes(self):
        # Offsetting via ax= must leave the other Axes untouched
        f, (ax1, ax2) = plt.subplots(2, 1)
        utils.despine(offset=self.offset, ax=ax2)
        for side in self.sides:
            nt.assert_equal(ax1.spines[side].get_position(),
                            self.original_position)
            if ax2.spines[side].get_visible():
                nt.assert_equal(ax2.spines[side].get_position(),
                                self.offset_position)
            else:
                nt.assert_equal(ax2.spines[side].get_position(),
                                self.original_position)
        plt.close("all")
    def test_despine_trim_spines(self):
        # trim=True should bound the remaining spines at the data extremes
        f, ax = plt.subplots()
        ax.plot([1, 2, 3], [1, 2, 3])
        ax.set_xlim(.75, 3.25)
        utils.despine(trim=True)
        for side in self.inner_sides:
            bounds = ax.spines[side].get_bounds()
            nt.assert_equal(bounds, (1, 3))
        plt.close("all")
    def test_despine_trim_inverted(self):
        # Trimming must also work when an axis direction is inverted
        f, ax = plt.subplots()
        ax.plot([1, 2, 3], [1, 2, 3])
        ax.set_ylim(.85, 3.15)
        ax.invert_yaxis()
        utils.despine(trim=True)
        for side in self.inner_sides:
            bounds = ax.spines[side].get_bounds()
            nt.assert_equal(bounds, (1, 3))
        plt.close("all")
    def test_despine_trim_noticks(self):
        # Trimming an axis with no ticks should leave the tick list empty
        f, ax = plt.subplots()
        ax.plot([1, 2, 3], [1, 2, 3])
        ax.set_yticks([])
        utils.despine(trim=True)
        nt.assert_equal(ax.get_yticks().size, 0)
    def test_offset_spines_warns(self):
        # offset_spines is deprecated and must emit a UserWarning saying so
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", category=UserWarning)
            f, ax = plt.subplots()
            utils.offset_spines(offset=self.offset)
            nt.assert_true('deprecated' in str(w[0].message))
            nt.assert_true(issubclass(w[0].category, UserWarning))
        plt.close('all')
    def test_offset_spines(self):
        # Despite the deprecation warning, the offset should still be applied
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", category=UserWarning)
            f, ax = plt.subplots()
            for side in self.sides:
                nt.assert_equal(ax.spines[side].get_position(),
                                self.original_position)
            utils.offset_spines(offset=self.offset)
            for side in self.sides:
                nt.assert_equal(ax.spines[side].get_position(),
                                self.offset_position)
        plt.close("all")
    def test_offset_spines_specific_axes(self):
        # Only the Axes passed via ax= should receive the offset
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", category=UserWarning)
            f, (ax1, ax2) = plt.subplots(2, 1)
            utils.offset_spines(offset=self.offset, ax=ax2)
            for side in self.sides:
                nt.assert_equal(ax1.spines[side].get_position(),
                                self.original_position)
                nt.assert_equal(ax2.spines[side].get_position(),
                                self.offset_position)
        plt.close("all")
def test_ticklabels_overlap():
    """Check detection of overlapping tick labels on an axis."""
    rcmod.set()
    f, ax = plt.subplots(figsize=(2, 2))
    f.tight_layout()  # This gets the Agg renderer working
    # Default short labels on a small figure should not overlap
    assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
    big_strings = "abcdefgh", "ijklmnop"
    ax.set_xlim(-.5, 1.5)
    ax.set_xticks([0, 1])
    ax.set_xticklabels(big_strings)
    # Long labels crammed onto a 2-inch axis must be flagged as overlapping
    assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
    # Per-axes helper returns (x_overlaps, y_overlaps)
    x, y = utils.axes_ticklabels_overlap(ax)
    assert x
    assert not y
def test_categorical_order():
    """Check inference and overriding of categorical level order."""
    x = ["a", "c", "c", "b", "a", "d"]
    y = [3, 2, 5, 1, 4]
    order = ["a", "b", "c", "d"]
    # With no order given, first-appearance order is used
    out = utils.categorical_order(x)
    nt.assert_equal(out, ["a", "c", "b", "d"])
    # An explicit order (even a subset) is passed through unchanged
    out = utils.categorical_order(x, order)
    nt.assert_equal(out, order)
    out = utils.categorical_order(x, ["b", "a"])
    nt.assert_equal(out, ["b", "a"])
    # ndarray and Series inputs behave like plain lists
    out = utils.categorical_order(np.array(x))
    nt.assert_equal(out, ["a", "c", "b", "d"])
    out = utils.categorical_order(pd.Series(x))
    nt.assert_equal(out, ["a", "c", "b", "d"])
    # Numeric data is sorted rather than taken in appearance order
    out = utils.categorical_order(y)
    nt.assert_equal(out, [1, 2, 3, 4, 5])
    out = utils.categorical_order(np.array(y))
    nt.assert_equal(out, [1, 2, 3, 4, 5])
    out = utils.categorical_order(pd.Series(y))
    nt.assert_equal(out, [1, 2, 3, 4, 5])
    if pandas_has_categoricals:
        # Categorical dtypes expose their declared category order
        x = pd.Categorical(x, order)
        out = utils.categorical_order(x)
        nt.assert_equal(out, list(x.categories))
        x = pd.Series(x)
        out = utils.categorical_order(x)
        nt.assert_equal(out, list(x.cat.categories))
        out = utils.categorical_order(x, ["b", "a"])
        nt.assert_equal(out, ["b", "a"])
    # Missing values are dropped from the inferred order
    x = ["a", np.nan, "c", "c", "b", "a", "d"]
    out = utils.categorical_order(x)
    nt.assert_equal(out, ["a", "c", "b", "d"])
# NOTE(review): these remote-dataset tests are only defined when pandas is
# recent enough — presumably because load_dataset relies on read_csv behavior
# introduced in pandas 0.15; confirm against seaborn.utils.load_dataset.
if LooseVersion(pd.__version__) >= "0.15":
    def check_load_dataset(name):
        # Helper (not collected directly): fetch without caching
        ds = load_dataset(name, cache=False)
        assert(isinstance(ds, pd.DataFrame))
    def check_load_cached_dataset(name):
        # Test the cacheing using a temporary file.
        # With Python 3.2+, we could use the tempfile.TemporaryDirectory()
        # context manager instead of this try...finally statement
        tmpdir = tempfile.mkdtemp()
        try:
            # download and cache
            ds = load_dataset(name, cache=True, data_home=tmpdir)
            # use cached version
            ds2 = load_dataset(name, cache=True, data_home=tmpdir)
            pdt.assert_frame_equal(ds, ds2)
        finally:
            shutil.rmtree(tmpdir)
    @network(url="https://github.com/mwaskom/seaborn-data")
    def test_get_dataset_names():
        if not BeautifulSoup:
            raise nose.SkipTest("No BeautifulSoup available for parsing html")
        names = get_dataset_names()
        assert(len(names) > 0)
        assert(u"titanic" in names)
    @network(url="https://github.com/mwaskom/seaborn-data")
    def test_load_datasets():
        if not BeautifulSoup:
            raise nose.SkipTest("No BeautifulSoup available for parsing html")
        # Heavy test to verify that we can load all available datasets
        for name in get_dataset_names():
            # unfortunately @network somehow obscures this generator so it
            # does not get in effect, so we need to call explicitly
            # yield check_load_dataset, name
            check_load_dataset(name)
    @network(url="https://github.com/mwaskom/seaborn-data")
    def test_load_cached_datasets():
        if not BeautifulSoup:
            raise nose.SkipTest("No BeautifulSoup available for parsing html")
        # Heavy test to verify that we can load all available datasets
        for name in get_dataset_names():
            # unfortunately @network somehow obscures this generator so it
            # does not get in effect, so we need to call explicitly
            # yield check_load_dataset, name
            check_load_cached_dataset(name)
| bsd-3-clause |
intel-analytics/analytics-zoo | pyzoo/zoo/orca/learn/tf2/estimator.py | 1 | 21473 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import logging
import pickle
import numpy as np
import ray
from zoo.orca.data.ray_xshards import RayXShards
from zoo.orca.learn.dl_cluster import RayDLCluster
from zoo.orca.learn.tf2.tf_runner import TFRunner
from zoo.orca.learn.ray_estimator import Estimator as OrcaRayEstimator
from zoo.orca.learn.utils import maybe_dataframe_to_xshards, dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, update_predict_xshards, \
process_xshards_of_pandas_dataframe
from zoo.orca.data.utils import process_spark_xshards
from zoo.ray import RayContext
logger = logging.getLogger(__name__)
class Estimator(object):
    """Factory namespace for constructing distributed TF2 estimators."""
    @staticmethod
    def from_keras(*,
                   model_creator,
                   config=None,
                   verbose=False,
                   workers_per_node=1,
                   compile_args_creator=None,
                   backend="tf2",
                   cpu_binding=True,
                   ):
        """
        Create an Estimator for tensorflow 2.
        :param model_creator: (dict -> Model) This function takes in the `config`
               dict and returns a compiled TF model.
        :param config: (dict) configuration passed to 'model_creator',
               'data_creator'. Also contains `fit_config`, which is passed
               into `model.fit(data, **fit_config)` and
               `evaluate_config` which is passed into `model.evaluate`.
        :param verbose: (bool) Prints output of one model if true.
        :param workers_per_node: (Int) worker number on each node. default: 1.
        :param compile_args_creator: (dict -> dict of loss, optimizer and metrics) Only used when
               the backend="horovod". This function takes in the `config` dict and returns a
               dictionary like {"optimizer": tf.keras.optimizers.SGD(lr), "loss":
               "mean_squared_error", "metrics": ["mean_squared_error"]}
        :param backend: (string) You can choose "horovod" or "tf2" as backend. Default: `tf2`.
        :param cpu_binding: (bool) Whether to binds threads to specific CPUs. Default: True
        """
        # Thin factory: all real setup happens in TensorFlow2Estimator.__init__
        return TensorFlow2Estimator(model_creator=model_creator, config=config,
                                    verbose=verbose, workers_per_node=workers_per_node,
                                    backend=backend, compile_args_creator=compile_args_creator,
                                    cpu_binding=cpu_binding)
def make_data_creator(refs):
    """Wrap *refs* in a callable matching the (config, batch_size) creator API.

    The returned creator ignores both arguments and simply hands back the
    captured refs, so pre-materialized data can flow through code paths that
    expect a data-creator function.
    """
    def creator(config, batch_size):
        return refs
    return creator
def data_length(data):
    """Return the number of samples in a feature dict ``{"x": ...}``.

    ``data["x"]`` is either a single ndarray or a tuple/list of ndarrays
    sharing their first (sample) dimension; the leading length is returned.
    """
    features = data["x"]
    if isinstance(features, np.ndarray):
        return features.shape[0]
    # Multi-input model: take the sample count from the first input array.
    return features[0].shape[0]
class TensorFlow2Estimator(OrcaRayEstimator):
    def __init__(self,
                 model_creator,
                 compile_args_creator=None,
                 config=None,
                 verbose=False,
                 backend="tf2",
                 workers_per_node=1,
                 cpu_binding=True):
        """Spin up the Ray worker group and initialize distributed training.

        See :meth:`Estimator.from_keras` for parameter documentation.
        """
        self.model_creator = model_creator
        self.compile_args_creator = compile_args_creator
        self.config = {} if config is None else config
        self.verbose = verbose
        ray_ctx = RayContext.get()
        if "batch_size" in self.config:
            raise Exception("Please do not specify batch_size in config. Input batch_size in the"
                            " fit/evaluate function of the estimator instead.")
        # Default TF threading: one inter-op thread, and split each node's
        # cores evenly among its workers for intra-op parallelism.
        if "inter_op_parallelism" not in self.config:
            self.config["inter_op_parallelism"] = 1
        if "intra_op_parallelism" not in self.config:
            self.config["intra_op_parallelism"] = ray_ctx.ray_node_cpu_cores // workers_per_node
        if backend == "horovod":
            assert compile_args_creator is not None, "compile_args_creator should not be None," \
                " when backend is set to horovod"
        # Shared constructor arguments for each remote TFRunner actor
        params = {
            "model_creator": model_creator,
            "compile_args_creator": compile_args_creator,
            "config": self.config,
            "verbose": self.verbose,
        }
        if backend == "tf2":
            cores_per_node = ray_ctx.ray_node_cpu_cores // workers_per_node
            num_nodes = ray_ctx.num_ray_nodes * workers_per_node
            self.cluster = RayDLCluster(
                num_workers=num_nodes,
                worker_cores=cores_per_node,
                worker_cls=TFRunner,
                worker_param=params,
                cpu_binding=cpu_binding
            )
            self.remote_workers = self.cluster.get_workers()
            # Build the "ip:port" address list used by TF's distributed setup
            ips = ray.get(
                [worker.get_node_ip.remote() for worker in self.remote_workers])
            ports = ray.get(
                [worker.find_free_port.remote() for worker in self.remote_workers])
            urls = ["{ip}:{port}".format(ip=ips[i], port=ports[i])
                    for i in range(len(self.remote_workers))]
            ray.get([worker.setup.remote() for worker in self.remote_workers])
            # Get setup tasks in order to throw errors on failure
            ray.get([
                worker.setup_distributed.remote(urls, i, len(self.remote_workers))
                for i, worker in enumerate(self.remote_workers)])
        elif backend == "horovod":
            # it is necessary to call self.run first to set horovod environment
            from zoo.orca.learn.horovod.horovod_ray_runner import HorovodRayRunner
            horovod_runner = HorovodRayRunner(ray_ctx,
                                              worker_cls=TFRunner,
                                              worker_param=params,
                                              workers_per_node=workers_per_node)
            horovod_runner.run(lambda: print("worker initialized"))
            self.remote_workers = horovod_runner.remote_workers
            ray.get([worker.setup.remote() for worker in self.remote_workers])
            ray.get([
                worker.setup_horovod.remote()
                for i, worker in enumerate(self.remote_workers)])
        else:
            raise Exception("Only \"tf2\" and \"horovod\" are legal "
                            "values of backend, but got {}".format(backend))
        self.num_workers = len(self.remote_workers)
    def fit(self, data, epochs=1, batch_size=32, verbose=1,
            callbacks=None, validation_data=None, class_weight=None,
            steps_per_epoch=None, validation_steps=None, validation_freq=1,
            data_config=None, feature_cols=None,
            label_cols=None):
        """
        Train this tensorflow model with train data.
        :param data: train data. It can be XShards, Spark DataFrame or creator function which
               returns Iter or DataLoader.
               If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
               {'x': feature, 'y': label}, where feature(label) is a numpy array or a tuple of
               numpy arrays.
        :param epochs: Number of epochs to train the model. Default: 1.
        :param batch_size: Batch size used for training. Default: 32.
        :param verbose: Prints output of one model if true.
        :param callbacks: List of Keras compatible callbacks to apply during training.
        :param validation_data: validation data. Validation data type should be the same
               as train data.
        :param class_weight: Optional dictionary mapping class indices (integers) to a weight
               (float) value, used for weighting the loss function. This can be useful to tell
               the model to "pay more attention" to samples from an under-represented class.
        :param steps_per_epoch: Total number of steps (batches of samples) before declaring one
               epoch finished and starting the next epoch. If `steps_pre_epoch` is `None`, the
               epoch will run until the input dataset is exhausted. When passing an infinitely
               repeating dataset, you must specify the `step_per_epoch` argument.
        :param validation_steps: Total number of steps (batches of samples) to draw before stopping
               when performing validation at the end of every epoch. Default: None.
        :param validation_freq: Only relevant if validation data is provided. Integer of
               `collections_abc.Container` instance (e.g. list, tuple, etc.). If an integer,
               specifies how many training epochs to run before a new validation run is performed,
               e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies
               the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
               validation at the end of the 1st, 2nd, and 10th epochs.
        :param data_config: An optional dictionary that can be passed to data creator function.
        :param feature_cols: Feature column name(s) of data. Only used when data is a Spark
               DataFrame or an XShards of Pandas DataFrame. Default: None.
        :param label_cols: Label column name(s) of data. Only used when data is a Spark DataFrame or
               an XShards of Pandas DataFrame.
               Default: None.
        :return:
        """
        # Keyword arguments forwarded to each remote worker's step()
        params = dict(
            epochs=epochs,
            batch_size=batch_size,
            verbose=verbose,
            callbacks=callbacks,
            class_weight=class_weight,
            steps_per_epoch=steps_per_epoch,
            validation_steps=validation_steps,
            validation_freq=validation_freq,
            data_config=data_config
        )
        from zoo.orca.data import SparkXShards
        data, validation_data = maybe_dataframe_to_xshards(data, validation_data,
                                                           feature_cols, label_cols,
                                                           mode="fit",
                                                           num_workers=self.num_workers)
        if isinstance(data, SparkXShards):
            # XShards of Pandas DataFrames are first converted to numpy dicts
            if data._get_class_name() == 'pandas.core.frame.DataFrame':
                data, validation_data = process_xshards_of_pandas_dataframe(data, feature_cols,
                                                                            label_cols,
                                                                            validation_data, "fit")
            ray_xshards = process_spark_xshards(data, self.num_workers)
            if validation_data is None:
                def transform_func(worker, partition_refs):
                    params["data_creator"] = make_data_creator(partition_refs)
                    return worker.step.remote(**params)
                worker_stats = ray_xshards.reduce_partitions_for_actors(self.remote_workers,
                                                                        transform_func)
            else:
                # Pair each training shard with its validation shard on the
                # same worker before launching the training step.
                val_ray_xshards = process_spark_xshards(validation_data, self.num_workers)
                def zip_func(worker, this_partition_refs, that_partition_refs):
                    params["data_creator"] = make_data_creator(this_partition_refs)
                    params["validation_data_creator"] = \
                        make_data_creator(that_partition_refs)
                    return worker.step.remote(**params)
                worker_stats = ray_xshards.zip_reduce_shards_with_actors(val_ray_xshards,
                                                                         self.remote_workers,
                                                                         zip_func)
        else:
            # data is a creator function: every worker gets the same creator
            params["data_creator"] = data
            params["validation_data_creator"] = validation_data
            params_list = [params] * self.num_workers
            worker_stats = ray.get([self.remote_workers[i].step.remote(**params_list[i])
                                    for i in range(self.num_workers)])
        worker_stats = list(itertools.chain.from_iterable(worker_stats))
        # All workers report replicated stats; return a copy of the first
        stats = worker_stats[0].copy()
        return stats
def evaluate(self, data, batch_size=32, num_steps=None, verbose=1,
sample_weight=None, callbacks=None, data_config=None,
feature_cols=None, label_cols=None):
"""
Evaluates the model on the validation data set.
:param data: evaluate data. It can be XShards, Spark DataFrame or creator function which
returns Iter or DataLoader.
If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or a tuple of
numpy arrays.
:param batch_size: Batch size used for evaluation. Default: 32.
:param num_steps: Total number of steps (batches of samples) before declaring the evaluation
round finished. Ignored with the default value of `None`.
:param verbose: Prints output of one model if true.
:param sample_weight: Optional Numpy array of weights for the training samples, used for
weighting the loss function. You can either pass a flat (1D) Numpy array with the
same length as the input samples (1:1 mapping between weights and samples), or in
the case of temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of every sample.
:param callbacks: List of Keras compatible callbacks to apply during evaluation.
:param data_config: An optional dictionary that can be passed to data creator function.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame or an XShards of Pandas DataFrame. Default: None.
:param label_cols: Label column name(s) of data. Only used when data is a Spark DataFrame or
an XShards of Pandas DataFrame.
Default: None.
:return: validation result
"""
logger.info("Starting validation step.")
params = dict(
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight,
steps=num_steps,
callbacks=callbacks,
data_config=data_config,
)
from zoo.orca.data import SparkXShards
data, _ = maybe_dataframe_to_xshards(data,
validation_data=None,
feature_cols=feature_cols,
label_cols=label_cols,
mode="evaluate",
num_workers=self.num_workers)
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data = process_xshards_of_pandas_dataframe(data, feature_cols, label_cols)
data = data
if data.num_partitions() != self.num_workers:
data = data.repartition(self.num_workers)
ray_xshards = RayXShards.from_spark_xshards(data)
def transform_func(worker, partition_refs):
params["data_creator"] = make_data_creator(partition_refs)
return worker.validate.remote(**params)
worker_stats = ray_xshards.reduce_partitions_for_actors(self.remote_workers,
transform_func)
else: # data_creator functions; should return Iter or DataLoader
params["data_creator"] = data
params_list = [params] * self.num_workers
worker_stats = ray.get([w.validate.remote(**params_list[i])
for i, w in enumerate(self.remote_workers)])
worker_stats = list(itertools.chain.from_iterable(worker_stats))
stats = worker_stats[0].copy()
return stats
    def _predict_spark_xshards(self, xshards, params):
        """Run distributed inference over a SparkXShards and return the
        predictions as a new SparkXShards (one prediction shard per input shard)."""
        ray_xshards = RayXShards.from_spark_xshards(xshards)
        def transform_func(worker, shards_ref):
            params["data_creator"] = make_data_creator(shards_ref)
            return worker.predict.remote(**params)
        pred_shards = ray_xshards.transform_shards_with_actors(self.remote_workers,
                                                               transform_func)
        # Move results back from the Ray object store into Spark
        spark_xshards = pred_shards.to_spark_xshards()
        return spark_xshards
    def predict(self, data, batch_size=None, verbose=1,
                steps=None, callbacks=None, data_config=None,
                feature_cols=None):
        """
        Predict the input data
        :param data: predict input data. It can be XShards or Spark DataFrame.
               If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
               {'x': feature}, where feature is a numpy array or a tuple of numpy arrays.
        :param batch_size: Batch size used for inference. Default: None.
        :param verbose: Prints output of one model if true.
        :param steps: Total number of steps (batches of samples) before declaring the prediction
               round finished. Ignored with the default value of None.
        :param callbacks: List of Keras compatible callbacks to apply during prediction.
        :param data_config: An optional dictionary that can be passed to data creator function.
        :param feature_cols: Feature column name(s) of data. Only used when data is a Spark
               DataFrame or an XShards of Pandas DataFrame. Default: None.
        :return:
        """
        logger.info("Starting predict step.")
        # Keyword arguments forwarded to each remote worker's predict()
        params = dict(
            verbose=verbose,
            batch_size=batch_size,
            steps=steps,
            callbacks=callbacks,
            data_config=data_config,
        )
        from zoo.orca.data import SparkXShards
        from pyspark.sql import DataFrame
        if isinstance(data, DataFrame):
            # Spark DataFrame: convert to XShards, predict, then join the
            # predictions back onto the original DataFrame rows.
            xshards, _ = dataframe_to_xshards(data,
                                              validation_data=None,
                                              feature_cols=feature_cols,
                                              label_cols=None,
                                              mode="predict")
            pred_shards = self._predict_spark_xshards(xshards, params)
            result = convert_predict_xshards_to_dataframe(data, pred_shards)
        elif isinstance(data, SparkXShards):
            if data._get_class_name() == 'pandas.core.frame.DataFrame':
                data = process_xshards_of_pandas_dataframe(data, feature_cols)
            pred_shards = self._predict_spark_xshards(data, params)
            # Merge predictions into the input shards
            result = update_predict_xshards(data, pred_shards)
        else:
            raise ValueError("Only xshards or Spark DataFrame is supported for predict")
        return result
    def get_model(self):
        """
        Returns the learned model.
        :return: the learned model.
        """
        # Launch get_state on every worker, but only materialize the first
        # (chief) worker's copy — state is replicated across workers.
        state_refs = [w.get_state.remote() for w in self.remote_workers]
        state = ray.get(state_refs[0])
        return self._get_model_from_state(state)
    def save(self, checkpoint):
        """
        Saves the model at the provided checkpoint.
        :param checkpoint: (str) Path to the target checkpoint file.
        :return: the checkpoint path, for convenience.
        """
        # Some model might need to aggregate variables during checkpointing
        # which requires both the chief and workers to participate in the
        # allreduce communication protocol.
        # So we need to call get_state on every remote workers, otherwise
        # it might get stuck
        state_refs = [w.get_state.remote() for w in self.remote_workers]
        state = ray.get(state_refs[0])
        # Persist the chief's state dict with pickle on the driver
        with open(checkpoint, "wb") as f:
            pickle.dump(state, f)
        return checkpoint
    def load(self, checkpoint, **kwargs):
        """
        Loads the model from the provided checkpoint.
        :param checkpoint: (str) Path to target checkpoint file.
        """
        with open(checkpoint, "rb") as f:
            state = pickle.load(f)
        # Put the state in the object store once so all workers share one copy
        state_id = ray.put(state)
        ray.get([worker.set_state.remote(state_id) for worker in self.remote_workers])
    def shutdown(self):
        """
        Shuts down workers and releases resources.
        """
        # Fire-and-forget: the remote calls are not awaited with ray.get
        for worker in self.remote_workers:
            worker.shutdown.remote()
            worker.__ray_terminate__.remote()
    def _get_model_from_state(self, state):
        """Creates model and load weights from state"""
        # keep the same behavior as `set_state` in `load` do
        model = self.model_creator(self.config)
        model.set_weights(state["weights"])
        return model
| apache-2.0 |
datapythonista/pandas | pandas/tests/frame/methods/test_count.py | 3 | 1081 | from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
class TestDataFrameCount:
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=range(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=range(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
| bsd-3-clause |
lujunyan1118/SimpleBool | BoolSimu.py | 1 | 14680 | #!/bin/env python
'''
SimpleBool is a python package for dynamic simulations of boolean network
Copyright (C) 2013 Junyan Lu
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import division
__metaclass__=type
import random
import sys
class Model:
'''
random.seed()
KEEP={}
INITIAL={}
REG_NODES=[]
TRUTH_TAB=[]
MAPPING={}
INPUT={}
'''
def __init__(self,para,mut_nodes=[]):
random.seed()
self.KEEP={}
self.INITIAL={}
self.REG_NODES=[]
self.TRUTH_TAB=[]
self.MAPPING={}
self.INPUT={}
self.FINAL={}
def ParaParser(ParaFile):
'''#parser parameters for simulation and transition matrix building'''
INPUT = {'rules' : 'rules.txt',
'ini_on' : '',
'ini_off' : '',
'turn_on' : '',
'turn_off' : '',
'rounds' : 1,
'steps' : 1,
'mode' : 'Sync',
'plot_nodes' : '',
'missing' : 'random'
} # define parameters
for each_line in open(ParaFile).readlines():
para_name = each_line.split('=')[0].strip()
para_value = each_line.split('=')[1].strip()
if para_name in INPUT.keys():
INPUT[para_name] = para_value
else:
#print "Error: Unknown Parameters: %s" % para_name
pass
# formalize parameters
try:
INPUT['rules'] = str(INPUT['rules'])
INPUT['ini_on'] = [node.strip() for node in INPUT['ini_on'].split(',')]
INPUT['ini_off'] = [node.strip() for node in INPUT['ini_off'].split(',')]
INPUT['turn_on'] = [node.strip() for node in INPUT['turn_on'].split(',')]
INPUT['turn_off'] = [node.strip() for node in INPUT['turn_off'].split(',')]
INPUT['plot_nodes'] = [node.strip() for node in INPUT['plot_nodes'].split(',')]
INPUT['rounds'] = int(INPUT['rounds'])
INPUT['steps'] = int(INPUT['steps'])
INPUT['mode']=str(INPUT['mode'])
INPUT['missing'] = {'random':'random','True':1,'true':1,'False':0,'false':0}[str(INPUT['missing'])]
for empty_keys in INPUT.keys():
if INPUT[empty_keys] == ['']: INPUT[empty_keys] = []
except:
print "Error: Invalid input data types!"
if INPUT['mode'] not in ['GA', 'Sync','ROA']: print "Wrong simulation method! Using 'Sync', 'GA' or 'ROA'"
return INPUT
def GetNodes(expression):
'''convert one line of expression to a node list'''
nodes = []
other = ['=', 'and', 'or', 'not'] # remove operator signs
for node in expression.split():
node=node.strip('*() ')
if node not in other:
nodes.append(node) # remove * ( ) from the node name
return nodes
def IterState(expression, keep):
'''Iterate all the state of input node and output all the inital state and the value of the target node,
used to construct truth table.
Return a list of tuples, the first tuple contain the index of target node and its regulators,
the rest of the tuple contain all the possible state of the target node and its regulators,
the first element in the tuple is the state of target'''
nodes = GetNodes(expression)
record = [] # to store results
all_regulator = nodes[1:] # all regulator of the target
target_node = nodes[0]
record.append(tuple([target_node] + all_regulator)) # record the target node and free regulator
bool_func = expression.split('=')[1].strip()
total_ini = 2 ** len(all_regulator) # n nodes have 2**n combinations
for node in set(all_regulator) & set(keep.keys()):
vars()[node] = keep[node] # set the value of keeped nodes
for index in xrange(total_ini):
state = bin(index)[2:].zfill(len(all_regulator)) # conver a interger to a boolean string with specified length
for i in range(len(all_regulator)):
vars()[all_regulator[i]] = int(state[i]) # set the node variable to logical state, if it is not keeped
if target_node not in keep:
target_val = int(eval(bool_func)) # caculate the target node's value, kept nodes are considered implicitly
else: # if the node value has been keeped by hand, than used that value iregulate of the state of its regulators
target_val = int(keep[target_node])
record.append(tuple([target_val] + [int(n) for n in state]))
return record
def ConstructTruthTab(booltext, keep):
'''Construct the truth table that contain all the possibile input state and output state for each node'''
all_result = []
all_nodes = set([]) # all nodes in boolean rule file
target_nodes = set([]) # nodes have regulators in boolean rule file
RegNodes = [] # a list contain regulator of each node as a tuple. The tuple index in the list is the target node index
TruthTab = [] # a list of dictionary contain the truth table for each node. The sequence is in consist with node sequence in mapping
for line in booltext.split('\n'):
if line.strip() != '' and line[0] != '#':
line_nodes = GetNodes(line)
target_nodes = target_nodes | set([line_nodes[0]])
all_nodes = all_nodes | set(line_nodes)
if line_nodes[0] not in keep.keys():
try:
all_result.append(IterState(line, keep))
except:
print "Expressing error of boolean function"
print line
else: #if the node has been kept
all_result.append([(line_nodes[0], line_nodes[0]), (1, 1), (0, 0)])
unmapped = all_nodes - target_nodes # find the node that do not have regulator, and not specified in the keep list
for unmapped_id in unmapped:
all_result.append([(unmapped_id, unmapped_id), (1, 1), (0, 0)]) # if the node do not have any regulate node, then it regulate by itself
sorted_all = sorted(all_result, key=lambda x:x[0][0])
mappings = dict(zip([node[0][0] for node in sorted_all], range(len(sorted_all))))
# generate list of regulators for each node and the truth table, sorted as the mappings
for each_node in sorted_all:
state_dic = {}
regulators = tuple([mappings[node] for node in each_node[0][1:]])
RegNodes.append(regulators)
for each_state in each_node[1:]:
state_dic[each_state[1:]] = each_state[0]
TruthTab.append(state_dic)
return RegNodes, TruthTab, mappings #
INPUT=ParaParser(para)
self.INPUT=INPUT
for on_nodes in INPUT['ini_on']:
self.INITIAL[on_nodes] = True
for off_nodes in INPUT['ini_off']:
self.INITIAL[off_nodes] = False
for on_nodes in INPUT['turn_on']:
self.INITIAL[on_nodes] = True
self.KEEP[on_nodes] = True
for off_nodes in INPUT['turn_off']:
self.KEEP[off_nodes] = False
self.INITIAL[off_nodes] = False
for node,state in mut_nodes:
self.KEEP[node]=state
self.INITIAL[node]=state
self.REG_NODES,self.TRUTH_TAB,self.MAPPING=ConstructTruthTab(open(INPUT['rules']).read(),self.KEEP)
model_verbose={'Sync':'Synchronous','GA':'General Asynchrounous','ROA':'Random Order Asynchrounous'}
print '''Model initialization completed!
Total nodes number: %s
Simulation steps: %s
Simulation rounds: %s
Simulation mode: %s
'''%(len(self.MAPPING.keys()),INPUT['steps'],INPUT['rounds'],model_verbose[INPUT['mode']])
def GetNodes(self):
return sorted(self.MAPPING)
    def GetFixed(self,file_out='steady.txt'):
        """Write each node's final simulated state to `file_out` and report
        which nodes stabilized ON and which stabilized OFF.

        Only nodes whose final frequency is exactly 0 or exactly 1 are
        reported; fractional final states are written to the file but
        listed in neither group.
        """
        all_nodes=self.GetNodes()
        on_nodes=[]
        off_nodes=[]
        # NOTE(review): `file()` is the Python-2-only spelling of open(),
        # and there is no try/finally guarding the close below.
        output=file(file_out,'w')
        for node in all_nodes:
            # One tab-separated "name<TAB>state" line per node.
            output.writelines('%s\t%s\n'%(node,self.FINAL[node]))
            if self.FINAL[node] == 0:
                off_nodes.append(node)
            elif self.FINAL[node] == 1:
                on_nodes.append(node)
        print '''%s nodes stabilized on 'ON' state: %s '''%(len(on_nodes),','.join(on_nodes))
        print '''%s nodes stabilized on 'OFF' state: %s '''%(len(off_nodes),','.join(off_nodes))
        output.close()
    def IterModel(self,missing='random'):
        """Simulate the Boolean network for INPUT['rounds'] independent runs
        of INPUT['steps'] steps each, and return a dict mapping each node
        name to its per-step ON frequency averaged over the rounds.

        Side effect: records each node's final frequency in self.FINAL.
        """
        traj_all=[]
        steps=self.INPUT['steps']
        rounds=self.INPUT['rounds']
        # NOTE(review): this overwrites the `missing` argument with the value
        # parsed from the input file, so the keyword parameter is dead code.
        missing=self.INPUT['missing']
        # collect[s][i] accumulates, over all rounds, how often node i was ON
        # at step s.  The list-replication aliases one inner list, but every
        # row is rebound (not mutated) via StringAdd below, so it is harmless.
        collect=[[0]*len(self.MAPPING)]*(steps+1)
        def IterOneSync(InitialState):
            '''Iterate model using sychronous method. The most time consuming part, need to be carefully optimized'''
            NewState = [str(self.TRUTH_TAB[i][tuple([int(InitialState[j]) for j in self.REG_NODES[i]])]) for i in range(len(InitialState))]
            return ''.join(NewState)
        def IterOneAsync(InitialState):
            '''Iterate model using asynchronous method (General Asynchronous model: update one random node per step)'''
            update_index=random.randint(0,len(InitialState)-1)
            NewState=list(InitialState)
            NewState[update_index] = str(self.TRUTH_TAB[update_index][tuple([int(InitialState[index]) for index in self.REG_NODES[update_index]])])
            return ''.join(NewState)
        def IterOneROA(InitialState):
            # Random Order Asynchronous: update every node once per step, in
            # a freshly shuffled order, each update seeing earlier updates.
            # NOTE(review): relies on range() returning a list (Python 2);
            # random.shuffle would fail on a Python 3 range object.
            seq=range(len(InitialState))
            random.shuffle(seq) # generate a random sequence of updating list
            NewState=list(InitialState)
            for i in seq:
                NewState[i]= str(self.TRUTH_TAB[i][tuple([int(NewState[index]) for index in self.REG_NODES[i]])])
            #NewState = [str(self.TRUTH_TAB[i][tuple([int(NewState[j]) for j in self.REG_NODES[i]])]) for i in seq]
            return ''.join(NewState)
        def GenInitial():
            # Build a '0'/'1' string for all nodes (sorted order); nodes with
            # no specified initial state are filled per the `missing` policy.
            initial_state=[]
            for node in sorted(self.MAPPING.keys()):
                if node in self.INITIAL:
                    initial_state.append(str(int(self.INITIAL[node])))
                else:
                    if missing=='random':
                        initial_state.append(random.choice(['0','1']))
                    else:
                        initial_state.append(str(int(missing)))
            return ''.join(initial_state)
        def StringAdd(xlist,ystring):
            # Element-wise add a '0'/'1' state string into a count list.
            return [x+int(y) for x,y in zip(xlist,ystring)]
        def divide(x):
            # NOTE(review): under Python 2 this is integer division of two
            # ints, which truncates the averaged frequency to 0 or 1 — confirm
            # whether `from __future__ import division` is in effect upstream.
            return x/rounds
        for r in range(rounds):
            traj=[]
            ini_state=GenInitial()
            traj.append(ini_state)
            prev=ini_state
            collect[0]=StringAdd(collect[0],prev)
            for s in range(steps):
                # Dispatch on the configured update scheme.
                # NOTE(review): `next` shadows the builtin of the same name.
                if self.INPUT['mode']=='Sync':
                    next=IterOneSync(prev)
                elif self.INPUT['mode']=='GA':
                    next=IterOneAsync(prev)
                elif self.INPUT['mode'] == 'ROA':
                    next=IterOneROA(prev)
                traj.append(next)
                collect[s+1]=StringAdd(collect[s+1],next)
                prev=next
            traj_all.append(traj)
        out={}
        # Average the accumulated counts over all rounds (see divide above).
        normalized=[map(divide,each_step) for each_step in collect]
        nodes_list=self.GetNodes()
        for node_i in range(len(nodes_list)):
            out[nodes_list[node_i]]=[state[node_i] for state in normalized]
            # The last step's frequency is taken as the node's final state.
            self.FINAL[nodes_list[node_i]]=out[nodes_list[node_i]][-1]
        return out
def plot_result(results,plotlist,marker=True):
    """Plot the per-step ON frequency of each node named in `plotlist`.

    `results` maps node name -> list of frequencies (as returned by
    Model.IterModel); `marker` toggles per-point symbols on the lines.
    """
    import matplotlib.pyplot as plt
    # NOTE(review): this string literal follows the import statement, so it
    # is not actually the function's docstring.
    '''Plot the simulated results'''
    print "Ploting results..."
    plotsymbyl=['o','v','*','s','+','p','x','1','2','h','D','.',','] # plot line with symbyl
    ploti=0
    for items in plotlist: # plot nodes states using matplotlib
        if marker:
            plt.plot(results[items],label=items,linewidth=2.5,linestyle='-',marker=plotsymbyl[ploti]) #with marker
        else: plt.plot(results[items],label=items,linewidth=2.5,linestyle='-') #no marker
        # Cycle through the marker symbols when there are many nodes.
        # NOTE(review): the list holds 13 symbols but the index resets at 12,
        # so the last symbol (',') is used only once per cycle — confirm.
        ploti += 1
        if ploti >= 12: ploti=0
    plt.xlabel('Steps',size=15)
    plt.ylabel('Percentage',size=15)
    plt.yticks([-0.1,0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1],size=15)
    plt.xticks(size=15)
    plt.legend(prop={'size':15}) # make legend
    plt.show() # show plot
    #plt.savefig('figure.png',dpi=300)
    return
def write_data(results,file_out='data.txt',window=1):
    """Write simulated node frequencies to a fixed-width text file.

    Parameters
    ----------
    results : dict
        Maps node name -> list of per-step frequencies (as returned by
        Model.IterModel).  Index 0 (the initial state) is skipped.
    file_out : str
        Output path (default 'data.txt').
    window : int
        Sampling stride: only every `window`-th step is written.

    One line per node (sorted by name): the name left-justified in 15
    columns, followed by each sampled frequency as '%-8.2f'.
    """
    # `open` replaces the Python-2-only `file()` constructor, and the
    # context manager guarantees the handle is closed even if a write
    # raises (the original leaked the handle on error).
    with open(file_out, 'w') as data_out:
        for nodes in sorted(results):
            # write(), not writelines(): each argument is a single string.
            data_out.write('%-15s'%nodes)
            # Skip the initial state and thin the series by `window`.
            for frequency in results[nodes][1::window]:
                data_out.write('%-8.2f'%frequency)
            data_out.write('\n')
if __name__ == '__main__':
    # Build the model from the input file named on the command line; fall
    # back to the default 'simu.in' when no argument is given.
    # NOTE(review): the bare `except:` also swallows unrelated failures
    # inside Model() (bad file contents, parse errors) and silently retries
    # with the default file — consider catching IndexError/IOError only.
    try:
        model=Model(sys.argv[1])
    except:
        model=Model('simu.in')
    # Run the simulation, dump the frequencies, and plot the chosen nodes.
    results=model.IterModel(missing='random')
    write_data(results)
    plot_result(results,model.INPUT['plot_nodes'],marker=False)
| gpl-3.0 |
alphaBenj/zipline | tests/test_algorithm.py | 1 | 175570 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import namedtuple
import datetime
from datetime import timedelta
from textwrap import dedent
from unittest import skip
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from mock import MagicMock
from nose_parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.core.common import PerformanceWarning
from zipline import run_algorithm
from zipline import TradingAlgorithm
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.data.data_portal import DataPortal
from zipline.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
OrderDuringInitialize,
OrderInBeforeTradingStart,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
)
from zipline.api import (
order,
order_value,
order_percent,
order_target,
order_target_value,
order_target_percent
)
from zipline.finance.commission import PerShare
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.testing import (
FakeDataPortal,
copy_market_data,
create_daily_df_for_asset,
create_data_portal,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
tmp_trading_env,
to_utc,
trades_by_sid_to_dfs,
tmp_dir,
)
from zipline.testing import RecordBatchBlotter
from zipline.testing.fixtures import (
WithDataPortal,
WithLogger,
WithSimParams,
WithTradingEnvironment,
WithTmpDir,
ZiplineTestCase,
)
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
AmbitiousStopLimitAlgorithm,
EmptyPositionsAlgorithm,
InvalidOrderAlgorithm,
RecordAlgorithm,
FutureFlipAlgo,
TestOrderAlgorithm,
TestOrderPercentAlgorithm,
TestOrderStyleForwardingAlgorithm,
TestOrderValueAlgorithm,
TestPositionWeightsAlgorithm,
TestRegisterTransformAlgorithm,
TestTargetAlgorithm,
TestTargetPercentAlgorithm,
TestTargetValueAlgorithm,
SetLongOnlyAlgorithm,
SetAssetDateBoundsAlgorithm,
SetMaxPositionSizeAlgorithm,
SetMaxOrderCountAlgorithm,
SetMaxOrderSizeAlgorithm,
SetDoNotOrderListAlgorithm,
SetAssetRestrictionsAlgorithm,
SetMultipleAssetRestrictionsAlgorithm,
SetMaxLeverageAlgorithm,
api_algo,
api_get_environment_algo,
api_symbol_algo,
call_all_order_methods,
call_order_in_init,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
set_benchmark_algo,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI, set_algo_instance
from zipline.utils.calendars import get_calendar, register_calendar
from zipline.utils.context_tricks import CallbackManager
from zipline.utils.control_flow import nullctx
import zipline.utils.events
from zipline.utils.events import date_rules, time_rules, Always
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecordAlgorithm(WithSimParams, WithDataPortal, ZiplineTestCase):
    """Verify that values recorded via `record()` land in the backtest
    output frame, one column per recorded name."""
    ASSET_FINDER_EQUITY_SIDS = 133,

    def test_record_incr(self):
        algo = RecordAlgorithm(sim_params=self.sim_params, env=self.env)
        output = algo.run(self.data_portal)
        # 'incr', 'name' and 'name3' all record an incrementing counter;
        # 'name2' records the constant 2 on every bar.
        incrementing = range(1, len(output) + 1)
        expectations = [
            ('incr', incrementing),
            ('name', incrementing),
            ('name2', [2] * len(output)),
            ('name3', incrementing),
        ]
        for column, expected in expectations:
            np.testing.assert_array_equal(output[column].values, expected)
class TestMiscellaneousAPI(WithLogger,
                           WithSimParams,
                           WithDataPortal,
                           ZiplineTestCase):
    """Tests for assorted TradingAlgorithm API surface: cancel policies,
    dynamic zipline.api resolution, get_environment, open-order queries,
    schedule_function (including custom calendars), event contexts, and
    equity/future symbol lookup.
    """
    START_DATE = pd.Timestamp('2006-01-03', tz='UTC')
    END_DATE = pd.Timestamp('2006-01-04', tz='UTC')
    SIM_PARAMS_DATA_FREQUENCY = 'minute'
    sids = 1, 2

    @classmethod
    def make_equity_info(cls):
        # Sids 3 and 4 deliberately share the symbol 'PLAY' over
        # non-overlapping date ranges so date-sensitive symbol lookup can
        # be exercised in test_asset_lookup / test_set_symbol_lookup_date.
        return pd.concat((
            make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
            pd.DataFrame.from_dict(
                {3: {'symbol': 'PLAY',
                     'start_date': '2002-01-01',
                     'end_date': '2004-01-01',
                     'exchange': 'TEST'},
                 4: {'symbol': 'PLAY',
                     'start_date': '2005-01-01',
                     'end_date': '2006-01-01',
                     'exchange': 'TEST'}},
                orient='index',
            ),
        ))

    @classmethod
    def make_futures_info(cls):
        # Four CL contracts with staggered notice/expiration dates, used by
        # test_future_symbol.
        return pd.DataFrame.from_dict(
            {
                5: {
                    'symbol': 'CLG06',
                    'root_symbol': 'CL',
                    'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
                    'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
                    'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
                    'exchange': 'TEST'
                },
                6: {
                    'root_symbol': 'CL',
                    'symbol': 'CLK06',
                    'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
                    'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
                    'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
                    'exchange': 'TEST',
                },
                7: {
                    'symbol': 'CLQ06',
                    'root_symbol': 'CL',
                    'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
                    'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
                    'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
                    'exchange': 'TEST',
                },
                8: {
                    'symbol': 'CLX06',
                    'root_symbol': 'CL',
                    'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
                    'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
                    'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
                    'exchange': 'TEST',
                }
            },
            orient='index',
        )

    # Calling set_cancel_policy outside initialize() must raise.
    def test_cancel_policy_outside_init(self):
        code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
    pass
def handle_data(algo, data):
    set_cancel_policy(cancel_policy.NeverCancel())
"""
        algo = TradingAlgorithm(script=code,
                                sim_params=self.sim_params,
                                env=self.env)
        with self.assertRaises(SetCancelPolicyPostInit):
            algo.run(self.data_portal)

    # A non-CancelPolicy argument must be rejected.
    def test_cancel_policy_invalid_param(self):
        code = """
from zipline.api import set_cancel_policy
def initialize(algo):
    set_cancel_policy("foo")
def handle_data(algo, data):
    pass
"""
        algo = TradingAlgorithm(script=code,
                                sim_params=self.sim_params,
                                env=self.env)
        with self.assertRaises(UnsupportedCancelPolicy):
            algo.run(self.data_portal)

    def test_zipline_api_resolves_dynamically(self):
        # Make a dummy algo.
        algo = TradingAlgorithm(
            initialize=lambda context: None,
            handle_data=lambda context, data: None,
            sim_params=self.sim_params,
            env=self.env,
        )
        # Verify that api methods get resolved dynamically by patching them out
        # and then calling them
        for method in algo.all_api_methods():
            name = method.__name__
            sentinel = object()

            def fake_method(*args, **kwargs):
                return sentinel
            setattr(algo, name, fake_method)
            with ZiplineAPI(algo):
                self.assertIs(sentinel, getattr(zipline.api, name)())

    # 'last_traded' for a just-traded sid should equal the current dt.
    def test_sid_datetime(self):
        algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
    pass
def handle_data(context, data):
    aapl_dt = data.current(sid(1), "last_traded")
    assert_equal(aapl_dt, get_datetime())
"""
        algo = TradingAlgorithm(script=algo_text,
                                sim_params=self.sim_params,
                                env=self.env)
        algo.namespace['assert_equal'] = self.assertEqual
        algo.run(self.data_portal)

    # Passing a tz class (not an instance/name) to get_datetime must raise.
    def test_datetime_bad_params(self):
        algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
    pass
def handle_data(context, data):
    get_datetime(timezone)
"""
        with self.assertRaises(TypeError):
            algo = TradingAlgorithm(script=algo_text,
                                    sim_params=self.sim_params,
                                    env=self.env)
            algo.run(self.data_portal)

    def test_get_environment(self):
        expected_env = {
            'arena': 'backtest',
            'data_frequency': 'minute',
            'start': pd.Timestamp('2006-01-03 14:31:00+0000', tz='utc'),
            'end': pd.Timestamp('2006-01-04 21:00:00+0000', tz='utc'),
            'capital_base': 100000.0,
            'platform': 'zipline'
        }

        def initialize(algo):
            self.assertEqual('zipline', algo.get_environment())
            self.assertEqual(expected_env, algo.get_environment('*'))

        def handle_data(algo, data):
            pass
        algo = TradingAlgorithm(initialize=initialize,
                                handle_data=handle_data,
                                sim_params=self.sim_params,
                                env=self.env)
        algo.run(self.data_portal)

    def test_get_open_orders(self):
        def initialize(algo):
            algo.minute = 0

        def handle_data(algo, data):
            if algo.minute == 0:
                # Should be filled by the next minute
                algo.order(algo.sid(1), 1)
                # Won't be filled because the price is too low.
                algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
                algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
                algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
                all_orders = algo.get_open_orders()
                self.assertEqual(list(all_orders.keys()), [1, 2])
                self.assertEqual(all_orders[1], algo.get_open_orders(1))
                self.assertEqual(len(all_orders[1]), 1)
                self.assertEqual(all_orders[2], algo.get_open_orders(2))
                self.assertEqual(len(all_orders[2]), 3)
            if algo.minute == 1:
                # First order should have filled.
                # Second order should still be open.
                all_orders = algo.get_open_orders()
                self.assertEqual(list(all_orders.keys()), [2])
                self.assertEqual([], algo.get_open_orders(1))
                orders_2 = algo.get_open_orders(2)
                self.assertEqual(all_orders[2], orders_2)
                self.assertEqual(len(all_orders[2]), 3)
                for order_ in orders_2:
                    algo.cancel_order(order_)
                all_orders = algo.get_open_orders()
                self.assertEqual(all_orders, {})
            algo.minute += 1
        algo = TradingAlgorithm(initialize=initialize,
                                handle_data=handle_data,
                                sim_params=self.sim_params,
                                env=self.env)
        algo.run(self.data_portal)

    def test_schedule_function_custom_cal(self):
        # run a simulation on the CME cal, and schedule a function
        # using the NYSE cal
        algotext = """
from zipline.api import (
    schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
    schedule_function(
        func=log_nyse_open,
        date_rule=date_rules.every_day(),
        time_rule=time_rules.market_open(),
        calendar=calendars.US_EQUITIES,
    )
    schedule_function(
        func=log_nyse_close,
        date_rule=date_rules.every_day(),
        time_rule=time_rules.market_close(),
        calendar=calendars.US_EQUITIES,
    )
    context.nyse_opens = []
    context.nyse_closes = []
def log_nyse_open(context, data):
    context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
    context.nyse_closes.append(get_datetime())
"""
        algo = TradingAlgorithm(
            script=algotext,
            sim_params=self.sim_params,
            env=self.env,
            trading_calendar=get_calendar("CME")
        )
        algo.run(self.data_portal)
        nyse = get_calendar("NYSE")
        for minute in algo.nyse_opens:
            # each minute should be a nyse session open
            session_label = nyse.minute_to_session_label(minute)
            session_open = nyse.session_open(session_label)
            self.assertEqual(session_open, minute)
        for minute in algo.nyse_closes:
            # each minute should be a minute before a nyse session close
            session_label = nyse.minute_to_session_label(minute)
            session_close = nyse.session_close(session_label)
            self.assertEqual(session_close - timedelta(minutes=1), minute)
        # Test that passing an invalid calendar parameter raises an error.
        erroring_algotext = dedent(
            """
            from zipline.api import schedule_function
            from zipline.utils.calendars import get_calendar
            def initialize(context):
                schedule_function(func=my_func, calendar=get_calendar('NYSE'))
            def my_func(context, data):
                pass
            """
        )
        algo = TradingAlgorithm(
            script=erroring_algotext,
            sim_params=self.sim_params,
            env=self.env,
            trading_calendar=get_calendar('CME'),
        )
        with self.assertRaises(ScheduleFunctionInvalidCalendar):
            algo.run(self.data_portal)

    def test_schedule_function(self):
        us_eastern = pytz.timezone('US/Eastern')

        def incrementer(algo, data):
            algo.func_called += 1
            curdt = algo.get_datetime().tz_convert(pytz.utc)
            # The scheduled function must fire at 9:31 US/Eastern (one
            # minute after NYSE market open) on every session.
            self.assertEqual(
                curdt,
                us_eastern.localize(
                    datetime.datetime.combine(
                        curdt.date(),
                        datetime.time(9, 31)
                    ),
                ),
            )

        def initialize(algo):
            algo.func_called = 0
            algo.days = 1
            algo.date = None
            algo.schedule_function(
                func=incrementer,
                date_rule=date_rules.every_day(),
                time_rule=time_rules.market_open(),
            )

        def handle_data(algo, data):
            if not algo.date:
                algo.date = algo.get_datetime().date()
            if algo.date < algo.get_datetime().date():
                algo.days += 1
                algo.date = algo.get_datetime().date()
        algo = TradingAlgorithm(
            initialize=initialize,
            handle_data=handle_data,
            sim_params=self.sim_params,
            env=self.env,
        )
        algo.run(self.data_portal)
        # Exactly one invocation per simulated session.
        self.assertEqual(algo.func_called, algo.days)

    def test_event_context(self):
        expected_data = []
        collected_data_pre = []
        collected_data_post = []
        function_stack = []

        def pre(data):
            function_stack.append(pre)
            collected_data_pre.append(data)

        def post(data):
            function_stack.append(post)
            collected_data_post.append(data)

        def initialize(context):
            context.add_event(Always(), f)
            context.add_event(Always(), g)

        def handle_data(context, data):
            function_stack.append(handle_data)
            expected_data.append(data)

        def f(context, data):
            function_stack.append(f)

        def g(context, data):
            function_stack.append(g)
        algo = TradingAlgorithm(
            initialize=initialize,
            handle_data=handle_data,
            sim_params=self.sim_params,
            create_event_context=CallbackManager(pre, post),
            env=self.env,
        )
        algo.run(self.data_portal)
        # 780 = 2 sessions x 390 minutes; pre/post wrap every bar.
        self.assertEqual(len(expected_data), 780)
        self.assertEqual(collected_data_pre, expected_data)
        self.assertEqual(collected_data_post, expected_data)
        self.assertEqual(
            len(function_stack),
            3900,
            'Incorrect number of functions called: %s != 3900' %
            len(function_stack),
        )
        # NOTE(review): zip() truncates at len(function_stack) == 3900, so
        # any multiplier >= 780 gives the same comparison here.
        expected_functions = [pre, handle_data, f, g, post] * 97530
        for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
            self.assertEqual(
                f,
                g,
                'function at position %d was incorrect, expected %s but got %s'
                % (n, g.__name__, f.__name__),
            )

    # NOTE(review): ('minute') below is a plain string, not a 1-tuple;
    # parameterized.expand accepts both forms, so behavior is unchanged.
    @parameterized.expand([
        ('daily',),
        ('minute'),
    ])
    def test_schedule_function_rule_creation(self, mode):
        def nop(*args, **kwargs):
            return None
        self.sim_params.data_frequency = mode
        algo = TradingAlgorithm(
            initialize=nop,
            handle_data=nop,
            sim_params=self.sim_params,
            env=self.env,
        )
        # Schedule something for NOT Always.
        algo.schedule_function(nop, time_rule=zipline.utils.events.Never())
        event_rule = algo.event_manager._events[1].rule
        self.assertIsInstance(event_rule, zipline.utils.events.OncePerDay)
        inner_rule = event_rule.rule
        self.assertIsInstance(inner_rule, zipline.utils.events.ComposedRule)
        first = inner_rule.first
        second = inner_rule.second
        composer = inner_rule.composer
        self.assertIsInstance(first, zipline.utils.events.Always)
        # In daily mode the time rule collapses to Always; in minute mode
        # the explicit Never() is preserved.
        if mode == 'daily':
            self.assertIsInstance(second, zipline.utils.events.Always)
        else:
            self.assertIsInstance(second, zipline.utils.events.Never)
        self.assertIs(composer, zipline.utils.events.ComposedRule.lazy_and)

    def test_asset_lookup(self):
        algo = TradingAlgorithm(env=self.env)
        # this date doesn't matter
        start_session = pd.Timestamp("2000-01-01", tz="UTC")
        # Test before either PLAY existed
        algo.sim_params = algo.sim_params.create_new(
            start_session,
            pd.Timestamp('2001-12-01', tz='UTC')
        )
        with self.assertRaises(SymbolNotFound):
            algo.symbol('PLAY')
        with self.assertRaises(SymbolNotFound):
            algo.symbols('PLAY')
        # Test when first PLAY exists
        algo.sim_params = algo.sim_params.create_new(
            start_session,
            pd.Timestamp('2002-12-01', tz='UTC')
        )
        list_result = algo.symbols('PLAY')
        self.assertEqual(3, list_result[0])
        # Test after first PLAY ends
        algo.sim_params = algo.sim_params.create_new(
            start_session,
            pd.Timestamp('2004-12-01', tz='UTC')
        )
        self.assertEqual(3, algo.symbol('PLAY'))
        # Test after second PLAY begins
        algo.sim_params = algo.sim_params.create_new(
            start_session,
            pd.Timestamp('2005-12-01', tz='UTC')
        )
        self.assertEqual(4, algo.symbol('PLAY'))
        # Test after second PLAY ends
        algo.sim_params = algo.sim_params.create_new(
            start_session,
            pd.Timestamp('2006-12-01', tz='UTC')
        )
        self.assertEqual(4, algo.symbol('PLAY'))
        list_result = algo.symbols('PLAY')
        self.assertEqual(4, list_result[0])
        # Test lookup SID
        self.assertIsInstance(algo.sid(3), Equity)
        self.assertIsInstance(algo.sid(4), Equity)
        # Supplying a non-string argument to symbol()
        # should result in a TypeError.
        with self.assertRaises(TypeError):
            algo.symbol(1)
        with self.assertRaises(TypeError):
            algo.symbol((1,))
        with self.assertRaises(TypeError):
            algo.symbol({1})
        with self.assertRaises(TypeError):
            algo.symbol([1])
        with self.assertRaises(TypeError):
            algo.symbol({'foo': 'bar'})

    def test_future_symbol(self):
        """ Tests the future_symbol API function.
        """
        algo = TradingAlgorithm(env=self.env)
        algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
        # Check that we get the correct fields for the CLG06 symbol
        cl = algo.future_symbol('CLG06')
        self.assertEqual(cl.sid, 5)
        self.assertEqual(cl.symbol, 'CLG06')
        self.assertEqual(cl.root_symbol, 'CL')
        self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
        self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
        self.assertEqual(cl.expiration_date,
                         pd.Timestamp('2006-01-20', tz='UTC'))
        with self.assertRaises(SymbolNotFound):
            algo.future_symbol('')
        with self.assertRaises(SymbolNotFound):
            algo.future_symbol('PLAY')
        with self.assertRaises(SymbolNotFound):
            algo.future_symbol('FOOBAR')
        # Supplying a non-string argument to future_symbol()
        # should result in a TypeError.
        with self.assertRaises(TypeError):
            algo.future_symbol(1)
        with self.assertRaises(TypeError):
            algo.future_symbol((1,))
        with self.assertRaises(TypeError):
            algo.future_symbol({1})
        with self.assertRaises(TypeError):
            algo.future_symbol([1])
        with self.assertRaises(TypeError):
            algo.future_symbol({'foo': 'bar'})

    def test_set_symbol_lookup_date(self):
        """
        Test the set_symbol_lookup_date API method.
        """
        # Note we start sid enumeration at i+3 so as not to
        # collide with sids [1, 2] added in the setUp() method.
        dates = pd.date_range('2013-01-01', freq='2D', periods=2, tz='UTC')
        # Create two assets with the same symbol but different
        # non-overlapping date ranges.
        metadata = pd.DataFrame.from_records(
            [
                {
                    'sid': i + 3,
                    'symbol': 'DUP',
                    'start_date': date.value,
                    'end_date': (date + timedelta(days=1)).value,
                    'exchange': 'TEST',
                }
                for i, date in enumerate(dates)
            ]
        )
        with tmp_trading_env(equities=metadata,
                             load=self.make_load_function()) as env:
            algo = TradingAlgorithm(env=env)
            # Set the period end to a date after the period end
            # dates for our assets.
            algo.sim_params = algo.sim_params.create_new(
                algo.sim_params.start_session,
                pd.Timestamp('2015-01-01', tz='UTC')
            )
            # With no symbol lookup date set, we will use the period end date
            # for the as_of_date, resulting here in the asset with the earlier
            # start date being returned.
            result = algo.symbol('DUP')
            self.assertEqual(result.symbol, 'DUP')
            # By first calling set_symbol_lookup_date, the relevant asset
            # should be returned by lookup_symbol
            for i, date in enumerate(dates):
                algo.set_symbol_lookup_date(date)
                result = algo.symbol('DUP')
                self.assertEqual(result.symbol, 'DUP')
                self.assertEqual(result.sid, i + 3)
            with self.assertRaises(UnsupportedDatetimeFormat):
                algo.set_symbol_lookup_date('foobar')
class TestTransformAlgorithm(WithLogger,
                             WithDataPortal,
                             WithSimParams,
                             ZiplineTestCase):
    """Tests for order-placement APIs (order, order_value, order_target and
    friends), order rounding, run-to-run determinism, and data-frequency
    configuration, for both equities and futures.
    """
    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-01-06', tz='utc')
    sids = ASSET_FINDER_EQUITY_SIDS = [0, 1, 133]

    @classmethod
    def make_futures_info(cls):
        # A single future (sid 3) with a 10x multiplier, used by the
        # *_for_future order-method tests.
        return pd.DataFrame.from_dict({
            3: {
                'multiplier': 10,
                'symbol': 'F',
                'exchange': 'TEST'
            }
        }, orient='index')

    @classmethod
    def make_equity_daily_bar_data(cls):
        # Four days of synthetic trades per sid.
        return trades_by_sid_to_dfs(
            {
                sid: factory.create_trade_history(
                    sid,
                    [10.0, 10.0, 11.0, 11.0],
                    [100, 100, 100, 300],
                    timedelta(days=1),
                    cls.sim_params,
                    cls.trading_calendar,
                ) for sid in cls.sids
            },
            index=cls.sim_params.sessions,
        )

    @classmethod
    def init_class_fixtures(cls):
        super(TestTransformAlgorithm, cls).init_class_fixtures()
        # A separate trading environment containing only the future above.
        cls.futures_env = cls.enter_class_context(
            tmp_trading_env(futures=cls.make_futures_info(),
                            load=cls.make_load_function()),
        )

    def test_invalid_order_parameters(self):
        algo = InvalidOrderAlgorithm(
            sids=[133],
            sim_params=self.sim_params,
            env=self.env,
        )
        algo.run(self.data_portal)

    # Every order-entry API must be rejected inside before_trading_start.
    @parameterized.expand([
        (order, 1),
        (order_value, 1000),
        (order_target, 1),
        (order_target_value, 1000),
        (order_percent, 1),
        (order_target_percent, 1),
    ])
    def test_cannot_order_in_before_trading_start(self, order_method, amount):
        algotext = """
from zipline.api import sid
from zipline.api import {order_func}
def initialize(context):
    context.asset = sid(133)
def before_trading_start(context, data):
    {order_func}(context.asset, {arg})
""".format(order_func=order_method.__name__, arg=amount)
        algo = TradingAlgorithm(script=algotext, sim_params=self.sim_params,
                                data_frequency='daily', env=self.env)
        with self.assertRaises(OrderInBeforeTradingStart):
            algo.run(self.data_portal)

    # Two identical runs must produce identical results (determinism).
    def test_run_twice(self):
        algo1 = TestRegisterTransformAlgorithm(
            sim_params=self.sim_params,
            sids=[0, 1],
            env=self.env,
        )
        res1 = algo1.run(self.data_portal)
        # Create a new trading algorithm, which will
        # use the newly instantiated environment.
        algo2 = TestRegisterTransformAlgorithm(
            sim_params=self.sim_params,
            sids=[0, 1],
            env=self.env,
        )
        res2 = algo2.run(self.data_portal)
        # There are some np.NaN values in the first row because there is not
        # enough data to calculate the metric, e.g. beta.
        res1 = res1.fillna(value=0)
        res2 = res2.fillna(value=0)
        np.testing.assert_array_equal(res1, res2)

    def test_data_frequency_setting(self):
        self.sim_params.data_frequency = 'daily'
        sim_params = factory.create_simulation_parameters(
            num_days=4, data_frequency='daily')
        algo = TestRegisterTransformAlgorithm(
            sim_params=sim_params,
            env=self.env,
        )
        self.assertEqual(algo.sim_params.data_frequency, 'daily')
        sim_params = factory.create_simulation_parameters(
            num_days=4, data_frequency='minute')
        algo = TestRegisterTransformAlgorithm(
            sim_params=sim_params,
            env=self.env,
        )
        self.assertEqual(algo.sim_params.data_frequency, 'minute')

    def test_order_rounding(self):
        # (input, expected) pairs; rounding is toward zero with a small
        # tolerance, and symmetric for negative amounts (checked below).
        answer_key = [
            (0, 0),
            (10, 10),
            (1.1, 1),
            (1.5, 1),
            (1.9998, 1),
            (1.99991, 2),
        ]
        for input, answer in answer_key:
            self.assertEqual(
                answer,
                TradingAlgorithm.round_order(input)
            )
            self.assertEqual(
                -1 * answer,
                TradingAlgorithm.round_order(-1 * input)
            )

    @parameterized.expand([
        ('order', TestOrderAlgorithm,),
        ('order_value', TestOrderValueAlgorithm,),
        ('order_target', TestTargetAlgorithm,),
        ('order_percent', TestOrderPercentAlgorithm,),
        ('order_target_percent', TestTargetPercentAlgorithm,),
        ('order_target_value', TestTargetValueAlgorithm,),
    ])
    def test_order_methods(self, test_name, algo_class):
        algo = algo_class(
            sim_params=self.sim_params,
            env=self.env,
        )
        # Ensure that the environment's asset 0 is an Equity
        asset_to_test = algo.sid(0)
        self.assertIsInstance(asset_to_test, Equity)
        algo.run(self.data_portal)

    @parameterized.expand([
        (TestOrderAlgorithm,),
        (TestOrderValueAlgorithm,),
        (TestTargetAlgorithm,),
        (TestOrderPercentAlgorithm,),
        (TestTargetValueAlgorithm,),
    ])
    def test_order_methods_for_future(self, algo_class):
        algo = algo_class(
            sim_params=self.sim_params,
            env=self.env,
        )
        # Ensure that the environment's asset 3 is a Future
        asset_to_test = algo.sid(3)
        self.assertIsInstance(asset_to_test, Future)
        algo.run(self.data_portal)

    # Each order method must forward its `style` argument unchanged.
    @parameterized.expand([
        ("order",),
        ("order_value",),
        ("order_percent",),
        ("order_target",),
        ("order_target_percent",),
        ("order_target_value",),
    ])
    def test_order_method_style_forwarding(self, order_style):
        algo = TestOrderStyleForwardingAlgorithm(
            sim_params=self.sim_params,
            method_name=order_style,
            env=self.env
        )
        algo.run(self.data_portal)

    def test_order_on_each_day_of_asset_lifetime(self):
        algo_code = dedent("""
        from zipline.api import sid, schedule_function, date_rules, order
        def initialize(context):
            schedule_function(order_it, date_rule=date_rules.every_day())
        def order_it(context, data):
            order(sid(133), 1)
        def handle_data(context, data):
            pass
        """)
        asset133 = self.env.asset_finder.retrieve_asset(133)
        # Simulate over the asset's entire lifetime so orders are placed on
        # its first and last sessions as well.
        sim_params = SimulationParameters(
            start_session=asset133.start_date,
            end_session=asset133.end_date,
            data_frequency="minute",
            trading_calendar=self.trading_calendar
        )
        algo = TradingAlgorithm(
            script=algo_code,
            sim_params=sim_params,
            env=self.env
        )
        results = algo.run(FakeDataPortal(self.env))
        for orders_for_day in results.orders:
            self.assertEqual(1, len(orders_for_day))
            self.assertEqual(orders_for_day[0]["status"], ORDER_STATUS.FILLED)
        for txns_for_day in results.transactions:
            self.assertEqual(1, len(txns_for_day))
            self.assertEqual(1, txns_for_day[0]["amount"])

    @parameterized.expand([
        (TestOrderAlgorithm,),
        (TestOrderValueAlgorithm,),
        (TestTargetAlgorithm,),
        (TestOrderPercentAlgorithm,)
    ])
    def test_minute_data(self, algo_class):
        start_session = pd.Timestamp('2002-1-2', tz='UTC')
        period_end = pd.Timestamp('2002-1-4', tz='UTC')
        equities = pd.DataFrame([{
            'start_date': start_session,
            'end_date': period_end + timedelta(days=1),
            'exchange': "TEST",
        }] * 2)
        equities['symbol'] = ['A', 'B']
        with TempDirectory() as tempdir, \
                tmp_trading_env(equities=equities,
                                load=self.make_load_function()) as env:
            sim_params = SimulationParameters(
                start_session=start_session,
                end_session=period_end,
                capital_base=1.0e5,
                data_frequency='minute',
                trading_calendar=self.trading_calendar,
            )
            data_portal = create_data_portal(
                env.asset_finder,
                tempdir,
                sim_params,
                equities.index,
                self.trading_calendar,
            )
            algo = algo_class(sim_params=sim_params, env=env)
            algo.run(data_portal)
class TestPositions(WithLogger,
                    WithDataPortal,
                    WithSimParams,
                    ZiplineTestCase):
    """Tests for position tracking and position weights in daily stats.

    Uses two equities (sids 1 and 133) with identical, monotonically
    increasing daily prices, plus one futures contract (sid 1000) with a
    constant price of 2.0 and a multiplier of 100.
    """
    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-01-06', tz='utc')
    SIM_PARAMS_CAPITAL_BASE = 1000

    ASSET_FINDER_EQUITY_SIDS = (1, 133)

    @classmethod
    def make_equity_daily_bar_data(cls):
        # Every equity gets the same frame: price rises 90 -> 105 in steps
        # of 5, one bar per session, constant volume.
        frame = pd.DataFrame(
            {
                'open': [90, 95, 100, 105],
                'high': [90, 95, 100, 105],
                'low': [90, 95, 100, 105],
                'close': [90, 95, 100, 105],
                'volume': 100,
            },
            index=cls.equity_daily_bar_days,
        )
        return ((sid, frame) for sid in cls.asset_finder.equities_sids)

    @classmethod
    def make_futures_info(cls):
        return pd.DataFrame.from_dict(
            {
                1000: {
                    'symbol': 'CLF06',
                    'root_symbol': 'CL',
                    'start_date': cls.START_DATE,
                    'end_date': cls.END_DATE,
                    'auto_close_date': cls.END_DATE + cls.trading_calendar.day,
                    'exchange': 'CME',
                    'multiplier': 100,
                },
            },
            orient='index',
        )

    @classmethod
    def make_future_minute_bar_data(cls):
        trading_calendar = cls.trading_calendars[Future]

        sids = cls.asset_finder.futures_sids
        minutes = trading_calendar.minutes_for_sessions_in_range(
            cls.future_minute_bar_days[0],
            cls.future_minute_bar_days[-1],
        )
        # Constant price of 2.0 for every future, every minute.
        frame = pd.DataFrame(
            {
                'open': 2.0,
                'high': 2.0,
                'low': 2.0,
                'close': 2.0,
                'volume': 100,
            },
            index=minutes,
        )
        return ((sid, frame) for sid in sids)

    def test_empty_portfolio(self):
        """Positions open and close on the expected sessions."""
        algo = EmptyPositionsAlgorithm(self.asset_finder.equities_sids,
                                       sim_params=self.sim_params,
                                       env=self.env)
        daily_stats = algo.run(self.data_portal)

        expected_position_count = [
            0,  # Before entering the first position
            2,  # After entering, exiting on this date
            0,  # After exiting
            0,
        ]

        for i, expected in enumerate(expected_position_count):
            # FIX: was ``daily_stats.ix[i]`` -- the ``.ix`` indexer has been
            # removed from pandas. For an integer position on a
            # DatetimeIndex'd frame, ``.ix`` fell back to positional access,
            # which is exactly what ``.iloc`` does.
            self.assertEqual(daily_stats.iloc[i]['num_positions'],
                             expected)

    def test_noop_orders(self):
        """Orders that can never fill leave the portfolio empty."""
        algo = AmbitiousStopLimitAlgorithm(sid=1,
                                           sim_params=self.sim_params,
                                           env=self.env)
        daily_stats = algo.run(self.data_portal)

        # Verify that positions are empty for all dates.
        empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
        self.assertTrue(empty_positions.all())

    def test_position_weights(self):
        """Position weights are position value / total portfolio value."""
        sids = (1, 133, 1000)
        equity_1, equity_133, future_1000 = \
            self.asset_finder.retrieve_all(sids)

        algo = TestPositionWeightsAlgorithm(
            # FIX: materialize the pairs -- on Python 3 a bare ``zip`` object
            # is a one-shot iterator and would be exhausted after a single
            # traversal inside the algorithm.
            sids_and_amounts=list(zip(sids, [2, -1, 1])),
            sim_params=self.sim_params,
            env=self.env,
        )
        daily_stats = algo.run(self.data_portal)

        expected_position_weights = [
            # No positions held on the first day.
            pd.Series({}),
            # Each equity's position value is its price times the number of
            # shares held. In this example, we hold a long position in 2 shares
            # of equity_1 so its weight is (95.0 * 2) = 190.0 divided by the
            # total portfolio value. The total portfolio value is the sum of
            # cash ($905.00) plus the value of all equity positions.
            #
            # For a futures contract, its weight is the unit price times number
            # of shares held times the multiplier. For future_1000, this is
            # (2.0 * 1 * 100) = 200.0 divided by total portfolio value.
            pd.Series({
                equity_1: 190.0 / (190.0 - 95.0 + 905.0),
                equity_133: -95.0 / (190.0 - 95.0 + 905.0),
                future_1000: 200.0 / (190.0 - 95.0 + 905.0),
            }),
            pd.Series({
                equity_1: 200.0 / (200.0 - 100.0 + 905.0),
                equity_133: -100.0 / (200.0 - 100.0 + 905.0),
                future_1000: 200.0 / (200.0 - 100.0 + 905.0),
            }),
            pd.Series({
                equity_1: 210.0 / (210.0 - 105.0 + 905.0),
                equity_133: -105.0 / (210.0 - 105.0 + 905.0),
                future_1000: 200.0 / (210.0 - 105.0 + 905.0),
            }),
        ]

        for i, expected in enumerate(expected_position_weights):
            assert_equal(daily_stats.iloc[i]['position_weights'], expected)
class TestBeforeTradingStart(WithDataPortal,
                             WithSimParams,
                             ZiplineTestCase):
    """Tests for data access and portfolio/account state inside
    ``before_trading_start``.

    Minute data starts one session before the simulation (2016-01-05) so
    that the first ``before_trading_start`` call has a prior session to
    look back on. Sid 3 undergoes a 2:1 split effective 2016-01-07.
    """
    START_DATE = pd.Timestamp('2016-01-06', tz='utc')
    END_DATE = pd.Timestamp('2016-01-07', tz='utc')
    SIM_PARAMS_CAPITAL_BASE = 10000
    SIM_PARAMS_DATA_FREQUENCY = 'minute'
    EQUITY_DAILY_BAR_LOOKBACK_DAYS = EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 1

    DATA_PORTAL_FIRST_TRADING_DAY = pd.Timestamp("2016-01-05", tz='UTC')
    EQUITY_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
    FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')

    data_start = ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp(
        '2016-01-05',
        tz='utc',
    )

    SPLIT_ASSET_SID = 3
    ASSET_FINDER_EQUITY_SIDS = 1, 2, SPLIT_ASSET_SID

    @classmethod
    def make_equity_minute_bar_data(cls):
        asset_minutes = \
            cls.trading_calendar.minutes_in_range(
                cls.data_start,
                cls.END_DATE,
            )
        minutes_count = len(asset_minutes)
        minutes_arr = np.arange(minutes_count) + 1

        # Data for the split asset: close counts 1, 2, 3, ... per minute.
        split_data = pd.DataFrame(
            {
                'open': minutes_arr + 1,
                'high': minutes_arr + 2,
                'low': minutes_arr - 1,
                'close': minutes_arr,
                'volume': 100 * minutes_arr,
            },
            index=asset_minutes,
        )
        # Halve prices from minute 780 on, i.e. from the start of the third
        # session (2 sessions x 390 minutes), mirroring the 2:1 split that
        # make_splits_data declares effective 2016-01-07.
        split_data.iloc[780:] = split_data.iloc[780:] / 2.0

        # Sid 8554 is given the same data as sid 1 (presumably the
        # benchmark asset -- the tests below only query sids 1-3).
        for sid in (1, 8554):
            yield sid, create_minute_df_for_asset(
                cls.trading_calendar,
                cls.data_start,
                cls.sim_params.end_session,
            )

        # Sid 2 only trades every 50 minutes.
        yield 2, create_minute_df_for_asset(
            cls.trading_calendar,
            cls.data_start,
            cls.sim_params.end_session,
            50,
        )

        yield cls.SPLIT_ASSET_SID, split_data

    @classmethod
    def make_splits_data(cls):
        # 2:1 split on the split asset, effective 2016-01-07.
        return pd.DataFrame.from_records([
            {
                'effective_date': str_to_seconds('2016-01-07'),
                'ratio': 0.5,
                'sid': cls.SPLIT_ASSET_SID,
            }
        ])

    @classmethod
    def make_equity_daily_bar_data(cls):
        for sid in cls.ASSET_FINDER_EQUITY_SIDS:
            yield sid, create_daily_df_for_asset(
                cls.trading_calendar,
                cls.data_start,
                cls.sim_params.end_session,
            )

    def test_data_in_bts_minute(self):
        """``data.current``/``data.history`` in BTS see the previous
        session's minutes, with price forward-filled but not other fields."""
        algo_code = dedent("""
        from zipline.api import record, sid
        def initialize(context):
            context.history_values = []

        def before_trading_start(context, data):
            record(the_price1=data.current(sid(1), "price"))
            record(the_high1=data.current(sid(1), "high"))
            record(the_price2=data.current(sid(2), "price"))
            record(the_high2=data.current(sid(2), "high"))

            context.history_values.append(data.history(
                [sid(1), sid(2)],
                ["price", "high"],
                60,
                "1m"
            ))

        def handle_data(context, data):
            pass
        """)

        algo = TradingAlgorithm(
            script=algo_code,
            sim_params=self.sim_params,
            env=self.env
        )

        results = algo.run(self.data_portal)

        # fetching data at midnight gets us the previous market minute's data
        self.assertEqual(390, results.iloc[0].the_price1)
        self.assertEqual(392, results.iloc[0].the_high1)

        # make sure that price is ffilled, but not other fields
        self.assertEqual(350, results.iloc[0].the_price2)
        self.assertTrue(np.isnan(results.iloc[0].the_high2))

        # 10-minute history

        # asset1 day1 price should be 331-390
        np.testing.assert_array_equal(
            range(331, 391), algo.history_values[0]["price"][1]
        )

        # asset1 day1 high should be 333-392
        np.testing.assert_array_equal(
            range(333, 393), algo.history_values[0]["high"][1]
        )

        # asset2 day1 price should be 19 300s, then 40 350s
        np.testing.assert_array_equal(
            [300] * 19, algo.history_values[0]["price"][2][0:19]
        )

        np.testing.assert_array_equal(
            [350] * 40, algo.history_values[0]["price"][2][20:]
        )

        # asset2 day1 high should be all NaNs except for the 19th item
        # = 2016-01-05 20:20:00+00:00
        np.testing.assert_array_equal(
            np.full(19, np.nan), algo.history_values[0]["high"][2][0:19]
        )

        self.assertEqual(352, algo.history_values[0]["high"][2][19])

        np.testing.assert_array_equal(
            np.full(40, np.nan), algo.history_values[0]["high"][2][20:]
        )

    def test_data_in_bts_daily(self):
        """Same as the minute test, but with a 1-bar history request."""
        algo_code = dedent("""
        from zipline.api import record, sid
        def initialize(context):
            context.history_values = []

        def before_trading_start(context, data):
            record(the_price1=data.current(sid(1), "price"))
            record(the_high1=data.current(sid(1), "high"))
            record(the_price2=data.current(sid(2), "price"))
            record(the_high2=data.current(sid(2), "high"))

            context.history_values.append(data.history(
                [sid(1), sid(2)],
                ["price", "high"],
                1,
                "1m"
            ))

        def handle_data(context, data):
            pass
        """)

        algo = TradingAlgorithm(
            script=algo_code,
            sim_params=self.sim_params,
            env=self.env
        )

        results = algo.run(self.data_portal)

        self.assertEqual(392, results.the_high1[0])
        self.assertEqual(390, results.the_price1[0])

        # nan because asset2 only trades every 50 minutes
        self.assertTrue(np.isnan(results.the_high2[0]))

        # BUG FIX: this was ``self.assertTrue(350, ...)``, which always
        # passes -- assertTrue's second argument is just the failure
        # message. The minute-mode test above asserts the same quantity
        # equals 350, so assert real equality here too.
        self.assertEqual(350, results.the_price2[0])

        self.assertEqual(392, algo.history_values[0]["high"][1][0])
        self.assertEqual(390, algo.history_values[0]["price"][1][0])

        self.assertTrue(np.isnan(algo.history_values[0]["high"][2][0]))
        self.assertEqual(350, algo.history_values[0]["price"][2][0])

    def test_portfolio_bts(self):
        """The portfolio seen in BTS equals the last one from handle_data."""
        algo_code = dedent("""
        from zipline.api import order, sid, record

        def initialize(context):
            context.ordered = False
            context.hd_portfolio = context.portfolio

        def before_trading_start(context, data):
            bts_portfolio = context.portfolio

            # Assert that the portfolio in BTS is the same as the last
            # portfolio in handle_data
            assert (context.hd_portfolio == bts_portfolio)
            record(pos_value=bts_portfolio.positions_value)

        def handle_data(context, data):
            if not context.ordered:
                order(sid(1), 1)
                context.ordered = True
            context.hd_portfolio = context.portfolio
        """)

        algo = TradingAlgorithm(
            script=algo_code,
            data_frequency="minute",
            sim_params=self.sim_params,
            env=self.env
        )

        results = algo.run(self.data_portal)

        # Asset starts with price 1 on 1/05 and increases by 1 every minute.
        # Simulation starts on 1/06, where the price in bts is 390, and
        # positions_value is 0. On 1/07, price is 780, and after buying one
        # share on the first bar of 1/06, positions_value is 780
        self.assertEqual(results.pos_value.iloc[0], 0)
        self.assertEqual(results.pos_value.iloc[1], 780)

    def test_account_bts(self):
        """The account seen in BTS equals the last one from handle_data."""
        algo_code = dedent("""
        from zipline.api import order, sid, record

        def initialize(context):
            context.ordered = False
            context.hd_account = context.account

        def before_trading_start(context, data):
            bts_account = context.account

            # Assert that the account in BTS is the same as the last account
            # in handle_data
            assert (context.hd_account == bts_account)
            record(port_value=context.account.equity_with_loan)

        def handle_data(context, data):
            if not context.ordered:
                order(sid(1), 1)
                context.ordered = True
            context.hd_account = context.account
        """)

        algo = TradingAlgorithm(
            script=algo_code,
            data_frequency="minute",
            sim_params=self.sim_params,
            env=self.env
        )

        results = algo.run(self.data_portal)

        # Starting portfolio value is 10000. Order for the asset fills on the
        # second bar of 1/06, where the price is 391, and costs the default
        # commission of 0. On 1/07, the price is 780, and the increase in
        # portfolio value is 780-392-0
        self.assertEqual(results.port_value.iloc[0], 10000)
        self.assertAlmostEqual(results.port_value.iloc[1],
                               10000 + 780 - 392 - 0,
                               places=2)

    def test_portfolio_bts_with_overnight_split(self):
        """A split applied overnight shows up in BTS positions (amount
        doubled, last_sale_price halved) but not in the other portfolio
        fields."""
        algo_code = dedent("""
        from zipline.api import order, sid, record

        def initialize(context):
            context.ordered = False
            context.hd_portfolio = context.portfolio

        def before_trading_start(context, data):
            bts_portfolio = context.portfolio
            # Assert that the portfolio in BTS is the same as the last
            # portfolio in handle_data, except for the positions
            for k in bts_portfolio.__dict__:
                if k != 'positions':
                    assert (context.hd_portfolio.__dict__[k]
                            == bts_portfolio.__dict__[k])
            record(pos_value=bts_portfolio.positions_value)
            record(pos_amount=bts_portfolio.positions[sid(3)].amount)
            record(
                last_sale_price=bts_portfolio.positions[sid(3)].last_sale_price
            )

        def handle_data(context, data):
            if not context.ordered:
                order(sid(3), 1)
                context.ordered = True
            context.hd_portfolio = context.portfolio
        """)

        algo = TradingAlgorithm(
            script=algo_code,
            data_frequency="minute",
            sim_params=self.sim_params,
            env=self.env
        )

        results = algo.run(self.data_portal)

        # On 1/07, positions value should by 780, same as without split
        self.assertEqual(results.pos_value.iloc[0], 0)
        self.assertEqual(results.pos_value.iloc[1], 780)

        # On 1/07, after applying the split, 1 share becomes 2
        self.assertEqual(results.pos_amount.iloc[0], 0)
        self.assertEqual(results.pos_amount.iloc[1], 2)

        # On 1/07, after applying the split, last sale price is halved
        self.assertEqual(results.last_sale_price.iloc[0], 0)
        self.assertEqual(results.last_sale_price.iloc[1], 390)

    def test_account_bts_with_overnight_split(self):
        """Account values are unaffected by an overnight split."""
        algo_code = dedent("""
        from zipline.api import order, sid, record

        def initialize(context):
            context.ordered = False
            context.hd_account = context.account

        def before_trading_start(context, data):
            bts_account = context.account
            # Assert that the account in BTS is the same as the last account
            # in handle_data
            assert (context.hd_account == bts_account)
            record(port_value=bts_account.equity_with_loan)

        def handle_data(context, data):
            if not context.ordered:
                order(sid(1), 1)
                context.ordered = True
            context.hd_account = context.account
        """)

        algo = TradingAlgorithm(
            script=algo_code,
            data_frequency="minute",
            sim_params=self.sim_params,
            env=self.env
        )

        results = algo.run(self.data_portal)

        # On 1/07, portfolio value is the same as without split
        self.assertEqual(results.port_value.iloc[0], 10000)
        self.assertAlmostEqual(results.port_value.iloc[1],
                               10000 + 780 - 392 - 0, places=2)
class TestAlgoScript(WithLogger,
                     WithDataPortal,
                     WithSimParams,
                     ZiplineTestCase):
    """End-to-end tests that run complete algorithm scripts (or
    initialize/handle_data callables) through ``TradingAlgorithm.run`` and
    check orders, recorded variables, and error handling.
    """
    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-12-31', tz='utc')
    DATA_PORTAL_USE_MINUTE_DATA = False
    EQUITY_DAILY_BAR_LOOKBACK_DAYS = 5  # max history window length

    STRING_TYPE_NAMES = [s.__name__ for s in string_types]
    STRING_TYPE_NAMES_STRING = ', '.join(STRING_TYPE_NAMES)
    ASSET_TYPE_NAME = Asset.__name__
    CONTINUOUS_FUTURE_NAME = ContinuousFuture.__name__
    ASSET_OR_STRING_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME] +
                                           STRING_TYPE_NAMES)
    ASSET_OR_STRING_OR_CF_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME,
                                                  CONTINUOUS_FUTURE_NAME] +
                                                 STRING_TYPE_NAMES)

    # Cases for test_arg_types below. Each entry is
    # (name, (algo_script, expected type names, arg also accepts iterables)).
    ARG_TYPE_TEST_CASES = (
        ('history__assets', (bad_type_history_assets,
                             ASSET_OR_STRING_OR_CF_TYPE_NAMES,
                             True)),
        ('history__fields', (bad_type_history_fields,
                             STRING_TYPE_NAMES_STRING,
                             True)),
        ('history__bar_count', (bad_type_history_bar_count, 'int', False)),
        ('history__frequency', (bad_type_history_frequency,
                                STRING_TYPE_NAMES_STRING,
                                False)),
        ('current__assets', (bad_type_current_assets,
                             ASSET_OR_STRING_OR_CF_TYPE_NAMES,
                             True)),
        ('current__fields', (bad_type_current_fields,
                             STRING_TYPE_NAMES_STRING,
                             True)),
        ('is_stale__assets', (bad_type_is_stale_assets, 'Asset', True)),
        ('can_trade__assets', (bad_type_can_trade_assets, 'Asset', True)),
        ('history_kwarg__assets',
         (bad_type_history_assets_kwarg,
          ASSET_OR_STRING_OR_CF_TYPE_NAMES,
          True)),
        ('history_kwarg_bad_list__assets',
         (bad_type_history_assets_kwarg_list,
          ASSET_OR_STRING_OR_CF_TYPE_NAMES,
          True)),
        ('history_kwarg__fields',
         (bad_type_history_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
        ('history_kwarg__bar_count',
         (bad_type_history_bar_count_kwarg, 'int', False)),
        ('history_kwarg__frequency',
         (bad_type_history_frequency_kwarg, STRING_TYPE_NAMES_STRING, False)),
        ('current_kwarg__assets',
         (bad_type_current_assets_kwarg,
          ASSET_OR_STRING_OR_CF_TYPE_NAMES,
          True)),
        ('current_kwarg__fields',
         (bad_type_current_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
    )

    sids = 0, 1, 3, 133

    @classmethod
    def make_equity_info(cls):
        register_calendar("TEST", get_calendar("NYSE"), force=True)

        data = make_simple_equity_info(
            cls.sids,
            cls.START_DATE,
            cls.END_DATE,
        )
        # Give sid 3 a symbol so symbol-lookup tests can find it.
        data.loc[3, 'symbol'] = 'TEST'
        return data

    @classmethod
    def make_equity_daily_bar_data(cls):
        # Only sids 0 and 3 get price data (constant price 10.0,
        # volume 100); sids 1 and 133 have no bars.
        days = len(cls.equity_daily_bar_days)
        return trades_by_sid_to_dfs(
            {
                0: factory.create_trade_history(
                    0,
                    [10.0] * days,
                    [100] * days,
                    timedelta(days=1),
                    cls.sim_params,
                    cls.trading_calendar),
                3: factory.create_trade_history(
                    3,
                    [10.0] * days,
                    [100] * days,
                    timedelta(days=1),
                    cls.sim_params,
                    cls.trading_calendar)
            },
            index=cls.equity_daily_bar_days,
        )

    def test_noop(self):
        """No-op initialize/handle_data callables run to completion."""
        algo = TradingAlgorithm(initialize=initialize_noop,
                                handle_data=handle_data_noop,
                                env=self.env)
        algo.run(self.data_portal)

    def test_noop_string(self):
        """A no-op algorithm supplied as a script string runs."""
        algo = TradingAlgorithm(script=noop_algo, env=self.env)
        algo.run(self.data_portal)

    def test_no_handle_data(self):
        """An algorithm without handle_data runs."""
        algo = TradingAlgorithm(script=no_handle_data, env=self.env)
        algo.run(self.data_portal)

    def test_api_calls(self):
        """Algorithm exercising API functions via callables runs."""
        algo = TradingAlgorithm(initialize=initialize_api,
                                handle_data=handle_data_api,
                                env=self.env)
        algo.run(self.data_portal)

    def test_api_calls_string(self):
        """Algorithm exercising API functions via a script runs."""
        algo = TradingAlgorithm(script=api_algo, env=self.env)
        algo.run(self.data_portal)

    def test_api_get_environment(self):
        """get_environment reflects the ``platform`` constructor arg."""
        platform = 'zipline'
        algo = TradingAlgorithm(script=api_get_environment_algo,
                                platform=platform,
                                env=self.env)
        algo.run(self.data_portal)
        self.assertEqual(algo.environment, platform)

    def test_api_symbol(self):
        """symbol() lookup inside an algorithm script works."""
        algo = TradingAlgorithm(script=api_symbol_algo,
                                env=self.env,
                                sim_params=self.sim_params)
        algo.run(self.data_portal)

    def test_fixed_slippage(self):
        # verify order -> transaction -> portfolio position.
        # --------------
        test_algo = TradingAlgorithm(
            script="""
from zipline.api import (slippage,
                         commission,
                         set_slippage,
                         set_commission,
                         order,
                         record,
                         sid)

def initialize(context):
    model = slippage.FixedSlippage(spread=0.10)
    set_slippage(model)
    set_commission(commission.PerTrade(100.00))
    context.count = 1
    context.incr = 0

def handle_data(context, data):
    if context.incr < context.count:
        order(sid(0), -1000)
    record(price=data.current(sid(0), "price"))

    context.incr += 1""",
            sim_params=self.sim_params,
            env=self.env,
        )
        results = test_algo.run(self.data_portal)

        # flatten the list of txns
        all_txns = [val for sublist in results["transactions"].tolist()
                    for val in sublist]

        self.assertEqual(len(all_txns), 1)
        txn = all_txns[0]

        # Fill price is the recorded price minus half the 0.10 spread
        # (sell side of a FixedSlippage model).
        expected_spread = 0.05
        expected_price = test_algo.recorded_vars["price"] - expected_spread
        self.assertEqual(expected_price, txn['price'])

        # make sure that the $100 commission was applied to our cash
        # the txn was for -1000 shares at 9.95, means -9.95k. our capital_used
        # for that day was therefore 9.95k, but after the $100 commission,
        # it should be 9.85k.
        self.assertEqual(9850, results.capital_used[1])
        self.assertEqual(100, results["orders"][1][0]["commission"])

    @parameterized.expand(
        [
            ('no_minimum_commission', 0,),
            ('default_minimum_commission', 0,),
            ('alternate_minimum_commission', 2,),
        ]
    )
    def test_volshare_slippage(self, name, minimum_commission):
        """Volume-share slippage splits a large order across many fills,
        each charged per-share commission (with optional minimum)."""
        tempdir = TempDirectory()
        try:
            if name == "default_minimum_commission":
                commission_line = "set_commission(commission.PerShare(0.02))"
            else:
                commission_line = \
                    "set_commission(commission.PerShare(0.02, " \
                    "min_trade_cost={0}))".format(minimum_commission)

            # verify order -> transaction -> portfolio position.
            # --------------
            test_algo = TradingAlgorithm(
                script="""
from zipline.api import *

def initialize(context):
    model = slippage.VolumeShareSlippage(
        volume_limit=.3,
        price_impact=0.05
    )
    set_slippage(model)
    {0}

    context.count = 2
    context.incr = 0

def handle_data(context, data):
    if context.incr < context.count:
        # order small lots to be sure the
        # order will fill in a single transaction
        order(sid(0), 5000)
    record(price=data.current(sid(0), "price"))
    record(volume=data.current(sid(0), "volume"))
    record(incr=context.incr)
    context.incr += 1
    """.format(commission_line),
                sim_params=self.sim_params,
                env=self.env,
            )
            trades = factory.create_daily_trade_source(
                [0], self.sim_params, self.env, self.trading_calendar)
            data_portal = create_data_portal_from_trade_history(
                self.env.asset_finder, self.trading_calendar, tempdir,
                self.sim_params, {0: trades})
            results = test_algo.run(data_portal)

            all_txns = [
                val for sublist in results["transactions"].tolist()
                for val in sublist]
            self.assertEqual(len(all_txns), 67)
            # all_orders are all the incremental versions of the
            # orders as each new fill comes in.
            all_orders = list(toolz.concat(results['orders']))

            if minimum_commission == 0:
                # for each incremental version of each order, the commission
                # should be its filled amount * 0.02
                for order_ in all_orders:
                    self.assertAlmostEqual(
                        order_["filled"] * 0.02,
                        order_["commission"]
                    )
            else:
                # the commission should be at least the min_trade_cost
                for order_ in all_orders:
                    if order_["filled"] > 0:
                        self.assertAlmostEqual(
                            max(order_["filled"] * 0.02, minimum_commission),
                            order_["commission"]
                        )
                    else:
                        self.assertEqual(0, order_["commission"])
        finally:
            tempdir.cleanup()

    def test_incorrectly_set_futures_slippage_model(self):
        """Passing a futures slippage model to set_slippage's equity slot
        raises IncompatibleSlippageModel."""
        code = dedent(
            """
            from zipline.api import set_slippage, slippage

            class MySlippage(slippage.FutureSlippageModel):
                def process_order(self, data, order):
                    return data.current(order.asset, 'price'), order.amount

            def initialize(context):
                set_slippage(MySlippage())
            """
        )
        test_algo = TradingAlgorithm(
            script=code, sim_params=self.sim_params, env=self.env,
        )
        with self.assertRaises(IncompatibleSlippageModel):
            # Passing a futures slippage model as the first argument, which is
            # for setting equity models, should fail.
            test_algo.run(self.data_portal)

    def test_algo_record_vars(self):
        """record() results appear in daily stats, one row per session."""
        test_algo = TradingAlgorithm(
            script=record_variables,
            sim_params=self.sim_params,
            env=self.env,
        )
        results = test_algo.run(self.data_portal)

        for i in range(1, 252):
            self.assertEqual(results.iloc[i-1]["incr"], i)

    def test_algo_record_allow_mock(self):
        """
        Test that values from "MagicMock"ed methods can be passed to record.

        Relevant for our basic/validation and methods like history, which
        will end up returning a MagicMock instead of a DataFrame.
        """
        test_algo = TradingAlgorithm(
            script=record_variables,
            sim_params=self.sim_params,
            env=self.env,
        )
        set_algo_instance(test_algo)

        test_algo.record(foo=MagicMock())

    def test_algo_record_nan(self):
        """Recording float('nan') round-trips as NaN in results."""
        test_algo = TradingAlgorithm(
            script=record_float_magic % 'nan',
            sim_params=self.sim_params,
            env=self.env,
        )
        results = test_algo.run(self.data_portal)

        for i in range(1, 252):
            self.assertTrue(np.isnan(results.iloc[i-1]["data"]))

    def test_order_methods(self):
        """
        Only test that order methods can be called without error.
        Correct filling of orders is tested in zipline.
        """
        test_algo = TradingAlgorithm(
            script=call_all_order_methods,
            sim_params=self.sim_params,
            env=self.env,
        )
        test_algo.run(self.data_portal)

    def test_batch_market_order_matches_multiple_manual_orders(self):
        """batch_market_order produces the same stats as the equivalent
        sequence of individual order() calls."""
        share_counts = pd.Series([50, 100])

        multi_blotter = RecordBatchBlotter(self.SIM_PARAMS_DATA_FREQUENCY)
        multi_test_algo = TradingAlgorithm(
            script=dedent("""\
                from collections import OrderedDict
                from six import iteritems

                from zipline.api import sid, order


                def initialize(context):
                    context.assets = [sid(0), sid(3)]
                    context.placed = False


                def handle_data(context, data):
                    if not context.placed:
                        for asset, shares in iteritems(OrderedDict(zip(
                            context.assets, {share_counts}
                        ))):
                            order(asset, shares)

                        context.placed = True

            """).format(share_counts=list(share_counts)),
            blotter=multi_blotter,
            env=self.env,
        )
        multi_stats = multi_test_algo.run(self.data_portal)
        self.assertFalse(multi_blotter.order_batch_called)

        batch_blotter = RecordBatchBlotter(self.SIM_PARAMS_DATA_FREQUENCY)
        batch_test_algo = TradingAlgorithm(
            script=dedent("""\
                import pandas as pd

                from zipline.api import sid, batch_market_order


                def initialize(context):
                    context.assets = [sid(0), sid(3)]
                    context.placed = False


                def handle_data(context, data):
                    if not context.placed:
                        orders = batch_market_order(pd.Series(
                            index=context.assets, data={share_counts}
                        ))
                        assert len(orders) == 2, \
                            "len(orders) was %s but expected 2" % len(orders)
                        for o in orders:
                            assert o is not None, "An order is None"

                        context.placed = True

            """).format(share_counts=list(share_counts)),
            blotter=batch_blotter,
            env=self.env,
        )
        batch_stats = batch_test_algo.run(self.data_portal)
        self.assertTrue(batch_blotter.order_batch_called)

        # Strip the ids (which differ between runs) before comparing.
        for stats in (multi_stats, batch_stats):
            stats.orders = stats.orders.apply(
                lambda orders: [toolz.dissoc(o, 'id') for o in orders]
            )
            stats.transactions = stats.transactions.apply(
                lambda txns: [toolz.dissoc(txn, 'order_id') for txn in txns]
            )
        assert_equal(multi_stats, batch_stats)

    def test_batch_market_order_filters_null_orders(self):
        """Zero-share entries are dropped from batch_market_order output."""
        share_counts = [50, 0]

        batch_blotter = RecordBatchBlotter(self.SIM_PARAMS_DATA_FREQUENCY)
        batch_test_algo = TradingAlgorithm(
            script=dedent("""\
                import pandas as pd

                from zipline.api import sid, batch_market_order


                def initialize(context):
                    context.assets = [sid(0), sid(3)]
                    context.placed = False


                def handle_data(context, data):
                    if not context.placed:
                        orders = batch_market_order(pd.Series(
                            index=context.assets, data={share_counts}
                        ))
                        assert len(orders) == 1, \
                            "len(orders) was %s but expected 1" % len(orders)
                        for o in orders:
                            assert o is not None, "An order is None"

                        context.placed = True

            """).format(share_counts=share_counts),
            blotter=batch_blotter,
            env=self.env,
        )
        batch_test_algo.run(self.data_portal)
        self.assertTrue(batch_blotter.order_batch_called)

    def test_order_dead_asset(self):
        # after asset 0 is dead
        params = SimulationParameters(
            start_session=pd.Timestamp("2007-01-03", tz='UTC'),
            end_session=pd.Timestamp("2007-01-05", tz='UTC'),
            trading_calendar=self.trading_calendar,
        )

        # order method shouldn't blow up
        test_algo = TradingAlgorithm(
            script="""
from zipline.api import order, sid

def initialize(context):
    pass

def handle_data(context, data):
    order(sid(0), 10)
""",
            sim_params=params,
            env=self.env
        )

        test_algo.run(self.data_portal)

        # order_value and order_percent should blow up
        for order_str in ["order_value", "order_percent"]:
            test_algo = TradingAlgorithm(
                script="""
from zipline.api import order_percent, order_value, sid

def initialize(context):
    pass

def handle_data(context, data):
    {0}(sid(0), 10)
""".format(order_str),
                sim_params=params,
                env=self.env
            )

            with self.assertRaises(CannotOrderDelistedAsset):
                test_algo.run(self.data_portal)

    def test_order_in_init(self):
        """
        Test that calling order in initialize
        will raise an error.
        """
        with self.assertRaises(OrderDuringInitialize):
            test_algo = TradingAlgorithm(
                script=call_order_in_init,
                sim_params=self.sim_params,
                env=self.env,
            )
            test_algo.run(self.data_portal)

    def test_portfolio_in_init(self):
        """
        Test that accessing portfolio in init doesn't break.
        """
        test_algo = TradingAlgorithm(
            script=access_portfolio_in_init,
            sim_params=self.sim_params,
            env=self.env,
        )
        test_algo.run(self.data_portal)

    def test_account_in_init(self):
        """
        Test that accessing account in init doesn't break.
        """
        test_algo = TradingAlgorithm(
            script=access_account_in_init,
            sim_params=self.sim_params,
            env=self.env,
        )
        test_algo.run(self.data_portal)

    def test_without_kwargs(self):
        """
        Test that api methods on the data object can be called with positional
        arguments.
        """
        params = SimulationParameters(
            start_session=pd.Timestamp("2006-01-10", tz='UTC'),
            end_session=pd.Timestamp("2006-01-11", tz='UTC'),
            trading_calendar=self.trading_calendar,
        )
        test_algo = TradingAlgorithm(
            script=call_without_kwargs,
            sim_params=params,
            env=self.env,
        )
        test_algo.run(self.data_portal)

    def test_good_kwargs(self):
        """
        Test that api methods on the data object can be called with keyword
        arguments.
        """
        params = SimulationParameters(
            start_session=pd.Timestamp("2006-01-10", tz='UTC'),
            end_session=pd.Timestamp("2006-01-11", tz='UTC'),
            trading_calendar=self.trading_calendar,
        )
        test_algo = TradingAlgorithm(
            script=call_with_kwargs,
            sim_params=params,
            env=self.env,
        )
        test_algo.run(self.data_portal)

    @parameterized.expand([('history', call_with_bad_kwargs_history),
                           ('current', call_with_bad_kwargs_current)])
    def test_bad_kwargs(self, name, algo_text):
        """
        Test that api methods on the data object called with bad kwargs return
        a meaningful TypeError that we create, rather than an unhelpful cython
        error
        """
        with self.assertRaises(TypeError) as cm:
            test_algo = TradingAlgorithm(
                script=algo_text,
                sim_params=self.sim_params,
                env=self.env,
            )
            test_algo.run(self.data_portal)

        self.assertEqual("%s() got an unexpected keyword argument 'blahblah'"
                         % name, cm.exception.args[0])

    @parameterized.expand(ARG_TYPE_TEST_CASES)
    def test_arg_types(self, name, inputs):
        """Bad argument types to data methods raise a descriptive TypeError."""
        keyword = name.split('__')[1]

        with self.assertRaises(TypeError) as cm:
            algo = TradingAlgorithm(
                script=inputs[0],
                sim_params=self.sim_params,
                env=self.env
            )
            algo.run(self.data_portal)

        expected = "Expected %s argument to be of type %s%s" % (
            keyword,
            'or iterable of type ' if inputs[2] else '',
            inputs[1]
        )

        self.assertEqual(expected, cm.exception.args[0])

    def test_empty_asset_list_to_history(self):
        """history([]) is legal and does not raise."""
        params = SimulationParameters(
            start_session=pd.Timestamp("2006-01-10", tz='UTC'),
            end_session=pd.Timestamp("2006-01-11", tz='UTC'),
            trading_calendar=self.trading_calendar,
        )

        algo = TradingAlgorithm(
            script=dedent("""
                def initialize(context):
                    pass

                def handle_data(context, data):
                    data.history([], "price", 5, '1d')
                """),
            sim_params=params,
            env=self.env
        )

        algo.run(self.data_portal)

    @parameterized.expand(
        [('bad_kwargs', call_with_bad_kwargs_get_open_orders),
         ('good_kwargs', call_with_good_kwargs_get_open_orders),
         ('no_kwargs', call_with_no_kwargs_get_open_orders)]
    )
    def test_get_open_orders_kwargs(self, name, script):
        """get_open_orders accepts ``asset`` but rejects the legacy ``sid``
        keyword with a helpful message."""
        algo = TradingAlgorithm(
            script=script,
            sim_params=self.sim_params,
            env=self.env
        )

        if name == 'bad_kwargs':
            with self.assertRaises(TypeError) as cm:
                algo.run(self.data_portal)
                self.assertEqual('Keyword argument `sid` is no longer '
                                 'supported for get_open_orders. Use `asset` '
                                 'instead.', cm.exception.args[0])
        else:
            algo.run(self.data_portal)

    def test_empty_positions(self):
        """
        Test that when we try context.portfolio.positions[stock] on a stock
        for which we have no positions, we return a Position with values 0
        (but more importantly, we don't crash) and don't save this Position
        to the user-facing dictionary PositionTracker._positions_store
        """
        algo = TradingAlgorithm(
            script=empty_positions,
            sim_params=self.sim_params,
            env=self.env
        )

        results = algo.run(self.data_portal)
        num_positions = results.num_positions
        amounts = results.amounts
        self.assertTrue(all(num_positions == 0))
        self.assertTrue(all(amounts == 0))

    @parameterized.expand([
        ('noop_algo', noop_algo),
        ('with_benchmark_set', set_benchmark_algo)]
    )
    def test_zero_trading_days(self, name, algocode):
        """
        Test that when we run a simulation with no trading days (e.g. beginning
        and ending the same weekend), we don't crash on calculating the
        benchmark
        """
        sim_params = factory.create_simulation_parameters(
            start=pd.Timestamp('2006-01-14', tz='UTC'),
            end=pd.Timestamp('2006-01-15', tz='UTC')
        )

        algo = TradingAlgorithm(
            script=algocode,
            sim_params=sim_params,
            env=self.env
        )
        algo.run(self.data_portal)

    def test_schedule_function_time_rule_positionally_misplaced(self):
        """
        Test that when a user specifies a time rule for the date_rule argument,
        but no rule in the time_rule argument
        (e.g. schedule_function(func, <time_rule>)), we assume that means
        assign a time rule but no date rule
        """

        sim_params = factory.create_simulation_parameters(
            start=pd.Timestamp('2006-01-12', tz='UTC'),
            end=pd.Timestamp('2006-01-13', tz='UTC'),
            data_frequency='minute'
        )

        algocode = dedent("""
        from zipline.api import time_rules, schedule_function

        def do_at_open(context, data):
            context.done_at_open.append(context.get_datetime())

        def do_at_close(context, data):
            context.done_at_close.append(context.get_datetime())

        def initialize(context):
            context.done_at_open = []
            context.done_at_close = []
            schedule_function(do_at_open, time_rules.market_open())
            schedule_function(do_at_close, time_rules.market_close())

        def handle_data(algo, data):
            pass
        """)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("ignore", PerformanceWarning)

            algo = TradingAlgorithm(
                script=algocode,
                sim_params=sim_params,
                env=self.env
            )
            algo.run(self.data_portal)

            self.assertEqual(len(w), 2)

            for i, warning in enumerate(w):
                self.assertIsInstance(warning.message, UserWarning)
                self.assertEqual(
                    warning.message.args[0],
                    'Got a time rule for the second positional argument '
                    'date_rule. You should use keyword argument '
                    'time_rule= when calling schedule_function without '
                    'specifying a date_rule'
                )
                # The warnings come from line 13 and 14 in the algocode
                self.assertEqual(warning.lineno, 13 + i)

        self.assertEqual(
            algo.done_at_open,
            [pd.Timestamp('2006-01-12 14:31:00', tz='UTC'),
             pd.Timestamp('2006-01-13 14:31:00', tz='UTC')]
        )

        self.assertEqual(
            algo.done_at_close,
            [pd.Timestamp('2006-01-12 20:59:00', tz='UTC'),
             pd.Timestamp('2006-01-13 20:59:00', tz='UTC')]
        )
class TestCapitalChanges(WithLogger,
WithDataPortal,
WithSimParams,
ZiplineTestCase):
sids = 0, 1
@classmethod
def make_equity_info(cls):
    """Build equity metadata for ``cls.sids`` spanning 2006-01-03 through
    2006-01-09."""
    first_day = pd.Timestamp('2006-01-03', tz='UTC')
    last_day = pd.Timestamp('2006-01-09', tz='UTC')
    return make_simple_equity_info(cls.sids, first_day, last_day)
@classmethod
def make_equity_minute_bar_data(cls):
    """Minute bars for sid 1 only: price starts at 100.0 and rises by
    1.0 every minute; volume is a constant 10000 per bar."""
    session_minutes = cls.trading_calendar.minutes_in_range(
        pd.Timestamp('2006-01-03', tz='UTC'),
        pd.Timestamp('2006-01-09', tz='UTC')
    )
    n_bars = len(session_minutes)
    sid_one_history = factory.create_trade_history(
        1,
        np.arange(100.0, 100.0 + n_bars, 1),
        [10000] * n_bars,
        timedelta(minutes=1),
        cls.sim_params,
        cls.trading_calendar,
    )
    return trades_by_sid_to_dfs(
        {1: sid_one_history},
        index=pd.DatetimeIndex(session_minutes),
    )
@classmethod
def make_equity_daily_bar_data(cls):
    """Daily bars for sid 0 only: price starts at 10.0 and rises by 1.0
    every session; volume is a constant 10000 per bar."""
    sessions = cls.trading_calendar.sessions_in_range(
        pd.Timestamp('2006-01-03', tz='UTC'),
        pd.Timestamp('2006-01-09', tz='UTC')
    )
    n_sessions = len(sessions)
    sid_zero_history = factory.create_trade_history(
        0,
        np.arange(10.0, 10.0 + n_sessions, 1.0),
        [10000] * n_sessions,
        timedelta(days=1),
        cls.sim_params,
        cls.trading_calendar,
    )
    return trades_by_sid_to_dfs(
        {0: sid_zero_history},
        index=pd.DatetimeIndex(sessions),
    )
@parameterized.expand([
('target', 153000.0), ('delta', 50000.0)
])
def test_capital_changes_daily_mode(self, change_type, value):
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-03', tz='UTC'),
end=pd.Timestamp('2006-01-09', tz='UTC')
)
capital_changes = {
pd.Timestamp('2006-01-06', tz='UTC'):
{'type': change_type, 'value': value}
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(0), 1000)
"""
algo = TradingAlgorithm(
script=algocode,
sim_params=sim_params,
env=self.env,
data_portal=self.data_portal,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), 1)
self.assertEqual(
capital_change_packets[0],
{'date': pd.Timestamp('2006-01-06', tz='UTC'),
'type': 'cash',
'target': 153000.0 if change_type == 'target' else None,
'delta': 50000.0})
# 1/03: price = 10, place orders
# 1/04: orders execute at price = 11, place orders
# 1/05: orders execute at price = 12, place orders
# 1/06: +50000 capital change,
# orders execute at price = 13, place orders
# 1/09: orders execute at price = 14, place orders
expected_daily = {}
expected_capital_changes = np.array([
0.0, 0.0, 0.0, 50000.0, 0.0
])
# Day 1, no transaction. Day 2, we transact, but the price of our stock
# does not change. Day 3, we start getting returns
expected_daily['returns'] = np.array([
0.0,
0.0,
# 1000 shares * gain of 1
(100000.0 + 1000.0)/100000.0 - 1.0,
# 2000 shares * gain of 1, capital change of +5000
(151000.0 + 2000.0)/151000.0 - 1.0,
# 3000 shares * gain of 1
(153000.0 + 3000.0)/153000.0 - 1.0,
])
expected_daily['pnl'] = np.array([
0.0,
0.0,
1000.00, # 1000 shares * gain of 1
2000.00, # 2000 shares * gain of 1
3000.00, # 3000 shares * gain of 1
])
expected_daily['capital_used'] = np.array([
0.0,
-11000.0, # 1000 shares at price = 11
-12000.0, # 1000 shares at price = 12
-13000.0, # 1000 shares at price = 13
-14000.0, # 1000 shares at price = 14
])
expected_daily['ending_cash'] = \
np.array([100000.0] * 5) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
expected_daily['starting_value'] = [
0.0,
0.0,
11000.0, # 1000 shares at price = 11
24000.0, # 2000 shares at price = 12
39000.0, # 3000 shares at price = 13
]
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 5),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 5),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-06', tz='UTC'): 50000.0}
)
@parameterized.expand([
('interday_target', [('2006-01-04', 2388.0)]),
('interday_delta', [('2006-01-04', 1000.0)]),
('intraday_target', [('2006-01-04 17:00', 2186.0),
('2006-01-04 18:00', 2806.0)]),
('intraday_delta', [('2006-01-04 17:00', 500.0),
('2006-01-04 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_daily_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-03', tz='UTC'),
end=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
capital_base=1000.0
)
capital_changes = {pd.Timestamp(val[0], tz='UTC'): {
'type': change_type, 'value': val[1]} for val in values}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = TradingAlgorithm(
script=algocode,
sim_params=sim_params,
env=self.env,
data_portal=self.data_portal,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
expected_daily = {}
expected_capital_changes = np.array([
0.0, 1000.0, 0.0
])
if change_loc == 'intraday':
# Fills at 491, +500 capital change comes at 638 (17:00) and
# 698 (18:00), ends day at 879
day2_return = (1388.0 + 149.0 + 147.0)/1388.0 * \
(2184.0 + 60.0 + 60.0)/2184.0 * \
(2804.0 + 181.0 + 181.0)/2804.0 - 1.0
else:
# Fills at 491, ends day at 879, capital change +1000
day2_return = (2388.0 + 390.0 + 388.0)/2388.0 - 1
expected_daily['returns'] = np.array([
# Fills at 101, ends day at 489
(1000.0 + 388.0)/1000.0 - 1.0,
day2_return,
# Fills at 881, ends day at 1269
(3166.0 + 390.0 + 390.0 + 388.0)/3166.0 - 1.0,
])
expected_daily['pnl'] = np.array([
388.0,
390.0 + 388.0,
390.0 + 390.0 + 388.0,
])
expected_daily['capital_used'] = np.array([
-101.0, -491.0, -881.0
])
expected_daily['ending_cash'] = \
np.array([1000.0] * 3) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
if change_loc == 'intraday':
# Capital changes come after day start
expected_daily['starting_cash'] -= expected_capital_changes
expected_daily['starting_value'] = np.array([
0.0, 489.0, 879.0 * 2
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 3),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 3),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-04', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-04 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-04 18:00', tz='UTC'): 500.0}
)
@parameterized.expand([
('interday_target', [('2006-01-04', 2388.0)]),
('interday_delta', [('2006-01-04', 1000.0)]),
('intraday_target', [('2006-01-04 17:00', 2186.0),
('2006-01-04 18:00', 2806.0)]),
('intraday_delta', [('2006-01-04 17:00', 500.0),
('2006-01-04 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_minute_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-03', tz='UTC'),
end=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
emission_rate='minute',
capital_base=1000.0
)
capital_changes = {pd.Timestamp(val[0], tz='UTC'): {
'type': change_type, 'value': val[1]} for val in values}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = TradingAlgorithm(
script=algocode,
sim_params=sim_params,
env=self.env,
data_portal=self.data_portal,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
minute_perf = [r['minute_perf'] for r in results if 'minute_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
# Minute perfs are cumulative for the day
expected_minute = {}
capital_changes_after_start = np.array([0.0] * 1170)
if change_loc == 'intraday':
capital_changes_after_start[539:599] = 500.0
capital_changes_after_start[599:780] = 1000.0
expected_minute['pnl'] = np.array([0.0] * 1170)
expected_minute['pnl'][:2] = 0.0
expected_minute['pnl'][2:392] = 1.0
expected_minute['pnl'][392:782] = 2.0
expected_minute['pnl'][782:] = 3.0
for start, end in ((0, 390), (390, 780), (780, 1170)):
expected_minute['pnl'][start:end] = \
np.cumsum(expected_minute['pnl'][start:end])
expected_minute['capital_used'] = np.concatenate((
[0.0] * 1, [-101.0] * 389,
[0.0] * 1, [-491.0] * 389,
[0.0] * 1, [-881.0] * 389,
))
# +1000 capital changes comes before the day start if interday
day2adj = 0.0 if change_loc == 'intraday' else 1000.0
expected_minute['starting_cash'] = np.concatenate((
[1000.0] * 390,
# 101 spent on 1/03
[1000.0 - 101.0 + day2adj] * 390,
# 101 spent on 1/03, 491 on 1/04, +1000 capital change on 1/04
[1000.0 - 101.0 - 491.0 + 1000] * 390
))
expected_minute['ending_cash'] = \
expected_minute['starting_cash'] + \
expected_minute['capital_used'] + \
capital_changes_after_start
expected_minute['starting_value'] = np.concatenate((
[0.0] * 390,
[489.0] * 390,
[879.0 * 2] * 390
))
expected_minute['ending_value'] = \
expected_minute['starting_value'] + \
expected_minute['pnl'] - \
expected_minute['capital_used']
expected_minute['portfolio_value'] = \
expected_minute['ending_value'] + \
expected_minute['ending_cash']
expected_minute['returns'] = \
expected_minute['pnl'] / \
(expected_minute['starting_value'] +
expected_minute['starting_cash'])
# If the change is interday, we can just calculate the returns from
# the pnl, starting_value and starting_cash. If the change is intraday,
# the returns after the change have to be calculated from two
# subperiods
if change_loc == 'intraday':
# The last packet (at 1/04 16:59) before the first capital change
prev_subperiod_return = expected_minute['returns'][538]
# From 1/04 17:00 to 17:59
cur_subperiod_pnl = \
expected_minute['pnl'][539:599] - expected_minute['pnl'][538]
cur_subperiod_starting_value = \
np.array([expected_minute['ending_value'][538]] * 60)
cur_subperiod_starting_cash = \
np.array([expected_minute['ending_cash'][538] + 500] * 60)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_minute['returns'][539:599] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last packet (at 1/04 17:59) before the second capital change
prev_subperiod_return = expected_minute['returns'][598]
# From 1/04 18:00 to 21:00
cur_subperiod_pnl = \
expected_minute['pnl'][599:780] - expected_minute['pnl'][598]
cur_subperiod_starting_value = \
np.array([expected_minute['ending_value'][598]] * 181)
cur_subperiod_starting_cash = \
np.array([expected_minute['ending_cash'][598] + 500] * 181)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_minute['returns'][599:780] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last minute packet of each day
expected_daily = {
k: np.array([v[389], v[779], v[1169]])
for k, v in iteritems(expected_minute)
}
stats = [
'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value', 'returns'
]
expected_cumulative = deepcopy(expected_minute)
# "Add" daily return from 1/03 to minute returns on 1/04 and 1/05
# "Add" daily return from 1/04 to minute returns on 1/05
expected_cumulative['returns'][390:] = \
(expected_cumulative['returns'][390:] + 1) * \
(expected_daily['returns'][0] + 1) - 1
expected_cumulative['returns'][780:] = \
(expected_cumulative['returns'][780:] + 1) * \
(expected_daily['returns'][1] + 1) - 1
# Add daily pnl/capital_used from 1/03 to 1/04 and 1/05
# Add daily pnl/capital_used from 1/04 to 1/05
expected_cumulative['pnl'][390:] += expected_daily['pnl'][0]
expected_cumulative['pnl'][780:] += expected_daily['pnl'][1]
expected_cumulative['capital_used'][390:] += \
expected_daily['capital_used'][0]
expected_cumulative['capital_used'][780:] += \
expected_daily['capital_used'][1]
# starting_cash, starting_value are same as those of the first daily
# packet
expected_cumulative['starting_cash'] = \
np.repeat(expected_daily['starting_cash'][0:1], 1170)
expected_cumulative['starting_value'] = \
np.repeat(expected_daily['starting_value'][0:1], 1170)
# extra cumulative packet per day from the daily packet
for stat in stats:
for i in (390, 781, 1172):
expected_cumulative[stat] = np.insert(
expected_cumulative[stat],
i,
expected_cumulative[stat][i-1]
)
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in minute_perf]),
expected_minute[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-04', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-04 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-04 18:00', tz='UTC'): 500.0}
)
class TestGetDatetime(WithLogger,
                      WithSimParams,
                      WithDataPortal,
                      ZiplineTestCase):
    """Tests for the ``get_datetime`` algorithm API."""
    SIM_PARAMS_DATA_FREQUENCY = 'minute'
    START_DATE = to_utc('2014-01-02 9:31')
    END_DATE = to_utc('2014-01-03 9:31')
    ASSET_FINDER_EQUITY_SIDS = 0, 1
    @parameterized.expand(
        [
            ('default', None,),
            ('utc', 'UTC',),
            ('us_east', 'US/Eastern',),
        ]
    )
    def test_get_datetime(self, name, tz):
        """``get_datetime(tz)`` must return a time in the requested zone
        (UTC when no zone is given).

        The checks run inside the simulation: the script raises
        ValueError from ``handle_data`` on any zone or wall-time
        mismatch, which fails the test via ``algo.run``.
        """
        algo_source = dedent(
            """
            import pandas as pd
            from zipline.api import get_datetime
            def initialize(context):
                context.tz = {tz} or 'UTC'
                context.first_bar = True
            def handle_data(context, data):
                dt = get_datetime({tz})
                if dt.tz.zone != context.tz:
                    raise ValueError("Mismatched Zone")
                if context.first_bar:
                    if dt.tz_convert("US/Eastern").hour != 9:
                        raise ValueError("Mismatched Hour")
                    elif dt.tz_convert("US/Eastern").minute != 31:
                        raise ValueError("Mismatched Minute")
                context.first_bar = False
            """.format(tz=repr(tz))
        )
        algo = TradingAlgorithm(script=algo_source,
                                sim_params=self.sim_params,
                                env=self.env)
        algo.run(self.data_portal)
        # The script flips first_bar to False on the first handle_data
        # call, so a completed run must leave it False.
        self.assertFalse(algo.first_bar)
class TestTradingControls(WithSimParams, WithDataPortal, ZiplineTestCase):
    """Tests for per-order trading controls (max position size, max order
    size/count, long-only, asset restrictions, asset date bounds).

    Each test installs a control via a Set*Algorithm fixture, swaps in a
    ``handle_data`` closure that places orders, and asserts whether and
    when TradingControlViolation is raised.
    """
    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-01-06', tz='utc')
    # The primary asset under test, plus a second one (134) used to check
    # that controls scoped to a different sid do not fire.
    sid = 133
    sids = ASSET_FINDER_EQUITY_SIDS = 133, 134
    @classmethod
    def init_class_fixtures(cls):
        """Resolve the two test assets from the asset finder."""
        super(TestTradingControls, cls).init_class_fixtures()
        cls.asset = cls.asset_finder.retrieve_asset(cls.sid)
        cls.another_asset = cls.asset_finder.retrieve_asset(134)
    def _check_algo(self,
                    algo,
                    handle_data,
                    expected_order_count,
                    expected_exc):
        """Run *algo* with *handle_data* installed; expect *expected_exc*
        (or a clean run when it is None) and exactly
        *expected_order_count* successfully placed orders."""
        algo._handle_data = handle_data
        with self.assertRaises(expected_exc) if expected_exc else nullctx():
            algo.run(self.data_portal)
        self.assertEqual(algo.order_count, expected_order_count)
    def check_algo_succeeds(self, algo, handle_data, order_count=4):
        # Default for order_count assumes one order per handle_data call.
        self._check_algo(algo, handle_data, order_count, None)
    def check_algo_fails(self, algo, handle_data, order_count):
        """Expect a TradingControlViolation after *order_count* orders."""
        self._check_algo(algo,
                         handle_data,
                         order_count,
                         TradingControlViolation)
    def test_set_max_position_size(self):
        """set_max_position_size enforces share and notional caps, scoped
        to a specific asset or (when no asset is given) to all assets."""
        # Buy one share four times. Should be fine.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 1)
            algo.order_count += 1
        algo = SetMaxPositionSizeAlgorithm(asset=self.asset,
                                           max_shares=10,
                                           max_notional=500.0,
                                           sim_params=self.sim_params,
                                           env=self.env)
        self.check_algo_succeeds(algo, handle_data)
        # Buy three shares four times. Should bail on the fourth before it's
        # placed.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 3)
            algo.order_count += 1
        algo = SetMaxPositionSizeAlgorithm(asset=self.asset,
                                           max_shares=10,
                                           max_notional=500.0,
                                           sim_params=self.sim_params,
                                           env=self.env)
        self.check_algo_fails(algo, handle_data, 3)
        # Buy three shares four times. Should bail due to max_notional on the
        # third attempt.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 3)
            algo.order_count += 1
        algo = SetMaxPositionSizeAlgorithm(asset=self.asset,
                                           max_shares=10,
                                           max_notional=67.0,
                                           sim_params=self.sim_params,
                                           env=self.env)
        self.check_algo_fails(algo, handle_data, 2)
        # Set the trading control to a different sid, then BUY ALL THE THINGS!.
        # Should continue normally.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = SetMaxPositionSizeAlgorithm(asset=self.another_asset,
                                           max_shares=10,
                                           max_notional=67.0,
                                           sim_params=self.sim_params,
                                           env=self.env)
        self.check_algo_succeeds(algo, handle_data)
        # Set the trading control sid to None, then BUY ALL THE THINGS!. Should
        # fail because setting sid to None makes the control apply to all sids.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = SetMaxPositionSizeAlgorithm(max_shares=10, max_notional=61.0,
                                           sim_params=self.sim_params,
                                           env=self.env)
        self.check_algo_fails(algo, handle_data, 0)
    def test_set_asset_restrictions(self):
        """set_asset_restrictions blocks orders for restricted assets,
        honors on_error='log', and ignores restrictions on other sids."""
        def handle_data(algo, data):
            algo.could_trade = data.can_trade(algo.sid(self.sid))
            algo.order(algo.sid(self.sid), 100)
            algo.order_count += 1
        # Set HistoricalRestrictions for one sid for the entire simulation,
        # and fail.
        rlm = HistoricalRestrictions([
            Restriction(
                self.sid,
                self.sim_params.start_session,
                RESTRICTION_STATES.FROZEN)
        ])
        algo = SetAssetRestrictionsAlgorithm(
            sid=self.sid,
            restrictions=rlm,
            sim_params=self.sim_params,
            env=self.env,
        )
        self.check_algo_fails(algo, handle_data, 0)
        self.assertFalse(algo.could_trade)
        # Set StaticRestrictions for one sid and fail.
        rlm = StaticRestrictions([self.sid])
        algo = SetAssetRestrictionsAlgorithm(
            sid=self.sid,
            restrictions=rlm,
            sim_params=self.sim_params,
            env=self.env,
        )
        self.check_algo_fails(algo, handle_data, 0)
        self.assertFalse(algo.could_trade)
        # just log an error on the violation if we choose not to fail.
        algo = SetAssetRestrictionsAlgorithm(
            sid=self.sid,
            restrictions=rlm,
            sim_params=self.sim_params,
            env=self.env,
            on_error='log'
        )
        with make_test_handler(self) as log_catcher:
            self.check_algo_succeeds(algo, handle_data)
        logs = [r.message for r in log_catcher.records]
        self.assertIn("Order for 100 shares of Equity(133 [A]) at "
                      "2006-01-03 21:00:00+00:00 violates trading constraint "
                      "RestrictedListOrder({})", logs)
        self.assertFalse(algo.could_trade)
        # set the restricted list to exclude the sid, and succeed
        rlm = HistoricalRestrictions([
            Restriction(
                sid,
                self.sim_params.start_session,
                RESTRICTION_STATES.FROZEN) for sid in [134, 135, 136]
        ])
        algo = SetAssetRestrictionsAlgorithm(
            sid=self.sid,
            restrictions=rlm,
            sim_params=self.sim_params,
            env=self.env,
        )
        self.check_algo_succeeds(algo, handle_data)
        self.assertTrue(algo.could_trade)
    @parameterized.expand([
        ('order_first_restricted_sid', 0),
        ('order_second_restricted_sid', 1)
    ])
    def test_set_multiple_asset_restrictions(self, name, to_order_idx):
        """Two restriction lists installed together must each block
        trading, whichever restricted sid is ordered."""
        def handle_data(algo, data):
            algo.could_trade1 = data.can_trade(algo.sid(self.sids[0]))
            algo.could_trade2 = data.can_trade(algo.sid(self.sids[1]))
            algo.order(algo.sid(self.sids[to_order_idx]), 100)
            algo.order_count += 1
        rl1 = StaticRestrictions([self.sids[0]])
        rl2 = StaticRestrictions([self.sids[1]])
        algo = SetMultipleAssetRestrictionsAlgorithm(
            restrictions1=rl1,
            restrictions2=rl2,
            sim_params=self.sim_params,
            env=self.env,
        )
        self.check_algo_fails(algo, handle_data, 0)
        self.assertFalse(algo.could_trade1)
        self.assertFalse(algo.could_trade2)
    def test_set_do_not_order_list(self):
        """A plain do-not-order list of sids must block the first order."""
        def handle_data(algo, data):
            algo.could_trade = data.can_trade(algo.sid(self.sid))
            algo.order(algo.sid(self.sid), 100)
            algo.order_count += 1
        rlm = [self.sid]
        algo = SetDoNotOrderListAlgorithm(
            sid=self.sid,
            restricted_list=rlm,
            sim_params=self.sim_params,
            env=self.env,
        )
        self.check_algo_fails(algo, handle_data, 0)
        self.assertFalse(algo.could_trade)
    def test_set_max_order_size(self):
        """set_max_order_size enforces per-order share and notional caps,
        scoped to a specific asset or to all assets."""
        # Buy one share.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 1)
            algo.order_count += 1
        algo = SetMaxOrderSizeAlgorithm(asset=self.asset,
                                        max_shares=10,
                                        max_notional=500.0,
                                        sim_params=self.sim_params,
                                        env=self.env)
        self.check_algo_succeeds(algo, handle_data)
        # Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
        # because we exceed shares.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), algo.order_count + 1)
            algo.order_count += 1
        algo = SetMaxOrderSizeAlgorithm(asset=self.asset,
                                        max_shares=3,
                                        max_notional=500.0,
                                        sim_params=self.sim_params,
                                        env=self.env)
        self.check_algo_fails(algo, handle_data, 3)
        # Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
        # because we exceed notional.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), algo.order_count + 1)
            algo.order_count += 1
        algo = SetMaxOrderSizeAlgorithm(asset=self.asset,
                                        max_shares=10,
                                        max_notional=40.0,
                                        sim_params=self.sim_params,
                                        env=self.env)
        self.check_algo_fails(algo, handle_data, 3)
        # Set the trading control to a different sid, then BUY ALL THE THINGS!.
        # Should continue normally.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = SetMaxOrderSizeAlgorithm(asset=self.another_asset,
                                        max_shares=1,
                                        max_notional=1.0,
                                        sim_params=self.sim_params,
                                        env=self.env)
        self.check_algo_succeeds(algo, handle_data)
        # Set the trading control sid to None, then BUY ALL THE THINGS!.
        # Should fail because not specifying a sid makes the trading control
        # apply to all sids.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = SetMaxOrderSizeAlgorithm(max_shares=1,
                                        max_notional=1.0,
                                        sim_params=self.sim_params,
                                        env=self.env)
        self.check_algo_fails(algo, handle_data, 0)
    def test_set_max_order_count(self):
        """set_max_order_count limits the number of orders per day; the
        count resets at the start of each new day."""
        start = pd.Timestamp('2006-01-05', tz='utc')
        metadata = pd.DataFrame.from_dict(
            {
                1: {
                    'symbol': 'SYM',
                    'start_date': start,
                    'end_date': start + timedelta(days=6),
                    'exchange': "TEST",
                },
            },
            orient='index',
        )
        with TempDirectory() as tempdir, \
                tmp_trading_env(equities=metadata,
                                load=self.make_load_function()) as env:
            sim_params = factory.create_simulation_parameters(
                start=start,
                num_days=4,
                data_frequency='minute',
            )
            data_portal = create_data_portal(
                env.asset_finder,
                tempdir,
                sim_params,
                [1],
                self.trading_calendar,
            )
            # Five orders on the very first bar; the cap of 3 trips on the
            # fourth.
            def handle_data(algo, data):
                for i in range(5):
                    algo.order(algo.sid(1), 1)
                    algo.order_count += 1
            algo = SetMaxOrderCountAlgorithm(3, sim_params=sim_params,
                                             env=env)
            with self.assertRaises(TradingControlViolation):
                algo._handle_data = handle_data
                algo.run(data_portal)
            self.assertEqual(algo.order_count, 3)
            # This time, order 5 times twice in a single day. The last order
            # of the second batch should fail.
            def handle_data2(algo, data):
                if algo.minute_count == 0 or algo.minute_count == 100:
                    for i in range(5):
                        algo.order(algo.sid(1), 1)
                        algo.order_count += 1
                algo.minute_count += 1
            algo = SetMaxOrderCountAlgorithm(9, sim_params=sim_params,
                                             env=env)
            with self.assertRaises(TradingControlViolation):
                algo._handle_data = handle_data2
                algo.run(data_portal)
            self.assertEqual(algo.order_count, 9)
            # Five orders on the first bar of each day (390 minutes/day).
            def handle_data3(algo, data):
                if (algo.minute_count % 390) == 0:
                    for i in range(5):
                        algo.order(algo.sid(1), 1)
                        algo.order_count += 1
                algo.minute_count += 1
            # Only 5 orders are placed per day, so this should pass even
            # though in total more than 20 orders are placed.
            algo = SetMaxOrderCountAlgorithm(5, sim_params=sim_params,
                                             env=env)
            algo._handle_data = handle_data3
            algo.run(data_portal)
    def test_long_only(self):
        """set_long_only forbids orders that would create a net short
        position, while allowing sells that only flatten longs."""
        # Sell immediately -> fail immediately.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), -1)
            algo.order_count += 1
        algo = SetLongOnlyAlgorithm(sim_params=self.sim_params, env=self.env)
        self.check_algo_fails(algo, handle_data, 0)
        # Buy on even days, sell on odd days. Never takes a short position, so
        # should succeed.
        def handle_data(algo, data):
            if (algo.order_count % 2) == 0:
                algo.order(algo.sid(self.sid), 1)
            else:
                algo.order(algo.sid(self.sid), -1)
            algo.order_count += 1
        algo = SetLongOnlyAlgorithm(sim_params=self.sim_params, env=self.env)
        self.check_algo_succeeds(algo, handle_data)
        # Buy on first three days, then sell off holdings. Should succeed.
        def handle_data(algo, data):
            amounts = [1, 1, 1, -3]
            algo.order(algo.sid(self.sid), amounts[algo.order_count])
            algo.order_count += 1
        algo = SetLongOnlyAlgorithm(sim_params=self.sim_params, env=self.env)
        self.check_algo_succeeds(algo, handle_data)
        # Buy on first three days, then sell off holdings plus an extra share.
        # Should fail on the last sale.
        def handle_data(algo, data):
            amounts = [1, 1, 1, -4]
            algo.order(algo.sid(self.sid), amounts[algo.order_count])
            algo.order_count += 1
        algo = SetLongOnlyAlgorithm(sim_params=self.sim_params, env=self.env)
        self.check_algo_fails(algo, handle_data, 3)
    def test_register_post_init(self):
        """Registering any trading control after initialize() has run must
        raise RegisterTradingControlPostInit."""
        def initialize(algo):
            algo.initialized = True
        def handle_data(algo, data):
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_max_position_size(self.sid, 1, 1)
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_max_order_size(self.sid, 1, 1)
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_max_order_count(1)
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_long_only()
        algo = TradingAlgorithm(initialize=initialize,
                                handle_data=handle_data,
                                sim_params=self.sim_params,
                                env=self.env)
        algo.run(self.data_portal)
    def test_asset_date_bounds(self):
        """Orders outside an asset's start/end date window must raise
        TradingControlViolation; orders inside the window succeed."""
        # Asset alive through the whole simulation: OK.
        metadata = pd.DataFrame([{
            'symbol': 'SYM',
            'start_date': self.sim_params.start_session,
            'end_date': '2020-01-01',
            'exchange': "TEST",
            'sid': 999,
        }])
        with TempDirectory() as tempdir, \
                tmp_trading_env(equities=metadata,
                                load=self.make_load_function()) as env:
            algo = SetAssetDateBoundsAlgorithm(
                sim_params=self.sim_params,
                env=env,
            )
            data_portal = create_data_portal(
                env.asset_finder,
                tempdir,
                self.sim_params,
                [999],
                self.trading_calendar,
            )
            algo.run(data_portal)
        # Asset already delisted before the simulation: violation.
        metadata = pd.DataFrame([{
            'symbol': 'SYM',
            'start_date': '1989-01-01',
            'end_date': '1990-01-01',
            'exchange': "TEST",
            'sid': 999,
        }])
        with TempDirectory() as tempdir, \
                tmp_trading_env(equities=metadata,
                                load=self.make_load_function()) as env:
            data_portal = create_data_portal(
                env.asset_finder,
                tempdir,
                self.sim_params,
                [999],
                self.trading_calendar,
            )
            algo = SetAssetDateBoundsAlgorithm(
                sim_params=self.sim_params,
                env=env,
            )
            with self.assertRaises(TradingControlViolation):
                algo.run(data_portal)
        # Asset not yet listed during the simulation: violation.
        metadata = pd.DataFrame([{
            'symbol': 'SYM',
            'start_date': '2020-01-01',
            'end_date': '2021-01-01',
            'exchange': "TEST",
            'sid': 999,
        }])
        with TempDirectory() as tempdir, \
                tmp_trading_env(equities=metadata,
                                load=self.make_load_function()) as env:
            data_portal = create_data_portal(
                env.asset_finder,
                tempdir,
                self.sim_params,
                [999],
                self.trading_calendar,
            )
            algo = SetAssetDateBoundsAlgorithm(
                sim_params=self.sim_params,
                env=env,
            )
            with self.assertRaises(TradingControlViolation):
                algo.run(data_portal)
class TestAccountControls(WithDataPortal, WithSimParams, ZiplineTestCase):
    """Tests for account-level controls (``set_max_leverage``)."""
    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-01-06', tz='utc')
    sidint, = ASSET_FINDER_EQUITY_SIDS = (133,)
    @classmethod
    def make_equity_daily_bar_data(cls):
        """Four sessions of daily bars for the single test sid."""
        history = factory.create_trade_history(
            cls.sidint,
            [10.0, 10.0, 11.0, 11.0],
            [100, 100, 100, 300],
            timedelta(days=1),
            cls.sim_params,
            cls.trading_calendar,
        )
        return trades_by_sid_to_dfs(
            {cls.sidint: history},
            index=cls.sim_params.sessions,
        )
    def _check_algo(self,
                    algo,
                    handle_data,
                    expected_exc):
        """Run *algo* with *handle_data* installed; expect *expected_exc*
        to be raised, or a clean run when it is falsy."""
        algo._handle_data = handle_data
        expectation = (
            self.assertRaises(expected_exc) if expected_exc else nullctx()
        )
        with expectation:
            algo.run(self.data_portal)
    def check_algo_succeeds(self, algo, handle_data):
        """Expect the run to finish without raising."""
        self._check_algo(algo, handle_data, None)
    def check_algo_fails(self, algo, handle_data):
        """Expect the run to raise AccountControlViolation."""
        self._check_algo(algo,
                         handle_data,
                         AccountControlViolation)
    def test_set_max_leverage(self):
        """set_max_leverage(0) trips on any position; 1 permits buying a
        single share."""
        # Max leverage 0: ordering even one share must violate the control.
        def order_and_record(algo, data):
            algo.order(algo.sid(self.sidint), 1)
            algo.record(latest_time=algo.get_datetime())
        algo = SetMaxLeverageAlgorithm(0, sim_params=self.sim_params,
                                       env=self.env)
        self.check_algo_fails(algo, order_and_record)
        # The last bar recorded before the run aborted.
        self.assertEqual(
            algo.recorded_vars['latest_time'],
            pd.Timestamp('2006-01-04 21:00:00', tz='UTC'),
        )
        # Max leverage 1: the same one-share order stays within bounds.
        def order_one_share(algo, data):
            algo.order(algo.sid(self.sidint), 1)
        algo = SetMaxLeverageAlgorithm(1, sim_params=self.sim_params,
                                       env=self.env)
        self.check_algo_succeeds(algo, order_one_share)
# FIXME re-implement this testcase in q2
# class TestClosePosAlgo(TestCase):
# def setUp(self):
# self.env = TradingEnvironment()
# self.days = self.env.trading_days[:5]
# self.panel = pd.Panel({1: pd.DataFrame({
# 'price': [1, 1, 2, 4, 8], 'volume': [1e9, 1e9, 1e9, 1e9, 0],
# 'type': [DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.CLOSE_POSITION]},
# index=self.days)
# })
# self.no_close_panel = pd.Panel({1: pd.DataFrame({
# 'price': [1, 1, 2, 4, 8], 'volume': [1e9, 1e9, 1e9, 1e9, 1e9],
# 'type': [DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.TRADE,
# DATASOURCE_TYPE.TRADE]},
# index=self.days)
# })
#
# def test_close_position_equity(self):
# metadata = {1: {'symbol': 'TEST',
# 'end_date': self.days[4]}}
# self.env.write_data(equities_data=metadata)
# algo = TestAlgorithm(sid=1, amount=1, order_count=1,
# commission=PerShare(0),
# env=self.env)
# data = DataPanelSource(self.panel)
#
# # Check results
# expected_positions = [0, 1, 1, 1, 0]
# expected_pnl = [0, 0, 1, 2, 4]
# results = algo.run(data)
# self.check_algo_positions(results, expected_positions)
# self.check_algo_pnl(results, expected_pnl)
#
# def test_close_position_future(self):
# metadata = {1: {'symbol': 'TEST'}}
# self.env.write_data(futures_data=metadata)
# algo = TestAlgorithm(sid=1, amount=1, order_count=1,
# commission=PerShare(0),
# env=self.env)
# data = DataPanelSource(self.panel)
#
# # Check results
# expected_positions = [0, 1, 1, 1, 0]
# expected_pnl = [0, 0, 1, 2, 4]
# results = algo.run(data)
# self.check_algo_pnl(results, expected_pnl)
# self.check_algo_positions(results, expected_positions)
#
# def test_auto_close_future(self):
# metadata = {1: {'symbol': 'TEST',
# 'auto_close_date': self.env.trading_days[4]}}
# self.env.write_data(futures_data=metadata)
# algo = TestAlgorithm(sid=1, amount=1, order_count=1,
# commission=PerShare(0),
# env=self.env)
# data = DataPanelSource(self.no_close_panel)
#
# # Check results
# results = algo.run(data)
#
# expected_positions = [0, 1, 1, 1, 0]
# self.check_algo_positions(results, expected_positions)
#
# expected_pnl = [0, 0, 1, 2, 0]
# self.check_algo_pnl(results, expected_pnl)
#
# def check_algo_pnl(self, results, expected_pnl):
# np.testing.assert_array_almost_equal(results.pnl, expected_pnl)
#
# def check_algo_positions(self, results, expected_positions):
# for i, amount in enumerate(results.positions):
# if amount:
# actual_position = amount[0]['amount']
# else:
# actual_position = 0
#
# self.assertEqual(
# actual_position, expected_positions[i],
# "position for day={0} not equal, actual={1}, expected={2}".
# format(i, actual_position, expected_positions[i]))
class TestFutureFlip(WithDataPortal, WithSimParams, ZiplineTestCase):
    """Tests flipping a futures position from long to short."""

    START_DATE = pd.Timestamp('2006-01-09', tz='utc')
    END_DATE = pd.Timestamp('2006-01-10', tz='utc')

    sid, = ASSET_FINDER_EQUITY_SIDS = (1,)

    @classmethod
    def make_equity_daily_bar_data(cls):
        # Two daily bars at prices 1 and 2 with effectively unlimited volume.
        history = factory.create_trade_history(
            cls.sid,
            [1, 2],
            [1e9, 1e9],
            timedelta(days=1),
            cls.sim_params,
            cls.trading_calendar,
        )
        return trades_by_sid_to_dfs(
            {cls.sid: history},
            index=cls.sim_params.sessions,
        )

    @skip('broken in zipline 1.0.0')
    def test_flip_algo(self):
        metadata = {1: {'symbol': 'TEST',
                        'start_date': self.sim_params.trading_days[0],
                        'end_date': self.trading_calendar.next_session_label(
                            self.sim_params.sessions[-1]
                        ),
                        'multiplier': 5}}
        self.env.write_data(futures_data=metadata)

        algo = FutureFlipAlgo(sid=1, amount=1, env=self.env,
                              commission=PerShare(0),
                              order_count=0,  # not applicable but required
                              sim_params=self.sim_params)
        results = algo.run(self.data_portal)

        self.check_algo_positions(results, [0, 1, -1])
        self.check_algo_pnl(results, [0, 5, -10])

    def check_algo_pnl(self, results, expected_pnl):
        np.testing.assert_array_almost_equal(results.pnl, expected_pnl)

    def check_algo_positions(self, results, expected_positions):
        for day, positions in enumerate(results.positions):
            actual = positions[0]['amount'] if positions else 0
            self.assertEqual(
                actual, expected_positions[day],
                "position for day={0} not equal, actual={1}, expected={2}".
                format(day, actual, expected_positions[day]))
class TestFuturesAlgo(WithDataPortal, WithSimParams, ZiplineTestCase):
    """Tests for algorithms trading futures contracts on the us_futures
    calendar in minute mode.
    """
    START_DATE = pd.Timestamp('2016-01-06', tz='utc')
    END_DATE = pd.Timestamp('2016-01-07', tz='utc')
    # Start minute bars one session early so history calls at the open can
    # reach back before the simulation start.
    FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp('2016-01-05', tz='UTC')

    SIM_PARAMS_DATA_FREQUENCY = 'minute'

    TRADING_CALENDAR_STRS = ('us_futures',)
    TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'

    @classmethod
    def make_futures_info(cls):
        # A single crude-oil contract (sid 1) used by every test here.
        return pd.DataFrame.from_dict(
            {
                1: {
                    'symbol': 'CLG16',
                    'root_symbol': 'CL',
                    'start_date': pd.Timestamp('2015-12-01', tz='UTC'),
                    'notice_date': pd.Timestamp('2016-01-20', tz='UTC'),
                    'expiration_date': pd.Timestamp('2016-02-19', tz='UTC'),
                    'auto_close_date': pd.Timestamp('2016-01-18', tz='UTC'),
                    'exchange': 'TEST',
                },
            },
            orient='index',
        )

    def test_futures_history(self):
        """History calls made at the futures market open should be able to
        reach minutes before the regular open time.
        """
        algo_code = dedent(
            """
            from datetime import time
            from zipline.api import (
                date_rules,
                get_datetime,
                schedule_function,
                sid,
                time_rules,
            )

            def initialize(context):
                context.history_values = []

                schedule_function(
                    make_history_call,
                    date_rules.every_day(),
                    time_rules.market_open(),
                )

                schedule_function(
                    check_market_close_time,
                    date_rules.every_day(),
                    time_rules.market_close(),
                )

            def make_history_call(context, data):
                # Ensure that the market open is 6:31am US/Eastern.
                open_time = get_datetime().tz_convert('US/Eastern').time()
                assert open_time == time(6, 31)
                context.history_values.append(
                    data.history(sid(1), 'close', 5, '1m'),
                )

            def check_market_close_time(context, data):
                # Ensure that this function is called at 4:59pm US/Eastern.
                # By default, `market_close()` uses an offset of 1 minute.
                close_time = get_datetime().tz_convert('US/Eastern').time()
                assert close_time == time(16, 59)
            """
        )

        algo = TradingAlgorithm(
            script=algo_code,
            sim_params=self.sim_params,
            env=self.env,
            trading_calendar=get_calendar('us_futures'),
        )
        algo.run(self.data_portal)

        # Assert that we were able to retrieve history data for minutes outside
        # of the 6:31am US/Eastern to 5:00pm US/Eastern futures open times.
        np.testing.assert_array_equal(
            algo.history_values[0].index,
            pd.date_range(
                '2016-01-06 6:27',
                '2016-01-06 6:31',
                freq='min',
                tz='US/Eastern',
            ),
        )
        np.testing.assert_array_equal(
            algo.history_values[1].index,
            pd.date_range(
                '2016-01-07 6:27',
                '2016-01-07 6:31',
                freq='min',
                tz='US/Eastern',
            ),
        )

        # Expected prices here are given by the range values created by the
        # default `make_future_minute_bar_data` method.
        np.testing.assert_array_equal(
            algo.history_values[0].values, list(map(float, range(2196, 2201))),
        )
        np.testing.assert_array_equal(
            algo.history_values[1].values, list(map(float, range(3636, 3641))),
        )

    @staticmethod
    def algo_with_slippage(slippage_model):
        # Build algo source that orders 10 contracts of sid 1 with zero
        # commission and the given slippage-model expression spliced in.
        return dedent(
            """
            from zipline.api import (
                commission,
                order,
                set_commission,
                set_slippage,
                sid,
                slippage,
                get_datetime,
            )

            def initialize(context):
                commission_model = commission.PerFutureTrade(0)
                set_commission(us_futures=commission_model)
                slippage_model = slippage.{model}
                set_slippage(us_futures=slippage_model)
                context.ordered = False

            def handle_data(context, data):
                if not context.ordered:
                    order(sid(1), 10)
                    context.ordered = True
                    context.order_price = data.current(sid(1), 'price')
            """
        ).format(model=slippage_model)

    def test_fixed_future_slippage(self):
        """A futures order under FixedSlippage should fill in a single
        transaction at price + half the spread, with no commission and no
        capital used.
        """
        algo_code = self.algo_with_slippage('FixedSlippage(spread=0.10)')
        algo = TradingAlgorithm(
            script=algo_code,
            sim_params=self.sim_params,
            env=self.env,
            trading_calendar=get_calendar('us_futures'),
        )
        results = algo.run(self.data_portal)

        # Flatten the list of transactions.
        all_txns = [
            val for sublist in results['transactions'].tolist()
            for val in sublist
        ]

        self.assertEqual(len(all_txns), 1)
        txn = all_txns[0]

        # Add 1 to the expected price because the order does not fill until the
        # bar after the price is recorded.
        expected_spread = 0.05
        expected_price = (algo.order_price + 1) + expected_spread

        # Capital used should be 0 because there is no commission, and the cost
        # to enter into a long position on a futures contract is 0.
        self.assertEqual(txn['price'], expected_price)
        self.assertEqual(results['orders'][0][0]['commission'], 0.0)
        self.assertEqual(results.capital_used[0], 0.0)

    def test_volume_contract_slippage(self):
        """VolumeShareSlippage should split a 10-contract order into
        volume-limited fills, each with price impact applied.
        """
        algo_code = self.algo_with_slippage(
            'VolumeShareSlippage(volume_limit=0.05, price_impact=0.1)',
        )
        algo = TradingAlgorithm(
            script=algo_code,
            sim_params=self.sim_params,
            env=self.env,
            trading_calendar=get_calendar('us_futures'),
        )
        results = algo.run(self.data_portal)

        # There should be no commissions.
        self.assertEqual(results['orders'][0][0]['commission'], 0.0)

        # Flatten the list of transactions.
        all_txns = [
            val for sublist in results['transactions'].tolist()
            for val in sublist
        ]

        # With a volume limit of 0.05, and a total volume of 100 contracts
        # traded per minute, we should require 2 transactions to order 10
        # contracts.
        self.assertEqual(len(all_txns), 2)

        for i, txn in enumerate(all_txns):
            # Add 1 to the order price because the order does not fill until
            # the bar after the price is recorded.
            order_price = algo.order_price + i + 1
            expected_impact = order_price * 0.1 * (0.05 ** 2)
            expected_price = order_price + expected_impact
            self.assertEqual(txn['price'], expected_price)
class TestTradingAlgorithm(WithTradingEnvironment, ZiplineTestCase):
    def test_analyze_called(self):
        """The analyze callback should receive the run's performance result."""
        self.perf_ref = None

        def initialize(context):
            pass

        def handle_data(context, data):
            pass

        def analyze(context, perf):
            # Capture the perf object so we can compare identities below.
            self.perf_ref = perf

        algo = TradingAlgorithm(
            initialize=initialize,
            handle_data=handle_data,
            analyze=analyze,
            env=self.env,
        )
        results = algo.run(FakeDataPortal(self.env))
        self.assertIs(results, self.perf_ref)
class TestOrderCancelation(WithDataPortal,
                           WithSimParams,
                           ZiplineTestCase):
    """Tests for order cancelation policies (EODCancel vs. the default
    never-cancel policy) in both daily and minute modes.
    """
    START_DATE = pd.Timestamp('2016-01-05', tz='utc')
    END_DATE = pd.Timestamp('2016-01-07', tz='utc')

    ASSET_FINDER_EQUITY_SIDS = (1,)
    ASSET_FINDER_EQUITY_SYMBOLS = ('ASSET1',)

    # Algo template: {0} is the cancel-policy statement (may be empty),
    # {1} is the order size.
    code = dedent(
        """
        from zipline.api import (
            sid, order, set_slippage, slippage, VolumeShareSlippage,
            set_cancel_policy, cancel_policy, EODCancel
        )

        def initialize(context):
            set_slippage(
                slippage.VolumeShareSlippage(
                    volume_limit=1,
                    price_impact=0
                )
            )

            {0}
            context.ordered = False

        def handle_data(context, data):
            if not context.ordered:
                order(sid(1), {1})
                context.ordered = True
        """,
    )

    @classmethod
    def make_equity_minute_bar_data(cls):
        asset_minutes = \
            cls.trading_calendar.minutes_for_sessions_in_range(
                cls.sim_params.start_session,
                cls.sim_params.end_session,
            )

        minutes_count = len(asset_minutes)
        minutes_arr = np.arange(1, 1 + minutes_count)

        # normal test data, but volume is pinned at 1 share per minute
        yield 1, pd.DataFrame(
            {
                'open': minutes_arr + 1,
                'high': minutes_arr + 2,
                'low': minutes_arr - 1,
                'close': minutes_arr,
                'volume': np.full(minutes_count, 1.0),
            },
            index=asset_minutes,
        )

    @classmethod
    def make_equity_daily_bar_data(cls):
        # Flat daily bars; only used by the daily-mode test.
        yield 1, pd.DataFrame(
            {
                'open': np.full(3, 1, dtype=np.float64),
                'high': np.full(3, 1, dtype=np.float64),
                'low': np.full(3, 1, dtype=np.float64),
                'close': np.full(3, 1, dtype=np.float64),
                'volume': np.full(3, 1, dtype=np.float64),
            },
            index=cls.sim_params.sessions,
        )

    def prep_algo(self, cancelation_string, data_frequency="minute",
                  amount=1000, minute_emission=False):
        # Build a TradingAlgorithm from the class-level source template.
        code = self.code.format(cancelation_string, amount)
        algo = TradingAlgorithm(
            script=code,
            env=self.env,
            sim_params=SimulationParameters(
                start_session=self.sim_params.start_session,
                end_session=self.sim_params.end_session,
                trading_calendar=self.trading_calendar,
                data_frequency=data_frequency,
                emission_rate='minute' if minute_emission else 'daily'
            )
        )

        return algo

    @parameter_space(
        direction=[1, -1],
        minute_emission=[True, False]
    )
    def test_eod_order_cancel_minute(self, direction, minute_emission):
        """
        Test that EOD order cancel works in minute mode for both shorts and
        longs, and both daily emission and minute emission
        """
        # order 1000 shares of asset1.  the volume is only 1 share per bar,
        # so the order should be cancelled at the end of the day.
        algo = self.prep_algo(
            "set_cancel_policy(cancel_policy.EODCancel())",
            amount=np.copysign(1000, direction),
            minute_emission=minute_emission
        )

        log_catcher = TestHandler()
        with log_catcher:
            results = algo.run(self.data_portal)

            for daily_positions in results.positions:
                self.assertEqual(1, len(daily_positions))
                self.assertEqual(
                    np.copysign(389, direction),
                    daily_positions[0]["amount"],
                )
                self.assertEqual(1, results.positions[0][0]["sid"])

            # should be an order on day1, but no more orders afterwards
            np.testing.assert_array_equal([1, 0, 0],
                                          list(map(len, results.orders)))

            # should be 389 txns on day 1, but no more afterwards
            np.testing.assert_array_equal([389, 0, 0],
                                          list(map(len, results.transactions)))

            the_order = results.orders[0][0]

            self.assertEqual(ORDER_STATUS.CANCELLED, the_order["status"])
            self.assertEqual(np.copysign(389, direction), the_order["filled"])

            warnings = [record for record in log_catcher.records if
                        record.level == WARNING]

            # exactly one partial-fill warning should be logged
            self.assertEqual(1, len(warnings))

            if direction == 1:
                self.assertEqual(
                    "Your order for 1000 shares of ASSET1 has been partially "
                    "filled. 389 shares were successfully purchased. "
                    "611 shares were not filled by the end of day and "
                    "were canceled.",
                    str(warnings[0].message)
                )
            elif direction == -1:
                self.assertEqual(
                    "Your order for -1000 shares of ASSET1 has been partially "
                    "filled. 389 shares were successfully sold. "
                    "611 shares were not filled by the end of day and "
                    "were canceled.",
                    str(warnings[0].message)
                )

    def test_default_cancelation_policy(self):
        """Without an explicit policy, open orders are never canceled."""
        algo = self.prep_algo("")

        log_catcher = TestHandler()
        with log_catcher:
            results = algo.run(self.data_portal)

            # order stays open throughout simulation
            np.testing.assert_array_equal([1, 1, 1],
                                          list(map(len, results.orders)))

            # one txn per minute.  389 the first day (since no order until the
            # end of the first minute).  390 on the second day.  221 on the
            # the last day, sum = 1000.
            np.testing.assert_array_equal([389, 390, 221],
                                          list(map(len, results.transactions)))

            self.assertFalse(log_catcher.has_warnings)

    def test_eod_order_cancel_daily(self):
        # in daily mode, EODCancel does nothing.
        algo = self.prep_algo(
            "set_cancel_policy(cancel_policy.EODCancel())",
            "daily"
        )

        log_catcher = TestHandler()
        with log_catcher:
            results = algo.run(self.data_portal)

            # order stays open throughout simulation
            np.testing.assert_array_equal([1, 1, 1],
                                          list(map(len, results.orders)))

            # one txn per day
            np.testing.assert_array_equal([0, 1, 1],
                                          list(map(len, results.transactions)))

            self.assertFalse(log_catcher.has_warnings)
class TestEquityAutoClose(WithTradingEnvironment, WithTmpDir, ZiplineTestCase):
    """
    Tests if delisted equities are properly removed from a portfolio holding
    positions in said equities.
    """
    @classmethod
    def init_class_fixtures(cls):
        super(TestEquityAutoClose, cls).init_class_fixtures()
        # Pick a 7-session window starting 2015-01-05 as the test range.
        trading_sessions = cls.trading_calendar.all_sessions
        start_date = pd.Timestamp('2015-01-05', tz='UTC')
        start_date_loc = trading_sessions.get_loc(start_date)
        test_duration = 7
        cls.test_days = trading_sessions[
            start_date_loc:start_date_loc + test_duration
        ]
        cls.first_asset_expiration = cls.test_days[2]

    def make_data(self, auto_close_delta, frequency,
                  capital_base=1.0e5):
        """Build three staggered-end-date equities plus the bar data, data
        portal and sim params needed to run an algo against them.

        Returns a TestData namedtuple bundling everything the tests use.
        """
        asset_info = make_jagged_equity_info(
            num_assets=3,
            start_date=self.test_days[0],
            first_end=self.first_asset_expiration,
            frequency=self.trading_calendar.day,
            periods_between_ends=2,
            auto_close_delta=auto_close_delta,
        )
        sids = asset_info.index

        env = self.enter_instance_context(
            tmp_trading_env(equities=asset_info,
                            load=self.make_load_function())
        )

        if frequency == 'daily':
            dates = self.test_days
            trade_data_by_sid = make_trade_data_for_asset_info(
                dates=dates,
                asset_info=asset_info,
                price_start=10,
                price_step_by_sid=10,
                price_step_by_date=1,
                volume_start=100,
                volume_step_by_sid=100,
                volume_step_by_date=10,
                frequency=frequency
            )
            path = self.tmpdir.getpath("testdaily.bcolz")
            writer = BcolzDailyBarWriter(
                path, self.trading_calendar, dates[0], dates[-1]
            )
            writer.write(iteritems(trade_data_by_sid))
            reader = BcolzDailyBarReader(path)
            data_portal = DataPortal(
                env.asset_finder, self.trading_calendar,
                first_trading_day=reader.first_trading_day,
                equity_daily_reader=reader,
            )
        elif frequency == 'minute':
            dates = self.trading_calendar.minutes_for_sessions_in_range(
                self.test_days[0],
                self.test_days[-1],
            )
            writer = BcolzMinuteBarWriter(
                self.tmpdir.path,
                self.trading_calendar,
                self.test_days[0],
                self.test_days[-1],
                US_EQUITIES_MINUTES_PER_DAY
            )
            # NOTE: in the minute branch the writer is handed directly to
            # make_trade_data_for_asset_info, which writes as it generates.
            trade_data_by_sid = make_trade_data_for_asset_info(
                writer=writer,
                dates=dates,
                asset_info=asset_info,
                price_start=10,
                price_step_by_sid=10,
                price_step_by_date=1,
                volume_start=100,
                volume_step_by_sid=100,
                volume_step_by_date=10,
                frequency=frequency
            )
            reader = BcolzMinuteBarReader(self.tmpdir.path)
            data_portal = DataPortal(
                env.asset_finder, self.trading_calendar,
                first_trading_day=reader.first_trading_day,
                equity_minute_reader=reader,
            )
        else:
            self.fail("Unknown frequency in make_data: %r" % frequency)

        assets = env.asset_finder.retrieve_all(sids)

        sim_params = factory.create_simulation_parameters(
            start=self.test_days[0],
            end=self.test_days[-1],
            data_frequency=frequency,
            emission_rate=frequency,
            capital_base=capital_base,
        )

        # Last known close price of each asset as of its end date; used by
        # the tests to compute the cash refunded on auto close.
        if frequency == 'daily':
            final_prices = {
                asset.sid: trade_data_by_sid[asset.sid].
                loc[asset.end_date].close
                for asset in assets
            }
        else:
            final_prices = {
                asset.sid: trade_data_by_sid[asset.sid].loc[
                    self.trading_calendar.session_close(asset.end_date)
                ].close
                for asset in assets
            }

        TestData = namedtuple(
            'TestData',
            [
                'asset_info',
                'assets',
                'env',
                'data_portal',
                'final_prices',
                'trade_data_by_sid',
                'sim_params'
            ],
        )
        return TestData(
            asset_info=asset_info,
            assets=assets,
            env=env,
            data_portal=data_portal,
            final_prices=final_prices,
            trade_data_by_sid=trade_data_by_sid,
            sim_params=sim_params
        )

    def prices_on_tick(self, trades_by_sid, row):
        # Close price of every asset at the given bar index.
        return [trades.iloc[row].close
                for trades in itervalues(trades_by_sid)]

    def default_initialize(self):
        """
        Initialize function shared between test algos.
        """
        def initialize(context):
            context.ordered = False
            # Zero commission/slippage so cash math is exact.
            context.set_commission(PerShare(0, 0))
            context.set_slippage(FixedSlippage(spread=0))
            context.num_positions = []
            context.cash = []

        return initialize

    def default_handle_data(self, assets, order_size):
        """
        Handle data function shared between test algos.
        """
        def handle_data(context, data):
            if not context.ordered:
                for asset in assets:
                    context.order(asset, order_size)
                context.ordered = True

            # Record cash and position count on every bar.
            context.cash.append(context.portfolio.cash)
            context.num_positions.append(len(context.portfolio.positions))

        return handle_data

    @parameter_space(
        order_size=[10, -10],
        capital_base=[0, 100000],
        auto_close_lag=[1, 2],
    )
    def test_daily_delisted_equities(self,
                                     order_size,
                                     capital_base,
                                     auto_close_lag):
        """
        Make sure that after an equity gets delisted, our portfolio holds the
        correct number of equities and correct amount of cash.
        """
        auto_close_delta = self.trading_calendar.day * auto_close_lag
        resources = self.make_data(auto_close_delta, 'daily', capital_base)

        assets = resources.assets
        final_prices = resources.final_prices

        # Prices at which we expect our orders to be filled.
        initial_fill_prices = \
            self.prices_on_tick(resources.trade_data_by_sid, 1)
        cost_basis = sum(initial_fill_prices) * order_size

        # Last known prices of assets that will be auto-closed.
        fp0 = final_prices[0]
        fp1 = final_prices[1]

        algo = TradingAlgorithm(
            initialize=self.default_initialize(),
            handle_data=self.default_handle_data(assets, order_size),
            env=resources.env,
            sim_params=resources.sim_params
        )
        output = algo.run(resources.data_portal)

        initial_cash = capital_base
        after_fills = initial_cash - cost_basis
        after_first_auto_close = after_fills + fp0 * (order_size)
        after_second_auto_close = after_first_auto_close + fp1 * (order_size)

        if auto_close_lag == 1:
            # Day 1: Order 10 shares of each equity; there are 3 equities.
            # Day 2: Order goes through at the day 2 price of each equity.
            # Day 3: End date of Equity 0.
            # Day 4: Auto close date of Equity 0. Add cash == (fp0 * size).
            # Day 5: End date of Equity 1.
            # Day 6: Auto close date of Equity 1. Add cash == (fp1 * size).
            # Day 7: End date of Equity 2 and last day of backtest; no changes.
            expected_cash = [
                initial_cash,
                after_fills,
                after_fills,
                after_first_auto_close,
                after_first_auto_close,
                after_second_auto_close,
                after_second_auto_close,
            ]
            expected_num_positions = [0, 3, 3, 2, 2, 1, 1]
        elif auto_close_lag == 2:
            # Day 1: Order 10 shares of each equity; there are 3 equities.
            # Day 2: Order goes through at the day 2 price of each equity.
            # Day 3: End date of Equity 0.
            # Day 4: Nothing happens.
            # Day 5: End date of Equity 1. Auto close of equity 0.
            #        Add cash == (fp0 * size).
            # Day 6: Nothing happens.
            # Day 7: End date of Equity 2 and auto-close date of Equity 1.
            #        Add cash equal to (fp1 * size).
            expected_cash = [
                initial_cash,
                after_fills,
                after_fills,
                after_fills,
                after_first_auto_close,
                after_first_auto_close,
                after_second_auto_close,
            ]
            expected_num_positions = [0, 3, 3, 3, 2, 2, 1]
        else:
            self.fail(
                "Don't know about auto_close lags other than 1 or 2. "
                "Add test answers please!"
            )

        # Check expected cash.
        self.assertEqual(expected_cash, list(output['ending_cash']))

        # The cash recorded by the algo should be behind by a day from the
        # computed ending cash.
        expected_cash.insert(3, after_fills)
        self.assertEqual(algo.cash, expected_cash[:-1])

        # Check expected long/short counts.
        # We have longs if order_size > 0.
        # We have shorts if order_size < 0.
        if order_size > 0:
            self.assertEqual(
                expected_num_positions,
                list(output['longs_count']),
            )
            self.assertEqual(
                [0] * len(self.test_days),
                list(output['shorts_count']),
            )
        else:
            self.assertEqual(
                expected_num_positions,
                list(output['shorts_count']),
            )
            self.assertEqual(
                [0] * len(self.test_days),
                list(output['longs_count']),
            )

        # The number of positions recorded by the algo should be behind by a
        # day from the computed long/short counts.
        expected_num_positions.insert(3, 3)
        self.assertEqual(algo.num_positions, expected_num_positions[:-1])

        # Check expected transactions.
        # We should have a transaction of order_size shares per sid.
        transactions = output['transactions']
        initial_fills = transactions.iloc[1]
        self.assertEqual(len(initial_fills), len(assets))

        last_minute_of_session = \
            self.trading_calendar.session_close(self.test_days[1])

        for asset, txn in zip(assets, initial_fills):
            # NOTE(review): `initial_fill_prices[asset]` indexes a list with
            # an Asset; presumably Asset is int-like (its sid) — confirm.
            self.assertDictContainsSubset(
                {
                    'amount': order_size,
                    'commission': None,
                    'dt': last_minute_of_session,
                    'price': initial_fill_prices[asset],
                    'sid': asset,
                },
                txn,
            )
            # This will be a UUID.
            self.assertIsInstance(txn['order_id'], str)

        def transactions_for_date(date):
            return transactions.iloc[self.test_days.get_loc(date)]

        # We should have exactly one auto-close transaction on the close date
        # of asset 0.
        (first_auto_close_transaction,) = transactions_for_date(
            assets[0].auto_close_date
        )
        self.assertEqual(
            first_auto_close_transaction,
            {
                'amount': -order_size,
                'commission': 0.0,
                'dt': self.trading_calendar.session_close(
                    assets[0].auto_close_date,
                ),
                'price': fp0,
                'sid': assets[0],
                'order_id': None,  # Auto-close txns emit Nones for order_id.
            },
        )

        (second_auto_close_transaction,) = transactions_for_date(
            assets[1].auto_close_date
        )
        self.assertEqual(
            second_auto_close_transaction,
            {
                'amount': -order_size,
                'commission': 0.0,
                'dt': self.trading_calendar.session_close(
                    assets[1].auto_close_date,
                ),
                'price': fp1,
                'sid': assets[1],
                'order_id': None,  # Auto-close txns emit Nones for order_id.
            },
        )

    def test_cancel_open_orders(self):
        """
        Test that any open orders for an equity that gets delisted are
        canceled.  Unless an equity is auto closed, any open orders for that
        equity will persist indefinitely.
        """
        auto_close_delta = self.trading_calendar.day
        resources = self.make_data(auto_close_delta, 'daily')
        env = resources.env
        assets = resources.assets

        first_asset_end_date = assets[0].end_date
        first_asset_auto_close_date = assets[0].auto_close_date

        def initialize(context):
            pass

        def handle_data(context, data):
            # The only order we place in this test should never be filled.
            assert (
                context.portfolio.cash == context.portfolio.starting_cash
            )

            today_session = self.trading_calendar.minute_to_session_label(
                context.get_datetime()
            )
            day_after_auto_close = self.trading_calendar.next_session_label(
                first_asset_auto_close_date,
            )

            if today_session == first_asset_end_date:
                # Equity 0 will no longer exist tomorrow, so this order will
                # never be filled.
                assert len(context.get_open_orders()) == 0
                context.order(context.sid(0), 10)
                assert len(context.get_open_orders()) == 1
            elif today_session == first_asset_auto_close_date:
                # We do not cancel open orders until the end of the auto close
                # date, so our open order should still exist at this point.
                assert len(context.get_open_orders()) == 1
            elif today_session == day_after_auto_close:
                assert len(context.get_open_orders()) == 0

        algo = TradingAlgorithm(
            initialize=initialize,
            handle_data=handle_data,
            env=env,
            sim_params=resources.sim_params
        )
        results = algo.run(resources.data_portal)

        orders = results['orders']

        def orders_for_date(date):
            return orders.iloc[self.test_days.get_loc(date)]

        original_open_orders = orders_for_date(first_asset_end_date)
        assert len(original_open_orders) == 1

        last_close_for_asset = \
            algo.trading_calendar.session_close(first_asset_end_date)

        self.assertDictContainsSubset(
            {
                'amount': 10,
                'commission': 0,
                'created': last_close_for_asset,
                'dt': last_close_for_asset,
                'sid': assets[0],
                'status': ORDER_STATUS.OPEN,
                'filled': 0,
            },
            original_open_orders[0],
        )

        orders_after_auto_close = orders_for_date(first_asset_auto_close_date)
        assert len(orders_after_auto_close) == 1
        self.assertDictContainsSubset(
            {
                'amount': 10,
                'commission': 0,
                'created': last_close_for_asset,
                'dt': algo.trading_calendar.session_close(
                    first_asset_auto_close_date,
                ),
                'sid': assets[0],
                'status': ORDER_STATUS.CANCELLED,
                'filled': 0,
            },
            orders_after_auto_close[0],
        )

    def test_minutely_delisted_equities(self):
        """Minute-mode version of the delisting test: cash and position
        counts are recorded per minute instead of per session.
        """
        resources = self.make_data(self.trading_calendar.day, 'minute')

        env = resources.env
        assets = resources.assets
        final_prices = resources.final_prices
        backtest_minutes = resources.trade_data_by_sid[0].index.tolist()

        order_size = 10
        capital_base = 100000

        algo = TradingAlgorithm(
            initialize=self.default_initialize(),
            handle_data=self.default_handle_data(assets, order_size),
            env=env,
            sim_params=resources.sim_params,
            data_frequency='minute',
        )

        output = algo.run(resources.data_portal)
        initial_fill_prices = \
            self.prices_on_tick(resources.trade_data_by_sid, 1)
        cost_basis = sum(initial_fill_prices) * order_size

        # Last known prices of assets that will be auto-closed.
        fp0 = final_prices[0]
        fp1 = final_prices[1]

        initial_cash = capital_base
        after_fills = initial_cash - cost_basis
        after_first_auto_close = after_fills + fp0 * (order_size)
        after_second_auto_close = after_first_auto_close + fp1 * (order_size)

        expected_cash = [initial_cash]
        expected_position_counts = [0]

        # We have the rest of the first sim day, plus the second, third and
        # fourth days' worth of minutes with cash spent.
        expected_cash.extend([after_fills] * (389 + 390 + 390 + 390))
        expected_position_counts.extend([3] * (389 + 390 + 390 + 390))

        # We then have two days with the cash refunded from asset 0.
        expected_cash.extend([after_first_auto_close] * (390 + 390))
        expected_position_counts.extend([2] * (390 + 390))

        # We then have one day with cash refunded from asset 1.
        expected_cash.extend([after_second_auto_close] * 390)
        expected_position_counts.extend([1] * 390)

        # Check list lengths first to avoid expensive comparison
        self.assertEqual(len(algo.cash), len(expected_cash))
        # TODO find more efficient way to compare these lists
        self.assertEqual(algo.cash, expected_cash)
        self.assertEqual(
            list(output['ending_cash']),
            [
                after_fills,
                after_fills,
                after_fills,
                after_first_auto_close,
                after_first_auto_close,
                after_second_auto_close,
                after_second_auto_close,
            ],
        )

        self.assertEqual(algo.num_positions, expected_position_counts)
        self.assertEqual(
            list(output['longs_count']),
            [3, 3, 3, 2, 2, 1, 1],
        )

        # Check expected transactions.
        # We should have a transaction of order_size shares per sid.
        transactions = output['transactions']

        # Note that the transactions appear on the first day rather than the
        # second in minute mode, because the fills happen on the second tick of
        # the backtest, which is still on the first day in minute mode.
        initial_fills = transactions.iloc[0]
        self.assertEqual(len(initial_fills), len(assets))
        for asset, txn in zip(assets, initial_fills):
            self.assertDictContainsSubset(
                {
                    'amount': order_size,
                    'commission': None,
                    'dt': backtest_minutes[1],
                    'price': initial_fill_prices[asset],
                    'sid': asset,
                },
                txn,
            )
            # This will be a UUID.
            self.assertIsInstance(txn['order_id'], str)

        def transactions_for_date(date):
            return transactions.iloc[self.test_days.get_loc(date)]

        # We should have exactly one auto-close transaction on the close date
        # of asset 0.
        (first_auto_close_transaction,) = transactions_for_date(
            assets[0].auto_close_date
        )
        self.assertEqual(
            first_auto_close_transaction,
            {
                'amount': -order_size,
                'commission': 0.0,
                'dt': algo.trading_calendar.session_close(
                    assets[0].auto_close_date,
                ),
                'price': fp0,
                'sid': assets[0],
                'order_id': None,  # Auto-close txns emit Nones for order_id.
            },
        )

        (second_auto_close_transaction,) = transactions_for_date(
            assets[1].auto_close_date
        )
        self.assertEqual(
            second_auto_close_transaction,
            {
                'amount': -order_size,
                'commission': 0.0,
                'dt': algo.trading_calendar.session_close(
                    assets[1].auto_close_date,
                ),
                'price': fp1,
                'sid': assets[1],
                'order_id': None,  # Auto-close txns emit Nones for order_id.
            },
        )
class TestOrderAfterDelist(WithTradingEnvironment, ZiplineTestCase):
    """Ordering a de-listed asset should log a warning for every order API
    call instead of placing the order.
    """
    start = pd.Timestamp('2016-01-05', tz='utc')
    day_1 = pd.Timestamp('2016-01-06', tz='utc')
    day_4 = pd.Timestamp('2016-01-11', tz='utc')
    end = pd.Timestamp('2016-01-15', tz='utc')

    @classmethod
    def make_equity_info(cls):
        return pd.DataFrame.from_dict(
            {
                # Asset whose auto close date is after its end date.
                1: {
                    'start_date': cls.start,
                    'end_date': cls.day_1,
                    'auto_close_date': cls.day_4,
                    'symbol': "ASSET1",
                    'exchange': "TEST",
                },
                # Asset whose auto close date is before its end date.
                2: {
                    'start_date': cls.start,
                    'end_date': cls.day_4,
                    'auto_close_date': cls.day_1,
                    'symbol': 'ASSET2',
                    'exchange': 'TEST',
                },
            },
            orient='index',
        )

    @classmethod
    def init_class_fixtures(cls):
        super(TestOrderAfterDelist, cls).init_class_fixtures()
        cls.data_portal = FakeDataPortal(cls.env)

    @parameterized.expand([
        ('auto_close_after_end_date', 1),
        ('auto_close_before_end_date', 2),
    ])
    def test_order_in_quiet_period(self, name, sid):
        """Each of the six order API functions must warn (not order) when
        called on the de-listed asset, once per call per minute.
        """
        asset = self.asset_finder.retrieve_asset(sid)

        algo_code = dedent("""
        from zipline.api import (
            sid,
            order,
            order_value,
            order_percent,
            order_target,
            order_target_percent,
            order_target_value
        )

        def initialize(context):
            pass

        def handle_data(context, data):
            order(sid({sid}), 1)
            order_value(sid({sid}), 100)
            order_percent(sid({sid}), 0.5)
            order_target(sid({sid}), 50)
            order_target_percent(sid({sid}), 0.5)
            order_target_value(sid({sid}), 50)
        """).format(sid=sid)

        # run algo from 1/6 to 1/7
        algo = TradingAlgorithm(
            script=algo_code,
            env=self.env,
            sim_params=SimulationParameters(
                start_session=pd.Timestamp("2016-01-06", tz='UTC'),
                end_session=pd.Timestamp("2016-01-07", tz='UTC'),
                trading_calendar=self.trading_calendar,
                data_frequency="minute"
            )
        )
        with make_test_handler(self) as log_catcher:
            algo.run(self.data_portal)

            warnings = [r for r in log_catcher.records
                        if r.level == logbook.WARNING]

            # one warning per order on the second day
            self.assertEqual(6 * 390, len(warnings))

            for w in warnings:
                expected_message = (
                    'Cannot place order for ASSET{sid}, as it has de-listed. '
                    'Any existing positions for this asset will be liquidated '
                    'on {date}.'.format(sid=sid, date=asset.auto_close_date)
                )
                self.assertEqual(expected_message, w.message)
class AlgoInputValidationTestCase(WithTradingEnvironment, ZiplineTestCase):
    def test_reject_passing_both_api_methods_and_script(self):
        # Supplying a script AND any of the lifecycle callbacks as keyword
        # arguments is ambiguous; the constructor must raise ValueError for
        # each possible callback.
        script = dedent(
            """
            def initialize(context):
                pass
            def handle_data(context, data):
                pass
            def before_trading_start(context, data):
                pass
            def analyze(context, results):
                pass
            """
        )
        for method in ('initialize',
                       'handle_data',
                       'before_trading_start',
                       'analyze'):
            with self.assertRaises(ValueError):
                TradingAlgorithm(
                    script=script,
                    env=self.env,
                    **{method: lambda *args, **kwargs: None}
                )
class TestPanelData(WithTradingEnvironment, ZiplineTestCase):
    # End-to-end tests for running algorithms directly against a pd.Panel
    # of pricing data (sids on the item axis, dts on the major axis).
    def create_panel(self, sids, trading_calendar, start_dt, end_dt,
                     create_df_for_asset, prev_close_column=False):
        # Build one synthetic OHLCV frame per sid (the sid doubles as the
        # price interval so each asset's series is distinguishable) and
        # stack the frames into a Panel keyed by sid.
        dfs = {}
        for sid in sids:
            dfs[sid] = create_df_for_asset(trading_calendar,
                                           start_dt, end_dt, interval=sid)
            if prev_close_column:
                # Previous close lets tests verify history() lookbacks.
                dfs[sid]['prev_close'] = dfs[sid]['close'].shift(1)
        return pd.Panel(dfs)
    @parameterized.expand([
        ('daily',
         pd.Timestamp('2015-12-23', tz='UTC'),
         pd.Timestamp('2016-01-05', tz='UTC'),),
        ('minute',
         pd.Timestamp('2015-12-23', tz='UTC'),
         pd.Timestamp('2015-12-24', tz='UTC'),),
    ])
    def test_panel_data(self, data_frequency, start_dt, end_dt):
        # Record data.current/data.history values observed inside the algo,
        # then check them against the source panel after the run.
        trading_calendar = get_calendar('NYSE')
        if data_frequency == 'daily':
            history_freq = '1d'
            create_df_for_asset = create_daily_df_for_asset
            # Daily bars are labeled by session, so normalize algo dts.
            dt_transform = trading_calendar.minute_to_session_label
        elif data_frequency == 'minute':
            history_freq = '1m'
            create_df_for_asset = create_minute_df_for_asset
            def dt_transform(dt):
                return dt
        else:
            raise AssertionError('Unexpected data_frequency: %s' %
                                 data_frequency)
        sids = range(1, 3)
        panel = self.create_panel(sids, trading_calendar, start_dt, end_dt,
                                  create_df_for_asset, prev_close_column=True)
        # Scratch panel the algo fills in as it runs; compared to `panel`
        # by check_panels() below.
        price_record = pd.Panel(items=sids,
                                major_axis=panel.major_axis,
                                minor_axis=['current', 'previous'])
        def initialize(algo):
            algo.first_bar = True
            algo.equities = [algo.sid(sid) for sid in sids]
        def handle_data(algo, data):
            price_record.loc[:, dt_transform(algo.get_datetime()),
                             'current'] = (
                data.current(algo.equities, 'price')
            )
            # history() needs at least one prior bar, so skip the first.
            if algo.first_bar:
                algo.first_bar = False
            else:
                price_record.loc[:, dt_transform(algo.get_datetime()),
                                 'previous'] = (
                    data.history(algo.equities, 'price',
                                 2, history_freq).iloc[0]
                )
        def check_panels():
            # 'current' should match 'close', 'previous' should match
            # 'prev_close' for every sid/dt.
            np.testing.assert_array_equal(
                price_record.values.astype('float64'),
                panel.loc[:, :, ['close',
                                 'prev_close']].values.astype('float64')
            )
        # Run once through the TradingAlgorithm entry point...
        with tmp_trading_env(load=self.make_load_function()) as env:
            trading_algo = TradingAlgorithm(initialize=initialize,
                                            handle_data=handle_data,
                                            env=env)
            trading_algo.run(data=panel)
            check_panels()
        # ...then reset the record and run again through run_algorithm to
        # confirm both code paths handle panel input identically.
        price_record.loc[:] = np.nan
        with tmp_dir() as tmpdir:
            root = tmpdir.getpath('example_data/root')
            copy_market_data(self.MARKET_DATA_DIR, root)
            run_algorithm(
                start=start_dt,
                end=end_dt,
                capital_base=1,
                initialize=initialize,
                handle_data=handle_data,
                data_frequency=data_frequency,
                data=panel,
                environ={'ZIPLINE_ROOT': root},
            )
            check_panels()
    def test_minute_panel_daily_history(self):
        # A minute panel should still support daily ('1d') history lookups:
        # yesterday's open from history() must equal the open observed at
        # the previous session's first bar.
        sids = range(1, 3)
        trading_calendar = get_calendar('NYSE')
        start_dt = pd.Timestamp('2015-12-23', tz='UTC')
        end_dt = pd.Timestamp('2015-12-30', tz='UTC')
        panel = self.create_panel(
            sids,
            trading_calendar,
            start_dt,
            end_dt,
            create_minute_df_for_asset,
        )
        def check_open_price(algo, data):
            if algo.first_day:
                # No prior session to compare against on day one.
                algo.first_day = False
            else:
                np.testing.assert_array_equal(
                    algo.last_open,
                    data.history(
                        algo.equities,
                        'open',
                        2,
                        '1d',
                    ).iloc[0]
                )
            algo.last_open = data.current(algo.equities, 'open')
        def initialize(algo):
            algo.first_day = True
            algo.equities = [algo.sid(sid) for sid in sids]
            # Run the check at the open of every session.
            algo.schedule_function(
                check_open_price,
                date_rules.every_day(),
                time_rules.market_open(),
            )
        with tmp_trading_env(load=self.make_load_function()) as env:
            trading_algo = TradingAlgorithm(initialize=initialize,
                                            env=env)
            trading_algo.run(data=panel)
| apache-2.0 |
altairpearl/scikit-learn | sklearn/tree/export.py | 12 | 16020 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# License: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
                    feature_names=None, class_names=None, label='all',
                    filled=False, leaves_parallel=False, impurity=True,
                    node_ids=False, proportion=False, rotate=False,
                    rounded=False, special_characters=False):
    """Export a decision tree in DOT format.

    This function generates a GraphViz representation of the decision tree,
    which is then written into `out_file`. Once exported, graphical renderings
    can be generated using, for example::

        $ dot -Tps tree.dot -o tree.ps      (PostScript format)
        $ dot -Tpng tree.dot -o tree.png    (PNG format)

    The sample counts that are shown are weighted with any sample_weights that
    might be present.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    decision_tree : decision tree classifier
        The decision tree to be exported to GraphViz.

    out_file : file object or string, optional (default="tree.dot")
        Handle or name of the output file.

    max_depth : int, optional (default=None)
        The maximum depth of the representation. If None, the tree is fully
        generated.

    feature_names : list of strings, optional (default=None)
        Names of each of the features.

    class_names : list of strings, bool or None, optional (default=None)
        Names of each of the target classes in ascending numerical order.
        Only relevant for classification and not supported for multi-output.
        If ``True``, shows a symbolic representation of the class name.

    label : {'all', 'root', 'none'}, optional (default='all')
        Whether to show informative labels for impurity, etc.
        Options include 'all' to show at every node, 'root' to show only at
        the top root node, or 'none' to not show at any node.

    filled : bool, optional (default=False)
        When set to ``True``, paint nodes to indicate majority class for
        classification, extremity of values for regression, or purity of node
        for multi-output.

    leaves_parallel : bool, optional (default=False)
        When set to ``True``, draw all leaf nodes at the bottom of the tree.

    impurity : bool, optional (default=True)
        When set to ``True``, show the impurity at each node.

    node_ids : bool, optional (default=False)
        When set to ``True``, show the ID number on each node.

    proportion : bool, optional (default=False)
        When set to ``True``, change the display of 'values' and/or 'samples'
        to be proportions and percentages respectively.

    rotate : bool, optional (default=False)
        When set to ``True``, orient tree left to right rather than top-down.

    rounded : bool, optional (default=False)
        When set to ``True``, draw node boxes with rounded corners and use
        Helvetica fonts instead of Times-Roman.

    special_characters : bool, optional (default=False)
        When set to ``False``, ignore special characters for PostScript
        compatibility.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn import tree
    >>> clf = tree.DecisionTreeClassifier()
    >>> iris = load_iris()
    >>> clf = clf.fit(iris.data, iris.target)
    >>> tree.export_graphviz(clf,
    ...     out_file='tree.dot')                # doctest: +SKIP
    """

    def get_color(value):
        # Find the appropriate color & intensity for a node
        if colors['bounds'] is None:
            # Classification tree
            color = list(colors['rgb'][np.argmax(value)])
            sorted_values = sorted(value, reverse=True)
            if len(sorted_values) == 1:
                alpha = 0
            else:
                alpha = int(np.round(255 * (sorted_values[0] -
                                            sorted_values[1]) /
                                     (1 - sorted_values[1]), 0))
        else:
            # Regression tree or multi-output
            color = list(colors['rgb'][0])
            alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
                                        (colors['bounds'][1] -
                                         colors['bounds'][0])), 0))
        # Return html color code in #RRGGBBAA format
        color.append(alpha)
        hex_codes = [str(i) for i in range(10)]
        hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
        color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
        return '#' + ''.join(color)

    def node_to_str(tree, node_id, criterion):
        # Generate the node content string
        if tree.n_outputs == 1:
            value = tree.value[node_id][0, :]
        else:
            value = tree.value[node_id]
        # Should labels be shown?
        labels = (label == 'root' and node_id == 0) or label == 'all'
        # PostScript compatibility for special characters
        if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
            node_string = '<'
        else:
            characters = ['#', '[', ']', '<=', '\\n', '"']
            node_string = '"'
        # Write node ID
        if node_ids:
            if labels:
                node_string += 'node '
            node_string += characters[0] + str(node_id) + characters[4]
        # Write decision criteria
        if tree.children_left[node_id] != _tree.TREE_LEAF:
            # Always write node decision criteria, except for leaves
            if feature_names is not None:
                feature = feature_names[tree.feature[node_id]]
            else:
                feature = "X%s%s%s" % (characters[1],
                                       tree.feature[node_id],
                                       characters[2])
            node_string += '%s %s %s%s' % (feature,
                                           characters[3],
                                           round(tree.threshold[node_id], 4),
                                           characters[4])
        # Write impurity
        if impurity:
            if isinstance(criterion, _criterion.FriedmanMSE):
                criterion = "friedman_mse"
            elif not isinstance(criterion, six.string_types):
                criterion = "impurity"
            if labels:
                node_string += '%s = ' % criterion
            node_string += (str(round(tree.impurity[node_id], 4)) +
                            characters[4])
        # Write node sample count
        if labels:
            node_string += 'samples = '
        if proportion:
            percent = (100. * tree.n_node_samples[node_id] /
                       float(tree.n_node_samples[0]))
            node_string += (str(round(percent, 1)) + '%' +
                            characters[4])
        else:
            node_string += (str(tree.n_node_samples[node_id]) +
                            characters[4])
        # Write node class distribution / regression value
        if proportion and tree.n_classes[0] != 1:
            # For classification this will show the proportion of samples
            value = value / tree.weighted_n_node_samples[node_id]
        if labels:
            node_string += 'value = '
        if tree.n_classes[0] == 1:
            # Regression
            value_text = np.around(value, 4)
        elif proportion:
            # Classification
            value_text = np.around(value, 2)
        elif np.all(np.equal(np.mod(value, 1), 0)):
            # Classification without floating-point weights
            value_text = value.astype(int)
        else:
            # Classification with floating-point weights
            value_text = np.around(value, 4)
        # Strip whitespace
        value_text = str(value_text.astype('S32')).replace("b'", "'")
        value_text = value_text.replace("' '", ", ").replace("'", "")
        if tree.n_classes[0] == 1 and tree.n_outputs == 1:
            value_text = value_text.replace("[", "").replace("]", "")
        value_text = value_text.replace("\n ", characters[4])
        node_string += value_text + characters[4]
        # Write node majority class
        if (class_names is not None and
                tree.n_classes[0] != 1 and
                tree.n_outputs == 1):
            # Only done for single-output classification trees
            if labels:
                node_string += 'class = '
            if class_names is not True:
                class_name = class_names[np.argmax(value)]
            else:
                class_name = "y%s%s%s" % (characters[1],
                                          np.argmax(value),
                                          characters[2])
            node_string += class_name
        # Clean up any trailing newlines
        if node_string[-2:] == '\\n':
            node_string = node_string[:-2]
        if node_string[-5:] == '<br/>':
            node_string = node_string[:-5]
        return node_string + characters[5]

    def recurse(tree, node_id, criterion, parent=None, depth=0):
        if node_id == _tree.TREE_LEAF:
            raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
        left_child = tree.children_left[node_id]
        right_child = tree.children_right[node_id]
        # Add node with description
        if max_depth is None or depth <= max_depth:
            # Collect ranks for 'leaf' option in plot_options
            if left_child == _tree.TREE_LEAF:
                ranks['leaves'].append(str(node_id))
            elif str(depth) not in ranks:
                ranks[str(depth)] = [str(node_id)]
            else:
                ranks[str(depth)].append(str(node_id))
            out_file.write('%d [label=%s'
                           % (node_id,
                              node_to_str(tree, node_id, criterion)))
            if filled:
                # Fetch appropriate color for node
                if 'rgb' not in colors:
                    # Initialize colors and bounds if required
                    colors['rgb'] = _color_brew(tree.n_classes[0])
                    if tree.n_outputs != 1:
                        # Find max and min impurities for multi-output
                        colors['bounds'] = (np.min(-tree.impurity),
                                            np.max(-tree.impurity))
                    elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
                        # Find max and min values in leaf nodes for regression
                        colors['bounds'] = (np.min(tree.value),
                                            np.max(tree.value))
                if tree.n_outputs == 1:
                    node_val = (tree.value[node_id][0, :] /
                                tree.weighted_n_node_samples[node_id])
                    if tree.n_classes[0] == 1:
                        # Regression
                        node_val = tree.value[node_id][0, :]
                else:
                    # If multi-output color node by impurity
                    node_val = -tree.impurity[node_id]
                out_file.write(', fillcolor="%s"' % get_color(node_val))
            out_file.write('] ;\n')
            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d' % (parent, node_id))
                if parent == 0:
                    # Draw True/False labels if parent is root node
                    angles = np.array([45, -45]) * ((rotate - .5) * -2)
                    out_file.write(' [labeldistance=2.5, labelangle=')
                    if node_id == 1:
                        out_file.write('%d, headlabel="True"]' % angles[0])
                    else:
                        out_file.write('%d, headlabel="False"]' % angles[1])
            out_file.write(' ;\n')
            if left_child != _tree.TREE_LEAF:
                recurse(tree, left_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)
                recurse(tree, right_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)
        else:
            ranks['leaves'].append(str(node_id))
            out_file.write('%d [label="(...)"' % node_id)
            if filled:
                # color cropped nodes grey
                out_file.write(', fillcolor="#C0C0C0"')
            # BUG FIX: this line previously read
            #     out_file.write('] ;\n' % node_id)
            # Applying '%' to a string with no conversion specifier raises
            # TypeError, so any export with max_depth set on a deeper tree
            # crashed as soon as a node was cropped.
            out_file.write('] ;\n')
            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d ;\n' % (parent, node_id))

    own_file = False
    try:
        if isinstance(out_file, six.string_types):
            if six.PY3:
                out_file = open(out_file, "w", encoding="utf-8")
            else:
                out_file = open(out_file, "wb")
            own_file = True
        # The depth of each node for plotting with 'leaf' option
        ranks = {'leaves': []}
        # The colors to render each node with
        colors = {'bounds': None}
        out_file.write('digraph Tree {\n')
        # Specify node aesthetics
        out_file.write('node [shape=box')
        rounded_filled = []
        if filled:
            rounded_filled.append('filled')
        if rounded:
            rounded_filled.append('rounded')
        if len(rounded_filled) > 0:
            out_file.write(', style="%s", color="black"'
                           % ", ".join(rounded_filled))
        if rounded:
            out_file.write(', fontname=helvetica')
        out_file.write('] ;\n')
        # Specify graph & edge aesthetics
        if leaves_parallel:
            out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
        if rounded:
            out_file.write('edge [fontname=helvetica] ;\n')
        if rotate:
            out_file.write('rankdir=LR ;\n')
        # Now recurse the tree and add node & edge attributes
        if isinstance(decision_tree, _tree.Tree):
            recurse(decision_tree, 0, criterion="impurity")
        else:
            recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
        # If required, draw leaf nodes at same depth as each other
        if leaves_parallel:
            for rank in sorted(ranks):
                out_file.write("{rank=same ; " +
                               "; ".join(r for r in ranks[rank]) + "} ;\n")
        out_file.write("}")
    finally:
        # Only close handles we opened ourselves.
        if own_file:
            out_file.close()
| bsd-3-clause |
daniaki/Enrich2 | enrich2/base/utils.py | 1 | 8780 | # Copyright 2016-2017 Alan F Rubin, Daniel C Esposito
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
"""
Enrich2 base utility module
===========================
Contains various utility functions used through-out enrich2
"""
import pandas as pd
from queue import Queue
import hashlib
import os
import logging
import traceback
from ..base.constants import CALLBACK, MESSAGE, KWARGS
# Public API of this utility module.
__all__ = [
    "nested_format",
    "fix_filename",
    "multi_index_tsv_to_dataframe",
    'infer_multiindex_header_rows',
    'is_number',
    'compute_md5',
    'init_logging_queue',
    'get_logging_queue',
    'log_message'
]
# Module-level singleton logging queue. Created lazily by
# init_logging_queue(); remains None until then, in which case log_message
# falls back to synchronous logging.
LOG_QUEUE = None
def init_logging_queue():
    """
    Inits the logging queue if it is ``None``.

    Idempotent: calling this when the queue already exists is a no-op, so
    the initialization message is only ever queued once.
    """
    global LOG_QUEUE
    if LOG_QUEUE is None:
        LOG_QUEUE = Queue()
        # This message goes through the queue we just created.
        log_message(
            logging.info,
            'Logging Queue has been initialized.',
            extra={'oname': 'Utilities'}
        )
def get_logging_queue(init=False):
    """
    Gets the current active queue instance.

    Parameters
    ----------
    init : `bool`
        Init the queue before returning it.

    Returns
    -------
    :py:class:`Queue` or None
        The module-level queue, or ``None`` if it has never been
        initialized and ``init`` is False.
    """
    if init:
        init_logging_queue()
    return LOG_QUEUE
def log_message(logging_callback, msg, **kwargs):
    """
    Places a logging message into the active queue.

    If no queue has been initialized, the message is logged synchronously
    instead. When *msg* is an :py:class:`Exception`, its traceback is
    additionally logged (or queued) via ``logging.exception``.

    Parameters
    ----------
    logging_callback : `Callable`
        The logging function to use from the logging module.
    msg : `str` or `Exception`
        The message to log.
    kwargs : `dict`
        Keyword arguments for logging module.
    """
    log = {CALLBACK: logging_callback, MESSAGE: msg, KWARGS: kwargs}
    queue = get_logging_queue(init=False)
    if queue is None:
        # No queue: log immediately on the caller's thread.
        logging_callback(msg, **kwargs)
        if isinstance(msg, Exception):
            tb = msg.__traceback__
            logging.exception(''.join(traceback.format_tb(tb)), **kwargs)
    else:
        # Queue present: defer the log entry (and any traceback entry)
        # for whichever consumer drains the queue.
        queue.put(log)
        if isinstance(msg, Exception):
            tb = msg.__traceback__
            error = {
                CALLBACK: logging.exception,
                MESSAGE: ''.join(traceback.format_tb(tb)),
                KWARGS: kwargs
            }
            queue.put(error)
def nested_format(data, default, tab_level=1):
    """
    Print a human readable nested dictionary or nested list.

    Parameters
    ----------
    data : `object`
        Data to print.
    default: `bool`
        Indicator indicating if a value is a default.
    tab_level : `int`
        Number of tabs to indent with.

    Returns
    -------
    `str`
        A formatted string.
    """
    msg = ""
    if isinstance(data, (list, tuple)):
        if not data:
            msg += 'Empty Iterable'
        else:
            # BUG FIX: the label used to be appended twice for defaults
            # ("-> Iterable-> Iterable [Default]"); emit exactly one label.
            msg += "-> Iterable [Default]" if default else "-> Iterable"
            try:
                # Elements may be (value, is_default) pairs.
                for i, (value, default) in enumerate(data):
                    msg += '\n' + '\t' * tab_level + '@index {}: '.format(i)
                    msg += nested_format(value, default, tab_level)
            except (TypeError, ValueError):
                # Plain elements without a paired default flag.
                for i, value in enumerate(data):
                    msg += '\n' + '\t' * tab_level + '@index {}: '.format(i)
                    msg += nested_format(value, False, tab_level)
            msg += '\n' + '\t' * tab_level + '@end of list'
    elif isinstance(data, dict):
        if not data:
            msg += 'Empty Dictionary'
        else:
            # BUG FIX: same double-label issue as the iterable branch.
            msg += "-> Dictionary [Default]" if default else "-> Dictionary"
            try:
                # Values may be (value, is_default) pairs.
                for key, (value, default) in data.items():
                    msg += '\n' + "\t" * tab_level + "{}: ".format(key)
                    msg += nested_format(value, default, tab_level + 1)
            except (TypeError, ValueError):
                for key, value in data.items():
                    msg += '\n' + "\t" * tab_level + "{}: ".format(key)
                    msg += nested_format(value, False, tab_level + 1)
    else:
        # Leaf: render "(value, type)" with an optional default marker.
        if isinstance(data, str):
            data = "'{}'".format(data)
        dtype = type(data).__name__
        if default:
            msg += "({} [Default], {})".format(data, dtype)
        else:
            msg += "({}, {})".format(data, dtype)
    return msg
def multi_index_tsv_to_dataframe(filepath, sep='\t', header_rows=None):
    """
    Loads a multi-header tsv file into a :py:class:`pd.DataFrame`.

    Parameters
    ----------
    filepath : `str`
        Path pointing to the tsv file.
    sep : `str`, optional, default: '\t'
        Character to use as the delimiter.
    header_rows : `list`, optional, default: None
        0-based indicies corresponding to the row locations to use as the
        multi-index column names in the dataframe. Example:

            condition     E3          E3
            value         pvalue_raw  z
            _sy           8.6e-05     3.92
            p.Ala16Arg    0.0         3.76

        The *header_rows* for this instance will be [0, 1].
        If not supplied, `header_rows` will be inferred from the file.

    Returns
    -------
    :py:class:`~pd.DataFrame`
        A :py:class:`pd.MultiIndex` dataframe.
    """
    # NOTE(review): pd.read_table is deprecated (removed in pandas 2.x);
    # consider pd.read_csv(..., sep=sep) when upgrading pandas.
    if header_rows is None:
        header_rows = infer_multiindex_header_rows(filepath)
    # A single header row (or none) is an ordinary flat dataframe.
    if header_rows == [0] or not header_rows:
        return pd.read_table(filepath, index_col=0, sep=sep)
    else:
        try:
            return pd.read_table(
                filepath, index_col=0, sep=sep, header=header_rows)
        except IndexError:
            # Fall back to a flat read if the inferred header rows do not
            # actually exist in the file.
            return pd.read_table(filepath, index_col=0, sep=sep)
def infer_multiindex_header_rows(filepath):
    """
    Infers which rows of a tsv file should be used as a header when
    loading a multi-index or single-index tsv.

    Header rows are the leading rows that contain no numeric tokens; the
    scan stops at the first blank row or the first row containing a number.
    NaN values in the tsv must be encoded with the string 'NaN' for this
    function to correctly infer header columns.

    Parameters
    ----------
    filepath : `str`
        Path pointing to the tsv file.

    Returns
    -------
    `list`
        0-based indicies corresponding to the row locations to use as the
        multi-index column names in the dataframe. Example:

            condition     E3          E3
            value         pvalue_raw  z
            _sy           8.6e-05     3.92
            p.Ala16Arg    0.0         3.76

        The *header_rows* for this instance will be [0, 1].
    """
    header_rows = []
    with open(filepath, 'rt') as handle:
        for row_index, line in enumerate(handle):
            tokens = [tok.strip() for tok in line.split('\t') if tok.strip()]
            # Stop at the first blank line or the first data (numeric) row.
            if not tokens or any(is_number(tok) for tok in tokens):
                break
            header_rows.append(row_index)
    return header_rows
def is_number(s):
    """
    Check if a string s represents a number

    Parameters
    ----------
    s : `str`
        String to check

    Returns
    -------
    `bool`
        ``True`` if a string represents an integer or floating point number
    """
    # Accept the string if either numeric constructor can parse it.
    for cast in (int, float):
        try:
            cast(s)
            return True
        except ValueError:
            continue
    return False
def fix_filename(s):
    """
    Clean up a file name by removing invalid characters and converting
    spaces to underscores.

    Only alphanumerics and the characters space, '.', '_' and '~' are
    kept; everything else is dropped.

    Parameters
    ----------
    s : `str`
        File name

    Returns
    -------
    `str`
        Cleaned file name
    """
    allowed_punctuation = ' ._~'
    cleaned = "".join(
        ch for ch in s if ch.isalnum() or ch in allowed_punctuation
    )
    return cleaned.replace(' ', '_')
def compute_md5(fname):
    """
    Returns the MD5 sum of a file at some path, or an empty string
    if the file does not exist.

    Parameters
    ----------
    fname : `str` or None
        Path to file. ``None`` is tolerated and yields an empty string.

    Returns
    -------
    `str`
        MD5 string of the hashed file, or "" when *fname* is None or not
        a regular file.
    """
    md5 = ""
    if fname is None:
        return md5
    if os.path.isfile(fname):
        # Use a context manager so the handle is closed even if read() or
        # hashing raises (the original leaked the handle on error).
        with open(fname, 'rb') as fp:
            md5 = hashlib.md5(fp.read()).hexdigest()
    return md5
| gpl-3.0 |
wzbozon/statsmodels | statsmodels/sandbox/examples/try_quantile_regression1.py | 33 | 1188 | '''Example to illustrate Quantile Regression
Author: Josef Perktold
polynomial regression with systematic deviations above
'''
import numpy as np
from statsmodels.compat.python import zip
from scipy import stats
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
# Simulate a cubic regression with localized bumps and right-skewed noise.
sige = 0.1
nobs, k_vars = 500, 3
x = np.random.uniform(-1, 1, size=nobs)
x.sort()
# Polynomial design matrix [1, x, x**2, x**3].
exog = np.vander(x, k_vars+1)[:,::-1]
# Narrow normal bumps at four locations add systematic deviations above.
mix = 0.1 * stats.norm.pdf(x[:,None], loc=np.linspace(-0.5, 0.75, 4), scale=0.01).sum(1)
# Cubed shifted normal noise is asymmetric, so mean and median fits differ.
y = exog.sum(1) + mix + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
# Fit the median and two outer quantiles, plus OLS for comparison.
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.1)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
# NOTE(review): `labels` is defined but unused -- the plotting loop below
# re-lists the same strings inline.
labels = ['ols', 'qr 0.1', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.', alpha=0.5)
# Overlay each fitted curve and print its coefficients.
for lab, beta in zip(['ols', 'qr 0.1', 'qr 0.5', 'qr 0.75'], params):
    print('%-8s'%lab, np.round(beta, 4))
    fitted = np.dot(exog, beta)
    lw = 2
    plt.plot(x, fitted, lw=lw, label=lab)
plt.legend()
plt.title('Quantile Regression')
plt.show()
| bsd-3-clause |
mattcaldwell/zipline | tests/test_history.py | 2 | 36566 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from itertools import product
from textwrap import dedent
from nose_parameterized import parameterized
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from zipline.history import history
from zipline.history.history_container import HistoryContainer
from zipline.protocol import BarData
import zipline.utils.factory as factory
from zipline import TradingAlgorithm
from zipline.finance.trading import (
SimulationParameters,
TradingEnvironment,
with_environment,
)
from zipline.errors import IncompatibleHistoryFrequency
from zipline.sources import RandomWalkSource, DataFrameSource
from .history_cases import (
HISTORY_CONTAINER_TEST_CASES,
)
# Cases are over the July 4th holiday, to ensure use of trading calendar.
# March 2013
# Su Mo Tu We Th Fr Sa
# 1 2
# 3 4 5 6 7 8 9
# 10 11 12 13 14 15 16
# 17 18 19 20 21 22 23
# 24 25 26 27 28 29 30
# 31
# April 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30
#
# May 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
#
# June 2013
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
#
# Times to be converted via:
# pd.Timestamp('2013-07-05 9:31', tz='US/Eastern').tz_convert('UTC')},
# Raw cases use human-readable US/Eastern timestamp strings; convert_cases
# (defined below) translates them to tz-aware UTC values at import time.
INDEX_TEST_CASES_RAW = {
    'week of daily data': {
        'input': {'bar_count': 5,
                  'frequency': '1d',
                  'algo_dt': '2013-07-05 9:31AM'},
        'expected': [
            '2013-06-28 4:00PM',
            '2013-07-01 4:00PM',
            '2013-07-02 4:00PM',
            # July 3rd is a half session; July 4th is a holiday.
            '2013-07-03 1:00PM',
            '2013-07-05 9:31AM',
        ]
    },
    'five minutes on july 5th open': {
        'input': {'bar_count': 5,
                  'frequency': '1m',
                  'algo_dt': '2013-07-05 9:31AM'},
        'expected': [
            # The prior minutes come from the end of the July 3rd half day.
            '2013-07-03 12:57PM',
            '2013-07-03 12:58PM',
            '2013-07-03 12:59PM',
            '2013-07-03 1:00PM',
            '2013-07-05 9:31AM',
        ]
    },
}
def to_timestamp(dt_str):
    """Parse a US/Eastern datetime string and return it converted to UTC."""
    eastern_dt = pd.Timestamp(dt_str, tz='US/Eastern')
    return eastern_dt.tz_convert('UTC')
def convert_cases(cases):
    """
    Convert raw strings to values comparable with system data.

    Note: only the outer mapping is copied; the per-case dicts are
    mutated in place, mirroring the original shallow-copy behavior.
    """
    converted = cases.copy()
    for case in converted.values():
        case['input']['algo_dt'] = to_timestamp(case['input']['algo_dt'])
        case['expected'] = pd.DatetimeIndex(
            [to_timestamp(raw) for raw in case['expected']]
        )
    return converted
# Materialized once at import time for use by the parameterized tests below.
INDEX_TEST_CASES = convert_cases(INDEX_TEST_CASES_RAW)
def get_index_at_dt(case_input):
    # Build a HistorySpec from a test-case 'input' dict and return the
    # DatetimeIndex the history machinery produces as of the case's algo_dt.
    # NOTE(review): the positional None/False presumably correspond to the
    # field and ffill arguments of HistorySpec -- confirm against its
    # signature if this is changed.
    history_spec = history.HistorySpec(
        case_input['bar_count'],
        case_input['frequency'],
        None,
        False,
        data_frequency='minute',
    )
    return history.index_at_dt(history_spec, case_input['algo_dt'])
class TestHistoryIndex(TestCase):
    # Verifies history.index_at_dt against the hand-written expectations in
    # INDEX_TEST_CASES (which span the July 4th holiday, exercising the
    # trading calendar).
    @classmethod
    def setUpClass(cls):
        cls.environment = TradingEnvironment.instance()
    @parameterized.expand(
        [(name, case['input'], case['expected'])
         for name, case in INDEX_TEST_CASES.items()]
    )
    def test_index_at_dt(self, name, case_input, expected):
        history_index = get_index_at_dt(case_input)
        # Wrap both indexes in Series so assert_series_equal does the
        # index comparison and reporting.
        history_series = pd.Series(index=history_index)
        expected_series = pd.Series(index=expected)
        pd.util.testing.assert_series_equal(history_series, expected_series)
class TestHistoryContainer(TestCase):
    # Unit tests for HistoryContainer: table-driven update/get_history
    # round-trips plus a hand-written scenario covering NaN handling and
    # the roll from one trading day to the next.
    @classmethod
    def setUpClass(cls):
        cls.env = TradingEnvironment.instance()
    def bar_data_dt(self, bar_data, require_unique=True):
        """
        Get a dt to associate with the given BarData object.
        If require_unique == True, throw an error if multiple unique dt's are
        encountered. Otherwise, return the earliest dt encountered.
        """
        dts = {sid_data['dt'] for sid_data in bar_data.values()}
        if require_unique and len(dts) > 1:
            self.fail("Multiple unique dts ({0}) in {1}".format(dts, bar_data))
        return sorted(dts)[0]
    @parameterized.expand(
        [(name,
          case['specs'],
          case['sids'],
          case['dt'],
          case['updates'],
          case['expected'])
         for name, case in HISTORY_CONTAINER_TEST_CASES.items()]
    )
    def test_history_container(self,
                               name,
                               specs,
                               sids,
                               dt,
                               updates,
                               expected):
        for spec in specs:
            # Sanity check on test input.
            self.assertEqual(len(expected[spec.key_str]), len(updates))
        container = HistoryContainer(
            {spec.key_str: spec for spec in specs}, sids, dt, 'minute',
        )
        # Apply each update in order and compare every spec's view of the
        # container against the precomputed expectation after each step.
        for update_count, update in enumerate(updates):
            bar_dt = self.bar_data_dt(update)
            container.update(update, bar_dt)
            for spec in specs:
                pd.util.testing.assert_frame_equal(
                    container.get_history(spec, bar_dt),
                    expected[spec.key_str][update_count],
                    check_dtype=False,
                    check_column_type=True,
                    check_index_type=True,
                    check_frame_type=True,
                )
    def test_container_nans_and_daily_roll(self):
        # Daily price history with minute updates: verifies that missing
        # bars stay NaN, missing minutes forward-fill within a day, and the
        # window rolls correctly across day boundaries.
        spec = history.HistorySpec(
            bar_count=3,
            frequency='1d',
            field='price',
            ffill=True,
            data_frequency='minute'
        )
        specs = {spec.key_str: spec}
        initial_sids = [1, ]
        initial_dt = pd.Timestamp(
            '2013-06-28 9:31AM', tz='US/Eastern').tz_convert('UTC')
        container = HistoryContainer(
            specs, initial_sids, initial_dt, 'minute'
        )
        bar_data = BarData()
        container.update(bar_data, initial_dt)
        # Since there was no backfill because of no db.
        # And no first bar of data, so all values should be nans.
        prices = container.get_history(spec, initial_dt)
        nan_values = np.isnan(prices[1])
        self.assertTrue(all(nan_values), nan_values)
        # Add data on bar two of first day.
        second_bar_dt = pd.Timestamp(
            '2013-06-28 9:32AM', tz='US/Eastern').tz_convert('UTC')
        bar_data[1] = {
            'price': 10,
            'dt': second_bar_dt
        }
        container.update(bar_data, second_bar_dt)
        prices = container.get_history(spec, second_bar_dt)
        # Prices should be
        #                             1
        # 2013-06-26 20:00:00+00:00 NaN
        # 2013-06-27 20:00:00+00:00 NaN
        # 2013-06-28 13:32:00+00:00  10
        self.assertTrue(np.isnan(prices[1].ix[0]))
        self.assertTrue(np.isnan(prices[1].ix[1]))
        self.assertEqual(prices[1].ix[2], 10)
        third_bar_dt = pd.Timestamp(
            '2013-06-28 9:33AM', tz='US/Eastern').tz_convert('UTC')
        # Drop sid 1 from this bar to exercise forward-filling.
        del bar_data[1]
        container.update(bar_data, third_bar_dt)
        prices = container.get_history(spec, third_bar_dt)
        # The one should be forward filled
        # Prices should be
        #                             1
        # 2013-06-26 20:00:00+00:00 NaN
        # 2013-06-27 20:00:00+00:00 NaN
        # 2013-06-28 13:33:00+00:00  10
        self.assertEquals(prices[1][third_bar_dt], 10)
        # Note that we did not fill in data at the close.
        # There was a bug where a nan was being introduced because of the
        # last value of 'raw' data was used, instead of a ffilled close price.
        day_two_first_bar_dt = pd.Timestamp(
            '2013-07-01 9:31AM', tz='US/Eastern').tz_convert('UTC')
        bar_data[1] = {
            'price': 20,
            'dt': day_two_first_bar_dt
        }
        container.update(bar_data, day_two_first_bar_dt)
        prices = container.get_history(spec, day_two_first_bar_dt)
        # Prices Should Be
        #                              1
        # 2013-06-27 20:00:00+00:00  nan
        # 2013-06-28 20:00:00+00:00   10
        # 2013-07-01 13:31:00+00:00   20
        self.assertTrue(np.isnan(prices[1].ix[0]))
        self.assertEqual(prices[1].ix[1], 10)
        self.assertEqual(prices[1].ix[2], 20)
        # Clear out the bar data
        del bar_data[1]
        day_three_first_bar_dt = pd.Timestamp(
            '2013-07-02 9:31AM', tz='US/Eastern').tz_convert('UTC')
        container.update(bar_data, day_three_first_bar_dt)
        prices = container.get_history(spec, day_three_first_bar_dt)
        #                             1
        # 2013-06-28 20:00:00+00:00  10
        # 2013-07-01 20:00:00+00:00  20
        # 2013-07-02 13:31:00+00:00  20
        # NOTE(review): assertTrue(x, 10) treats 10 as the failure message,
        # not as an expected value -- these three checks only assert
        # truthiness and were likely meant to be assertEqual.
        self.assertTrue(prices[1].ix[0], 10)
        self.assertTrue(prices[1].ix[1], 20)
        self.assertTrue(prices[1].ix[2], 20)
        day_four_first_bar_dt = pd.Timestamp(
            '2013-07-03 9:31AM', tz='US/Eastern').tz_convert('UTC')
        container.update(bar_data, day_four_first_bar_dt)
        prices = container.get_history(spec, day_four_first_bar_dt)
        #                             1
        # 2013-07-01 20:00:00+00:00  20
        # 2013-07-02 20:00:00+00:00  20
        # 2013-07-03 13:31:00+00:00  20
        self.assertEqual(prices[1].ix[0], 20)
        self.assertEqual(prices[1].ix[1], 20)
        self.assertEqual(prices[1].ix[2], 20)
class TestHistoryAlgo(TestCase):
def setUp(self):
np.random.seed(123)
def test_history_daily(self):
bar_count = 3
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count={bar_count}, frequency='1d', field='price')
context.history_trace = []
def handle_data(context, data):
prices = history(bar_count={bar_count}, frequency='1d', field='price')
context.history_trace.append(prices)
""".format(bar_count=bar_count).strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-30', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end, data_frequency='daily')
_, df = factory.create_test_df_source(sim_params)
df = df.astype(np.float64)
source = DataFrameSource(df, sids=[0])
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='daily',
sim_params=sim_params
)
output = test_algo.run(source)
self.assertIsNotNone(output)
history_trace = test_algo.history_trace
for i, received in enumerate(history_trace[bar_count - 1:]):
expected = df.iloc[i:i + bar_count]
assert_frame_equal(expected, received)
def test_history_daily_data_1m_window(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=1, frequency='1m', field='price')
def handle_data(context, data):
prices = history(bar_count=3, frequency='1d', field='price')
""".strip()
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-30', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
with self.assertRaises(IncompatibleHistoryFrequency):
TradingAlgorithm(
script=algo_text,
data_frequency='daily',
sim_params=sim_params
)
def test_basic_history(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=2, frequency='1d', field='price')
def handle_data(context, data):
prices = history(bar_count=2, frequency='1d', field='price')
prices['prices_times_two'] = prices[1] * 2
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
oldest_dt = pd.Timestamp(
'2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
# Random, depends on seed
self.assertEquals(139.36946942498648, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_basic_history_one_day(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=1, frequency='1d', field='price')
def handle_data(context, data):
prices = history(bar_count=1, frequency='1d', field='price')
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
# oldest and newest should be the same if there is only 1 bar
oldest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
# Random, depends on seed
self.assertEquals(180.15661995395106, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_basic_history_positional_args(self):
"""
Ensure that positional args work.
"""
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
oldest_dt = pd.Timestamp(
'2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
self.assertEquals(139.36946942498648, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_history_with_volume(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'volume')
def handle_data(context, data):
volume = history(3, '1d', 'volume')
record(current_volume=volume[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_volume'],
212218404.0)
def test_history_with_high(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'high')
def handle_data(context, data):
highs = history(3, '1d', 'high')
record(current_high=highs[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_high'],
139.5370641791925)
def test_history_with_low(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'low')
def handle_data(context, data):
lows = history(3, '1d', 'low')
record(current_low=lows[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_low'],
99.891436939669944)
def test_history_with_open(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'open_price')
def handle_data(context, data):
opens = history(3, '1d', 'open_price')
record(current_open=opens[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_open'],
99.991436939669939)
def test_history_passed_to_func(self):
"""
Had an issue where MagicMock was causing errors during validation
with rolling mean.
"""
algo_text = """
from zipline.api import history, add_history
import pandas as pd
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
pd.rolling_mean(prices, 2)
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
# At this point, just ensure that there is no crash.
self.assertIsNotNone(output)
def test_history_passed_to_talib(self):
"""
Had an issue where MagicMock was causing errors during validation
with talib.
We don't officially support a talib integration, yet.
But using talib directly should work.
"""
algo_text = """
import talib
import numpy as np
from zipline.api import history, add_history, record
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
record(ma=ma_result[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
# Eddie: this was set to 04-10 but I don't see how that makes
# sense as it does not generate enough data to get at -2 index
# below.
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='daily'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
# At this point, just ensure that there is no crash.
self.assertIsNotNone(output)
recorded_ma = output.ix[-2, 'ma']
self.assertFalse(pd.isnull(recorded_ma))
# Depends on seed
np.testing.assert_almost_equal(recorded_ma,
159.76304468946876)
@parameterized.expand([
('daily',),
('minute',),
])
def test_history_container_constructed_at_runtime(self, data_freq):
algo_text = dedent(
"""\
from zipline.api import history
def handle_data(context, data):
context.prices = history(2, '1d', 'price')
"""
)
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency=data_freq,
emission_rate=data_freq
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency=data_freq,
sim_params=sim_params
)
source = RandomWalkSource(start=start, end=end, freq=data_freq)
self.assertIsNone(test_algo.history_container)
test_algo.run(source)
self.assertIsNotNone(
test_algo.history_container,
msg='HistoryContainer was not constructed at runtime',
)
container = test_algo.history_container
self.assertEqual(
len(container.digest_panels),
1,
msg='The HistoryContainer created too many digest panels',
)
freq, digest = list(container.digest_panels.items())[0]
self.assertEqual(
freq.unit_str,
'd',
)
self.assertEqual(
digest.window_length,
1,
msg='The digest panel is not large enough to service the given'
' HistorySpec',
)
@parameterized.expand([
(1,),
(2,),
])
def test_history_grow_length_inter_bar(self, incr):
"""
Tests growing the length of a digest panel with different date_buf
deltas once per bar.
"""
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
context.bar_count = 1
def handle_data(context, data):
prices = history(context.bar_count, '1d', 'price')
context.test_case.assertEqual(len(prices), context.bar_count)
context.bar_count += {incr}
"""
).format(incr=incr)
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='daily'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
test_algo.test_case = self
source = RandomWalkSource(start=start, end=end)
self.assertIsNone(test_algo.history_container)
test_algo.run(source)
@parameterized.expand([
(1,),
(2,),
])
def test_history_grow_length_intra_bar(self, incr):
"""
Tests growing the length of a digest panel with different date_buf
deltas in a single bar.
"""
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
context.bar_count = 1
def handle_data(context, data):
prices = history(context.bar_count, '1d', 'price')
context.test_case.assertEqual(len(prices), context.bar_count)
context.bar_count += {incr}
prices = history(context.bar_count, '1d', 'price')
context.test_case.assertEqual(len(prices), context.bar_count)
"""
).format(incr=incr)
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='daily'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
test_algo.test_case = self
source = RandomWalkSource(start=start, end=end)
self.assertIsNone(test_algo.history_container)
test_algo.run(source)
class TestHistoryContainerResize(TestCase):
@parameterized.expand(
(freq, field, data_frequency, construct_digest)
for freq in ('1m', '1d')
for field in HistoryContainer.VALID_FIELDS
for data_frequency in ('minute', 'daily')
for construct_digest in (True, False)
if not (freq == '1m' and data_frequency == 'daily')
)
def test_history_grow_length(self,
freq,
field,
data_frequency,
construct_digest):
bar_count = 2 if construct_digest else 1
spec = history.HistorySpec(
bar_count=bar_count,
frequency=freq,
field=field,
ffill=True,
data_frequency=data_frequency,
)
specs = {spec.key_str: spec}
initial_sids = [1]
initial_dt = pd.Timestamp(
'2013-06-28 13:31AM'
if data_frequency == 'minute'
else '2013-06-28 12:00AM',
tz='UTC',
)
container = HistoryContainer(
specs, initial_sids, initial_dt, data_frequency,
)
if construct_digest:
self.assertEqual(
container.digest_panels[spec.frequency].window_length, 1,
)
bar_data = BarData()
container.update(bar_data, initial_dt)
to_add = (
history.HistorySpec(
bar_count=bar_count + 1,
frequency=freq,
field=field,
ffill=True,
data_frequency=data_frequency,
),
history.HistorySpec(
bar_count=bar_count + 2,
frequency=freq,
field=field,
ffill=True,
data_frequency=data_frequency,
),
)
for spec in to_add:
container.ensure_spec(spec, initial_dt, bar_data)
self.assertEqual(
container.digest_panels[spec.frequency].window_length,
spec.bar_count - 1,
)
self.assert_history(container, spec, initial_dt)
@parameterized.expand(
(bar_count, freq, pair, data_frequency)
for bar_count in (1, 2)
for freq in ('1m', '1d')
for pair in product(HistoryContainer.VALID_FIELDS, repeat=2)
for data_frequency in ('minute', 'daily')
if not (freq == '1m' and data_frequency == 'daily')
)
def test_history_add_field(self, bar_count, freq, pair, data_frequency):
first, second = pair
spec = history.HistorySpec(
bar_count=bar_count,
frequency=freq,
field=first,
ffill=True,
data_frequency=data_frequency,
)
specs = {spec.key_str: spec}
initial_sids = [1]
initial_dt = pd.Timestamp(
'2013-06-28 13:31AM'
if data_frequency == 'minute'
else '2013-06-28 12:00AM',
tz='UTC',
)
container = HistoryContainer(
specs, initial_sids, initial_dt, data_frequency,
)
if bar_count > 1:
self.assertEqual(
container.digest_panels[spec.frequency].window_length, 1,
)
bar_data = BarData()
container.update(bar_data, initial_dt)
new_spec = history.HistorySpec(
bar_count,
frequency=freq,
field=second,
ffill=True,
data_frequency=data_frequency,
)
container.ensure_spec(new_spec, initial_dt, bar_data)
if bar_count > 1:
digest_panel = container.digest_panels[new_spec.frequency]
self.assertEqual(digest_panel.window_length, bar_count - 1)
self.assertIn(second, digest_panel.items)
else:
self.assertNotIn(new_spec.frequency, container.digest_panels)
self.assert_history(container, new_spec, initial_dt)
@parameterized.expand(
(bar_count, pair, field, data_frequency)
for bar_count in (1, 2)
for pair in product(('1m', '1d'), repeat=2)
for field in HistoryContainer.VALID_FIELDS
for data_frequency in ('minute', 'daily')
if not ('1m' in pair and data_frequency == 'daily')
)
def test_history_add_freq(self, bar_count, pair, field, data_frequency):
first, second = pair
spec = history.HistorySpec(
bar_count=bar_count,
frequency=first,
field=field,
ffill=True,
data_frequency=data_frequency,
)
specs = {spec.key_str: spec}
initial_sids = [1]
initial_dt = pd.Timestamp(
'2013-06-28 13:31AM'
if data_frequency == 'minute'
else '2013-06-28 12:00AM',
tz='UTC',
)
container = HistoryContainer(
specs, initial_sids, initial_dt, data_frequency,
)
if bar_count > 1:
self.assertEqual(
container.digest_panels[spec.frequency].window_length, 1,
)
bar_data = BarData()
container.update(bar_data, initial_dt)
new_spec = history.HistorySpec(
bar_count,
frequency=second,
field=field,
ffill=True,
data_frequency=data_frequency,
)
container.ensure_spec(new_spec, initial_dt, bar_data)
if bar_count > 1:
digest_panel = container.digest_panels[new_spec.frequency]
self.assertEqual(digest_panel.window_length, bar_count - 1)
else:
self.assertNotIn(new_spec.frequency, container.digest_panels)
self.assert_history(container, new_spec, initial_dt)
@with_environment()
def assert_history(self, container, spec, dt, env=None):
hst = container.get_history(spec, dt)
self.assertEqual(len(hst), spec.bar_count)
back = spec.frequency.prev_bar
for n in reversed(hst.index):
self.assertEqual(dt, n)
dt = back(dt)
| apache-2.0 |
bitemyapp/ggplot | ggplot/stats/stat_bar.py | 12 | 1322 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
from .stat import stat
_MSG_LABELS = """There are more than 30 unique values mapped to x.
If you want a histogram instead, use 'geom_histogram()'.
"""
class stat_bar(stat):
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'bar', 'position': 'stack',
'width': 0.9, 'drop': False,
'origin': None, 'labels': None}
def _calculate(self, data):
# reorder x according to the labels
new_data = pd.DataFrame()
new_data["x"] = self.labels
for column in set(data.columns) - set('x'):
column_dict = dict(zip(data["x"],data[column]))
default = 0 if column == "y" else data[column].values[0]
new_data[column] = [column_dict.get(val, default)
for val in self.labels]
return new_data
def _calculate_global(self, data):
labels = self.params['labels']
if labels == None:
labels = sorted(set(data['x'].values))
# For a lot of labels, put out a warning
if len(labels) > 30:
self._print_warning(_MSG_LABELS)
# Check if there is a mapping
self.labels = labels
| bsd-2-clause |
a4a881d4/6FSK | modu.py | 1 | 1695 | import const
import freqTab
import math
import numpy as np
import utils
import matplotlib.pyplot as plt
def dpmap(D,P):
r = []
p = 1
for k in range(len(D)):
r.append((D[k]*const.DataA+P[k]*const.PilotA*1j)*p)
p = p * 1j
return r
def toFreq(D,P):
m = dpmap(D,P)
p = [const.constellationIndex[c] for c in m]
r = [const.c2f[p[k]][p[k+1]] for k in range(len(p)-1)]
return r,p
def modu(D,P,E,b,W):
if len(D)>len(P):
for k in range(len(P),len(D)):
P.append(1)
if len(P)>len(D):
for k in range(len(D),len(P)):
D.append(1)
if len(D)%2 == 1:
P.append(1)
D.append(1)
hD = D[:E]
tD = D[-E:]
D = tD + D + hD
hP = P[:E]
tP = P[-E:]
P = tP + P + hP
#for k in range(E):
# D.append(D[k])
# P.append(P[k])
S,C,Mask = freqTab.freqTab(b,W)
r,p = toFreq(D,P)
d = []
for k in range(len(r)):
p0 = C[p[k]]
for p1 in S[r[k]]:
d.append((p0+p1)&Mask)
return d
def toComplex(s,W):
M = 2.*math.pi/float(1<<W)
r = [1j*float(a)*M for a in s]
return np.exp(np.array(r))
def showSpectrum(c,l):
pc = utils.spectrum(c)
rs = float(1024)/float(len(pc))
x = np.arange(100)*rs
plt.plot(x,20.*np.log10(pc[:100]),l)
def showTiming(c,l):
import matplotlib.pyplot as plt
x = c[l::1024]
plt.plot(x.real,x.imag,'.')
plt.show()
def main():
D = utils.rsrc(1024*64)
P = utils.rsrc(1024*64)
d = modu(D,P,4,math.pi/8,18)
c = toComplex(d,18)
showSpectrum(c,'r')
d = modu(D,P,4,math.pi/6,18)
c = toComplex(d,18)
showSpectrum(c,'b')
d = modu(D,P,4,math.pi/5,18)
c = toComplex(d,18)
showSpectrum(c,'y')
d = modu(D,P,4,math.pi/4,18)
c = toComplex(d,18)
showSpectrum(c,'g')
#showTiming(c,1024/16)
plt.show()
return c
if __name__ == '__main__':
c = main()
| gpl-3.0 |
sahat/bokeh | bokeh/tests/test_protocol.py | 4 | 3772 | import unittest
from unittest import skipIf
import numpy as np
from .test_utils import skipIfPyPy
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
class TestBokehJSONEncoder(unittest.TestCase):
def setUp(self):
from bokeh.protocol import BokehJSONEncoder
self.encoder = BokehJSONEncoder()
def test_fail(self):
self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_panda_series(self):
s = pd.Series([1, 3, 5, 6, 8])
self.assertEqual(self.encoder.default(s), [1, 3, 5, 6, 8])
def test_numpyarray(self):
a = np.arange(5)
self.assertEqual(self.encoder.default(a), [0, 1, 2, 3, 4])
def test_numpyint(self):
npint = np.asscalar(np.int64(1))
self.assertEqual(self.encoder.default(npint), 1)
self.assertIsInstance(self.encoder.default(npint), int)
def test_numpyfloat(self):
npfloat = np.float64(1.33)
self.assertEqual(self.encoder.default(npfloat), 1.33)
self.assertIsInstance(self.encoder.default(npfloat), float)
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_pd_timestamp(self):
ts = pd.tslib.Timestamp('April 28, 1948')
self.assertEqual(self.encoder.default(ts), -684115200000)
class TestSerializeJson(unittest.TestCase):
def setUp(self):
from bokeh.protocol import serialize_json, deserialize_json
self.serialize = serialize_json
self.deserialize = deserialize_json
def test_with_basic(self):
self.assertEqual(self.serialize({'test': [1, 2, 3]}), '{"test": [1, 2, 3]}')
def test_with_np_array(self):
a = np.arange(5)
self.assertEqual(self.serialize(a), '[0, 1, 2, 3, 4]')
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_with_pd_series(self):
s = pd.Series([0, 1, 2, 3, 4])
self.assertEqual(self.serialize(s), '[0, 1, 2, 3, 4]')
def test_nans_and_infs(self):
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_nans_and_infs_pandas(self):
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_datetime_types(self):
"""should convert to millis
"""
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {u'vals': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000],
u'idx': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000]
}
assert deserialized == baseline
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
auDeep/auDeep | audeep/backend/parsers/compare19_sd.py | 1 | 4730 | # Copyright (C) 2017-2019 Michael Freitag, Shahin Amiriparian, Sergey Pugachevskiy, Nicholas Cummins, Björn Schuller
#
# This file is part of auDeep.
#
# auDeep is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# auDeep is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with auDeep. If not, see <http://www.gnu.org/licenses/>.
"""Parser for the ComParE 2019 Styrian Dialects (SD) dataset"""
import abc
from pathlib import Path
from typing import Optional, Mapping, Sequence
import pandas as pd
from audeep.backend.data.data_set import Partition
from audeep.backend.log import LoggingMixin
from audeep.backend.parsers.base import Parser, _InstanceMetadata
_COMPARE19_SD_LABEL_MAP = {
"EasternS": 0,
"NorthernS": 1,
"UrbanS": 2,
}
class Compare19SDParser(LoggingMixin, Parser):
def __init__(self, basedir: Path):
super().__init__(basedir)
self._metadata_cache = None
self._audio_dir = basedir / "wav"
@abc.abstractmethod
def label_key(self) -> str:
pass
def _metadata(self) -> pd.DataFrame:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Styrian Dialects dataset at {}".format(self._basedir))
if self._metadata_cache is None:
metadata_file = self._basedir / "lab" / "labels.csv"
metadata_file_confidential = self._basedir / "lab" / "labels_confidential.csv"
if (metadata_file_confidential.exists()):
self.log.warn("using confidential metadata file")
self._metadata_cache = pd.read_csv(metadata_file_confidential, sep=",")
else:
self._metadata_cache = pd.read_csv(metadata_file, sep=",")
return self._metadata_cache
def can_parse(self) -> bool:
metadata_file = self._basedir / "lab" / "labels.csv"
metadata_file_confidential = self._basedir / "lab" / "labels_confidential.csv"
if not self._audio_dir.exists():
self.log.debug("cannot parse: audio directory at %s missing", self._audio_dir)
return False
if not metadata_file_confidential.exists() and not metadata_file.exists():
self.log.debug("cannot parse: metadata file at %s missing", metadata_file)
return False
return True
@property
def label_map(self) -> Optional[Mapping[str, int]]:
if not self.can_parse():
raise IOError("inable to parse the ComParE 2019 Styrian Dialects dataset at {}".format(self._basedir))
return _COMPARE19_SD_LABEL_MAP
@property
def num_instances(self) -> int:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Styrian Dialects dataset at {}".format(self._basedir))
# test instances are not contained in label tsv file
return len(list(self._audio_dir.glob("*.*")))
@property
def num_folds(self) -> int:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Styrian Dialects dataset at {}".format(self._basedir))
return 0
def parse(self) -> Sequence[_InstanceMetadata]:
if not self.can_parse():
raise IOError("unable to parse the ComParE 2019 Styrian Dialects dataset at {}".format(self._basedir))
meta_list = []
metadata = self._metadata()
for file in sorted(self._audio_dir.glob("*.*")):
label_nominal = metadata.loc[metadata["file_name"] == file.name]["label"]
# test labels are '?'
if all(l != '?' for l in label_nominal):
label_nominal = label_nominal.iloc[0]
else:
label_nominal = None
instance_metadata = _InstanceMetadata(
path=file,
filename=file.name,
label_nominal=label_nominal,
label_numeric=None, # inferred from label map
cv_folds=[],
partition=Partition.TRAIN if file.name.startswith("train") else Partition.DEVEL if file.name.startswith(
"devel") else Partition.TEST
)
self.log.debug("parsed instance %s: label = %s", file.name, label_nominal)
meta_list.append(instance_metadata)
return meta_list
| gpl-3.0 |
jakobworldpeace/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 86 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
count 5 times as much as the weights of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
IndraVikas/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This is a general function, given points on a curve. For computing the
    area under the ROC-curve, see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties, as
        for an ROC curve. If the curve is non-ascending, the result will be
        wrong.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75

    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)
    sign = 1
    if reorder:
        # Sort by x, breaking ties with y, so the curve is ascending.
        order = np.lexsort((y, x))
        x, y = x[order], y[order]
    else:
        dx = np.diff(x)
        if np.any(dx < 0):
            if not np.all(dx <= 0):
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
            # Monotonically decreasing x: integrate right-to-left.
            sign = -1
    return sign * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores.

    This score corresponds to the area under the precision-recall curve.
    Note: this implementation is restricted to the binary classification
    task or multilabel classification task.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise this
        determines the type of averaging performed on the data: ``'micro'``
        treats every element of the label indicator matrix as a label,
        ``'macro'`` is the unweighted mean over labels, ``'weighted'``
        weights labels by support, and ``'samples'`` averages per instance.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <http://en.wikipedia.org/wiki/Average_precision>`_

    See also
    --------
    roc_auc_score : Area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...
    """
    def _ap_for_binary(y_true, y_score, sample_weight=None):
        # AP for one binary problem: area under its precision-recall curve.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)

    return _average_binary_score(_ap_for_binary, y_true, y_score, average,
                                 sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores.

    Note: this implementation is restricted to the binary classification
    task or multilabel classification task in label indicator format.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise this
        determines the type of averaging performed on the data: ``'micro'``
        treats every element of the label indicator matrix as a label,
        ``'macro'`` is the unweighted mean over labels, ``'weighted'``
        weights labels by support, and ``'samples'`` averages per instance.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    def _roc_auc_for_binary(y_true, y_score, sample_weight=None):
        # ROC AUC is undefined unless both classes are present.
        if len(np.unique(y_true)) != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)

    return _average_binary_score(_roc_auc_for_binary, y_true, y_score,
                                 average, sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    # ensure binary classification if pos_label is not specified
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # sort scores and corresponding truth values; mergesort is stable, so
    # samples with tied scores keep their original relative order
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # scalar weight broadcasts in the products/cumsums below
        weight = 1.
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # unweighted: samples seen so far (index + 1) minus true positives
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds.

    Note: this implementation is restricted to the binary classification task.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number
    of true positives and ``fp`` the number of false positives. The precision
    is intuitively the ability of the classifier not to label as positive a
    sample that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive
    samples.

    The last precision and recall values are 1. and 0. respectively and do
    not have a corresponding threshold. This ensures that the graph starts on
    the x axis.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)
    # fps/tps are cumulative counts at decreasing thresholds, so these are
    # the precision/recall of predicting positive at score >= threshold
    precision = tps / (tps + fps)
    recall = tps / tps[-1]
    # stop when full recall attained
    # and reverse the outputs so recall is decreasing
    last_ind = tps.searchsorted(tps[-1])
    sl = slice(last_ind, None, -1)
    return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC).

    Note: this implementation is restricted to the binary classification task.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}. If labels are not
        binary, pos_label should be explicitly given.
    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.
    pos_label : int
        Label considered as positive and others are considered negative.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].
    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].
    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
    if tps.size == 0 or fps[0] != 0:
        # Add an extra threshold position if necessary
        # so the curve always starts at (fpr, tpr) == (0, 0)
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]
    if fps[-1] <= 0:
        # fps[-1] is the total number of negatives; without any, fpr is
        # undefined, so warn and return NaNs rather than divide by zero
        warnings.warn("No negative samples in y_true, "
                      "false positive value should be meaningless",
                      UndefinedMetricWarning)
        fpr = np.repeat(np.nan, fps.shape)
    else:
        fpr = fps / fps[-1]
    if tps[-1] <= 0:
        # likewise, tps[-1] is the total number of positives
        warnings.warn("No positive samples in y_true, "
                      "true positive value should be meaningless",
                      UndefinedMetricWarning)
        tpr = np.repeat(np.nan, tps.shape)
    else:
        tpr = tps / tps[-1]
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision.

    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.

    This metric is used in multilabel ranking problem, where the goal
    is to give better rank to the labels associated to each sample.

    The obtained score is always strictly greater than 0 and
    the best value is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Handle badly formated array and the degenerate case with one label
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))
    y_true = csr_matrix(y_true)
    # negate so that rankdata's ascending ranks order the scores descending
    y_score = -y_score
    n_samples, n_labels = y_true.shape
    out = 0.
    # walk the CSR rows: indices[start:stop] are sample i's relevant labels
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]
        if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            out += 1.
            continue
        scores_i = y_score[i]
        # rank: rank of each relevant label among all labels;
        # L: rank of each relevant label among the relevant labels only
        rank = rankdata(scores_i, 'max')[relevant]
        L = rankdata(scores_i[relevant], 'max')
        out += (L / rank).mean()
    return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure.

    Compute how far we need to go through the ranked scores to cover all
    true labels. The best value is equal to the average number
    of labels in ``y_true`` per sample.

    Ties in ``y_scores`` are broken by giving maximal rank that would have
    been assigned to all tied values.

    Read more in the :ref:`User Guide <coverage_error>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # mask out irrelevant labels, then find each sample's lowest-scored
    # relevant label
    y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
    # coverage = how many labels score at least as high as that minimum
    coverage = (y_score >= y_min_relevant).sum(axis=1)
    # rows with no relevant label are fully masked; count their coverage as 0
    coverage = coverage.filled(0)
    return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure.

    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.

    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.

    Read more in the :ref:`User Guide <label_ranking_loss>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    n_samples, n_labels = y_true.shape
    y_true = csr_matrix(y_true)
    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        # per distinct score value: how many relevant / total labels have it
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)
    n_positives = count_nonzero(y_true, axis=1)
    with np.errstate(divide="ignore", invalid="ignore"):
        # normalize by (#relevant * #irrelevant) pairs per sample
        loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
MatthieuBizien/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 6 | 11602 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
# Fixed random inputs shared by all tests: X for auto-covariance checks,
# Y (different sample count) for cross-covariance checks.
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
# Kept as a named reference: several tests must special-case the
# white-noise kernel, whose k(X) and k(X, X) differ on the diagonal.
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
# Representative sample of kernels: isotropic/anisotropic, fixed bounds,
# scaled sums/products, and an exponentiated kernel.
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
           ConstantKernel(constant_value=10.0),
           2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * RBF(length_scale=0.5), kernel_white,
           2.0 * RBF(length_scale=[0.5, 2.0]),
           2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * Matern(length_scale=0.5, nu=0.5),
           2.0 * Matern(length_scale=1.5, nu=1.5),
           2.0 * Matern(length_scale=2.5, nu=2.5),
           2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
           3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
           4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
           RationalQuadratic(length_scale=0.5, alpha=1.5),
           ExpSineSquared(length_scale=0.5, periodicity=1.5),
           DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
# Also wrap the sklearn pairwise kernels. The chi2 variants are skipped --
# presumably because they require non-negative input, which the Gaussian
# X does not satisfy; verify.
for metric in PAIRWISE_KERNEL_FUNCTIONS:
    if metric in ["additive_chi2", "chi2"]:
        continue
    kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
    """Analytic kernel gradients must match finite-difference estimates."""
    for kernel in kernels:
        K, K_grad = kernel(X, eval_gradient=True)
        n_samples = X.shape[0]
        assert_equal(K_grad.shape[0], n_samples)
        assert_equal(K_grad.shape[1], n_samples)
        assert_equal(K_grad.shape[2], kernel.theta.shape[0])

        def kernel_value_at(theta):
            # Kernel matrix evaluated for an alternative theta.
            return kernel.clone_with_theta(theta)(X, eval_gradient=False)

        K_grad_numeric = _approx_fprime(kernel.theta, kernel_value_at, 1e-10)
        assert_almost_equal(K_grad, K_grad_numeric, 4)
def test_kernel_theta():
    """Check that the parameter vector theta of a kernel is set correctly.

    For each basic (non-composite) kernel this verifies that:
    * theta exposes exactly the hyperparameters whose constructor has a
      corresponding ``*_bounds`` argument,
    * theta holds the log of each hyperparameter value,
    * "fixed" hyperparameters are excluded from theta and the gradient,
    * assigning theta (or the attribute) round-trips correctly.
    """
    for kernel in kernels:
        if isinstance(kernel, (KernelOperator, Exponentiation)):
            # skip non-basic (composite) kernels
            continue
        theta = kernel.theta
        _, K_gradient = kernel(X, eval_gradient=True)
        # Determine kernel parameters that contribute to theta
        init_sign = signature(kernel.__class__.__init__).parameters.values()
        args = [p.name for p in init_sign if p.name != 'self']
        # BUGFIX: the original used s.rstrip("_bounds"), but rstrip strips
        # a *character set*, not a suffix -- it would also eat trailing
        # 'b'/'o'/'u'/'n'/'d'/'s'/'_' characters of the parameter name
        # itself. Slice the suffix off explicitly instead.
        theta_vars = [arg[:-len("_bounds")] for arg in args
                      if arg.endswith("_bounds")]
        assert_equal(
            set(hyperparameter.name
                for hyperparameter in kernel.hyperparameters),
            set(theta_vars))
        # Check that values returned in theta are consistent with
        # hyperparameter values (being their logarithms)
        for i, hyperparameter in enumerate(kernel.hyperparameters):
            assert_equal(theta[i],
                         np.log(getattr(kernel, hyperparameter.name)))
        # Fixed kernel parameters must be excluded from theta and gradient.
        for i, hyperparameter in enumerate(kernel.hyperparameters):
            # create copy with certain hyperparameter fixed
            params = kernel.get_params()
            params[hyperparameter.name + "_bounds"] = "fixed"
            kernel_class = kernel.__class__
            new_kernel = kernel_class(**params)
            # Check that theta and K_gradient are identical with the fixed
            # dimension left out
            _, K_gradient_new = new_kernel(X, eval_gradient=True)
            assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
            assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
            if i > 0:
                assert_equal(theta[:i], new_kernel.theta[:i])
                assert_array_equal(K_gradient[..., :i],
                                   K_gradient_new[..., :i])
            if i + 1 < len(kernel.hyperparameters):
                assert_equal(theta[i+1:], new_kernel.theta[i:])
                assert_array_equal(K_gradient[..., i+1:],
                                   K_gradient_new[..., i:])
        # Check that values of theta are modified correctly
        for i, hyperparameter in enumerate(kernel.hyperparameters):
            theta[i] = np.log(42)
            kernel.theta = theta
            assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
            setattr(kernel, hyperparameter.name, 43)
            assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
    """k(X) and k(X, X) must agree for every non-noise kernel."""
    for kernel in kernels:
        if kernel == kernel_white:
            # WhiteKernel adds noise on the diagonal of k(X) only, so the
            # identity does not hold there.
            continue
        assert_almost_equal(kernel(X), kernel(X, X), 5)
def test_kernel_diag():
    """kernel.diag(X) must equal the diagonal of the full kernel matrix."""
    for kernel in kernels:
        full_diag = np.diag(kernel(X))
        assert_almost_equal(full_diag, kernel.diag(X), 5)
def test_kernel_operator_commutative():
    """Kernel addition and multiplication must commute."""
    # Addition with a scalar from either side yields the same matrix.
    left_sum = (RBF(2.0) + 1.0)(X)
    right_sum = (1.0 + RBF(2.0))(X)
    assert_almost_equal(left_sum, right_sum)
    # Multiplication with a scalar from either side as well.
    left_prod = (3.0 * RBF(2.0))(X)
    right_prod = (RBF(2.0) * 3.0)(X)
    assert_almost_equal(left_prod, right_prod)
def test_kernel_anisotropic():
    """ Anisotropic kernel should be consistent with isotropic kernels."""
    kernel = 3.0 * RBF([0.5, 2.0])
    K = kernel(X)
    # Rescaling a feature axis by the ratio of the two length scales turns
    # the anisotropic kernel into an isotropic one with the other scale.
    X1 = np.array(X)
    X1[:, 0] *= 4
    K1 = 3.0 * RBF(2.0)(X1)
    assert_almost_equal(K, K1)
    X2 = np.array(X)
    X2[:, 1] /= 4
    K2 = 3.0 * RBF(0.5)(X2)
    assert_almost_equal(K, K2)
    # Check getting and setting via theta
    # theta is [log(3.0 * 2), log(0.5 * 2), log(2.0 * 2)] after the shift.
    kernel.theta = kernel.theta + np.log(2)
    assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
    assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
    """Stationary kernels depend only on x - y, so k(X, X + c) is constant."""
    for kernel in kernels:
        if not kernel.is_stationary():
            continue
        # Every diagonal entry equals the first one for a constant shift.
        K = kernel(X, X + 1)
        assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
    """ Test that sklearn's clone works correctly on kernels. """
    for kernel in kernels:
        kernel_cloned = clone(kernel)
        # The clone must compare equal but be a distinct object.
        assert_equal(kernel, kernel_cloned)
        assert_not_equal(id(kernel), id(kernel_cloned))
        for attr in kernel.__dict__.keys():
            attr_value = getattr(kernel, attr)
            attr_value_cloned = getattr(kernel_cloned, attr)
            if attr.startswith("hyperparameter_"):
                # Hyperparameter descriptors: compare field by field.
                assert_equal(attr_value.name, attr_value_cloned.name)
                assert_equal(attr_value.value_type,
                             attr_value_cloned.value_type)
                assert_array_equal(attr_value.bounds,
                                   attr_value_cloned.bounds)
                assert_equal(attr_value.n_elements,
                             attr_value_cloned.n_elements)
            elif np.iterable(attr_value):
                # Compare element-wise, descending one nesting level for
                # iterable elements (e.g. anisotropic length scales).
                for i in range(len(attr_value)):
                    if np.iterable(attr_value[i]):
                        assert_array_equal(attr_value[i],
                                           attr_value_cloned[i])
                    else:
                        assert_equal(attr_value[i], attr_value_cloned[i])
            else:
                assert_equal(attr_value, attr_value_cloned)
                if not isinstance(attr_value, Hashable):
                    # modifiable attributes must not be identical
                    assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
    """ Test consistency of Matern kernel for special values of nu. """
    K = Matern(nu=1.5, length_scale=1.0)(X)
    # the diagonal elements of a matern kernel are 1
    assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
    # matern kernel for coef0==0.5 is equal to absolute exponential kernel
    K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
    K = Matern(nu=0.5, length_scale=1.0)(X)
    assert_array_almost_equal(K, K_absexp)
    # test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
    # result in nearly identical results as the general case for coef0 in
    # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
    # (the special values take closed-form branches; nu + tiny exercises
    # the generic code path)
    tiny = 1e-10
    for nu in [0.5, 1.5, 2.5]:
        K1 = Matern(nu=nu, length_scale=1.0)(X)
        K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
        assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
    """Check that GP kernels can also be used as pairwise kernels."""
    for kernel in kernels:
        # Test auto-kernel
        if kernel != kernel_white:
            # For WhiteKernel: k(X) != k(X,X). This is assumed by
            # pairwise_kernels
            K1 = kernel(X)
            K2 = pairwise_kernels(X, metric=kernel)
            assert_array_almost_equal(K1, K2)
        # Test cross-kernel (X has 5 samples, Y has 6)
        K1 = kernel(X, Y)
        K2 = pairwise_kernels(X, Y, metric=kernel)
        assert_array_almost_equal(K1, K2)
def test_set_get_params():
    """Check that set_params()/get_params() is consistent with kernel.theta.

    theta stores the log of every non-fixed hyperparameter; get_params()
    must expose the linear values, and set_params() must update theta
    accordingly.
    """
    for kernel in kernels:
        # Test get_params()
        index = 0
        params = kernel.get_params()
        for hyperparameter in kernel.hyperparameters:
            # BUGFIX: the original compared with `bounds is "fixed"`, an
            # identity check that only happened to pass via CPython string
            # interning. Use an explicit type check plus equality, which is
            # also safe when bounds is a numeric array.
            bounds = hyperparameter.bounds
            if isinstance(bounds, str) and bounds == "fixed":
                continue
            size = hyperparameter.n_elements
            if size > 1:  # anisotropic kernels
                assert_almost_equal(np.exp(kernel.theta[index:index+size]),
                                    params[hyperparameter.name])
                index += size
            else:
                assert_almost_equal(np.exp(kernel.theta[index]),
                                    params[hyperparameter.name])
                index += 1
        # Test set_params()
        index = 0
        value = 10  # arbitrary value
        for hyperparameter in kernel.hyperparameters:
            bounds = hyperparameter.bounds
            if isinstance(bounds, str) and bounds == "fixed":
                continue
            size = hyperparameter.n_elements
            if size > 1:  # anisotropic kernels
                kernel.set_params(**{hyperparameter.name: [value]*size})
                assert_almost_equal(np.exp(kernel.theta[index:index+size]),
                                    [value]*size)
                index += size
            else:
                kernel.set_params(**{hyperparameter.name: value})
                assert_almost_equal(np.exp(kernel.theta[index]), value)
                index += 1
| bsd-3-clause |
montoyjh/pymatgen | pymatgen/phonon/tests/test_plotter.py | 3 | 3658 |
import unittest
import os
import json
import scipy
from io import open
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononDosPlotter, PhononBSPlotter, ThermoPlotter
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
# Shared fixture directory (<repo root>/test_files), resolved relative to
# this test module's location.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')
class PhononDosPlotterTest(unittest.TestCase):
    """Tests for PhononDosPlotter using the NaCl phonon DOS fixture."""

    def setUp(self):
        # Shared DOS fixture plus one stacked and one unstacked plotter.
        with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
            self.dos = CompletePhononDos.from_dict(json.load(f))
        self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
        self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)

    def test_add_dos_dict(self):
        # Plotter starts empty; adding the element-projected DOS of NaCl
        # should register exactly two entries (Na and Cl).
        d = self.plotter.get_dos_dict()
        self.assertEqual(len(d), 0)
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        d = self.plotter.get_dos_dict()
        self.assertEqual(len(d), 2)

    def test_get_dos_dict(self):
        # The returned dict is keyed by element symbol.
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        d = self.plotter.get_dos_dict()
        for el in ["Na", "Cl"]:
            self.assertIn(el, d)

    def test_plot(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        # Smoke test: plotting must succeed in stacked and unstacked modes.
        self.plotter.add_dos("Total", self.dos)
        self.plotter.get_plot(units="mev")
        self.plotter_nostack.add_dos("Total", self.dos)
        self.plotter_nostack.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
    """Tests for PhononBSPlotter using the NaCl phonon band structure."""

    def setUp(self):
        with open(os.path.join(test_dir, "NaCl_phonon_bandstructure.json"), "r") as f:
            d = json.loads(f.read())
        self.bs = PhononBandStructureSymmLine.from_dict(d)
        self.plotter = PhononBSPlotter(self.bs)

    def test_bs_plot_data(self):
        # Expected counts are tied to the NaCl fixture: 4 branches of 51
        # q-points each (204 distances total) and 8 high-symmetry ticks.
        self.assertEqual(len(self.plotter.bs_plot_data()['distances'][0]), 51,
                         "wrong number of distances in the first branch")
        self.assertEqual(len(self.plotter.bs_plot_data()['distances']), 4,
                         "wrong number of branches")
        self.assertEqual(
            sum([len(e) for e in self.plotter.bs_plot_data()['distances']]),
            204, "wrong number of distances")
        self.assertEqual(self.plotter.bs_plot_data()['ticks']['label'][4], "Y",
                         "wrong tick label")
        self.assertEqual(len(self.plotter.bs_plot_data()['ticks']['label']),
                         8, "wrong number of tick labels")

    def test_plot(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        # Smoke test: plotting the band structure must succeed.
        self.plotter.get_plot(units="mev")
class ThermoPlotterTest(unittest.TestCase):
    """Smoke tests for ThermoPlotter thermodynamic-property plots."""

    def setUp(self):
        with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
            self.dos = CompletePhononDos.from_dict(json.load(f))
        self.plotter = ThermoPlotter(self.dos, self.dos.structure)

    def test_plot_functions(self):
        # Disable latex so no TeX installation is needed.
        from matplotlib import rc
        rc('text', usetex=False)
        # Generate each plot over 5-100 K in 5 K steps without showing it.
        for plot in (self.plotter.plot_cv,
                     self.plotter.plot_entropy,
                     self.plotter.plot_internal_energy,
                     self.plotter.plot_helmholtz_free_energy,
                     self.plotter.plot_thermodynamic_properties):
            plot(5, 100, 5, show=False)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| mit |
yagmursato/RobotOrHuman | mainn.py | 1 | 1464 | import pandas
import numpy as np
from pandas import DataFrame
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# newer versions use sklearn.model_selection.train_test_split.
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# Build the labelled corpus: each line of human.txt is a human-written sample
# (label 1); each line of robot.txt is a bot-written sample (label 0).
labels = []
human = []
with open("human.txt", "r") as f:
    for line in f:
        human.append(line)
        labels.append(1)
bot = []
with open("robot.txt", "r") as f:
    for line in f:
        bot.append(line)
        labels.append(0)

frame = pandas.DataFrame({'text': human + bot, 'status': labels})
frame_x = frame["text"]
frame_y = frame["status"]

# Hold out 20% of the corpus; random_state is fixed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(
    frame_x, frame_y, test_size=0.2, random_state=4)

# Fit a single TF-IDF vectorizer on the training text.  (The original code
# built and fitted two identical vectorizers back to back; one is enough.)
vect1 = TfidfVectorizer(min_df=1)
x_trainvect = vect1.fit_transform(x_train)

# Train the Naive Bayes classifier on the vectorized training set.
mnb = MultinomialNB()
y_train = y_train.astype('int')
mnb.fit(x_trainvect, y_train)

# Classify every line of test.txt and tally how many lines look human
# vs. bot-written.
test = []
with open("test.txt", "r") as f:
    for line in f:
        test.append(line)
human_test = 0
robot_test = 0
for sample in test:
    x_testvect = vect1.transform([sample])
    pred = mnb.predict(x_testvect)
    if pred[0] == 1:
        human_test += 1
    else:
        robot_test += 1

# Report the majority vote for the whole account (messages in Turkish:
# "this account belongs to a human" / "this account is a bot account").
if human_test > robot_test:
    print("Bu hesap insana aittir.")
else:
    print("Bu hesap bot hesaptır.")
print(human_test)
print(robot_test) | apache-2.0 |
RBDA-F17/crime | code_drop_2/filter_clean_taxi.py | 1 | 3471 | import os
import sys
import pandas as pd
from pyspark.sql.types import *
from pyspark.sql import Row, Column
from pyspark.sql.functions import *
from datetime import datetime
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import udf
# Resolve the HDFS user whose home directory holds the project data; anyone
# outside the project group falls back to cpa253's copy.
user = os.environ['USER']
if user not in ('cpa253', 'vaa238', 'vm1370'):
    user = 'cpa253'

sc = SparkContext()
sqlContext = SQLContext(sc)

# Exploratory summary code retained from an earlier pass:
# y= 2016
# file = '/user/%s/rbda/crime/data/taxi_data_clean/yellow' % (user)
# df = sqlContext.read.option("mergeSchema", "true").parquet(file)
# for col,t in df.dtypes:
#     if t == 'string':
#         d = df.select(col).distinct()
#         d.show()
#         # print d.toPandas().sort_values(by=col).to_latex(index=False)
#     else:
#         if t!= 'timestamp':
#             d = df.describe(col)
#             d.show()
#             # print d.toPandas().to_latex(index=False)

# Cleaned hourly weather observations, restricted to the taxi-data period.
weather = sqlContext.read.parquet('/user/%s/rbda/crime/data/weather_clean' % (user)).filter("year(time) >= 2009")
def get_station(lon, lat):
    """Return the name of the weather station nearest to (lon, lat).

    Squared euclidean distance in raw lon/lat space is enough to rank the
    three candidate stations (Central Park, La Guardia, JFK) for points
    inside the NYC bounding box used downstream.

    Parameters
    ----------
    lon, lat : float or None
        Pickup coordinates.  A missing (``None``) or zero coordinate —
        never valid for NYC — yields ``None``.

    Returns
    -------
    str or None
        Station name, or ``None`` when either coordinate is missing.
    """
    s_coords = pd.DataFrame({
        'station': [
            "Central Park",
            "La Guardia",
            "JFK",
        ],
        'lat': [
            40.782483,
            40.776212,
            40.640773,
        ],
        'lon': [
            -73.965816,
            -73.874009,
            -73.779180,
        ]
    })
    # Bail out on missing coordinates.  The original code tested ``lon``
    # twice; the second test was clearly meant to guard ``lat`` and let a
    # null latitude crash the arithmetic below.
    if not lon or not lat:
        return None
    s_coords['dist'] = (s_coords.lon - lon) ** 2 + (s_coords.lat - lat) ** 2
    ind = s_coords['dist'].idxmin(axis=0)
    return s_coords.station[ind]
get_station_udf = udf( get_station )
for y in xrange(2009,2018):
for m in xrange(1,13):
file_name = '/user/%s/rbda/crime/data/taxi_data_clean/yellow/year=%d/month=%02d' %(user,y,m)
df = sqlContext.read.parquet(file_name)
df = df.withColumn('fare_amount', abs(df.fare_amount))
df = df.withColumn('extra', abs(df.extra))
df = df.withColumn('mta_tax', abs(df.mta_tax))
df = df.withColumn('tip_amount',abs(df.tip_amount))
df = df.withColumn('total_amount',abs(df.total_amount))
df = df.withColumn('tolls_amount',abs(df.tolls_amount))
df = df.withColumn('improvement_surcharge',
when( year(df.pickup_datetime) < 2015 , 0.0).otherwise( 0.3 ) )
#df.describe(['fare_amount','extra','mta_tax','tip_amount','total_amount']).show()
df = df\
.filter( "pickup_longitude < -73.0 OR pickup_longitude IS NULL"
)\
.filter( "pickup_longitude > -74.3 OR pickup_longitude IS NULL"
)\
.filter( "pickup_latitude < 41.0 OR pickup_latitude is null")\
.filter( "pickup_latitude > 40.0 OR pickup_latitude is null" )\
.filter( "dropoff_longitude < -73.0 OR dropoff_longitude is null")\
.filter( "dropoff_longitude > -74.3 OR dropoff_longitude is null")\
.filter( "dropoff_latitude < 41.0 OR dropoff_latitude is null")\
.filter( "dropoff_latitude > 40.0 OR dropoff_latitude is null")
df = df\
.filter( df.trip_distance > 0.0)\
.filter( df.trip_distance < 100.0)\
.filter( df.fare_amount < 500 )\
.filter( df.extra < 100 )\
.filter( df.mta_tax < 100 )\
.filter( df.tip_amount < 200 )\
.filter( df.tolls_amount < 100 )\
.filter( df.total_amount < 1000 )
# df.describe(['fare_amount','extra','mta_tax','tip_amount','total_amount']).show()
df = df.withColumn('station', get_station_udf("pickup_longitude","pickup_latitude") )
output_folder = '/user/%s/rbda/crime/data/taxi_data_clean_weather/yellow/year=%d/month=%02d' %(user,y,m)
print 'Saving to hdfs://%s' % output_folder
df.write.mode('ignore').save(output_folder)
| gpl-3.0 |
maheshakya/scikit-learn | examples/applications/plot_prediction_latency.py | 25 | 11317 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
    """Measure the prediction runtime of each test instance separately.

    Parameters
    ----------
    estimator : fitted estimator supporting ``predict()``
    X_test : ndarray of shape (n_instances, n_features)
    verbose : bool
        If True, print min/median/max runtimes.

    Returns
    -------
    runtimes : ndarray of shape (n_instances,)
        Wall-clock seconds spent predicting each instance.
    """
    n_instances = X_test.shape[0]
    # ``np.float`` is a deprecated (and since NumPy 1.24 removed) alias of
    # the builtin ``float``; use the builtin directly.
    runtimes = np.zeros(n_instances, dtype=float)
    for i in range(n_instances):
        instance = X_test[i, :]
        start = time.time()
        estimator.predict(instance)
        runtimes[i] = time.time() - start
    if verbose:
        print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
    """Measure per-instance runtime of whole-matrix prediction.

    The full ``X_test`` is predicted ``n_bulk_repeats`` times and each
    wall-clock measurement is divided by the number of instances, making
    the result comparable to :func:`atomic_benchmark_estimator`.

    Returns
    -------
    runtimes : ndarray of shape (n_bulk_repeats,)
        Per-instance seconds for each repeat.
    """
    n_instances = X_test.shape[0]
    # np.float is a deprecated/removed alias of the builtin float.
    runtimes = np.zeros(n_bulk_repeats, dtype=float)
    for i in range(n_bulk_repeats):
        start = time.time()
        estimator.predict(X_test)
        runtimes[i] = time.time() - start
    # Vectorized division replaces the original map/lambda round-trip.
    runtimes = runtimes / float(n_instances)
    if verbose:
        print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
    """Measure prediction runtimes in both atomic and bulk mode.

    Parameters
    ----------
    estimator : already trained estimator supporting `predict()`
    X_test : test input
    n_bulk_repeats : how many times to repeat when evaluating bulk mode

    Returns
    -------
    atomic_runtimes, bulk_runtimes : pair of `np.array`
        Runtimes in seconds for atomic and bulk prediction respectively.
    """
    atomic = atomic_benchmark_estimator(estimator, X_test, verbose)
    bulk = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose)
    return atomic, bulk
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
    """Generate and standardize a random regression problem.

    The first ``n_train`` samples (shuffled with a fixed seed) form the
    training set and the rest the test set; features and targets are both
    centered and scaled using the *training* statistics only.
    """
    if verbose:
        print("generating dataset...")
    X, y, coef = make_regression(n_samples=n_train + n_test,
                                 n_features=n_features, noise=noise, coef=True)

    # Split, then shuffle the training rows with a fixed seed so repeated
    # runs see the same data order.
    X_train, X_test = X[:n_train], X[n_train:]
    y_train, y_test = y[:n_train], y[n_train:]
    idx = np.arange(n_train)
    np.random.seed(13)
    np.random.shuffle(idx)
    X_train = X_train[idx]
    y_train = y_train[idx]

    # Standardize features with the training mean/std.
    x_std = X_train.std(axis=0)
    x_mean = X_train.mean(axis=0)
    X_train = (X_train - x_mean) / x_std
    X_test = (X_test - x_mean) / x_std

    # Standardize targets the same way.
    y_std = y_train.std(axis=0)
    y_mean = y_train.mean(axis=0)
    y_train = (y_train - y_mean) / y_std
    y_test = (y_test - y_mean) / y_std

    gc.collect()
    if verbose:
        print("ok")
    return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
    """
    Plot a new `Figure` with boxplots of prediction runtimes.

    Parameters
    ----------
    runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : dict describing the benchmark; its 'estimators' entries
        provide the per-box axis labels
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes, )
    # One tick label per estimator: name plus its complexity measure.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    xtick_names = plt.setp(ax1, xticklabels=cls_infos)
    plt.setp(xtick_names)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='red', marker='+')
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
        pred_type.capitalize(),
        configuration['n_features']))
    ax1.set_ylabel('Prediction Time (us)')
    plt.show()
def benchmark(configuration):
    """Run the whole benchmark: fit every configured estimator, then plot
    atomic and bulk prediction latencies as boxplots."""
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    stats = {}
    for estimator_conf in configuration['estimators']:
        print("Benchmarking", estimator_conf['instance'])
        estimator_conf['instance'].fit(X_train, y_train)
        gc.collect()
        a, b = benchmark_estimator(estimator_conf['instance'], X_test)
        stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
    cls_names = [estimator_conf['name'] for estimator_conf in configuration[
        'estimators']]
    # Convert seconds to micro-seconds for display.
    runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'atomic', configuration)
    runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
                     configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """
    Estimate influence of the number of features on prediction time.

    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : nber of training instances (int)
    n_test : nber of testing instances (int)
    n_features : list of feature-space dimensionality to test (int)
    percentile : percentile at which to measure the speed (int [0-100])

    Returns:
    --------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))
    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for cls_name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            # Bulk-mode latency, converted from seconds to micro-seconds.
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
                                                               percentile)
    return percentiles
def plot_n_features_influence(percentiles, percentile):
    """Plot percentile prediction latency as a function of n_features.

    Parameters
    ----------
    percentiles : dict(estimator_name, dict(n_features, latency_us))
        As returned by :func:`n_feature_influence`.
    percentile : int
        The percentile the latencies correspond to (label only).
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    for i, cls_name in enumerate(percentiles.keys()):
        x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
        y = np.array([percentiles[cls_name][n] for n in x])
        # Cycle through the palette so more than three estimators no
        # longer raise an IndexError.
        plt.plot(x, y, color=colors[i % len(colors)], )
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Evolution of Prediction Time with #Features')
    ax1.set_xlabel('#Features')
    ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
    plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
    """Measure per-estimator prediction throughput (predictions/second).

    Each fitted estimator repeatedly predicts a single instance for
    ``duration_secs`` seconds; the completed count is converted to a rate.
    """
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    throughputs = dict()
    for estimator_config in configuration['estimators']:
        estimator_config['instance'].fit(X_train, y_train)
        start_time = time.time()
        n_predictions = 0
        # Count single-instance predictions completed within the window.
        while (time.time() - start_time) < duration_secs:
            estimator_config['instance'].predict(X_test[0])
            n_predictions += 1
        throughputs[estimator_config['name']] = n_predictions / duration_secs
    return throughputs
def plot_benchmark_throughput(throughputs, configuration):
    """Bar plot of prediction throughput per estimator.

    ``throughputs`` maps estimator names (as configured) to
    predictions/second, as returned by :func:`benchmark_throughputs`.
    """
    fig, ax = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    # Bar labels: estimator name plus its complexity measure.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
                  configuration['estimators']]
    plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
    ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
    ax.set_xticklabels(cls_infos, fontsize=10)
    # Leave 20% headroom above the tallest bar.
    ymax = max(cls_values) * 1.2
    ax.set_ylim((0, ymax))
    ax.set_ylabel('Throughput (predictions/sec)')
    ax.set_title('Prediction Throughput for different estimators (%d '
                 'features)' % configuration['n_features'])
    plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
    'n_train': int(1e3),
    'n_test': int(1e2),
    'n_features': int(1e2),
    # Each entry pairs an estimator with a way to compute its model
    # "complexity" (used only in the plot labels).
    'estimators': [
        {'name': 'Linear Model',
         'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
                                  l1_ratio=0.25, fit_intercept=True),
         'complexity_label': 'non-zero coefficients',
         'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
        {'name': 'RandomForest',
         'instance': RandomForestRegressor(),
         'complexity_label': 'estimators',
         'complexity_computer': lambda clf: clf.n_estimators},
        {'name': 'SVR',
         'instance': SVR(kernel='rbf'),
         'complexity_label': 'support vectors',
         'complexity_computer': lambda clf: len(clf.support_vectors_)},
    ]
}
benchmark(configuration)

# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
                                  configuration['n_train'],
                                  configuration['n_test'],
                                  [100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)

# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)

stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
hlin117/scikit-learn | examples/hetero_feature_union.py | 81 | 6241 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components of that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
    """Select a single field from data grouped by feature.

    The input must be indexable by key, with the first axis over features
    and the second over samples (e.g. a dict of lists, a 2D numpy array, a
    pandas DataFrame or a numpy record array), i.e.::

        >> len(data[key]) == n_samples

    Note that this is the transpose of the usual scikit-learn feature
    matrix convention (samples first).  For data grouped by *sample*
    (such as a list of dicts), consider a transformer along the lines of
    ``sklearn.feature_extraction.DictVectorizer`` instead.

    Parameters
    ----------
    key : hashable, required
        The key whose value is returned by :meth:`transform`.
    """

    def __init__(self, key):
        # scikit-learn convention: store constructor args unmodified.
        self.key = key

    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, data_dict):
        return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
    """Compute simple per-document statistics for DictVectorizer."""

    def fit(self, x, y=None):
        # No fitting required.
        return self

    def transform(self, posts):
        # One dict of ad-hoc features per document.
        return [{'length': len(doc), 'num_sentences': doc.count('.')}
                for doc in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
    """Split usenet posts into subject and body in a single pass.

    ``transform`` maps a sequence of raw post strings to a numpy record
    array with ``subject`` and ``body`` fields.
    """

    def fit(self, x, y=None):
        return self

    def transform(self, posts):
        features = np.recarray(shape=(len(posts),),
                               dtype=[('subject', object), ('body', object)])
        for i, text in enumerate(posts):
            # Everything before the first blank line is the header block.
            headers, _, body = text.partition('\n\n')
            body = strip_newsgroup_footer(body)
            body = strip_newsgroup_quoting(body)
            features['body'][i] = body

            # Scan the headers for the first Subject: line.
            prefix = 'Subject:'
            subject = ''
            for line in headers.split('\n'):
                if line.startswith(prefix):
                    subject = line[len(prefix):]
                    break
            features['subject'][i] = subject
        return features
pipeline = Pipeline([
    # Extract the subject & body
    ('subjectbody', SubjectBodyExtractor()),

    # Use FeatureUnion to combine the features from subject and body
    ('union', FeatureUnion(
        transformer_list=[

            # Pipeline for pulling features from the post's subject line
            ('subject', Pipeline([
                ('selector', ItemSelector(key='subject')),
                ('tfidf', TfidfVectorizer(min_df=50)),
            ])),

            # Pipeline for standard bag-of-words model for body
            ('body_bow', Pipeline([
                ('selector', ItemSelector(key='body')),
                ('tfidf', TfidfVectorizer()),
                ('best', TruncatedSVD(n_components=50)),
            ])),

            # Pipeline for pulling ad hoc features from post's body
            ('body_stats', Pipeline([
                ('selector', ItemSelector(key='body')),
                ('stats', TextStats()),  # returns a list of dicts
                ('vect', DictVectorizer()),  # list of dicts -> feature matrix
            ])),

        ],

        # weight components in FeatureUnion
        transformer_weights={
            'subject': 0.8,
            'body_bow': 0.5,
            'body_stats': 1.0,
        },
    )),

    # Use a SVC classifier on the combined features
    ('svc', SVC(kernel='linear')),
])

# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
                           subset='train',
                           categories=categories,
                           )
test = fetch_20newsgroups(random_state=1,
                          subset='test',
                          categories=categories,
                          )

# Train on the newsgroup training split, then report precision/recall/F1
# on the held-out test split.
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/matplotlib/contour.py | 4 | 66434 | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import division, print_function
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
import matplotlib.mathtext as mathtext
import matplotlib.patches as mpatches
import matplotlib.texmanager as texmanager
import matplotlib.transforms as mtrans
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ClabelText(text.Text):
    """Text whose stored rotation is interpreted in data coordinates.

    Unlike ordinary :class:`~matplotlib.text.Text`, ``get_rotation``
    converts the stored angle from data coordinates (or whatever transform
    is set) to pixel coordinates at draw time, so labels can stay aligned
    with their contours when the axes aspect changes.
    """

    def get_rotation(self):
        data_angle = text.Text.get_rotation(self)
        transform = self.get_transform()
        pos_x, pos_y = self.get_position()
        pixel_angles = transform.transform_angles(
            np.array([data_angle]), np.array([[pos_x, pos_y]]))
        return pixel_angles[0]
class ContourLabeler:
"""Mixin to provide labelling capability to ContourSet"""
    def clabel(self, *args, **kwargs):
        """
        Label a contour plot.

        Call signature::

          clabel(cs, **kwargs)

        Adds labels to line contours in *cs*, where *cs* is a
        :class:`~matplotlib.contour.ContourSet` object returned by
        contour.

        ::

          clabel(cs, v, **kwargs)

        only labels contours listed in *v*.

        Optional keyword arguments:

        *fontsize*:
          size in points or relative size eg 'smaller', 'x-large'

        *colors*:
          - if *None*, the color of each label matches the color of
            the corresponding contour

          - if one string color, e.g., *colors* = 'r' or *colors* =
            'red', all labels will be plotted in this color

          - if a tuple of matplotlib color args (string, float, rgb, etc),
            different labels will be plotted in different colors in the
            order specified

        *inline*:
          controls whether the underlying contour is removed or
          not. Default is *True*.

        *inline_spacing*:
          space in pixels to leave on each side of label when
          placing inline.  Defaults to 5.  This spacing will be
          exact for labels at locations where the contour is
          straight, less so for labels on curved contours.

        *fmt*:
          a format string for the label. Default is '%1.3f'
          Alternatively, this can be a dictionary matching contour
          levels with arbitrary strings to use for each contour level
          (i.e., fmt[level]=string), or it can be any callable, such
          as a :class:`~matplotlib.ticker.Formatter` instance, that
          returns a string when called with a numeric contour level.

        *manual*:
          if *True*, contour labels will be placed manually using
          mouse clicks.  Click the first button near a contour to
          add a label, click the second button (or potentially both
          mouse buttons at once) to finish adding labels.  The third
          button can be used to remove the last label added, but
          only if labels are not inline.  Alternatively, the keyboard
          can be used to select label locations (enter to end label
          placement, delete or backspace act like the third mouse button,
          and any other key will select a label location).

          *manual* can be an iterable object of x,y tuples.  Contour labels
          will be created as if mouse is clicked at each x,y positions.

        *rightside_up*:
          if *True* (default), label rotations will always be plus
          or minus 90 degrees from level.

        *use_clabeltext*:
          if *True* (default is False), ClabelText class (instead of
          matplotlib.Text) is used to create labels. ClabelText
          recalculates rotation angles of texts during the drawing time,
          therefore this can be used if aspect of the axes changes.

        .. plot:: mpl_examples/pylab_examples/contour_demo.py
        """
        """
        NOTES on how this all works:
        clabel basically takes the input arguments and uses them to
        add a list of "label specific" attributes to the ContourSet
        object.  These attributes are all of the form label* and names
        should be fairly self explanatory.
        Once these attributes are set, clabel passes control to the
        labels method (case of automatic label placement) or
        BlockingContourLabeler (case of manual label placement).
        """
        # Stash the label configuration on the ContourSet instance; the
        # label* attributes below are read by labels()/add_label_near().
        fontsize = kwargs.get('fontsize', None)
        inline = kwargs.get('inline', 1)
        inline_spacing = kwargs.get('inline_spacing', 5)
        self.labelFmt = kwargs.get('fmt', '%1.3f')
        _colors = kwargs.get('colors', None)
        self._use_clabeltext = kwargs.get('use_clabeltext', False)
        # Detect if manual selection is desired and remove from argument list
        self.labelManual = kwargs.get('manual', False)
        self.rightside_up = kwargs.get('rightside_up', True)
        # Resolve which contour levels get labels: all of them, or only
        # those listed in the optional positional argument.
        if len(args) == 0:
            levels = self.levels
            indices = range(len(self.cvalues))
        elif len(args) == 1:
            levlabs = list(args[0])
            indices, levels = [], []
            for i, lev in enumerate(self.levels):
                if lev in levlabs:
                    indices.append(i)
                    levels.append(lev)
            if len(levels) < len(levlabs):
                msg = "Specified levels " + str(levlabs)
                msg += "\n don't match available levels "
                msg += str(self.levels)
                raise ValueError(msg)
        else:
            raise TypeError("Illegal arguments to clabel, see help(clabel)")
        self.labelLevelList = levels
        self.labelIndiceList = indices
        self.labelFontProps = font_manager.FontProperties()
        if fontsize is None:
            font_size = int(self.labelFontProps.get_size_in_points())
        else:
            if type(fontsize) not in [int, float, str]:
                raise TypeError("Font size must be an integer number.")
                # Can't it be floating point, as indicated in line above?
            else:
                if type(fontsize) == str:
                    font_size = int(self.labelFontProps.get_size_in_points())
                else:
                    self.labelFontProps.set_size(fontsize)
                    font_size = fontsize
        self.labelFontSizeList = [font_size] * len(levels)
        # Label colors: follow the contour colors, or map the explicit
        # color list through a throwaway colormap.
        if _colors is None:
            self.labelMappable = self
            self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
        else:
            cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
            self.labelCValueList = range(len(self.labelLevelList))
            self.labelMappable = cm.ScalarMappable(cmap=cmap,
                                                   norm=colors.NoNorm())
        #self.labelTexts = []   # Initialized in ContourSet.__init__
        #self.labelCValues = []  # same
        self.labelXYs = []
        # Three placement modes: explicit (x, y) positions, interactive
        # mouse/keyboard selection, or fully automatic.
        if cbook.iterable(self.labelManual):
            for x, y in self.labelManual:
                self.add_label_near(x, y, inline,
                                    inline_spacing)
        elif self.labelManual:
            print('Select label locations manually using first mouse button.')
            print('End manual selection with second mouse button.')
            if not inline:
                print('Remove last label by clicking third mouse button.')
            blocking_contour_labeler = BlockingContourLabeler(self)
            blocking_contour_labeler(inline, inline_spacing)
        else:
            self.labels(inline, inline_spacing)
        # Hold on to some old attribute names.  These are deprecated and will
        # be removed in the near future (sometime after 2008-08-01), but
        # keeping for now for backwards compatibility
        self.cl = self.labelTexts
        self.cl_xy = self.labelXYs
        self.cl_cvalues = self.labelCValues
        self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
        return self.labelTextsList
def print_label(self, linecontour, labelwidth):
"Return *False* if contours are too short for a label."
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return True
xmax = np.amax(linecontour[:, 0])
xmin = np.amin(linecontour[:, 0])
ymax = np.amax(linecontour[:, 1])
ymin = np.amin(linecontour[:, 1])
lw = labelwidth
if (xmax - xmin) > 1.2 * lw or (ymax - ymin) > 1.2 * lw:
return True
else:
return False
def too_close(self, x, y, lw):
"Return *True* if a label is already near this location."
for loc in self.labelXYs:
d = np.sqrt((x - loc[0]) ** 2 + (y - loc[1]) ** 2)
if d < 1.2 * lw:
return True
return False
def get_label_coords(self, distances, XX, YY, ysize, lw):
"""
Return x, y, and the index of a label location.
Labels are plotted at a location with the smallest
deviation of the contour from a straight line
unless there is another label nearby, in which case
the next best place on the contour is picked up.
If all such candidates are rejected, the beginning
of the contour is chosen.
"""
hysize = int(ysize / 2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x, y, lw):
continue
return x, y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x, y, ind
    def get_label_width(self, lev, fmt, fsize):
        """
        Return the width of the label in points, estimated from the label
        text *lev* (formatted with *fmt* if numeric) at font size *fsize*.
        """
        if not cbook.is_string_like(lev):
            lev = self.get_text(lev, fmt)
        # Dispatch on how the text will be rendered: TeX, mathtext, or
        # plain text.
        lev, ismath = text.Text.is_math_text(lev)
        if ismath == 'TeX':
            # Lazily create (and cache) the TeX manager.
            if not hasattr(self, '_TeX_manager'):
                self._TeX_manager = texmanager.TexManager()
            lw, _, _ = self._TeX_manager.get_text_width_height_descent(lev,
                                                                       fsize)
        elif ismath:
            # Lazily create (and cache) the mathtext parser.
            if not hasattr(self, '_mathtext_parser'):
                self._mathtext_parser = mathtext.MathTextParser('bitmap')
            img, _ = self._mathtext_parser.parse(lev, dpi=72,
                                                 prop=self.labelFontProps)
            lw = img.get_width()  # at dpi=72, the units are PostScript points
        else:
            # width is much less than "font size"
            lw = (len(lev)) * fsize * 0.6
        return lw
    def get_real_label_width(self, lev, fmt, fsize):
        """
        This computes actual onscreen label width.
        This uses some black magic to determine onscreen extent of non-drawn
        label. This magic may not be very robust.
        This method is not being used, and may be modified or removed.
        """
        # Find middle of axes
        xx = np.mean(np.asarray(self.ax.axis()).reshape(2, 2), axis=1)
        # Temporarily create text object
        t = text.Text(xx[0], xx[1])
        self.set_label_props(t, self.get_text(lev, fmt), 'k')
        # Some black magic to get onscreen extent
        # NOTE: This will only work for already drawn figures, as the canvas
        # does not have a renderer otherwise.  This is the reason this function
        # can't be integrated into the rest of the code.
        bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
        # difference in pixel extent of image
        lw = np.diff(bbox.corners()[0::2, 0])[0]
        return lw
    def set_label_props(self, label, text, color):
        """Set text, color, font properties and clip box on a label Text."""
        label.set_text(text)
        label.set_color(color)
        label.set_fontproperties(self.labelFontProps)
        # Clip labels to the axes so they never spill outside the plot.
        label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt, dict):
return fmt[lev]
elif callable(fmt):
return fmt(lev)
else:
return fmt % lev
def locate_label(self, linecontour, labelwidth):
    """
    Find a good place to plot a label (relatively flat
    part of the contour).

    Returns ``(x, y, index)`` where *index* is the position of
    ``(x, y)`` within *linecontour*.
    """
    nsize = len(linecontour)
    # Split the contour into xsize chunks of roughly one label width
    # (ysize points) each.
    if labelwidth > 1:
        xsize = int(np.ceil(nsize / labelwidth))
    else:
        xsize = 1
    if xsize == 1:
        ysize = nsize
    else:
        ysize = int(labelwidth)
    # np.resize repeats the data cyclically if nsize < xsize * ysize.
    XX = np.resize(linecontour[:, 0], (xsize, ysize))
    YY = np.resize(linecontour[:, 1], (xsize, ysize))
    # I might have fouled up the following:
    yfirst = YY[:, 0].reshape(xsize, 1)
    ylast = YY[:, -1].reshape(xsize, 1)
    xfirst = XX[:, 0].reshape(xsize, 1)
    xlast = XX[:, -1].reshape(xsize, 1)
    # s is a cross-product measure of how far each point deviates from
    # its chunk's chord (first point -> last point); L is the chord
    # length of each chunk.
    s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
    L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()
    # Total normalized deviation per chunk; the flattest chunk is the
    # best label site.
    dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
    x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
    # Recover the index of (x, y) in the original contour.
    # There must be a more efficient way...
    lc = [tuple(l) for l in linecontour]
    dind = lc.index((x, y))
    return x, y, dind
def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):
    """
    This function calculates the appropriate label rotation given
    the linecontour coordinates in screen units, the index of the
    label location and the label width.

    It will also break contour and calculate inlining if *lc* is
    not empty (lc defaults to the empty list if None). *spacing*
    is the space around the label in pixels to leave empty.

    Do both of these tasks at once to avoid calling mlab.path_length
    multiple times, which is relatively costly.

    The method used here involves calculating the path length
    along the contour in pixel coordinates and then looking
    approximately label width / 2 away from central point to
    determine rotation and then to break contour if desired.

    Returns ``(rotation, nlc)`` where *nlc* is the list of broken
    contour pieces (empty when *lc* is empty).
    """
    if lc is None:
        lc = []
    # Half the label width
    hlw = lw / 2.0
    # Check if closed and, if so, rotate contour so label is at edge
    closed = mlab.is_closed_polygon(slc)
    if closed:
        slc = np.r_[slc[ind:-1], slc[:ind + 1]]
        if len(lc):  # Rotate lc also if not empty
            lc = np.r_[lc[ind:-1], lc[:ind + 1]]
        ind = 0
    # Path length in pixel space, re-origined so pl[ind] == 0.
    pl = mlab.path_length(slc)
    pl = pl - pl[ind]
    # Use linear interpolation to get points around label
    xi = np.array([-hlw, hlw])
    if closed:  # Look at end also for closed contours
        dp = np.array([pl[-1], 0])
    else:
        dp = np.zeros_like(xi)
    # Interpolated screen coordinates at +/- half a label width from
    # the label position.
    ll = mlab.less_simple_linear_interpolation(pl, slc, dp + xi,
                                               extrap=True)
    # get vector in pixel space coordinates from one point to other
    dd = np.diff(ll, axis=0).ravel()
    # Get angle of vector - must be calculated in pixel space for
    # text rotation to work correctly
    if np.all(dd == 0):  # Must deal with case of zero length label
        rotation = 0.0
    else:
        rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
    if self.rightside_up:
        # Fix angle so text is never upside-down
        if rotation > 90:
            rotation = rotation - 180.0
        if rotation < -90:
            rotation = 180.0 + rotation
    # Break contour if desired
    nlc = []
    if len(lc):
        # Expand range by spacing
        xi = dp + xi + np.array([-spacing, spacing])
        # Get (fractional) indices near the break points of interest
        I = mlab.less_simple_linear_interpolation(
            pl, np.arange(len(pl)), xi, extrap=False)
        # If those indices aren't beyond contour edge, find x,y
        if (not np.isnan(I[0])) and int(I[0]) != I[0]:
            xy1 = mlab.less_simple_linear_interpolation(
                pl, lc, [xi[0]])
        if (not np.isnan(I[1])) and int(I[1]) != I[1]:
            xy2 = mlab.less_simple_linear_interpolation(
                pl, lc, [xi[1]])
        # Make integer
        I = [np.floor(I[0]), np.ceil(I[1])]
        # NOTE(review): xy1/xy2 are only bound when the corresponding
        # interpolated index is fractional; the branches below appear to
        # rely on the NaN checks to guard against referencing them
        # unbound -- confirm this holds for exact-integer indices.
        # Actually break contours
        if closed:
            # This will remove contour if shorter than label
            if np.all(~np.isnan(I)):
                nlc.append(np.r_[xy2, lc[I[1]:I[0] + 1], xy1])
        else:
            # These will remove pieces of contour if they have length zero
            if not np.isnan(I[0]):
                nlc.append(np.r_[lc[:I[0] + 1], xy1])
            if not np.isnan(I[1]):
                nlc.append(np.r_[xy2, lc[I[1]:]])
        # The current implementation removes contours completely
        # covered by labels. Uncomment line below to keep
        # original contour if this is the preferred behavior.
        #if not len(nlc): nlc = [ lc ]
    return rotation, nlc
def _get_label_text(self, x, y, rotation):
    """Create a centered Text label in data coords for pixel point (x, y)."""
    data_x, data_y = self.ax.transData.inverted().transform_point((x, y))
    return text.Text(data_x, data_y, rotation=rotation,
                     horizontalalignment='center',
                     verticalalignment='center')
def _get_label_clabeltext(self, x, y, rotation):
    """
    Create a ClabelText label; *x*, *y*, *rotation* are in pixel
    coordinates.  Both the position and the angle are converted to data
    coordinates so the rotation of the clabel always follows the
    contour line.
    """
    inv = self.ax.transData.inverted()
    data_x, data_y = inv.transform_point((x, y))
    data_rot = inv.transform_angles(np.array([rotation]),
                                    np.array([[x, y]]))
    return ClabelText(data_x, data_y, rotation=data_rot[0],
                      horizontalalignment='center',
                      verticalalignment='center')
def _add_label(self, t, x, y, lev, cvalue):
    """Style label *t*, record it in the label lists, and add it to the axes."""
    color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)
    self.set_label_props(t, self.get_text(lev, self.labelFmt), color)
    # Book-keeping used by clabel/manual label selection.
    self.labelTexts.append(t)
    self.labelCValues.append(cvalue)
    self.labelXYs.append((x, y))
    # Add label to plot here - useful for manual mode label selection
    self.ax.add_artist(t)
def add_label(self, x, y, rotation, lev, cvalue):
    """
    Add contour label using :class:`~matplotlib.text.Text` class.
    """
    self._add_label(self._get_label_text(x, y, rotation),
                    x, y, lev, cvalue)
def add_label_clabeltext(self, x, y, rotation, lev, cvalue):
    """
    Add contour label using :class:`ClabelText` class.

    The pixel-space position and rotation are converted to data
    coordinates by the label factory so the label's rotation follows
    the contour line.
    """
    self._add_label(self._get_label_clabeltext(x, y, rotation),
                    x, y, lev, cvalue)
def add_label_near(self, x, y, inline=True, inline_spacing=5,
                   transform=None):
    """
    Add a label near the point (x, y) of the given transform.
    If transform is None, data transform is used. If transform is
    False, IdentityTransform is used.

    *inline*:
      controls whether the underlying contour is removed or
      not. Default is *True*.

    *inline_spacing*:
      space in pixels to leave on each side of label when
      placing inline. Defaults to 5. This spacing will be
      exact for labels at locations where the contour is
      straight, less so for labels on curved contours.
    """
    if transform is None:
        transform = self.ax.transData
    if transform:
        x, y = transform.transform_point((x, y))

    # Find the contour nearest to the pixel point, restricted to the
    # levels selected for labelling.
    conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(
        x, y, self.labelIndiceList)[:5]

    # The calc_label_rot_and_inline routine requires that (xmin,ymin)
    # be a vertex in the path. So, if it isn't, add a vertex here
    paths = self.collections[conmin].get_paths()
    lc = paths[segmin].vertices
    if transform:
        xcmin = transform.inverted().transform([xmin, ymin])
    else:
        xcmin = np.array([xmin, ymin])
    if not np.allclose(xcmin, lc[imin]):
        # Splice the new vertex into the path at position imin.
        lc = np.r_[lc[:imin], np.array(xcmin)[None, :], lc[imin:]]
        paths[segmin] = mpath.Path(lc)

    # Get index of nearest level in subset of levels used for labeling
    lmin = self.labelIndiceList.index(conmin)

    # Coordinates of contour
    paths = self.collections[conmin].get_paths()
    lc = paths[segmin].vertices

    # In pixel/screen space
    slc = self.ax.transData.transform(lc)

    # Get label width for rotating labels and breaking contours
    lw = self.get_label_width(self.labelLevelList[lmin],
                              self.labelFmt, self.labelFontSizeList[lmin])

    # Figure out label rotation.  Pass the data-space contour only when
    # inlining, so the contour gets broken around the label.
    if inline:
        lcarg = lc
    else:
        lcarg = None
    rotation, nlc = self.calc_label_rot_and_inline(
        slc, imin, lw, lcarg,
        inline_spacing)

    self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],
                   self.labelCValueList[lmin])

    if inline:
        # Remove old, not looping over paths so we can do this up front
        paths.pop(segmin)

        # Add paths if not empty or single point
        for n in nlc:
            if len(n) > 1:
                paths.append(mpath.Path(n))
def pop_label(self, index=-1):
    """Remove a label; defaults to the most recently added one."""
    self.labelCValues.pop(index)
    removed = self.labelTexts.pop(index)
    # Detach the text artist from its axes as well.
    removed.remove()
def labels(self, inline, inline_spacing):
    """
    Place labels on every contour level selected for labelling.

    For each level, walk the collection's paths, skip segments too
    short for a label, place and rotate a label on the rest, and --
    when *inline* -- replace each labelled path with the broken pieces
    returned by calc_label_rot_and_inline.
    """
    if self._use_clabeltext:
        add_label = self.add_label_clabeltext
    else:
        add_label = self.add_label

    for icon, lev, fsize, cvalue in zip(
            self.labelIndiceList, self.labelLevelList,
            self.labelFontSizeList, self.labelCValueList):
        con = self.collections[icon]
        trans = con.get_transform()
        lw = self.get_label_width(lev, self.labelFmt, fsize)
        lw *= self.ax.figure.dpi / 72.0  # scale label width to screen pixels
        additions = []
        paths = con.get_paths()
        for segNum, linepath in enumerate(paths):
            lc = linepath.vertices  # Line contour
            slc0 = trans.transform(lc)  # Line contour in screen coords

            # For closed polygons, add extra point to avoid division by
            # zero in print_label and locate_label. Other than these
            # functions, this is not necessary and should probably be
            # eventually removed.
            if mlab.is_closed_polygon(lc):
                slc = np.r_[slc0, slc0[1:2, :]]
            else:
                slc = slc0

            # Check if long enough for a label
            if self.print_label(slc, lw):
                x, y, ind = self.locate_label(slc, lw)

                # Only pass the data-space contour when inlining, so
                # the contour gets broken around the label.
                if inline:
                    lcarg = lc
                else:
                    lcarg = None
                rotation, new = self.calc_label_rot_and_inline(
                    slc0, ind, lw, lcarg,
                    inline_spacing)

                # Actually add the label
                add_label(x, y, rotation, lev, cvalue)

                # If inline, add new contours
                if inline:
                    for n in new:
                        # Add path if not empty or single point
                        if len(n) > 1:
                            additions.append(mpath.Path(n))
            else:  # If not adding label, keep old path
                additions.append(linepath)

        # After looping over all segments on a contour, remove old
        # paths and add new ones if inlining
        if inline:
            del paths[:]
            paths.extend(additions)
def _find_closest_point_on_leg(p1, p2, p0):
    """Find the point closest to *p0* on the segment from *p1* to *p2*.

    Returns ``(d, pc)`` where *pc* is the closest point and *d* is the
    squared distance from *p0* to it.
    """
    # Degenerate case: both endpoints coincide.
    if np.all(p2 == p1):
        return np.sum((p0 - p1) ** 2), p1

    d21 = p2 - p1
    d01 = p0 - p1

    # Parameter of the orthogonal projection onto the infinite line,
    # clamped to [0, 1] so the result stays on the segment.
    proj = np.dot(d01, d21) / np.dot(d21, d21)
    proj = min(max(proj, 0), 1)

    pc = p1 + proj * d21
    # Squared distance from p0 to the closest point.
    return np.sum((pc - p0) ** 2), pc
def _find_closest_point_on_path(lc, point):
    """
    Find the closest point to *point* on the polyline *lc*.

    lc: coordinates of vertices
    point: coordinates of test point

    Returns ``(dmin, xcmin, legmin)``: the squared distance, the closest
    point, and the (start, end) vertex indices of the leg it lies on.
    """
    # Index of the closest vertex of this segment (squared distances).
    sq_dists = np.sum((lc - point[None, :]) ** 2, 1)
    imin = np.argmin(sq_dists)

    closed = mlab.is_closed_polygon(lc)

    # Candidate legs: the segments just before and just after the
    # closest vertex (wrapping around for closed polygons).
    candidate_legs = []
    if imin > 0 or closed:
        candidate_legs.append(((imin - 1) % len(lc), imin))
    if imin < len(lc) - 1 or closed:
        candidate_legs.append((imin, (imin + 1) % len(lc)))

    best = (np.inf, None, (None, None))
    for leg in candidate_legs:
        d, xc = _find_closest_point_on_leg(lc[leg[0]], lc[leg[1]], point)
        if d < best[0]:
            best = (d, xc, leg)

    return best
class ContourSet(cm.ScalarMappable, ContourLabeler):
    """
    Store a set of contour lines or filled regions.

    User-callable method: clabel

    Useful attributes:
      ax:
        The axes object in which the contours are drawn

      collections:
        a silent_list of LineCollections or PolyCollections

      levels:
        contour levels

      layers:
        same as levels for line contours; half-way between
        levels for filled contours. See :meth:`_process_colors`.
    """

    def __init__(self, ax, *args, **kwargs):
        """
        Draw contour lines or filled regions, depending on
        whether keyword arg 'filled' is *False* (default) or *True*.

        The first three arguments must be:

          *ax*: axes object.

          *levels*: [level0, level1, ..., leveln]
            A list of floating point numbers indicating the contour
            levels.

          *allsegs*: [level0segs, level1segs, ...]
            List of all the polygon segments for all the *levels*.
            For contour lines ``len(allsegs) == len(levels)``, and for
            filled contour regions ``len(allsegs) = len(levels)-1``.

            level0segs = [polygon0, polygon1, ...]

            polygon0 = array_like [[x0,y0], [x1,y1], ...]

          *allkinds*: *None* or [level0kinds, level1kinds, ...]
            Optional list of all the polygon vertex kinds (code types), as
            described and used in Path. This is used to allow multiply-
            connected paths such as holes within filled polygons.
            If not *None*, len(allkinds) == len(allsegs).

            level0kinds = [polygon0kinds, ...]

            polygon0kinds = [vertexcode0, vertexcode1, ...]

            If *allkinds* is not *None*, usually all polygons for a particular
            contour level are grouped together so that

            level0segs = [polygon0] and level0kinds = [polygon0kinds].

        Keyword arguments are as described in
        :class:`~matplotlib.contour.QuadContourSet` object.

        **Examples:**

        .. plot:: mpl_examples/misc/contour_manual.py
        """
        self.ax = ax
        self.levels = kwargs.get('levels', None)
        self.filled = kwargs.get('filled', False)
        self.linewidths = kwargs.get('linewidths', None)
        self.linestyles = kwargs.get('linestyles', None)
        self.hatches = kwargs.get('hatches', [None])
        self.alpha = kwargs.get('alpha', None)
        self.origin = kwargs.get('origin', None)
        self.extent = kwargs.get('extent', None)
        cmap = kwargs.get('cmap', None)
        self.colors = kwargs.get('colors', None)
        norm = kwargs.get('norm', None)
        vmin = kwargs.get('vmin', None)
        vmax = kwargs.get('vmax', None)
        self.extend = kwargs.get('extend', 'neither')
        self.antialiased = kwargs.get('antialiased', None)
        if self.antialiased is None and self.filled:
            self.antialiased = False  # eliminate artifacts; we are not
                                      # stroking the boundaries.
            # The default for line contours will be taken from
            # the LineCollection default, which uses the
            # rcParams['lines.antialiased']
        self.nchunk = kwargs.get('nchunk', 0)
        self.locator = kwargs.get('locator', None)
        if (isinstance(norm, colors.LogNorm)
                or isinstance(self.locator, ticker.LogLocator)):
            self.logscale = True
            if norm is None:
                norm = colors.LogNorm()
            # BUGFIX: was ``self.extend is not 'neither'`` -- identity
            # comparison against a string literal is unreliable (and a
            # SyntaxWarning on modern CPython); use inequality instead.
            if self.extend != 'neither':
                raise ValueError('extend kwarg does not work yet with log '
                                 ' scale')
        else:
            self.logscale = False

        # NOTE: asserts are stripped under ``python -O``; kept as-is so
        # the raised exception type does not change for callers.
        if self.origin is not None:
            assert(self.origin in ['lower', 'upper', 'image'])
        if self.extent is not None:
            assert(len(self.extent) == 4)
        if self.colors is not None and cmap is not None:
            raise ValueError('Either colors or cmap must be None')
        if self.origin == 'image':
            self.origin = mpl.rcParams['image.origin']

        self._transform = kwargs.get('transform', None)

        self._process_args(*args, **kwargs)
        self._process_levels()

        if self.colors is not None:
            ncolors = len(self.levels)
            if self.filled:
                ncolors -= 1
            i0 = 0

            # Handle the case where colors are given for the extended
            # parts of the contour.
            extend_min = self.extend in ['min', 'both']
            extend_max = self.extend in ['max', 'both']
            use_set_under_over = False
            # if we are extending the lower end, and we've been given enough
            # colors then skip the first color in the resulting cmap. For the
            # extend_max case we don't need to worry about passing more colors
            # than ncolors as ListedColormap will clip.
            total_levels = ncolors + int(extend_min) + int(extend_max)
            if (len(self.colors) == total_levels and
                    any([extend_min, extend_max])):
                use_set_under_over = True
                if extend_min:
                    i0 = 1

            cmap = colors.ListedColormap(self.colors[i0:None], N=ncolors)

            if use_set_under_over:
                if extend_min:
                    cmap.set_under(self.colors[0])
                if extend_max:
                    cmap.set_over(self.colors[-1])

        if self.filled:
            self.collections = cbook.silent_list('mcoll.PathCollection')
        else:
            self.collections = cbook.silent_list('mcoll.LineCollection')
        # label lists must be initialized here
        self.labelTexts = []
        self.labelCValues = []

        kw = {'cmap': cmap}
        if norm is not None:
            kw['norm'] = norm
        # sets self.cmap, norm if needed;
        cm.ScalarMappable.__init__(self, **kw)
        if vmin is not None:
            self.norm.vmin = vmin
        if vmax is not None:
            self.norm.vmax = vmax
        self._process_colors()

        self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()

        if self.filled:
            if self.linewidths is not None:
                warnings.warn('linewidths is ignored by contourf')

            # Lower and upper contour levels.
            lowers, uppers = self._get_lowers_and_uppers()

            # Ensure allkinds can be zipped below.
            if self.allkinds is None:
                self.allkinds = [None] * len(self.allsegs)

            for level, level_upper, segs, kinds in \
                    zip(lowers, uppers, self.allsegs, self.allkinds):
                paths = self._make_paths(segs, kinds)
                # Default zorder taken from Collection
                zorder = kwargs.get('zorder', 1)
                col = mcoll.PathCollection(
                    paths,
                    antialiaseds=(self.antialiased,),
                    edgecolors='none',
                    alpha=self.alpha,
                    transform=self.get_transform(),
                    zorder=zorder)
                self.ax.add_collection(col)
                self.collections.append(col)
        else:
            tlinewidths = self._process_linewidths()
            self.tlinewidths = tlinewidths
            tlinestyles = self._process_linestyles()
            aa = self.antialiased
            if aa is not None:
                aa = (self.antialiased,)
            for level, width, lstyle, segs in \
                    zip(self.levels, tlinewidths, tlinestyles, self.allsegs):
                # Default zorder taken from LineCollection
                zorder = kwargs.get('zorder', 2)
                col = mcoll.LineCollection(
                    segs,
                    antialiaseds=aa,
                    linewidths=width,
                    linestyle=[lstyle],
                    alpha=self.alpha,
                    transform=self.get_transform(),
                    zorder=zorder)
                col.set_label('_nolegend_')
                self.ax.add_collection(col, False)
                self.collections.append(col)
        self.changed()  # set the colors
def get_transform(self):
    """
    Return the :class:`~matplotlib.transforms.Transform`
    instance used by this ContourSet.
    """
    t = self._transform
    if t is None:
        # Default to drawing in data coordinates.
        t = self.ax.transData
        self._transform = t
    elif (not isinstance(t, mtrans.Transform)
          and hasattr(t, '_as_mpl_transform')):
        # Resolve transform-like objects lazily against our axes.
        t = t._as_mpl_transform(self.ax)
        self._transform = t
    return t
def __getstate__(self):
    """Return picklable state; the C contour generator is dropped."""
    state = dict(self.__dict__)
    # the C object Cntr cannot currently be pickled. This isn't a big issue
    # as it is not actually used once the contour has been calculated
    state['Cntr'] = None
    return state
def legend_elements(self, variable_name='x', str_format=str):
    """
    Return a list of artist and labels suitable for passing through
    to :func:`plt.legend` which represent this ContourSet.

    Args:

        *variable_name*: the string used inside the inequality used
          on the labels

        *str_format*: function used to format the numbers in the labels
    """
    artists = []
    labels = []

    if self.filled:
        lowers, uppers = self._get_lowers_and_uppers()
        n_levels = len(self.collections)

        for i, (collection, lower, upper) in enumerate(
                zip(self.collections, lowers, uppers)):
            # Proxy artist: a rectangle patch carrying the region's style.
            patch = mpatches.Rectangle(
                (0, 0), 1, 1,
                facecolor=collection.get_facecolor()[0],
                hatch=collection.get_hatch(),
                alpha=collection.get_alpha())
            artists.append(patch)

            lower = str_format(lower)
            upper = str_format(upper)

            # End regions use one-sided inequalities when extended.
            if i == 0 and self.extend in ('min', 'both'):
                labels.append(r'$%s \leq %s$' % (variable_name,
                                                 lower))
            elif i == n_levels - 1 and self.extend in ('max', 'both'):
                labels.append(r'$%s > %s$' % (variable_name,
                                              upper))
            else:
                labels.append(r'$%s < %s \leq %s$' % (lower,
                                                      variable_name,
                                                      upper))
    else:
        for collection, level in zip(self.collections, self.levels):
            # Proxy artist: an empty line collection styled like the level.
            patch = mcoll.LineCollection(None)
            patch.update_from(collection)

            artists.append(patch)
            # format the level for insertion into the labels
            level = str_format(level)
            labels.append(r'$%s = %s$' % (variable_name, level))

    return artists, labels
def _process_args(self, *args, **kwargs):
    """
    Process *args* and *kwargs*; override in derived classes.

    Must set self.levels, self.zmin and self.zmax, and update axes
    limits.
    """
    self.levels = args[0]
    self.allsegs = args[1]
    # args[2] is the optional allkinds.  The original ``and/or`` idiom
    # also normalized a falsy (e.g. empty) allkinds to None; keep that.
    self.allkinds = (args[2] or None) if len(args) > 2 else None
    self.zmax = np.amax(self.levels)
    self.zmin = np.amin(self.levels)
    self._auto = False

    # Check lengths of levels and allsegs.
    if self.filled:
        if len(self.allsegs) != len(self.levels) - 1:
            raise ValueError('must be one less number of segments as '
                             'levels')
    else:
        if len(self.allsegs) != len(self.levels):
            raise ValueError('must be same number of segments as levels')

    # Check length of allkinds.
    if (self.allkinds is not None and
            len(self.allkinds) != len(self.allsegs)):
        raise ValueError('allkinds has different length to allsegs')

    # Determine x, y bounds and update axes data limits.
    # (Locals renamed from min/max, which shadowed the builtins.)
    havelimits = False
    for segs in self.allsegs:
        for seg in segs:
            seg = np.asarray(seg)
            if havelimits:
                pmin = np.minimum(pmin, seg.min(axis=0))
                pmax = np.maximum(pmax, seg.max(axis=0))
            else:
                pmin = seg.min(axis=0)
                pmax = seg.max(axis=0)
                havelimits = True

    if havelimits:
        self.ax.update_datalim([pmin, pmax])
        self.ax.autoscale_view(tight=True)
def _get_allsegs_and_allkinds(self):
    """
    Override in derived classes to create and return allsegs and allkinds.
    allkinds can be None.
    """
    # Base implementation: segments were supplied directly to __init__.
    return (self.allsegs, self.allkinds)
def _get_lowers_and_uppers(self):
    """
    Return (lowers, uppers) boundary level arrays for filled contours.
    """
    lowers = self._levels[:-1]
    uppers = self._levels[1:]
    if self.zmin == lowers[0]:
        # Include minimum values in the lowest interval by nudging its
        # lower bound below zmin; copy so self._levels is unchanged.
        lowers = lowers.copy()
        if self.logscale:
            lowers[0] = 0.99 * self.zmin
        else:
            lowers[0] = lowers[0] - 1
    return (lowers, uppers)
def _make_paths(self, segs, kinds):
    """Build Path objects from vertex arrays and optional code arrays."""
    if kinds is None:
        return [mpath.Path(seg) for seg in segs]
    # kinds carry the per-vertex path codes (e.g. for holes).
    return [mpath.Path(seg, codes=kind)
            for seg, kind in zip(segs, kinds)]
def changed(self):
    # Propagate a colormap/norm change to the collections and labels.
    tcolors = [(tuple(rgba),)
               for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]
    self.tcolors = tcolors
    # Repeat the hatch list so zip() below never truncates on colors.
    hatches = self.hatches * len(tcolors)
    for color, hatch, collection in zip(tcolors, hatches,
                                        self.collections):
        if self.filled:
            collection.set_facecolor(color)
            # update the collection's hatch (may be None)
            collection.set_hatch(hatch)
        else:
            collection.set_color(color)
    # Recolor any existing labels to match the new mapping.
    for label, cv in zip(self.labelTexts, self.labelCValues):
        label.set_alpha(self.alpha)
        label.set_color(self.labelMappable.to_rgba(cv))
    # add label colors
    cm.ScalarMappable.changed(self)
def _autolev(self, z, N):
    """
    Select approximately *N* contour levels to span the data.

    We need two more levels for filled contours than for
    line contours, because for the latter we need to specify
    the lower and upper boundary of each range. For example,
    a single contour boundary, say at z = 0, requires only
    one contour line, but two filled regions, and therefore
    three levels to provide boundaries for both regions.
    """
    if self.locator is None:
        if self.logscale:
            self.locator = ticker.LogLocator()
        else:
            self.locator = ticker.MaxNLocator(N + 1)

    lev = self.locator.tick_values(self.zmin, self.zmax)
    self._auto = True
    if self.filled:
        return lev
    # For line contours, drop levels outside the data range.
    inside = (lev > self.zmin) & (lev < self.zmax)
    return lev[inside]
def _contour_level_args(self, z, args):
    """
    Determine the contour levels and store in self.levels.
    """
    if self.filled:
        fn = 'contourf'
    else:
        fn = 'contour'
    self._auto = False
    if self.levels is None:
        if len(args) == 0:
            # Default: about seven automatically chosen levels.
            lev = self._autolev(z, 7)
        else:
            level_arg = args[0]
            try:
                if type(level_arg) == int:
                    # A plain int requests that many automatic levels
                    # (the exact type check deliberately excludes bool
                    # and numpy integer scalars).
                    lev = self._autolev(z, level_arg)
                else:
                    lev = np.asarray(level_arg).astype(np.float64)
            except Exception:
                # BUGFIX: was a bare ``except:``, which also swallowed
                # SystemExit and KeyboardInterrupt.
                raise TypeError(
                    "Last %s arg must give levels; see help(%s)" %
                    (fn, fn))
        self.levels = lev
    if self.filled and len(self.levels) < 2:
        raise ValueError("Filled contours require at least 2 levels.")
def _process_levels(self):
    """
    Assign values to :attr:`layers` based on :attr:`levels`,
    adding extended layers as needed if contours are filled.

    For line contours, layers simply coincide with levels;
    a line is a thin layer.  No extended levels are needed
    with line contours.
    """
    # The following attributes are no longer needed, and
    # should be deprecated and removed to reduce confusion.
    self.vmin = np.amin(self.levels)
    self.vmax = np.amax(self.levels)

    # Make a private _levels to include extended regions; we
    # want to leave the original levels attribute unchanged.
    # (Colorbar needs this even for line contours.)
    self._levels = list(self.levels)

    # Prepend/append sentinel levels for the extended regions.
    if self.extend in ('both', 'min'):
        self._levels.insert(0, min(self.levels[0], self.zmin) - 1)
    if self.extend in ('both', 'max'):
        self._levels.append(max(self.levels[-1], self.zmax) + 1)
    self._levels = np.asarray(self._levels)

    if not self.filled:
        self.layers = self.levels
        return

    # layer values are mid-way between levels
    self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
    # ...except that extended layers must be outside the
    # normed range:
    if self.extend in ('both', 'min'):
        self.layers[0] = -np.inf
    if self.extend in ('both', 'max'):
        self.layers[-1] = np.inf
def _process_colors(self):
    """
    Color argument processing for contouring.

    Note that we base the color mapping on the contour
    levels and layers, not on the actual range of the Z
    values.  This means we don't have to worry about bad
    values in Z, and we always have the full dynamic range
    available for the selected levels.

    The color is based on the midpoint of the layer, except
    for extended end layers.  By default, the norm vmin and
    vmax are the extreme values of the non-extended levels.
    Hence, the layer color extremes are not the extreme
    values of the colormap itself, but approach those values
    as the number of levels increases.  An advantage of this
    scheme is that line contours, when added to filled
    contours, take on colors that are consistent with those
    of the filled regions; for example, a contour line on
    the boundary between two regions will have a color
    intermediate between those of the regions.
    """
    self.monochrome = self.cmap.monochrome
    if self.colors is not None:
        # Generate integers for direct indexing.
        i0, i1 = 0, len(self.levels)
        if self.filled:
            i1 -= 1
        # Out of range indices for over and under:
        if self.extend in ('both', 'min'):
            i0 = -1
        if self.extend in ('both', 'max'):
            i1 += 1
        self.cvalues = list(range(i0, i1))
        # NoNorm lets the integer cvalues index the ListedColormap
        # directly.
        self.set_norm(colors.NoNorm())
    else:
        self.cvalues = self.layers
    self.set_array(self.levels)
    self.autoscale_None()
    if self.extend in ('both', 'max', 'min'):
        # Allow out-of-range values to map to the over/under colors.
        self.norm.clip = False

    # self.tcolors are set by the "changed" method
def _process_linewidths(self):
    """Return one single-element linewidth tuple per contour level."""
    widths = self.linewidths
    nlevels = len(self.levels)
    if widths is None:
        default = mpl.rcParams['lines.linewidth']
        return [(default,)] * nlevels
    if not cbook.iterable(widths):
        # Scalar: broadcast to every level.
        widths = [widths] * nlevels
    else:
        widths = list(widths)
        if len(widths) < nlevels:
            # Tile the sequence until it is long enough...
            nreps = int(np.ceil(nlevels / len(widths)))
            widths = widths * nreps
        if len(widths) > nlevels:
            # ...then truncate to exactly one width per level.
            widths = widths[:nlevels]
    return [(w,) for w in widths]
def _process_linestyles(self):
    """
    Return one linestyle per contour level.

    If no linestyles were given, solid lines are used, except that
    negative levels get ``rcParams['contour.negative_linestyle']`` when
    the colormap is monochrome.  A string is broadcast to all levels; a
    sequence is tiled/truncated to the number of levels.
    """
    linestyles = self.linestyles
    Nlev = len(self.levels)
    if linestyles is None:
        tlinestyles = ['solid'] * Nlev
        if self.monochrome:
            neg_ls = mpl.rcParams['contour.negative_linestyle']
            # Tiny negative tolerance so a level of exactly 0 stays solid.
            eps = - (self.zmax - self.zmin) * 1e-15
            for i, lev in enumerate(self.levels):
                if lev < eps:
                    tlinestyles[i] = neg_ls
    else:
        if cbook.is_string_like(linestyles):
            tlinestyles = [linestyles] * Nlev
        elif cbook.iterable(linestyles):
            tlinestyles = list(linestyles)
            if len(tlinestyles) < Nlev:
                # BUGFIX: use len(tlinestyles), not len(linestyles);
                # *linestyles* may be a one-shot iterable with no len().
                nreps = int(np.ceil(Nlev / len(tlinestyles)))
                tlinestyles = tlinestyles * nreps
            if len(tlinestyles) > Nlev:
                tlinestyles = tlinestyles[:Nlev]
        else:
            raise ValueError("Unrecognized type for linestyles kwarg")
    return tlinestyles
def get_alpha(self):
    """Return the alpha value applied to all ContourSet artists."""
    return self.alpha
def set_alpha(self, alpha):
    """Set alpha for all ContourSet artists and refresh their colors."""
    self.alpha = alpha
    # changed() re-applies colors (and hence alpha) to the collections.
    self.changed()
def find_nearest_contour(self, x, y, indices=None, pixel=True):
    """
    Finds contour that is closest to a point.  Defaults to
    measuring distance in pixels (screen space - useful for manual
    contour labeling), but this can be controlled via a keyword
    argument.

    Returns a tuple containing the contour, segment, index of
    segment, x & y of segment point and distance to minimum point.

    Call signature::

      conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
                 self, x, y, indices=None, pixel=True )

    Optional keyword arguments:

      *indices*:
        Indexes of contour levels to consider when looking for
        nearest point.  Defaults to using all levels.

      *pixel*:
        If *True*, measure distance in pixel space, if not, measure
        distance in axes space.  Defaults to *True*.
    """
    # This function uses a method that is probably quite
    # inefficient based on converting each contour segment to
    # pixel coordinates and then comparing the given point to
    # those coordinates for each contour. This will probably be
    # quite slow for complex contours, but for normal use it works
    # sufficiently well that the time is not noticeable.
    # Nonetheless, improvements could probably be made.

    if indices is None:
        indices = range(len(self.levels))

    dmin = np.inf
    conmin = None
    segmin = None
    xmin = None
    ymin = None
    # NOTE(review): imin is only bound inside the ``d < dmin`` branch;
    # with empty *indices* or path-less collections the return below
    # would raise NameError -- confirm callers always have contours.

    point = np.array([x, y])

    for icon in indices:
        con = self.collections[icon]
        trans = con.get_transform()
        paths = con.get_paths()

        for segNum, linepath in enumerate(paths):
            lc = linepath.vertices
            # transfer all data points to screen coordinates if desired
            if pixel:
                lc = trans.transform(lc)

            d, xc, leg = _find_closest_point_on_path(lc, point)
            if d < dmin:
                # Track the best (closest) hit seen so far.
                dmin = d
                conmin = icon
                segmin = segNum
                imin = leg[1]
                xmin = xc[0]
                ymin = xc[1]

    return (conmin, segmin, imin, xmin, ymin, dmin)
class QuadContourSet(ContourSet):
    """
    Create and store a set of contour lines or filled regions.

    User-callable method: :meth:`clabel`

    Useful attributes:
      ax:
        The axes object in which the contours are drawn

      collections:
        A silent_list of LineCollections or PolyCollections

      levels:
        Contour levels

      layers:
        Same as levels for line contours; half-way between
        levels for filled contours.  See :meth:`_process_colors` method.
    """

    def __init__(self, ax, *args, **kwargs):
        """
        Calculate and draw contour lines or filled regions, depending
        on whether keyword arg 'filled' is False (default) or True.

        The first argument of the initializer must be an axes
        object.  The remaining arguments and keyword arguments
        are described in QuadContourSet.contour_doc.
        """
        # All the real work is done by the base class; this subclass
        # only overrides the argument processing and segment creation.
        ContourSet.__init__(self, ax, *args, **kwargs)
def _process_args(self, *args, **kwargs):
    """
    Process args and kwargs.
    """
    if isinstance(args[0], QuadContourSet):
        # Re-use the C contour generator of an existing contour set.
        C = args[0].Cntr
        if self.levels is None:
            self.levels = args[0].levels
        self.zmin = args[0].zmin
        self.zmax = args[0].zmax
    else:
        x, y, z = self._contour_args(args, kwargs)

        _mask = ma.getmask(z)
        if _mask is ma.nomask:
            _mask = None
        C = _cntr.Cntr(x, y, z.filled(), _mask)

        t = self.get_transform()

        # if the transform is not trans data, and some part of it
        # contains transData, transform the xs and ys to data coordinates
        # NOTE: 'contains_branch_seperately' (misspelled) is the actual
        # public name of this transform method.
        if (t != self.ax.transData and
                any(t.contains_branch_seperately(self.ax.transData))):
            trans_to_data = t - self.ax.transData
            pts = (np.vstack([x.flat, y.flat]).T)
            transformed_pts = trans_to_data.transform(pts)
            x = transformed_pts[..., 0]
            y = transformed_pts[..., 1]

        # Update the axes data limits from the coordinate extremes.
        x0 = ma.minimum(x)
        x1 = ma.maximum(x)
        y0 = ma.minimum(y)
        y1 = ma.maximum(y)
        self.ax.update_datalim([(x0, y0), (x1, y1)])
        self.ax.autoscale_view(tight=True)

    self.Cntr = C
def _get_allsegs_and_allkinds(self):
    """
    Create and return allsegs and allkinds by calling underlying C code.
    """
    allsegs = []
    if self.filled:
        allkinds = []
        lowers, uppers = self._get_lowers_and_uppers()
        for lower, upper in zip(lowers, uppers):
            # Cntr.trace returns the point arrays followed by the
            # matching kind (path-code) arrays.
            nlist = self.Cntr.trace(lower, upper,
                                    nchunk=self.nchunk)
            nseg = len(nlist) // 2
            allsegs.append(nlist[:nseg])
            allkinds.append(nlist[nseg:])
    else:
        allkinds = None
        for level in self.levels:
            nlist = self.Cntr.trace(level)
            nseg = len(nlist) // 2
            allsegs.append(nlist[:nseg])
    return allsegs, allkinds
def _contour_args(self, args, kwargs):
    """
    Parse the positional (z) or (x, y, z) arguments plus any trailing
    level specification; return (x, y, z) with z masked where invalid.
    """
    if self.filled:
        fn = 'contourf'
    else:
        fn = 'contour'
    Nargs = len(args)
    if Nargs <= 2:
        # Only z given (possibly followed by a levels argument).
        z = ma.asarray(args[0], dtype=np.float64)
        x, y = self._initialize_x_y(z)
        args = args[1:]
    elif Nargs <= 4:
        # x, y, z given (possibly followed by a levels argument).
        x, y, z = self._check_xyz(args[:3], kwargs)
        args = args[3:]
    else:
        raise TypeError("Too many arguments to %s; see help(%s)" %
                        (fn, fn))
    z = ma.masked_invalid(z, copy=False)
    self.zmax = ma.maximum(z)
    self.zmin = ma.minimum(z)
    if self.logscale and self.zmin <= 0:
        # Log scale cannot represent non-positive values; mask them.
        z = ma.masked_where(z <= 0, z)
        warnings.warn('Log scale: values of z <= 0 have been masked')
        self.zmin = z.min()
    # Any remaining positional args specify the contour levels.
    self._contour_level_args(z, args)
    return (x, y, z)
def _check_xyz(self, args, kwargs):
    """
    For functions like contour, check that the dimensions
    of the input arrays match; if x and y are 1D, convert
    them to 2D using meshgrid.

    Possible change: I think we should make and use an ArgumentError
    Exception class (here and elsewhere).
    """
    x, y = args[:2]
    self.ax._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
    x = self.ax.convert_xunits(x)
    y = self.ax.convert_yunits(y)

    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    z = ma.asarray(args[2], dtype=np.float64)

    if z.ndim != 2:
        raise TypeError("Input z must be a 2D array.")
    Ny, Nx = z.shape

    if x.ndim != y.ndim:
        raise TypeError("Number of dimensions of x and y should match.")

    if x.ndim == 1:
        # 1D coordinate vectors: lengths must match z's columns/rows,
        # then expand to 2D grids.
        if x.shape[0] != Nx:
            raise TypeError("Length of x must be number of columns in z.")
        if y.shape[0] != Ny:
            raise TypeError("Length of y must be number of rows in z.")
        x, y = np.meshgrid(x, y)
    elif x.ndim == 2:
        # 2D coordinate arrays: shapes must match z exactly.
        if x.shape != z.shape:
            raise TypeError("Shape of x does not match that of z: found "
                            "{0} instead of {1}.".format(x.shape, z.shape))
        if y.shape != z.shape:
            raise TypeError("Shape of y does not match that of z: found "
                            "{0} instead of {1}.".format(y.shape, z.shape))
    else:
        raise TypeError("Inputs x and y must be 1D or 2D.")

    return x, y, z
    def _initialize_x_y(self, z):
        """
        Return X, Y arrays such that contour(Z) will match imshow(Z)
        if origin is not None.

        The center of pixel Z[i,j] depends on origin:
        if origin is None, x = j, y = i;
        if origin is 'lower', x = j + 0.5, y = i + 0.5;
        if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5

        If extent is not None, x and y will be scaled to match,
        as in imshow.
        If origin is None and extent is not None, then extent
        will give the minimum and maximum values of x and y.
        """
        if z.ndim != 2:
            raise TypeError("Input must be a 2D array.")
        else:
            Ny, Nx = z.shape
        if self.origin is None:  # Not for image-matching.
            if self.extent is None:
                # Plain integer grid: x = column index, y = row index.
                return np.meshgrid(np.arange(Nx), np.arange(Ny))
            else:
                # Extent gives min/max of x and y directly (no pixel-center
                # offset in this branch).
                x0, x1, y0, y1 = self.extent
                x = np.linspace(x0, x1, Nx)
                y = np.linspace(y0, y1, Ny)
                return np.meshgrid(x, y)
        # Match image behavior:
        if self.extent is None:
            x0, x1, y0, y1 = (0, Nx, 0, Ny)
        else:
            x0, x1, y0, y1 = self.extent
        # Place coordinates at pixel centers (hence the +0.5), as imshow does.
        dx = float(x1 - x0) / Nx
        dy = float(y1 - y0) / Ny
        x = x0 + (np.arange(Nx) + 0.5) * dx
        y = y0 + (np.arange(Ny) + 0.5) * dy
        if self.origin == 'upper':
            # Row 0 at the top: flip the y coordinates.
            y = y[::-1]
        return np.meshgrid(x, y)
contour_doc = """
Plot contours.
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the MATLAB
version in that it does not draw the polygon edges.
To draw edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
Call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (x, y) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
contour *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*
::
contourf(..., V)
fill the ``len(V)-1`` regions between the values in *V*
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X* and *Y* must both be 2-D with the same shape as *Z*, or they
must both be 1-D such that ``len(X)`` is the number of columns in
*Z* and ``len(Y)`` is the number of rows in *Z*.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.QuadContourSet` object.
Optional keyword arguments:
*colors*: [ *None* | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ *None* | Colormap ]
A cm :class:`~matplotlib.colors.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*vmin*, *vmax*: [ *None* | scalar ]
If not *None*, either or both of these values will be
supplied to the :class:`matplotlib.colors.Normalize`
instance, overriding the default color scaling based on
*levels*.
*levels*: [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw; eg to draw just the zero contour pass
``levels=[0]``
*origin*: [ *None* | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ *None* | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ *None* | ticker.Locator subclass ]
If *locator* is *None*, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.colors.Colormap.set_under` and
:meth:`matplotlib.colors.Colormap.set_over` methods.
*xunits*, *yunits*: [ *None* | registered units ]
Override axis units by specifying an instance of a
:class:`matplotlib.units.ConversionInterface`.
*antialiased*: [ *True* | *False* ]
enable antialiasing, overriding the defaults. For
filled contours, the default is *True*. For line contours,
it is taken from rcParams['lines.antialiased'].
contour-only keyword arguments:
*linewidths*: [ *None* | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
If *linestyles* is *None*, the default is 'solid' unless
the lines are monochrome. In that case, negative
contours will take their linestyle from the ``matplotlibrc``
``contour.negative_linestyle`` setting.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
contourf-only keyword arguments:
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
*hatches*:
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Note: contourf fills intervals that are closed at the top; that
is, for boundaries *z1* and *z2*, the filled region is::
z1 < z <= z2
There is one exception: if the lowest boundary coincides with
the minimum value of the *z* array, then that minimum value
will be included in the lowest interval.
**Examples:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
.. plot:: mpl_examples/pylab_examples/contourf_demo.py
"""
| unlicense |
lpenguin/pandas-qt | pandasqt/models/DataFrameModel.py | 3 | 21416 | # -*- coding: utf-8 -*-
"""Easy integration of DataFrame into pyqt framework
@author: Jev Kuznetsov, Matthias Ludwig - Datalyze Solutions
"""
from datetime import datetime
from pandasqt.compat import Qt, QtCore, QtGui, Slot, Signal
import pandas
import numpy
import parser
import re
from pandasqt.models.ColumnDtypeModel import ColumnDtypeModel
from pandasqt.models.DataSearch import DataSearch
from pandasqt.models.SupportedDtypes import SupportedDtypes
DATAFRAME_ROLE = Qt.UserRole + 2
class DataFrameModel(QtCore.QAbstractTableModel):
"""data model for use in QTableView, QListView, QComboBox, etc.
Attributes:
timestampFormat (unicode): formatting string for conversion of timestamps to QtCore.QDateTime.
Used in data method.
sortingAboutToStart (QtCore.pyqtSignal): emitted directly before sorting starts.
sortingFinished (QtCore.pyqtSignal): emitted, when sorting finished.
dtypeChanged (Signal(columnName)): passed from related ColumnDtypeModel
if a columns dtype has changed.
changingDtypeFailed (Signal(columnName, index, dtype)):
passed from related ColumnDtypeModel.
emitted after a column has changed it's data type.
"""
_float_precisions = {
"float16": numpy.finfo(numpy.float16).precision - 2,
"float32": numpy.finfo(numpy.float32).precision - 1,
"float64": numpy.finfo(numpy.float64).precision - 1
}
"""list of int datatypes for easy checking in data() and setData()"""
_intDtypes = SupportedDtypes.intTypes() + SupportedDtypes.uintTypes()
"""list of float datatypes for easy checking in data() and setData()"""
_floatDtypes = SupportedDtypes.floatTypes()
"""list of bool datatypes for easy checking in data() and setData()"""
_boolDtypes = SupportedDtypes.boolTypes()
"""list of datetime datatypes for easy checking in data() and setData()"""
_dateDtypes = SupportedDtypes.datetimeTypes()
_timestampFormat = Qt.ISODate
sortingAboutToStart = Signal()
sortingFinished = Signal()
dtypeChanged = Signal(int, object)
changingDtypeFailed = Signal(object, QtCore.QModelIndex, object)
    def __init__(self, dataFrame=None, copyDataFrame=False):
        """the __init__ method.

        Args:
            dataFrame (pandas.core.frame.DataFrame, optional): initializes the model with given DataFrame.
                If none is given an empty DataFrame will be set. defaults to None.
            copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.
                If you use it as is, you can change it from outside otherwise you have to reset the dataFrame
                after external changes.
        """
        super(DataFrameModel, self).__init__()
        # Always start with an empty frame so attribute access is safe even
        # before setDataFrame is called.
        self._dataFrame = pandas.DataFrame()
        if dataFrame is not None:
            self.setDataFrame(dataFrame, copyDataFrame=copyDataFrame)
        # Backup used by setFilter/clearFilter; None means "no filter active".
        self._dataFrameOriginal = None
        self._search = DataSearch("nothing", "")
        # Model is read-only until enableEditing(True) is called.
        self.editable = False
    def dataFrame(self):
        """getter function to _dataFrame. Holds all data.

        Note:
            It's not implemented with python properties to keep Qt conventions.

        Returns:
            pandas.core.frame.DataFrame: the model's underlying data.
        """
        return self._dataFrame
    def setDataFrame(self, dataFrame, copyDataFrame=False):
        """setter function to _dataFrame. Holds all data.

        Note:
            It's not implemented with python properties to keep Qt conventions.

        Raises:
            TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.

        Args:
            dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
            copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.
                If you use it as is, you can change it from outside otherwise you have to reset the dataFrame
                after external changes.
        """
        if not isinstance(dataFrame, pandas.core.frame.DataFrame):
            raise TypeError("not of type pandas.core.frame.DataFrame")
        # Bracket the swap with layout signals so attached views refresh.
        self.layoutAboutToBeChanged.emit()
        if copyDataFrame:
            self._dataFrame = dataFrame.copy()
        else:
            self._dataFrame = dataFrame
        # Rebuild the dtype side-model and re-wire its change notifications.
        self._columnDtypeModel = ColumnDtypeModel(dataFrame)
        self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)
        # self._columnDtypeModel.changingDtypeFailed.connect(
        #     lambda columnName, index, dtype: self.changingDtypeFailed.emit(columnName, index, dtype)
        # )
        self.layoutChanged.emit()
    @Slot(int, object)
    def propagateDtypeChanges(self, column, dtype):
        """Re-emit a dtype change from the ColumnDtypeModel as our own signal.

        Args:
            column (int): index of the column whose dtype changed.
            dtype (object): the new dtype.
        """
        self.dtypeChanged.emit(column, dtype)
    @property
    def timestampFormat(self):
        """getter to _timestampFormat (format used to convert timestamps to QDateTime)."""
        return self._timestampFormat
    @timestampFormat.setter
    def timestampFormat(self, timestampFormat):
        """setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime

        Raises:
            TypeError: if timestampFormat is not of type unicode.

        Args:
            timestampFormat (unicode): assign timestampFormat to _timestampFormat.
                Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.
        """
        # NOTE(review): `unicode` is Python-2 only; on Python 3 this would
        # reject every str — confirm the supported interpreter version.
        if not isinstance(timestampFormat, (unicode, )):
            raise TypeError('not of type unicode')
        #assert isinstance(timestampFormat, unicode) or timestampFormat.__class__.__name__ == "DateFormat", "not of type unicode"
        self._timestampFormat = timestampFormat
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""return the header depending on section, orientation and Qt::ItemDataRole
Args:
section (int): For horizontal headers, the section number corresponds to the column number.
Similarly, for vertical headers, the section number corresponds to the row number.
orientation (Qt::Orientations):
role (Qt::ItemDataRole):
Returns:
None if not Qt.DisplayRole
_dataFrame.columns.tolist()[section] if orientation == Qt.Horizontal
section if orientation == Qt.Vertical
None if horizontal orientation and section raises IndexError
"""
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
try:
label = self._dataFrame.columns.tolist()[section]
if label == section:
label = section
return label
except (IndexError, ):
return None
elif orientation == Qt.Vertical:
return section
    def data(self, index, role=Qt.DisplayRole):
        """return data depending on index, Qt::ItemDataRole and data type of the column.

        Args:
            index (QtCore.QModelIndex): Index to define column and row you want to return
            role (Qt::ItemDataRole): Define which data you want to return.

        Returns:
            None if index is invalid
            None if role is none of: DisplayRole, EditRole, CheckStateRole, DATAFRAME_ROLE

            if role DisplayRole:
                unmodified _dataFrame value if column dtype is object (string or unicode).
                _dataFrame value as int or long if column dtype is in _intDtypes.
                _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).
                None if column dtype is in _boolDtypes.
                QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template.

            if role EditRole:
                unmodified _dataFrame value if column dtype is object (string or unicode).
                _dataFrame value as int or long if column dtype is in _intDtypes.
                _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).
                _dataFrame value as bool if column dtype is in _boolDtypes.
                QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template.

            if role CheckStateRole:
                Qt.Checked or Qt.Unchecked if dtype is numpy.bool_ otherwise None for all other dtypes.

            if role DATAFRAME_ROLE:
                unmodified _dataFrame value.

            raises TypeError if an unhandled dtype is found in column.
        """
        if not index.isValid():
            return None

        def convertValue(row, col, columnDtype):
            # Convert the raw DataFrame cell into a plain Python / Qt value
            # appropriate for the column's dtype.
            value = None
            if columnDtype == object:
                value = self._dataFrame.ix[row, col]
            elif columnDtype in self._floatDtypes:
                # Round to the dtype-specific display precision.
                value = round(float(self._dataFrame.ix[row, col]), self._float_precisions[str(columnDtype)])
            elif columnDtype in self._intDtypes:
                value = int(self._dataFrame.ix[row, col])
            elif columnDtype in self._boolDtypes:
                # TODO this will most likely always be true
                # See: http://stackoverflow.com/a/715455
                # well no: I am mistaken here, the data is already in the dataframe
                # so its already converted to a bool
                value = bool(self._dataFrame.ix[row, col])
            elif columnDtype in self._dateDtypes:
                #print numpy.datetime64(self._dataFrame.ix[row, col])
                value = pandas.Timestamp(self._dataFrame.ix[row, col])
                value = QtCore.QDateTime.fromString(str(value), self.timestampFormat)
                #print value
            # else:
            #     raise TypeError, "returning unhandled data type"
            return value

        # Translate view coordinates into DataFrame labels.
        row = self._dataFrame.index[index.row()]
        col = self._dataFrame.columns[index.column()]
        columnDtype = self._dataFrame[col].dtype
        if role == Qt.DisplayRole:
            # return the value if you wanne show True/False as text
            if columnDtype == numpy.bool:
                result = self._dataFrame.ix[row, col]
            else:
                result = convertValue(row, col, columnDtype)
        elif role == Qt.EditRole:
            result = convertValue(row, col, columnDtype)
        elif role == Qt.CheckStateRole:
            # Booleans are rendered as check boxes.
            if columnDtype == numpy.bool_:
                if convertValue(row, col, columnDtype):
                    result = Qt.Checked
                else:
                    result = Qt.Unchecked
            else:
                result = None
        elif role == DATAFRAME_ROLE:
            # Raw, unconverted value for programmatic consumers.
            result = self._dataFrame.ix[row, col]
        else:
            result = None
        return result
    def flags(self, index):
        """Returns the item flags for the given index as ored value, e.x.: Qt.ItemIsUserCheckable | Qt.ItemIsEditable

        If a combobox for bool values should pop up ItemIsEditable have to set for bool columns too.

        Args:
            index (QtCore.QModelIndex): Index to define column and row

        Returns:
            if column dtype is not boolean Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
            if column dtype is boolean Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
        """
        flags = super(DataFrameModel, self).flags(index)
        # Read-only models expose only the base flags.
        if not self.editable:
            return flags
        col = self._dataFrame.columns[index.column()]
        if self._dataFrame[col].dtype == numpy.bool:
            flags |= Qt.ItemIsUserCheckable
        else:
            # if you want to have a combobox for bool columns set this
            flags |= Qt.ItemIsEditable
        return flags
def setData(self, index, value, role=Qt.DisplayRole):
"""Set the value to the index position depending on Qt::ItemDataRole and data type of the column
Args:
index (QtCore.QModelIndex): Index to define column and row.
value (object): new value.
role (Qt::ItemDataRole): Use this role to specify what you want to do.
Raises:
TypeError: If the value could not be converted to a known datatype.
Returns:
True if value is changed. Calls layoutChanged after update.
False if value is not different from original value.
"""
if not index.isValid() or not self.editable:
return False
if value != index.data(role):
self.layoutAboutToBeChanged.emit()
row = self._dataFrame.index[index.row()]
col = self._dataFrame.columns[index.column()]
#print 'before change: ', index.data().toUTC(), self._dataFrame.iloc[row][col]
columnDtype = self._dataFrame[col].dtype
if columnDtype == object:
pass
elif columnDtype in self._intDtypes:
dtypeInfo = numpy.iinfo(columnDtype)
if value < dtypeInfo.min:
value = dtypeInfo.min
elif value > dtypeInfo.max:
value = dtypeInfo.max
elif columnDtype in self._floatDtypes:
value = numpy.float64(value).astype(columnDtype)
elif columnDtype in self._boolDtypes:
value = numpy.bool_(value)
elif columnDtype in self._dateDtypes:
# convert the given value to a compatible datetime object.
# if the conversation could not be done, keep the original
# value.
if isinstance(value, QtCore.QDateTime):
value = value.toString(self.timestampFormat)
try:
value = pandas.Timestamp(value)
except ValueError, e:
return False
else:
raise TypeError, "try to set unhandled data type"
self._dataFrame.set_value(row, col, value)
#print 'after change: ', value, self._dataFrame.iloc[row][col]
self.layoutChanged.emit()
return True
else:
return False
def rowCount(self, index=QtCore.QModelIndex()):
"""returns number of rows
Args:
index (QtCore.QModelIndex, optional): Index to define column and row. defaults to empty QModelIndex
Returns:
number of rows
"""
# len(df.index) is faster, so use it:
# In [12]: %timeit df.shape[0]
# 1000000 loops, best of 3: 437 ns per loop
# In [13]: %timeit len(df.index)
# 10000000 loops, best of 3: 110 ns per loop
# %timeit df.__len__()
# 1000000 loops, best of 3: 215 ns per loop
return len(self._dataFrame.index)
def columnCount(self, index=QtCore.QModelIndex()):
"""returns number of columns
Args:
index (QtCore.QModelIndex, optional): Index to define column and row. defaults to empty QModelIndex
Returns:
number of columns
"""
# speed comparison:
# In [23]: %timeit len(df.columns)
# 10000000 loops, best of 3: 108 ns per loop
# In [24]: %timeit df.shape[1]
# 1000000 loops, best of 3: 440 ns per loop
return len(self._dataFrame.columns)
    def sort(self, columnId, order=Qt.AscendingOrder):
        """sort the model column

        After sorting the data in ascending or descending order, a signal
        `layoutChanged` is emitted.

        Args:
            columnId (int): columnIndex
            order (Qt::SortOrder, optional): descending(1) or ascending(0). defaults to Qt.AscendingOrder
        """
        self.layoutAboutToBeChanged.emit()
        self.sortingAboutToStart.emit()
        column = self._dataFrame.columns[columnId]
        # Qt.AscendingOrder == 0, so `not bool(order)` maps it to ascending=True.
        self._dataFrame.sort(column, ascending=not bool(order), inplace=True)
        self.layoutChanged.emit()
        self.sortingFinished.emit()
    def setFilter(self, search):
        """apply a filter and hide rows.

        The filter must be a `DataSearch` object, which evaluates a python
        expression.
        If there was an error while parsing the expression, the data will remain
        unfiltered.

        Args:
            search(pandasqt.DataSearch): data search object to use.

        Raises:
            TypeError: An error is raised, if the given parameter is not a
                `DataSearch` object.
        """
        if not isinstance(search, DataSearch):
            raise TypeError('The given parameter must an `pandasqt.DataSearch` object')
        self._search = search
        self.layoutAboutToBeChanged.emit()
        # If a filter is already active, start again from the unfiltered data.
        if self._dataFrameOriginal is not None:
            self._dataFrame = self._dataFrameOriginal
        # Keep a backup so clearFilter can restore the full frame later.
        self._dataFrameOriginal = self._dataFrame.copy()
        self._search.setDataFrame(self._dataFrame)
        searchIndex, valid = self._search.search()
        if valid:
            self._dataFrame = self._dataFrame[searchIndex]
            self.layoutChanged.emit()
        else:
            # Expression failed to parse: drop the filter entirely.
            self.clearFilter()
            self.layoutChanged.emit()
def clearFilter(self):
"""clear all filters.
"""
if self._dataFrameOriginal is not None:
self.layoutAboutToBeChanged.emit()
self._dataFrame = self._dataFrameOriginal
self._dataFrameOriginal = None
self.layoutChanged.emit()
    def columnDtypeModel(self):
        """Getter for a ColumnDtypeModel.

        Returns:
            ColumnDtypeModel: side-model describing the columns' dtypes.
        """
        return self._columnDtypeModel
    def enableEditing(self, editable):
        """Enable or disable editing of this model and its dtype side-model.

        Args:
            editable (bool): True to allow edits, False for read-only.
        """
        self.editable = editable
        self._columnDtypeModel.setEditable(self.editable)
    def dataFrameColumns(self):
        """Return the DataFrame's column labels as a plain list."""
        return self._dataFrame.columns.tolist()
def addDataFrameColumn(self, columnName, dtype, defaultValue):
if not self.editable or dtype not in SupportedDtypes.allTypes():
return False
elements = self.rowCount()
columnPosition = self.columnCount()
newColumn = pandas.Series([defaultValue]*elements, index=self._dataFrame.index, dtype=dtype)
self.beginInsertColumns(QtCore.QModelIndex(), columnPosition - 1, columnPosition - 1)
try:
self._dataFrame.insert(columnPosition, columnName, newColumn, allow_duplicates=False)
except ValueError, e:
# columnName does already exist
return False
self.endInsertColumns()
return True
    def addDataFrameRows(self, count=1):
        """Append `count` rows filled with per-dtype default values.

        Args:
            count (int, optional): number of rows to append. defaults to 1.

        Returns:
            bool: True on success; False if not editable, count < 1, or the
                frame has no columns yet.
        """
        # don't allow any gaps in the data rows.
        # and always append at the end
        if not self.editable:
            return False
        position = self.rowCount()
        if count < 1:
            return False
        if len(self.dataFrame().columns) == 0:
            # log an error message or warning
            return False
        # Note: This function emits the rowsAboutToBeInserted() signal which
        # connected views (or proxies) must handle before the data is
        # inserted. Otherwise, the views may end up in an invalid state.
        self.beginInsertRows(QtCore.QModelIndex(), position, position + count - 1)
        # Build one default value per column based on its dtype.
        defaultValues = []
        for dtype in self._dataFrame.dtypes:
            if dtype.type == numpy.dtype('<M8[ns]'):
                val = pandas.Timestamp('')
            elif dtype.type == numpy.dtype(object):
                val = ''
            else:
                val = dtype.type()
            defaultValues.append(val)
        for i in xrange(count):
            self._dataFrame.loc[position + i] = defaultValues
        # NOTE(review): reset_index() without inplace=True returns a new
        # frame and discards it — this line is a no-op as written; confirm
        # whether reset_index(inplace=True) was intended.
        self._dataFrame.reset_index()
        self.endInsertRows()
        return True
def removeDataFrameColumns(self, columns):
if not self.editable:
return False
if columns:
deleted = 0
errorOccured = False
for (position, name) in columns:
position = position - deleted
if position < 0:
position = 0
self.beginRemoveColumns(QtCore.QModelIndex(), position, position)
try:
self._dataFrame.drop(name, axis=1, inplace=True)
except ValueError, e:
errorOccured = True
continue
self.endRemoveColumns()
deleted += 1
if errorOccured:
return False
else:
return True
return False
def removeDataFrameRows(self, rows):
if not self.editable:
return False
if rows:
position = min(rows)
count = len(rows)
self.beginRemoveRows(QtCore.QModelIndex(), position, position + count - 1)
removedAny = False
for idx, line in self._dataFrame.iterrows():
if idx in rows:
removedAny = True
self._dataFrame.drop(idx, inplace=True)
if not removedAny:
return False
self._dataFrame.reset_index(inplace=True, drop=True)
self.endRemoveRows()
return True
return False | mit |
Erotemic/ibeis | ibeis/main_module.py | 1 | 21403 | # -*- coding: utf-8 -*-
"""
This module defines the entry point into the IBEIS system
ibeis.opendb and ibeis.main are the main entry points
"""
from __future__ import absolute_import, division, print_function
#from six.moves import builtins
import sys
import multiprocessing
#try:
import utool as ut
profile = ut.profile
#profile = getattr(builtins, 'profile')
#except AttributeError:
#def profile(func):
# return func
QUIET = '--quiet' in sys.argv
NOT_QUIET = not QUIET
USE_GUI = '--gui' in sys.argv or '--nogui' not in sys.argv
def _on_ctrl_c(signal, frame):
proc_name = multiprocessing.current_process().name
print('[ibeis.main_module] Caught ctrl+c in %s' % (proc_name,))
sys.exit(0)
# try:
# _close_parallel()
# except Exception as ex:
# print('Something very bad happened' + repr(ex))
# finally:
# print('[ibeis.main_module] sys.exit(0)')
# sys.exit(0)
#-----------------------
# private init functions
def _init_signals():
    """Install the module's ctrl+c (SIGINT) handler for this process."""
    import signal
    signal.signal(signal.SIGINT, _on_ctrl_c)
def _reset_signals():
    """Restore the default SIGINT (ctrl+c) behavior."""
    import signal
    signal.signal(signal.SIGINT, signal.SIG_DFL)  # reset ctrl+c behavior
def _parse_args():
    """Parse command-line arguments into the ibeis params module."""
    from ibeis import params
    params.parse_args()
def _init_matplotlib():
    """Initialize matplotlib via plottool's centralized init helper."""
    from plottool_ibeis import __MPL_INIT__
    __MPL_INIT__.init_matplotlib()
def _init_gui(activate=True):
    """Create the Qt application and the main-window backend.

    Args:
        activate (bool): if True, show/activate the main window immediately.

    Returns:
        guiback.MainWindowBackend: the GUI backend object.
    """
    import guitool_ibeis
    if NOT_QUIET:
        print('[main] _init_gui()')
    # Make sure a QApplication exists before any widgets are created.
    guitool_ibeis.ensure_qtapp()
    #USE_OLD_BACKEND = '--old-backend' in sys.argv
    #if USE_OLD_BACKEND:
    from ibeis.gui import guiback
    back = guiback.MainWindowBackend()
    #else:
    #    from ibeis.gui import newgui
    #    back = newgui.IBEISGuiWidget()
    if activate:
        guitool_ibeis.activate_qwindow(back.mainwin)
    return back
def _init_ibeis(dbdir=None, verbose=None, use_cache=True, web=None, **kwargs):
    """
    Private function that calls code to create an ibeis controller

    Args:
        dbdir (str or None): database directory; if None no controller is made.
        verbose (bool or None): defaults to ut.VERBOSE when None.
        use_cache (bool): reuse a cached controller for the same dbdir.
        web (bool or None): start the web app; when None, derived from CLI flags.
        **kwargs: forwarded to the controller request and the web app.

    Returns:
        IBEISController or None: None when dbdir is None.
    """
    import utool as ut
    from ibeis import params
    from ibeis.control import IBEISControl
    if verbose is None:
        verbose = ut.VERBOSE
    if verbose and NOT_QUIET:
        print('[main] _init_ibeis()')
    # Use command line dbdir unless user specifies it
    if dbdir is None:
        ibs = None
        print('[main!] WARNING: args.dbdir is None')
    else:
        # Pop controller-only kwargs so the rest can be forwarded to the web app.
        kwargs = kwargs.copy()
        request_dbversion = kwargs.pop('request_dbversion', None)
        force_serial = kwargs.get('force_serial', None)
        ibs = IBEISControl.request_IBEISController(
            dbdir=dbdir, use_cache=use_cache,
            request_dbversion=request_dbversion,
            force_serial=force_serial)
        if web is None:
            web = ut.get_argflag(('--webapp', '--webapi', '--web', '--browser'),
                                 help_='automatically launch the web app / web api')
            #web = params.args.webapp
        if web:
            from ibeis.web import app
            port = params.args.webport
            app.start_from_ibeis(ibs, port=port, **kwargs)
    return ibs
def _init_parallel():
    """Configure utool's parallel-processing worker count from CLI params."""
    import utool as ut
    if ut.VERBOSE:
        print('_init_parallel')
    from utool import util_parallel
    from ibeis import params
    # Import any modules which parallel process will use here
    # so they are accessable when the program forks
    #from utool import util_sysreq
    #util_sysreq.ensure_in_pythonpath('hesaff')
    #util_sysreq.ensure_in_pythonpath('pyrf')
    #util_sysreq.ensure_in_pythonpath('code')
    #import pyhesaff  # NOQA
    #import pyrf  # NOQA
    from ibeis import core_annots  # NOQA
    #.algo.preproc import preproc_chip  # NOQA
    util_parallel.set_num_procs(params.args.num_procs)
    #if PREINIT_MULTIPROCESSING_POOLS:
    #    util_parallel.init_pool(params.args.num_procs)
# def _close_parallel():
# #if ut.VERBOSE:
# # print('_close_parallel')
# try:
# from utool import util_parallel
# util_parallel.close_pool(terminate=True)
# except Exception as ex:
# import utool as ut
# ut.printex(ex, 'error closing parallel')
# raise
def _init_numpy():
    """Configure numpy floating-point error handling.

    All four error classes (divide, over, under, invalid) are set to
    'ignore' so numeric warnings do not interrupt processing.
    """
    import utool as ut
    import numpy as np
    if ut.VERBOSE:
        print('_init_numpy')
    # Other choices would be 'warn', 'raise', 'call', 'print', or 'log';
    # ibeis deliberately silences everything.
    on_err = 'ignore'
    np.seterr(divide=on_err, over=on_err, under=on_err, invalid=on_err)
#-----------------------
# private loop functions
def _guitool_loop(main_locals, ipy=False):
    """Run the Qt main loop for the GUI backend found in `main_locals`.

    Args:
        main_locals (dict): expects a 'back' key holding the GUI backend.
        ipy (bool): run in IPython-friendly (non-blocking) mode.
    """
    import guitool_ibeis
    from ibeis import params
    print('[main] guitool_ibeis loop')
    back = main_locals.get('back', None)
    if back is not None:
        loop_freq = params.args.loop_freq
        ipy = ipy or params.args.cmd
        guitool_ibeis.qtapp_loop(qwin=back.mainwin, ipy=ipy, frequency=loop_freq, init_signals=False)
        if ipy:  # If we're in IPython, the qtapp loop won't block, so we need to refresh
            back.refresh_state()
    else:
        if NOT_QUIET:
            print('WARNING: back was not expected to be None')
def set_newfile_permissions():
    r"""
    Sets this process's default permission bits for newly created files.

    Uses the most permissive umask (0o000) so new files are group readable
    and writable. The umask value is *subtracted* from the mode a file is
    created with, so masking nothing yields maximal permissions.

    Returns:
        int: the previous umask, so callers can restore it with os.umask().

    CommandLine:
        python -m ibeis.main_module --test-set_newfile_permissions
    """
    import os
    # umask bits are removed from requested create modes; 0o000 removes none.
    new_mask = 0o000
    previous_mask = os.umask(new_mask)
    return previous_mask
def main(gui=True, dbdir=None, defaultdb='cache',
         allow_newdir=False, db=None,
         delete_ibsdir=False,
         **kwargs):
    """
    Program entry point
    Inits the system environment, an IBEISControl, and a GUI if requested

    Args:
        gui (bool): (default=True) If gui is False a gui instance will not be created
        dbdir (None): full directory of a database to load
        db (None): name of database to load relative to the workdir
        allow_newdir (bool): (default=False) if False an error is raised if a
            a new database is created
        defaultdb (str): codename of database to load if db and dbdir is None. a value
            of 'cache' will open the last database opened with the GUI.
        delete_ibsdir (bool): delete the database directory first (requires allow_newdir).
        **kwargs: forwarded to preload commands and GUI activation.

    Returns:
        dict: main_locals
    """
    set_newfile_permissions()
    from ibeis.init import main_commands
    from ibeis.init import sysres
    # Display a visible intro message
    msg = '''
    _____ ______ _______ _____ _______
    | |_____] |______ | |______
    __|__ |_____] |______ __|__ ______|
    '''
    if NOT_QUIET:
        print(msg)
    # Init the only two main system api handles
    ibs = None
    back = None
    if NOT_QUIET:
        print('[main] ibeis.main_module.main()')
    _preload()
    DIAGNOSTICS = NOT_QUIET
    if DIAGNOSTICS:
        import os
        import utool as ut
        import ibeis
        print('[main] MAIN DIAGNOSTICS')
        print('[main] * username = %r' % (ut.get_user_name()))
        print('[main] * ibeis.__version__ = %r' % (ibeis.__version__,))
        print('[main] * computername = %r' % (ut.get_computer_name()))
        print('[main] * cwd = %r' % (os.getcwd(),))
        print('[main] * sys.argv = %r' % (sys.argv,))
    # Parse directory to be loaded from command line args
    # and explicit kwargs
    if defaultdb in ['testdb1', 'testdb0']:
        from ibeis.tests.reset_testdbs import ensure_smaller_testingdbs
        ensure_smaller_testingdbs()
    #
    dbdir = sysres.get_args_dbdir(defaultdb=defaultdb,
                                  allow_newdir=allow_newdir, db=db,
                                  dbdir=dbdir)
    if delete_ibsdir is True:
        from ibeis.other import ibsfuncs
        assert allow_newdir, 'must be making new directory if you are deleting everything!'
        ibsfuncs.delete_ibeis_database(dbdir)
    #limit = sys.getrecursionlimit()
    #if limit == 1000:
    #    print('Setting Recursion Limit to 3000')
    #    sys.setrecursionlimit(3000)
    # Execute preload commands
    main_commands.preload_commands(dbdir, **kwargs)  # PRELOAD CMDS
    try:
        # Build IBEIS Control object
        ibs = _init_ibeis(dbdir)
        if gui and USE_GUI:
            back = _init_gui(activate=kwargs.get('activate', True))
            back.connect_ibeis_control(ibs)
    except Exception as ex:
        print('[main()] IBEIS LOAD encountered exception: %s %s' % (type(ex), ex))
        raise
    main_commands.postload_commands(ibs, back)  # POSTLOAD CMDS
    main_locals = {'ibs': ibs, 'back': back}
    return main_locals
def opendb_in_background(*args, **kwargs):
    """
    Starts a web server in the background.

    Args:
        *args: forwarded to :func:`opendb` in the spawned process
        **kwargs: forwarded to :func:`opendb`.  The deprecated ``wait``
            keyword is rejected if non-zero.

    Returns:
        process handle returned by ``ut.spawn_background_process``

    Raises:
        AssertionError: if the deprecated ``wait`` keyword is non-zero
    """
    import utool as ut
    # ``wait`` is deprecated: any non-zero value is an error.  The old body
    # re-checked ``sec`` a second time (unreachable after the raise) and then
    # printed a "waiting 0 seconds" message and called time.sleep(0) -- all
    # dead code, removed.
    sec = kwargs.pop('wait', 0)
    if sec != 0:
        raise AssertionError('wait is deprecated')
    proc = ut.spawn_background_process(opendb, *args, **kwargs)
    return proc
def opendb_bg_web(*args, **kwargs):
    """
    Wrapper around opendb_in_background, returns a nice web_ibs
    object to execute web calls using normal python-like syntax
    Args:
        *args: passed to opendb_in_background
        **kwargs:
            port (int):
            domain (str): if specified assumes server is already running
                somewhere otherwise kwargs is passed to opendb_in_background
            start_job_queue (bool)
    Returns:
        web_ibs - this is a KillableProcess object with special functions
    CommandLine:
        python -m ibeis.main_module opendb_bg_web
    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.main_module import * # NOQA
        >>> args = tuple()
        >>> kwargs = {}
        >>> print('Opening a web_ibs')
        >>> web_ibs = opendb_bg_web()
        >>> print('SUCESS Opened a web_ibs!')
        >>> print(web_ibs)
        >>> print('Now kill the web_ibs')
        >>> web_ibs.terminate2()
    """
    import utool as ut
    from ibeis.web import appfuncs
    # Connection target: an explicit domain (kwarg or --domain) means a
    # server is already running somewhere; otherwise spawn a local one.
    domain = kwargs.pop('domain', ut.get_argval('--domain', type_=str, default=None))
    port = kwargs.pop('port', appfuncs.DEFAULT_WEB_API_PORT)
    if 'wait' in kwargs:
        print('NOTE: No need to specify wait param anymore. '
              'This is automatically taken care of.')
    if domain is None:
        # Requesting a local test server
        _kw = dict(web=True, browser=False)
        _kw.update(kwargs)
        web_ibs = opendb_in_background(*args, **_kw)
    else:
        # Using a remote controller, no need to spin up anything
        web_ibs = ut.DynStruct()
        web_ibs.terminate2 = lambda: None
    # Augment web instance with usefull test functions
    if domain is None:
        domain = 'http://127.0.1.1'
    if not domain.startswith('http://'):
        domain = 'http://' + domain
    baseurl = domain + ':' + str(port)
    web_ibs.domain = domain
    web_ibs.port = port
    web_ibs.baseurl = baseurl
    # --- lightweight HTTP helpers, all closing over ``baseurl`` ---
    def get(suffix, **kwargs):
        # raw GET; extra kwargs are currently ignored
        import requests
        return requests.get(baseurl + suffix)
    def post(suffix, **kwargs):
        # raw POST; extra kwargs are currently ignored
        import requests
        return requests.post(baseurl + suffix)
    def send_ibeis_request(suffix, type_='post', **kwargs):
        """
        Posts a request to a url suffix
        """
        import requests
        import utool as ut
        if not suffix.endswith('/'):
            raise Exception('YOU PROBABLY WANT A / AT THE END OF YOUR URL')
        # kwargs become the request payload; each value is JSON-encoded
        payload = ut.map_dict_vals(ut.to_json, kwargs)
        if type_ == 'post':
            resp = requests.post(baseurl + suffix, data=payload)
            json_content = resp._content
        elif type_ == 'get':
            resp = requests.get(baseurl + suffix, data=payload)
            json_content = resp.content
        try:
            content = ut.from_json(json_content)
        except ValueError:
            raise Exception('Expected JSON string but got json_content=%r' % (json_content,))
        else:
            # print('content = %r' % (content,))
            # server wraps results in a {status, response} envelope
            if content['status']['code'] != 200:
                print(content['status']['message'])
                raise Exception(content['status']['message'])
        request_response = content['response']
        return request_response
    # NOTE(review): the list default below is never mutated, so it is safe
    def wait_for_results(jobid, timeout=None, delays=[1, 3, 10]):
        """
        Waits for results from an engine
        """
        # poll the job status endpoint with increasing delays
        for _ in ut.delayed_retry_gen(delays):
            print('Waiting for jobid = %s' % (jobid,))
            status_response = web_ibs.send_ibeis_request('/api/engine/job/status/', jobid=jobid)
            if status_response['jobstatus'] == 'completed':
                break
        return status_response
    def read_engine_results(jobid):
        # fetch the finished job's result payload
        result_response = web_ibs.send_ibeis_request('/api/engine/job/result/', jobid=jobid)
        return result_response
    def send_request_and_wait(suffix, type_='post', timeout=None, **kwargs):
        # submit, block until completed, then fetch the result
        jobid = web_ibs.send_ibeis_request(suffix, type_=type_, **kwargs)
        status_response = web_ibs.wait_for_results(jobid, timeout)  # NOQA
        result_response = web_ibs.read_engine_results(jobid)
        #>>> cmdict = ut.from_json(result_response['json_result'])[0]
        return result_response
    # attach the helpers to the returned instance
    web_ibs.send_ibeis_request = send_ibeis_request
    web_ibs.wait_for_results = wait_for_results
    web_ibs.read_engine_results = read_engine_results
    web_ibs.send_request_and_wait = send_request_and_wait
    web_ibs.get = get
    web_ibs.post = post
    def wait_until_started():
        """ waits until the web server responds to a request """
        import requests
        for count in ut.delayed_retry_gen([1], timeout=15):
            if True or ut.VERBOSE:
                print('Waiting for server to be up. count=%r' % (count,))
            try:
                web_ibs.send_ibeis_request('/api/test/heartbeat/', type_='get')
                break
            except requests.ConnectionError:
                pass
    wait_until_started()
    return web_ibs
def opendb_fg_web(*args, **kwargs):
    """
    Opens a database with the web backend enabled but without entering the
    web event loop, so the flask app can be driven from the foreground.

    Ignore:
        >>> from ibeis.main_module import * # NOQA
        >>> kwargs = {'db': 'testdb1'}
        >>> args = tuple()
        >>> import ibeis
        >>> ibs = ibeis.opendb_fg_web()
    """
    # Force web mode, but keep the loop and browser from starting so the
    # caller keeps control of the process.
    kwargs.update(start_web_loop=False, web=True, browser=False)
    ibs = opendb(*args, **kwargs)
    # Attach the flask app so tests can use it directly
    from ibeis.control import controller_inject
    ibs.app = controller_inject.get_flask_app()
    return ibs
def opendb(db=None, dbdir=None, defaultdb='cache', allow_newdir=False,
           delete_ibsdir=False, verbose=False, use_cache=True,
           web=None, **kwargs):
    """
    Opens an IBEIS controller without running the full ``main`` preload
    (aside from the option to delete the database before opening).
    Args:
        db (str): database name in your workdir used only if dbdir is None
        dbdir (None): full database path
        defaultdb (str): dbdir search stratagy when db is None and dbdir is
            None
        allow_newdir (bool): (default=True) if True errors when opening a
            nonexisting database
        delete_ibsdir (bool): BE CAREFUL! (default=False) if True deletes the
            entire
        verbose (bool): verbosity flag
        web (bool): starts webserver if True (default=param specification)
        use_cache (bool): if True will try to return a previously loaded
            controller
    Returns:
        ibeis.IBEISController: ibs
    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.main_module import * # NOQA
        >>> db = None
        >>> dbdir = None
        >>> defaultdb = 'cache'
        >>> allow_newdir = False
        >>> delete_ibsdir = False
        >>> verbose = False
        >>> use_cache = True
        >>> ibs = opendb(db, dbdir, defaultdb, allow_newdir, delete_ibsdir,
        >>>              verbose, use_cache)
        >>> result = str(ibs)
        >>> print(result)
    """
    from ibeis.init import sysres
    from ibeis.other import ibsfuncs
    # Resolve the database directory from the explicit args / command line
    dbdir = sysres.get_args_dbdir(defaultdb=defaultdb, allow_newdir=allow_newdir,
                                  db=db, dbdir=dbdir)
    # Optionally wipe the existing database before (re)creating it
    if delete_ibsdir is True:
        errmsg = 'must be making new directory if you are deleting everything!'
        assert allow_newdir, errmsg
        ibsfuncs.delete_ibeis_database(dbdir)
    ibs = _init_ibeis(dbdir, verbose=verbose, use_cache=use_cache, web=web,
                      **kwargs)
    return ibs
def start(*args, **kwargs):
    """ alias for main() """  # + main.__doc__
    # Forward everything straight through to the real entry point.
    result = main(*args, **kwargs)
    return result
def opendb_test(gui=True, dbdir=None, defaultdb='cache', allow_newdir=False,
                db=None):
    """ alias for main() """  # + main.__doc__
    from ibeis.init import sysres
    # Run environment setup, resolve the db directory, then build the
    # controller without any GUI work.
    _preload()
    dbdir = sysres.get_args_dbdir(defaultdb=defaultdb, allow_newdir=allow_newdir,
                                  db=db, dbdir=dbdir)
    return _init_ibeis(dbdir)
def _preload(mpl=True, par=True, logging=True):
    """ Sets up python environment

    Args:
        mpl (bool): initialize the matplotlib backend
        par (bool): initialize parallel worker processes
        logging (bool): start utool file logging (unless --nologging)
    """
    import utool as ut
    #from ibeis.init import main_helpers
    from ibeis import params
    # Only the main process should configure the global environment;
    # spawned workers re-import this module and must not re-run setup.
    if multiprocessing.current_process().name != 'MainProcess':
        return
    if ut.VERBOSE:
        print('[ibeis] _preload')
    # Arguments must be parsed before params.args is consulted below
    _parse_args()
    # mpl backends
    if logging and not params.args.nologging:
        # Log in the configured ibeis log dir (which is maintained by utool)
        # fix this to be easier to figure out where the logs actually are
        ut.start_logging(appname='ibeis')
    if mpl:
        _init_matplotlib()
    # numpy print settings
    _init_numpy()
    # parallel servent processes
    if par:
        _init_parallel()
    # ctrl+c
    _init_signals()
    # inject colored exceptions
    ut.util_inject.inject_colored_exceptions()
    # register type aliases for debugging
    #main_helpers.register_utool_aliases()
    #return params.args
def main_loop(main_locals, rungui=True, ipy=False, persist=True):
    """
    Runs the qt loop if the GUI was initialized and returns an executable string
    for embedding an IPython terminal if requested.
    If rungui is False the gui will not loop even if back has been created
    the main locals dict must be called main_locals in the scope you call this
    function in.
    Args:
        main_locals (dict_): locals dict produced by ``main`` ({'ibs', 'back'})
        rungui (bool): enter the qt event loop when the GUI exists
        ipy (bool): request an embedded IPython terminal
        persist (bool): currently unused (kept for backward compatibility)
    Returns:
        str: execstr
    """
    print('[main] ibeis.main_module.main_loop()')
    from ibeis import params
    import utool as ut
    #print('current process = %r' % (multiprocessing.current_process().name,))
    #== 'MainProcess':
    if rungui and not params.args.nogui:
        try:
            _guitool_loop(main_locals, ipy=ipy)
        except Exception as ex:
            # report and re-raise so the caller sees the failure
            ut.printex(ex, 'error in main_loop')
            raise
    #if not persist or params.args.cmd:
    #    main_close()
    # Put locals in the exec namespace
    ipycmd_execstr = ut.ipython_execstr()
    locals_execstr = ut.execstr_dict(main_locals, 'main_locals')
    execstr = locals_execstr + '\n' + ipycmd_execstr
    return execstr
def main_close(main_locals=None):
    """Tears down process-level state installed by ``main``/``_preload``."""
    # Only the signal handlers need resetting; the parallel shutdown and
    # verbose reporting that used to live here were retired long ago.
    _reset_signals()
#if __name__ == '__main__':
#    multiprocessing.freeze_support()
# Script entry point: runs this module's doctests.
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.main_module
        python -m ibeis.main_module --allexamples
        python -m ibeis.main_module --allexamples --noface --nosrc
    """
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
| apache-2.0 |
phdowling/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# shared RNG so the dataset permutations below are reproducible
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
    """Check classification on the toy dataset for both losses."""
    for loss in ('deviance', 'exponential'):
        est = GradientBoostingClassifier(loss=loss, n_estimators=10,
                                         random_state=1)
        # predicting before fitting must fail
        assert_raises(ValueError, est.predict, T)
        est.fit(X, y)
        assert_array_equal(est.predict(T), true_result)
        assert_equal(10, len(est.estimators_))
        # successive train scores should show some improvement
        drops = est.train_score_[:-1] - est.train_score_[1:]
        assert np.any(drops >= 0.0), \
            "Train deviance does not monotonically decrease."
def test_parameter_checks():
    """Invalid constructor parameters must raise ValueError at fit time."""
    bad_params = [
        dict(n_estimators=0),
        dict(n_estimators=-1),
        dict(learning_rate=0.0),
        dict(learning_rate=-1.0),
        dict(loss='foobar'),
        dict(min_samples_split=0.0),
        dict(min_samples_split=-1.0),
        dict(min_samples_leaf=0),
        dict(min_samples_leaf=-1.),
        dict(min_weight_fraction_leaf=-1.),
        dict(min_weight_fraction_leaf=0.6),
        dict(subsample=0.0),
        dict(subsample=1.1),
        dict(subsample=-0.1),
        dict(max_depth=-0.1),
        dict(max_depth=0),
        dict(init={}),
    ]
    for params in bad_params:
        assert_raises(ValueError,
                      GradientBoostingClassifier(**params).fit, X, y)
    # test fit before feature importance
    assert_raises(ValueError,
                  lambda: GradientBoostingClassifier().feature_importances_)
    # deviance requires ``n_classes >= 2``.
    assert_raises(ValueError,
                  lambda X, y: GradientBoostingClassifier(
                      loss='deviance').fit(X, y),
                  X, [0, 0, 0, 0])
def test_loss_function():
    """Regression losses are rejected by the classifier and vice versa."""
    for regression_loss in ('ls', 'lad', 'quantile', 'huber'):
        assert_raises(ValueError,
                      GradientBoostingClassifier(loss=regression_loss).fit,
                      X, y)
    for classification_loss in ('deviance', 'exponential'):
        assert_raises(ValueError,
                      GradientBoostingRegressor(loss=classification_loss).fit,
                      X, y)
def test_classification_synthetic():
    # Test GradientBoostingClassifier on synthetic dataset used by
    # Hastie et al. in ESLII Example 12.7.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    for loss in ('deviance', 'exponential'):
        gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
                                          max_depth=1, loss=loss,
                                          learning_rate=1.0, random_state=0)
        gbrt.fit(X_train, y_train)
        error_rate = (1.0 - gbrt.score(X_test, y_test))
        assert error_rate < 0.09, \
            "GB(loss={}) failed with error {}".format(loss, error_rate)
        # Stochastic variant (subsample < 1) should reach a tighter bound
        gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
                                          max_depth=1,
                                          learning_rate=1.0, subsample=0.5,
                                          random_state=0)
        gbrt.fit(X_train, y_train)
        error_rate = (1.0 - gbrt.score(X_test, y_test))
        assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
                                   "failed with error {}".format(loss, error_rate))
def test_boston():
    # Check consistency on dataset boston house prices with least squares
    # and least absolute deviation.
    for loss in ("ls", "lad", "huber"):
        for subsample in (1.0, 0.5):
            last_y_pred = None
            # constant (uniformly scaled) sample weights must not change
            # the predictions
            for i, sample_weight in enumerate(
                    (None, np.ones(len(boston.target)),
                     2 * np.ones(len(boston.target)))):
                clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
                                                max_depth=4, subsample=subsample,
                                                min_samples_split=1,
                                                random_state=1)
                assert_raises(ValueError, clf.predict, boston.data)
                clf.fit(boston.data, boston.target,
                        sample_weight=sample_weight)
                y_pred = clf.predict(boston.data)
                mse = mean_squared_error(boston.target, y_pred)
                assert mse < 6.0, "Failed with loss %s and " \
                    "mse = %.4f" % (loss, mse)
                if last_y_pred is not None:
                    np.testing.assert_array_almost_equal(
                        last_y_pred, y_pred,
                        err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
                        % (i, i - 1, loss, subsample))
                last_y_pred = y_pred
def test_iris():
    # Check consistency on dataset iris.
    # Both full and stochastic fits should score well; unit sample
    # weights must behave like no weights.
    for subsample in (1.0, 0.5):
        for sample_weight in (None, np.ones(len(iris.target))):
            clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                             random_state=1, subsample=subsample)
            clf.fit(iris.data, iris.target, sample_weight=sample_weight)
            score = clf.score(iris.data, iris.target)
            assert score > 0.9, "Failed with subsample %.1f " \
                "and score = %f" % (subsample, score)
def test_regression_synthetic():
    # Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 1, 'learning_rate': 0.1,
                         'loss': 'ls'}
    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    # default params for Friedman1; tuned regression_params for the rest
    clf = GradientBoostingRegressor()
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
    X = np.array(boston.data, dtype=np.float32)
    y = np.array(boston.target, dtype=np.float32)
    clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                    min_samples_split=1, random_state=1)
    clf.fit(X, y)
    #feature_importances = clf.feature_importances_
    assert_true(hasattr(clf, 'feature_importances_'))
    # transform with threshold="mean" keeps features above mean importance
    X_new = clf.transform(X, threshold="mean")
    assert_less(X_new.shape[1], X.shape[1])
    feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
    assert_array_almost_equal(X_new, X[:, feature_mask])
    # true feature importance ranking
    # true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
    # assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
    """predict_proba returns valid probabilities consistent with predict."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # probabilities are unavailable before fitting
    assert_raises(ValueError, clf.predict_proba, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    # check if probabilities are in [0, 1].
    proba = clf.predict_proba(T)
    assert np.all(proba >= 0.0)
    assert np.all(proba <= 1.0)
    # argmax over probabilities must reproduce the hard predictions
    labels = clf.classes_.take(proba.argmax(axis=1), axis=0)
    assert_array_equal(labels, true_result)
def test_check_inputs():
    # Test input checks (shape and type of X and y).
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # inconsistent number of samples in X and y
    assert_raises(ValueError, clf.fit, X, y + [0, 1])
    from scipy import sparse
    X_sparse = sparse.csr_matrix(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # sparse input is not supported
    assert_raises(TypeError, clf.fit, X_sparse, y)
    clf = GradientBoostingClassifier().fit(X, y)
    assert_raises(TypeError, clf.predict, X_sparse)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # sample_weight length must match the number of samples
    assert_raises(ValueError, clf.fit, X, y,
                  sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
    """predict must reject inputs whose shape is inconsistent with fit."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, rng.rand(len(X)))
    # wrong feature count, empty input, wrong feature count again
    bad_inputs = (np.array([1.0, 2.0])[:, np.newaxis],
                  np.array([]),
                  np.array([1.0, 2.0, 3.0])[:, np.newaxis])
    for est in (clf, reg):
        for bad in bad_inputs:
            assert_raises(ValueError, est.predict, bad)
def test_check_max_features():
    """Invalid max_features values must raise ValueError at fit time."""
    # zero, more than available, and a negative fraction are all invalid
    for bad_max_features in (0, len(X[0]) + 1, -0.1):
        est = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                        max_features=bad_max_features)
        assert_raises(ValueError, est.fit, X, y)
def test_max_feature_regression():
    # Test to make sure random state is set properly.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
                                      max_depth=2, learning_rate=.1,
                                      max_features=2, random_state=1)
    gbrt.fit(X_train, y_train)
    # hard-coded regression threshold on the held-out deviance
    deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
    assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
    # Test if max features is set properly for floats and str.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape
    X_train = X[:2000]
    y_train = y[:2000]
    # 'auto' means sqrt(n_features) for classification ...
    gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
    # ... and all features for regression
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, n_features)
    # float values are a fraction of n_features
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(n_features * 0.3))
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.log2(n_features)))
    # tiny fractions are clipped to at least one feature
    gbrt = GradientBoostingRegressor(n_estimators=1,
                                     max_features=0.01 / X.shape[1])
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    # Test whether staged decision function eventually gives
    # the same prediction.
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()
    # test raise ValueError if not fitted
    assert_raises(ValueError, lambda X: np.fromiter(
        clf.staged_predict(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # test if prediction for last stage equals ``predict``
    for y in clf.staged_predict(X_test):
        assert_equal(y.shape, y_pred.shape)
    # ``y`` is the final stage's prediction after the loop
    assert_array_equal(y_pred, y)
def test_staged_predict_proba():
    # Test whether staged predict proba eventually gives
    # the same prediction.
    X, y = datasets.make_hastie_10_2(n_samples=1200,
                                     random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)
    # test raise NotFittedError if not fitted
    assert_raises(NotFittedError, lambda X: np.fromiter(
        clf.staged_predict_proba(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    # test if prediction for last stage equals ``predict``
    for y_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, y_pred.shape)
    # ``y_pred`` is the final stage's prediction after the loop
    assert_array_equal(clf.predict(X_test), y_pred)
    # test if prediction for last stage equals ``predict_proba``
    for staged_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], staged_proba.shape[0])
        assert_equal(2, staged_proba.shape[1])
    assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
    # test that staged_functions make defensive copies
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(10, 3))
    y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
    for estimator in [GradientBoostingRegressor(),
                      GradientBoostingClassifier()]:
        estimator.fit(X, y)
        for func in ['predict', 'decision_function', 'predict_proba']:
            staged_func = getattr(estimator, "staged_" + func, None)
            if staged_func is None:
                # regressor has no staged_predict_proba
                continue
            with warnings.catch_warnings(record=True):
                staged_result = list(staged_func(X))
            # mutating one staged result must not corrupt the others
            staged_result[1][:] = 0
            assert_true(np.all(staged_result[0] != 0))
def test_serialization():
    """A pickled and restored model keeps its predictions and ensemble."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    # prefer the C implementation on python 2
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    # round-trip through the highest pickle protocol
    payload = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    restored = pickle.loads(payload)
    assert_array_equal(restored.predict(T), true_result)
    assert_equal(100, len(restored.estimators_))
def test_degenerate_targets():
    # Check if we can fit even though all targets are equal.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # classifier should raise exception
    assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, np.ones(len(X)))
    clf.predict(rng.rand(2))
    # regressor predicts the constant target everywhere
    assert_array_equal(np.ones((1,), dtype=np.float64),
                       clf.predict(rng.rand(2)))
def test_quantile_loss():
    """Quantile loss with alpha=0.5 must match least absolute deviation."""
    shared = dict(n_estimators=100, max_depth=4, random_state=7)
    clf_quantile = GradientBoostingRegressor(loss='quantile', alpha=0.5,
                                             **shared)
    clf_quantile.fit(boston.data, boston.target)
    y_quantile = clf_quantile.predict(boston.data)
    clf_lad = GradientBoostingRegressor(loss='lad', **shared)
    clf_lad.fit(boston.data, boston.target)
    y_lad = clf_lad.predict(boston.data)
    # the median regressor and the LAD regressor should agree closely
    assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
    """String class labels are handled like integer labels."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    str_y = tosequence(map(str, y))
    clf.fit(X, str_y)
    expected = tosequence(map(str, true_result))
    assert_array_equal(clf.predict(T), expected)
    assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
    """Float class labels are handled like integer labels."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    float_y = np.asarray(y, dtype=np.float32)
    clf.fit(X, float_y)
    expected = np.asarray(true_result, dtype=np.float32)
    assert_array_equal(clf.predict(T), expected)
    assert_equal(100, len(clf.estimators_))
def test_shape_y():
    # Test with a column-vector y (shape (n, 1) instead of (n,)).
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    y_ = np.asarray(y, dtype=np.int32)
    y_ = y_[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise, elsewhere the warnings gets ignored in the
    # later tests, and the tests that check for this warning fail
    assert_warns(DataConversionWarning, clf.fit, X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_mem_layout():
    """Fitting works regardless of the memory layout of X and y."""
    # X in fortran and C order
    for make_X in (np.asfortranarray, np.ascontiguousarray):
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(make_X(X), y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))
    # y as int32 in C and fortran order
    for make_y in (np.ascontiguousarray, np.asfortranarray):
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X, make_y(np.asarray(y, dtype=np.int32)))
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    # Test if oob improvement has correct shape and regression test.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    # one OOB improvement entry per boosting stage
    assert clf.oob_improvement_.shape[0] == 100
    # hard-coded regression test - change if modification in OOB computation
    assert_array_almost_equal(clf.oob_improvement_[:5],
                              np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
                              decimal=2)
def test_oob_improvement_raise():
    """oob_improvement_ is unavailable when subsample == 1.0."""
    est = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=1.0)
    est.fit(X, y)
    # no held-out samples -> accessing the attribute must fail
    assert_raises(AttributeError, lambda: est.oob_improvement_)
def test_oob_multilcass_iris():
    # Check OOB improvement on multi-class dataset.
    # NOTE(review): typo in the function name ("multilcass") kept — renaming
    # would change the public test identifier.
    clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                     random_state=1, subsample=0.5)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert score > 0.9, "Failed with subsample %.1f " \
        "and score = %f" % (0.5, score)
    assert clf.oob_improvement_.shape[0] == clf.n_estimators
    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(clf.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
def test_verbose_output():
    # Check verbose=1 does not cause error.
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture stdout so the verbose report can be inspected
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=1, subsample=0.8)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # with OOB
    true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
        'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # one for 1-10 and then 9 for 20-100
    assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
    # Check verbose=2 does not cause error.
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture stdout so the verbose report can be inspected
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=2)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # no OOB
    true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
        'Iter', 'Train Loss', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # 100 lines for n_estimators==100
    assert_equal(100, n_lines)
def test_warm_start():
    """Growing an ensemble via warm_start matches a single fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        reference = Cls(n_estimators=200, max_depth=1)
        reference.fit(X, y)
        # fit 100 stages, then continue to 200 with warm_start
        grown = Cls(n_estimators=100, max_depth=1, warm_start=True)
        grown.fit(X, y)
        grown.set_params(n_estimators=200)
        grown.fit(X, y)
        assert_array_almost_equal(grown.predict(X), reference.predict(X))
def test_warm_start_n_estimators():
    """Warm starting up to 300 stages matches a direct 300-stage fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        reference = Cls(n_estimators=300, max_depth=1)
        reference.fit(X, y)
        # fit 100 stages, then continue to 300 with warm_start
        grown = Cls(n_estimators=100, max_depth=1, warm_start=True)
        grown.fit(X, y)
        grown.set_params(n_estimators=300)
        grown.fit(X, y)
        assert_array_almost_equal(grown.predict(X), reference.predict(X))
def test_warm_start_max_depth():
    # Test if possible to fit trees of different depth in ensemble.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        # continue boosting with deeper trees
        est.set_params(n_estimators=110, max_depth=2)
        est.fit(X, y)
        # last 10 trees have different depth
        assert est.estimators_[0, 0].max_depth == 1
        for i in range(1, 11):
            assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
    # Test if fit clears state.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1)
        est.fit(X, y)
        est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_2.fit(X, y) # inits state
        est_2.set_params(warm_start=False)
        est_2.fit(X, y) # clears old state and equals est
        # turning warm_start off must give the same model as a fresh fit
        assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
    # Shrinking a warm-started ensemble to zero stages must raise.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for estimator_cls in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = estimator_cls(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=0)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_smaller_n_estimators():
    # Shrinking a warm-started ensemble by even one stage must raise.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for estimator_cls in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = estimator_cls(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=99)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_equal_n_estimators():
    # Warm-start refit with an unchanged n_estimators must be a no-op
    # as far as predictions are concerned.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for estimator_cls in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = estimator_cls(n_estimators=100, max_depth=1)
        model.fit(X, y)
        model_copy = clone(model)
        model_copy.set_params(n_estimators=model.n_estimators, warm_start=True)
        model_copy.fit(X, y)
        assert_array_almost_equal(model_copy.predict(X), model.predict(X))
def test_warm_start_oob_switch():
    # Test if oob can be turned on during warm start.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        est.set_params(n_estimators=110, subsample=0.5)
        est.fit(X, y)
        # The first 100 stages were fit without subsampling, so no OOB
        # improvement was recorded for them.
        assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
        # The 10 stages added after enabling subsampling are not zeros.
        # Use the builtin ``bool``: the ``np.bool`` alias was deprecated
        # in NumPy 1.20 and removed in 1.24, and is the identical dtype.
        assert_array_equal(est.oob_improvement_[-10:] == 0.0,
                           np.zeros(10, dtype=bool))
def test_warm_start_oob():
    # OOB improvements recorded while warm-starting must match those of
    # a single fit over the same first 100 stages.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for estimator_cls in (GradientBoostingRegressor, GradientBoostingClassifier):
        reference = estimator_cls(n_estimators=200, max_depth=1, subsample=0.5,
                                  random_state=1)
        reference.fit(X, y)
        incremental = estimator_cls(n_estimators=100, max_depth=1, subsample=0.5,
                                    random_state=1, warm_start=True)
        incremental.fit(X, y)
        incremental.set_params(n_estimators=200)
        incremental.fit(X, y)
        assert_array_almost_equal(incremental.oob_improvement_[:100],
                                  reference.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Monitor callback that requests early stopping after 10 iterations.

    Parameters
    ----------
    i : int
        Zero-based index of the boosting iteration just completed.
    est : object
        The estimator being fit (unused).
    locals : dict
        Caller's local variables (unused).  The name shadows the
        ``locals`` builtin but is kept for interface compatibility.

    Returns
    -------
    bool
        True on the 10th iteration (``i == 9``), which stops training.
    """
    # Simplified from an if/else returning True/False literals.
    return i == 9
def test_monitor_early_stopping():
    # Test if monitor return value works.
    # ``early_stopping_monitor`` requests a stop after iteration index 9,
    # so only 10 of the requested 20 stages are actually fit.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)  # this is not altered
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        # try refit
        # Without warm_start, refitting discards the early-stopped state
        # and trains the full requested ensemble.
        est.set_params(n_estimators=30)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.train_score_.shape[0], 30)
        # Same scenario with warm_start=True: early stopping still cuts
        # the first fit down to 10 stages.
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
                  warm_start=True)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        # try refit
        # Disabling warm_start resets the state; the refit grows the full
        # 30-stage ensemble.
        est.set_params(n_estimators=30, warm_start=False)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.train_score_.shape[0], 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # With max_depth=None and max_leaf_nodes=k+1 the induced trees must be
    # grown greedily: depth k with exactly k + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    model = GradientBoostingClassifier(n_estimators=20, max_depth=None,
                                       random_state=1, max_leaf_nodes=k + 1)
    model.fit(X, y)
    first_tree = model.estimators_[0, 0].tree_
    assert_equal(first_tree.max_depth, k)
    leaf_count = first_tree.children_left[first_tree.children_left == TREE_LEAF].shape[0]
    assert_equal(leaf_count, k + 1)
def test_complete_regression():
    # Greedy growth with max_leaf_nodes=k+1 must yield exactly k + 1
    # leaves even without a depth limit.
    from sklearn.tree._tree import TREE_LEAF
    k = 4
    model = GradientBoostingRegressor(n_estimators=20, max_depth=None,
                                      random_state=1, max_leaf_nodes=k + 1)
    model.fit(boston.data, boston.target)
    last_tree = model.estimators_[-1, 0].tree_
    leaf_count = last_tree.children_left[last_tree.children_left == TREE_LEAF].shape[0]
    assert_equal(leaf_count, k + 1)
def test_zero_estimator_reg():
    # The zero initial estimator must work for regression both as an
    # object and via the 'zero' string alias; unknown aliases must raise.
    for init in (ZeroEstimator(), 'zero'):
        est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                        random_state=1, init=init)
        est.fit(boston.data, boston.target)
        mse = mean_squared_error(boston.target, est.predict(boston.data))
        assert_almost_equal(mse, 33.0, decimal=0)
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
    # The zero initial estimator must work for classification both as an
    # object and via the 'zero' string alias; unknown aliases must raise.
    X = iris.data
    y = np.array(iris.target)
    for init in (ZeroEstimator(), 'zero'):
        est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                         random_state=1, init=init)
        est.fit(X, y)
        assert est.score(X, y) > 0.96
    # Repeat on a binarized target (binary classification path).
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert est.score(X, y) > 0.96
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # max_leaf_nodes must take precedence over max_depth.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for GBEstimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        # With max_leaf_nodes set, the depth limit is ignored ...
        with_leaf_limit = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        assert_greater(with_leaf_limit.estimators_[0, 0].tree_.max_depth, 1)
        # ... and without it, max_depth is honored.
        depth_only = GBEstimator(max_depth=1).fit(X, y)
        assert_equal(depth_only.estimators_[0, 0].tree_.max_depth, 1)
def test_warm_start_wo_nestimators_change():
    # Refitting a warm-started model without touching n_estimators must
    # not grow the ensemble.  Regression test for #3513.
    clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    for _ in range(2):
        clf.fit([[0, 1], [2, 3]], [0, 1])
        assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
    # Probability estimates with the exponential loss.
    clf = GradientBoostingClassifier(loss='exponential',
                                     n_estimators=100, random_state=1)
    # An unfitted classifier cannot predict probabilities.
    assert_raises(ValueError, clf.predict_proba, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    y_proba = clf.predict_proba(T)
    # Probabilities must lie in [0, 1] ...
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)
    # ... and be consistent with the decision function via the logistic
    # map used for the exponential loss.
    score = clf.decision_function(T).ravel()
    assert_array_almost_equal(y_proba[:, 1],
                              1.0 / (1.0 + np.exp(-2 * score)))
    # The argmax of the probabilities reproduces the hard predictions.
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
    # Zero-weighted samples must be ignored by every regression loss.
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # The first two samples carry zero weight, so only the last two count.
    sample_weight = [0, 0, 1, 1]
    for loss in ('huber', 'ls', 'lad', 'quantile'):
        gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
    # Regression test for issue #4447: min_weight_fraction_leaf must be
    # derived from the sample weights, not from the raw sample count.
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # The first two samples carry zero weight and must be ignored.
    sample_weight = [0, 0, 1, 1]
    gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
    gb.fit(X, y, sample_weight=sample_weight)
    assert_true(gb.predict([[1, 0]])[0] > 0.5)
    # Total weight is 2, so a fraction of 0.1 gives a leaf weight of 0.2.
    assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
    # Zero-weighted samples must be ignored by every classification loss.
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('deviance', 'exponential'):
        # BUG FIX: the loop variable was previously unused, so every
        # iteration tested the default loss; pass it through explicitly.
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    # An unrecognized method name must be rejected with a ValueError.
    assert_raises(ValueError,
                  sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
    # Every sampling method must satisfy the generic sampler contract.
    for method in ("auto", "tracking_selection", "reservoir_sampling", "pool"):
        # Bind the current method via a default argument so the helper is
        # a self-contained sampler with the expected signature.
        def sampler(n_population, n_samples, random_state=None,
                    method=method):
            return sample_without_replacement(n_population, n_samples,
                                              method=method,
                                              random_state=random_state)
        check_edge_case_of_sample_int(sampler)
        check_sample_int(sampler)
        check_sample_int_distribution(sampler)
def check_edge_case_of_sample_int(sample_without_replacement):
    # Requesting more samples than the population holds is invalid.
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)
    # Sampling the whole population is allowed, including the empty one.
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))
    assert_equal(sample_without_replacement(1, 1).shape, (1, ))
    # Sampling a strict subset works too.
    assert_equal(sample_without_replacement(5, 0).shape, (0, ))
    assert_equal(sample_without_replacement(5, 1).shape, (1, ))
    # Negative sizes are invalid on either side.
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # Adapted from test_random.py of python-core: for every allowable
    # sample size 0 <= k <= N, the sample must have the requested length
    # and contain only unique items drawn from range(N).
    n_population = 100
    for n_samples in range(n_population + 1):
        sample = sample_without_replacement(n_population, n_samples)
        assert_equal(len(sample), n_samples)
        distinct = np.unique(sample)
        assert_equal(np.size(distinct), n_samples)
        assert_true(np.all(distinct < n_population))
    # Degenerate case: empty population, empty sample.
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # Adapted from test_random.py of python-core: repeated sampling must
    # eventually produce every possible combination.
    n_population = 10
    # A large trial budget prevents false negatives without slowing the
    # typical run, which breaks out early.
    n_trials = 10000
    for n_samples in range(n_population):
        # Counting combinations (not permutations) also works for
        # sampling algorithms that do not return a random permutation of
        # the chosen subset.
        n_expected = combinations(n_population, n_samples, exact=True)
        seen = {}
        for _ in range(n_trials):
            drawn = frozenset(sample_without_replacement(n_population,
                                                         n_samples))
            seen[drawn] = None
            if len(seen) == n_expected:
                break
        else:
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)" %
                (len(seen), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
    # Sanity-check ``random_choice_csc``: for each scenario, draw a
    # sparse matrix with one column per output and verify the empirical
    # class frequencies match the requested distribution to one decimal.
    # Explicit class probabilities
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Implicit class probabilities
    # (omitting the probabilities must default to a uniform distribution)
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
    # (minlength keeps zero-probability classes in the bincount)
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel(),
                        minlength=len(class_probabilites[k])) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # Every invalid (classes, probabilities) combination must raise.
    bad_inputs = [
        # length of an entry in classes vs. class_probabilites mismatched
        ([np.array([0, 1]), np.array([0, 1, 2, 3])],
         [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]),
        # string class labels are not a supported dtype
        ([np.array(["a", "1"]), np.array(["z", "1", "2"])],
         [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]),
        # floating-point class labels are not a supported dtype either
        ([np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])],
         [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]),
        # probabilities that do not sum to 1
        ([np.array([0, 1]), np.array([0, 1, 2])],
         [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]),
    ]
    for classes, class_probabilites in bad_inputs:
        assert_raises(ValueError, random_choice_csc, 4, classes,
                      class_probabilites, 1)
| bsd-3-clause |
JingheZ/shogun | applications/easysvm/tutpaper/svm_params.py | 26 | 12935 |
#from matplotlib import rc
#rc('text', usetex=True)
# Module-wide plotting defaults used by the figure-generation code below.
fontsize = 16  # font size (points) for titles and axis labels
contourFontsize = 12  # font size for contour level labels
showColorbar = False  # whether decision_boundary_plot attaches a colorbar
# Shared axis limits for all decision-surface plots.
# NOTE(review): the data files appear to hold GC-content values mapped into
# this range -- confirm against the data before changing.
xmin = -1
xmax = 1
ymin = -1.05
ymax = 1
import sys,os
import numpy
import shogun
from shogun.Kernel import GaussianKernel, LinearKernel, PolyKernel
from shogun.Features import RealFeatures, BinaryLabels
from shogun.Classifier import LibSVM
from numpy import arange
import matplotlib
from matplotlib import pylab
pylab.rcParams['contour.negative_linestyle'] = 'solid'
def features_from_file(fileName) :
    """Load a comma-separated data file into shogun feature/label objects.

    The first line is treated as a header and skipped.  Each remaining
    line is parsed as ``<id>,<label>,<feature>,...``: column 1 holds the
    float label and columns 2+ the feature values.

    Returns a tuple ``(shogun_features, raw_features, shogun_labels)``
    where ``raw_features`` is the plain list-of-lists used for plotting.
    """
    features = []
    labels = []
    # Use a context manager so the handle is closed even on a parse error
    # (the original leaked the open file).
    with open(fileName) as fileHandle:
        fileHandle.readline()  # skip header line
        for line in fileHandle:
            tokens = line.split(',')
            labels.append(float(tokens[1]))
            features.append([float(token) for token in tokens[2:]])
    # ``float`` replaces the deprecated ``numpy.float`` alias (removed in
    # NumPy 1.24); the resulting dtype is identical.
    return RealFeatures(numpy.transpose(numpy.array(features))), features, BinaryLabels(numpy.array(labels, float))
def create_kernel(kname, features, kparam=None) :
    """Construct a shogun kernel of the named type over ``features``.

    Parameters
    ----------
    kname : str
        One of 'gauss', 'linear' or 'poly'.
    features : RealFeatures
        Training features used on both sides of the kernel.
    kparam : float or int, optional
        Kernel width for 'gauss', degree for 'poly'; ignored by 'linear'.

    Raises
    ------
    ValueError
        For an unknown kernel name (previously this fell through and
        raised an opaque NameError on the unbound ``kernel`` variable).
    """
    if kname == 'gauss' :
        kernel = GaussianKernel(features, features, kparam)
    elif kname == 'linear':
        kernel = LinearKernel(features, features)
    elif kname == 'poly' :
        kernel = PolyKernel(features, features, kparam, True, False)
    else:
        raise ValueError("unknown kernel type: %r" % (kname,))
    return kernel
def svm_train(kernel, labels, C1, C2=None):
    """Trains a SVM with the given kernel.

    Parameters
    ----------
    kernel : shogun kernel
        Kernel initialized on the training features.
    labels : BinaryLabels
        Training labels.
    C1 : float
        Regularization constant (both classes if C2 is omitted).
    C2 : float, optional
        Separate regularization constant for the second class.
    """
    num_threads = 1
    kernel.io.disable_progress()
    svm = LibSVM(C1, kernel, labels)
    # Compare against None explicitly: the original truthiness test would
    # silently ignore an explicit C2 of 0.
    if C2 is not None:
        svm.set_C(C1, C2)
    svm.parallel.set_num_threads(num_threads)
    svm.io.disable_progress()
    svm.train()
    return svm
def svm_test(svm, kernel, features_train, features_test) :
    """Predict on the test examples.

    Re-initializes the kernel for the (train, test) feature pair, then
    returns the raw SVM outputs as a plain label array.
    """
    kernel.init(features_train, features_test)
    return svm.apply().get_labels()
def decision_boundary_plot(svm, features, vectors, labels, kernel, fileName = None, **args) :
    """Plot a trained SVM's decision surface over a dense 2-D grid.

    Draws the decision boundary (plus the +/-1 margin contours unless
    ``decisionboundaryonly`` is passed), overlays the training points
    colored by class, and optionally marks support vectors and custom
    annotations.  Recognized ``**args`` keys: title, xlabel, ylabel,
    fontsize, contourFontsize, showColorbar, show, customwandb,
    drawarrow, decisionboundaryonly, showmovedpoint.  When ``fileName``
    is given the figure is saved there and not shown by default.
    """
    # Unpack optional keyword arguments, falling back to local defaults.
    title = None
    if 'title' in args :
        title = args['title']
    xlabel = None
    if 'xlabel' in args :
        xlabel = args['xlabel']
    ylabel = None
    if 'ylabel' in args :
        ylabel = args['ylabel']
    fontsize = 'medium'
    if 'fontsize' in args :
        fontsize = args['fontsize']
    contourFontsize = 10
    if 'contourFontsize' in args :
        contourFontsize = args['contourFontsize']
    showColorbar = True
    if 'showColorbar' in args :
        showColorbar = args['showColorbar']
    show = True
    if fileName is not None :
        show = False
    if 'show' in args :
        show = args['show']
    # setting up the grid
    # (gridX enumerates all (x, y) nodes row by row for batch evaluation)
    delta = 0.005
    x = arange(xmin, xmax, delta)
    y = arange(ymin, ymax, delta)
    Z = numpy.zeros((len(x), len(y)), numpy.float_)
    gridX = numpy.zeros((len(x) *len(y), 2), numpy.float_)
    n = 0
    for i in range(len(x)) :
        for j in range(len(y)) :
            gridX[n][0] = x[i]
            gridX[n][1] = y[j]
            n += 1
    # For linear kernels the caller may override the learned (w, b) ...
    if kernel.get_name() == 'Linear' and 'customwandb' in args:
        kernel.init_optimization_svm(svm)
        b=svm.get_bias()
        w=kernel.get_w()
        kernel.set_w(args['customwandb'][0])
        svm.set_bias(args['customwandb'][1])
    # ... or request an arrow showing the weight vector from the boundary.
    if kernel.get_name() == 'Linear' and 'drawarrow' in args:
        kernel.init_optimization_svm(svm)
        b=svm.get_bias()
        w=kernel.get_w()
        s=1.0/numpy.dot(w,w)/1.17  # arrow length scale (hand-tuned)
        pylab.arrow(0,-b/w[1], w[0]*s,s*w[1], width=0.01, fc='#dddddd', ec='k')
    # Evaluate the SVM on every grid node and reshape into the Z image.
    grid_features = RealFeatures(numpy.transpose(gridX))
    results = svm_test(svm, kernel, features, grid_features)
    n = 0
    for i in range(len(x)) :
        for j in range(len(y)) :
            Z[i][j] = results[n]
            n += 1
    # Light-gray background colormap for the decision values.
    cdict = {'red' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
             'green':((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
             'blue' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
             }
    my_cmap = matplotlib.colors.LinearSegmentedColormap('lightgray',cdict,256)
    im = pylab.imshow(numpy.transpose(Z),
                      interpolation='bilinear', origin='lower',
                      cmap=my_cmap, extent=(xmin,xmax,ymin,ymax) )
    # Contours: just the boundary (level 0), or boundary plus margins.
    if 'decisionboundaryonly' in args:
        C1 = pylab.contour(numpy.transpose(Z),
                           [0],
                           origin='lower',
                           linewidths=(3),
                           colors = ['k'],
                           extent=(xmin,xmax,ymin,ymax))
    else:
        C1 = pylab.contour(numpy.transpose(Z),
                           [-1,0,1],
                           origin='lower',
                           linewidths=(1,3,1),
                           colors = ['k','k'],
                           extent=(xmin,xmax,ymin,ymax))
    pylab.clabel(C1,
                 inline=1,
                 fmt='%1.1f',
                 fontsize=contourFontsize)
    # plot the data
    # (negative class: blue circles; positive class: red squares)
    lab=labels.get_labels()
    vec=numpy.array(vectors)
    idx=numpy.where(lab==-1)[0]
    pylab.scatter(vec[idx,0], vec[idx,1], s=300, c='#4444ff', marker='o', alpha=0.8, zorder=100)
    idx=numpy.where(lab==+1)[0]
    pylab.scatter(vec[idx,0], vec[idx,1], s=500, c='#ff4444', marker='s', alpha=0.8, zorder=100)
    # plot SVs
    # (points with |output| <= 1.01, i.e. on or inside the margin)
    if not 'decisionboundaryonly' in args:
        training_outputs = svm_test(svm, kernel, features, features)
        sv_idx=numpy.where(abs(training_outputs)<=1.01)[0]
        pylab.scatter(vec[sv_idx,0], vec[sv_idx,1], s=100, c='k', marker='o', alpha=0.8, zorder=100)
    # Hard-coded annotation of a manually moved point (tutorial figure).
    if 'showmovedpoint' in args:
        x=-0.779838709677
        y=-0.1375
        pylab.scatter([x], [y], s=300, c='#4e4e61', marker='o', alpha=1, zorder=100, edgecolor='#454548')
        pylab.arrow(x,y-0.1, 0, -0.8/1.5, width=0.01, fc='#dddddd', ec='k')
        #pylab.show()
    # Final decorations: title, axis labels, optional colorbar.
    if title is not None :
        pylab.title(title, fontsize=fontsize)
    if ylabel:
        pylab.ylabel(ylabel,fontsize=fontsize)
    if xlabel:
        pylab.xlabel(xlabel,fontsize=fontsize)
    if showColorbar :
        pylab.colorbar(im)
    # colormap:
    pylab.hot()
    if fileName is not None :
        pylab.savefig(fileName)
    if show :
        pylab.show()
def add_percent_ticks():
    """Relabel the current axes so only the extreme ticks are shown,
    rendered as percentages ('0%' on the x axis, '100%' at both ends)."""
    axes = pylab.gca()
    ticks = pylab.getp(axes, 'xticks')
    ticklabels = len(ticks) * ['']
    ticklabels[0] = '0%'
    ticklabels[-1] = '100%'
    pylab.setp(axes, xticklabels=ticklabels)
    pylab.setp(axes, yticklabels=['0%', '100%'])
    ticks = pylab.getp(axes, 'yticks')
    ticklabels = len(ticks) * ['']
    # only the topmost y tick gets a label
    ticklabels[-1] = '100%'
    pylab.setp(axes, yticklabels=ticklabels)
    # Apply the module-level font size to the surviving tick labels.
    pylab.setp(pylab.getp(axes, 'xticklabels'), fontsize=fontsize)
    pylab.setp(pylab.getp(axes, 'yticklabels'), fontsize=fontsize)
def create_figures(extension = 'pdf', directory = '../../tex/figures') :
    """Generate all SVM tutorial figures and save them under ``directory``.

    Parameters
    ----------
    extension : str
        Output image format/extension (a leading dot is added if missing).
    directory : str
        Destination directory for the saved figure files.
    """
    if extension[0] != '.' :
        extension = '.' + extension
    dpi=90  # resolution shared by every figure
    # data and linear decision boundary
    features,vectors,labels = features_from_file('data/small_gc_toy.data')
    kernel = create_kernel('linear', features)
    svm = svm_train(kernel, labels, 0.7)
    pylab.figure(figsize=(8,6), dpi=dpi)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           fontsize=fontsize, contourFontsize=contourFontsize,
                           title="Linear Separation", customwandb=(numpy.array([-0.05, -1.0]), -0.3),
                           ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
                           show=False, showColorbar=showColorbar, decisionboundaryonly=True)
    add_percent_ticks()
    pylab.savefig(os.path.join(directory, 'data_and_linear_classifier' + extension))
    pylab.close()
    #####################################################################################
    # data and svm decision boundary
    features,vectors,labels = features_from_file('data/small_gc_toy.data')
    kernel = create_kernel('linear', features)
    svm = svm_train(kernel, labels, 100)
    pylab.figure(figsize=(8,6), dpi=dpi)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           fontsize=fontsize, contourFontsize=contourFontsize,
                           title="Maximum Margin Separation", drawarrow=True,
                           ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
                           show=False, showColorbar=showColorbar)
    add_percent_ticks()
    pylab.savefig(os.path.join(directory, 'data_and_svm_classifier' + extension))
    pylab.close()
    #####################################################################################
    # the effect of C on the decision surface:
    # (same data with an outlier, fit with a large and a small C)
    features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
    pylab.figure(figsize=(16,6), dpi=dpi)
    pylab.subplot(121)
    kernel = create_kernel('linear', features)
    svm = svm_train(kernel, labels, 200)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title = 'Soft-Margin with C=200', ylabel="GC Content Before 'AG'",
                           xlabel="GC Content After 'AG'", fontsize=fontsize,
                           contourFontsize=contourFontsize, show=False, showmovedpoint=True,
                           showColorbar=showColorbar)
    add_percent_ticks()
    pylab.subplot(122)
    kernel = create_kernel('linear', features)
    svm = svm_train(kernel, labels, 2)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title = 'Soft-Margin with C=2',
                           ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
                           fontsize=fontsize, contourFontsize=contourFontsize, show=False, showColorbar=showColorbar)
    add_percent_ticks()
    #pylab.subplots_adjust(bottom=0.05, top=0.95)
    pylab.savefig(os.path.join(directory, 'effect_of_c' + extension))
    pylab.close()
    ####################################################################################
    # playing with nonlinear data:
    # the effect of kernel parameters
    # (linear vs. polynomial kernels of increasing degree)
    features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
    pylab.figure(figsize=(24,6), dpi=dpi)
    pylab.subplot(131)
    kernel = create_kernel('linear', features)
    svm = svm_train(kernel, labels, 100)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title = 'Linear Kernel',
                           ylabel="GC Content Before 'AG'",
                           fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
    add_percent_ticks()
    pylab.subplot(132)
    kernel = create_kernel('poly', features, 2)
    svm = svm_train(kernel, labels, 100)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title='Polynomial Kernel d=2',
                           xlabel="GC Content After 'AG'",
                           fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
    add_percent_ticks()
    pylab.subplot(133)
    kernel = create_kernel('poly', features, 5)
    svm = svm_train(kernel, labels, 10)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title='Polynomial Kernel d=5',
                           fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
    add_percent_ticks()
    #pylab.subplots_adjust(bottom=0.05, top=0.95)
    pylab.savefig(os.path.join(directory, 'params_polynomial' + extension))
    pylab.close()
    ####################################################################################
    #effects of sigma
    # (Gaussian kernels: from very smooth to heavily overfit boundaries)
    pylab.figure(figsize=(24,6), dpi=dpi)
    pylab.subplot(131)
    gamma = 0.1
    sigma = 20.0
    kernel = create_kernel('gauss', features, sigma)
    svm = svm_train(kernel, labels, 100)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title='Gaussian Kernel Sigma=20',
                           ylabel="GC Content Before 'AG'",
                           fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
    add_percent_ticks()
    pylab.subplot(132)
    sigma = 1.0
    kernel = create_kernel('gauss', features, sigma)
    svm = svm_train(kernel, labels, 100)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title='Gaussian Kernel Sigma=1',
                           xlabel="GC Content After 'AG'",
                           fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
    add_percent_ticks()
    pylab.subplot(133)
    sigma = 0.05
    kernel = create_kernel('gauss', features, sigma)
    svm = svm_train(kernel, labels, 100)
    decision_boundary_plot(svm, features, vectors, labels, kernel,
                           title='Gaussian Kernel Sigma=0.05',
                           fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
    add_percent_ticks()
    #pylab.subplots_adjust(bottom=0.05, top=0.95)
    pylab.savefig(os.path.join(directory, 'params_gaussian' + extension))
    pylab.close()
####################################################################################
if __name__ == '__main__' :
    # Optional first CLI argument selects the figure file extension
    # (default: 'pdf').
    extension = 'pdf'
    if len(sys.argv) > 1 :
        extension = sys.argv[1]
    pylab.ioff()  # batch mode: save figures without opening windows
    create_figures(extension)
| gpl-3.0 |
MaxNoe/pyhexgrid | hexgrid/utils.py | 1 | 2136 | import numpy as np
import pandas as pd
from .hexpoints import HexPoints
# The six unit steps to a hexagon's neighbors in cube coordinates
# (each triple sums to zero), precomputed once per grid orientation.
DIRECTIONS = {
    orientation: HexPoints.from_points([
        (1, -1, 0),
        (1, 0, -1),
        (0, 1, -1),
        (-1, 1, 0),
        (-1, 0, 1),
        (0, -1, 1)
    ], orientation=orientation)
    for orientation in ('flat_top', 'pointy_top')
}
def get_neighbors(hexpoints):
    """Return, for each entry of ``hexpoints``, its six axial neighbors."""
    neighbors = []
    for points in hexpoints:
        neighbors.append(points + DIRECTIONS[points.orientation])
    return neighbors
def cube_round(hexpoints):
    ''' Round cube coordinates to nearest hexagon center '''
    # Component-wise rounding can break the cube-coordinate invariant
    # x + y + z == 0, so the component with the largest rounding error
    # is recomputed from the other two.
    x = hexpoints.x
    y = hexpoints.y
    z = hexpoints.z
    rx = np.round(x)
    ry = np.round(y)
    rz = np.round(z)
    x_diff = np.abs(x - rx)
    y_diff = np.abs(y - ry)
    z_diff = np.abs(z - rz)
    # x had the strictly largest error: rebuild it from y and z.
    mask1 = np.logical_and(x_diff > y_diff, x_diff > z_diff)
    rx[mask1] = -ry[mask1] - rz[mask1]
    # Otherwise, y had the larger error of the remaining two.
    mask2 = np.logical_and(np.logical_not(mask1), y_diff > z_diff)
    ry[mask2] = -rx[mask2] - rz[mask2]
    # NOTE(review): mask3 also covers the mask1 elements, but for those
    # rx was already set to -ry - rz, so rz = -rx - ry assigns rz to its
    # own value -- a harmless no-op, not a bug.
    mask3 = np.logical_not(mask2)
    rz[mask3] = -rx[mask3] - ry[mask3]
    return HexPoints(
        rx, ry, rz,
        data=hexpoints.data,
        orientation=hexpoints.orientation,
    )
def concatenate(iterable):
    '''
    Concatenate an iterable of HexPoints.

    Parameter
    ---------
    iterable: iterable of HexPoints
        The points to concatenate

    Returns
    -------
    conc: HexPoints
        The concatenated points
    '''
    hex_list = list(iterable)
    # All inputs must share one orientation; mixing them is an error.
    orientations = {p.orientation for p in hex_list}
    if len(orientations) > 1:
        raise ValueError('All HexPoints must have the same orientation')
    stacked_points = np.concatenate([p.points for p in hex_list], axis=0)
    combined_data = pd.concat([p.data for p in hex_list])
    return HexPoints.from_points(
        stacked_points,
        orientation=hex_list[0].orientation,
        data=combined_data,
    )
def append(hexpoints1, hexpoints2):
    '''
    Append two instances of HexPoints.

    Convenience wrapper around :func:`concatenate` for exactly two inputs.

    Parameter
    ---------
    hexpoints1: HexPoints
        first instance
    hexpoints2: HexPoints
        second instance

    Returns
    -------
    conc: HexPoints
        The concatenated points
    '''
    return concatenate((hexpoints1, hexpoints2))
| mit |
Ffisegydd/python-examples | examples/scipy/using odr to fit data with errors.py | 1 | 1301 | '''Use ODR to fit a quadratic function to data with errors in
both x and y.'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.odr import *
import random
# Initiate some data, giving some randomness using random.random().
# x is exact on a small integer grid; y follows x**2 with additive noise
# in [0, 1); both axes get uniform random error bars in [0, 1).
x = np.array([0, 1, 2, 3, 4, 5])
y = np.array([i**2 + random.random() for i in x])
x_err = np.array([random.random() for i in x])
y_err = np.array([random.random() for i in x])
# Define a function (quadratic in our case) to fit the data with.
def quad_func(p, x):
    """Quadratic model ``y = m * x**2 + c`` with parameters ``p = (m, c)``."""
    slope, offset = p
    return slope * x ** 2 + offset
# Create a model for fitting.
quad_model = Model(quad_func)
# Create a RealData object using our initiated data from above.
# RealData carries the per-point standard deviations for both axes.
data = RealData(x, y, sx=x_err, sy=y_err)
# Set up ODR with the model and data.
# beta0 is the initial parameter guess for (m, c).
odr = ODR(data, quad_model, beta0=[0., 1.])
# Run the regression.
out = odr.run()
# Use the in-built pprint method to give us results.
out.pprint()
# Generate fitted data.
# (evaluate the fitted model densely over the observed x range)
x_fit = np.linspace(x[0], x[-1], 1000)
y_fit = quad_func(out.beta, x_fit)
# Generate a plot to show the data, errors, and fit.
fig, ax = plt.subplots()
ax.errorbar(x, y, xerr=x_err, yerr=y_err, linestyle='None', marker='x')
ax.plot(x_fit, y_fit)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$f(x) = x^{2}$')
ax.set_title('Using ODR to fit data with both x and y errors.')
plt.show() | mit |
dungvtdev/upsbayescpm | bayespy/inference/vmp/nodes/GaussianProcesses.py | 5 | 25953 | ################################################################################
# Copyright (C) 2011-2012 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import itertools
import numpy as np
#import scipy as sp
#import scipy.linalg.decomp_cholesky as decomp
import scipy.linalg as linalg
#import scipy.special as special
#import matplotlib.pyplot as plt
#import time
#import profile
#import scipy.spatial.distance as distance
import scipy.sparse as sp
from bayespy.utils import misc as utils
from . import node as EF
from . import CovarianceFunctions as CF
class CovarianceMatrix:
    # Stub: intended interface for covariance-matrix objects.  Only the
    # Cholesky-factorization hook is declared; it is not implemented yet.
    def cholesky(self):
        pass
def multiply(A, B):
    """Element-wise (Hadamard) product of A and B via NumPy broadcasting."""
    product = np.multiply(A, B)
    return product
# m prior mean function
# k prior covariance function
# x data inputs
# z processed data outputs (z = inv(Cov) * (y-m(x)))
# U data covariance Cholesky factor
def gp_posterior_moment_function(m, k, x, y, k_sparse=None, pseudoinputs=None, noise=None):
    """Build a function that evaluates GP posterior moments given data.

    Parameters
    ----------
    m : callable
        Prior mean function; ``m(x)[0]`` is the mean vector at inputs ``x``.
    k : callable
        Prior covariance function; ``k(x1, x2)[0]`` is the covariance matrix
        and ``k(h)[0]`` the marginal variance vector.
    x : array
        Data inputs.
    y : array
        Data outputs.
    k_sparse : callable, optional
        Full-rank (sparse/noise) covariance function added to the prior.
    pseudoinputs : array, optional
        Pseudo-input locations enabling the sparse GP approximation.
    noise : array, optional
        Explicit noise covariance for the full-rank part.

    Returns
    -------
    get_moments : callable
        ``get_moments(h, covariance=1, mean=True)`` returns
        ``(mean, variance-vector)`` for ``covariance=1``,
        ``(mean, covariance-matrix)`` for ``covariance=2`` and
        ``(mean, None)`` when ``covariance`` is falsy.
    """
    # Prior
    # FIXME: We are ignoring the covariance of mu now..
    mu = m(x)[0]

    # Accumulate the full-rank (noise + sparse) covariance part.
    # NOTE: `is not None` is used instead of `!= None` -- comparing a NumPy
    # array with `!= None` is element-wise in modern NumPy and breaks the
    # truth test.
    K_noise = None
    if noise is not None:
        K_noise = noise
    if k_sparse is not None:
        if K_noise is None:
            K_noise = k_sparse(x, x)[0]
        else:
            K_noise += k_sparse(x, x)[0]

    if pseudoinputs is not None:
        p = pseudoinputs
        K_pp = k(p, p)[0]
        K_xp = k(x, p)[0]
        U = utils.chol(K_noise)
        # Lambda = K_pp + K_xp' * inv(K_noise) * K_xp
        Lambda = K_pp + np.dot(K_xp.T, utils.chol_solve(U, K_xp))
        U_lambda = utils.chol(Lambda)
        # Statistics for posterior predictions:
        # z = inv(Lambda) * K_xp' * inv(K_noise) * (y - mu)
        z = utils.chol_solve(U_lambda,
                             np.dot(K_xp.T,
                                    utils.chol_solve(U,
                                                     y - mu)))
        U = utils.chol(K_pp)
        # Now we can forget the location of the observations and
        # consider only the pseudoinputs when predicting.
        x = p
    else:
        # Exact posterior: K = full-rank part + prior covariance.
        K = K_noise
        if K is None:
            K = k(x, x)[0]
        else:
            try:
                K += k(x, x)[0]
            except Exception:
                # In-place addition can fail when mixing sparse and dense.
                K = K + k(x, x)[0]
        # Compute posterior GP statistics (skipped when there is no data).
        N = len(y)
        U = None
        z = None
        if N > 0:
            U = utils.chol(K)
            z = utils.chol_solve(U, y - mu)

    def get_moments(h, covariance=1, mean=True):
        """Posterior mean and (co)variance at inputs ``h``."""
        K_xh = k(x, h)[0]
        if k_sparse is not None:
            try:
                # This may not work, for instance, if either one is a
                # sparse matrix.
                K_xh += k_sparse(x, h)[0]
            except Exception:
                K_xh = K_xh + k_sparse(x, h)[0]
        # NumPy has problems when mixing matrices and arrays.
        # Matrices may appear, for instance, when you sum an array and
        # a sparse matrix.  Make sure the result is either an array or
        # a sparse matrix (not dense matrix!), because matrix objects
        # cause lots of problems:
        #
        #   array.dot(array) = array
        #   matrix.dot(array) = matrix
        #   sparse.dot(array) = array
        if not sp.issparse(K_xh):
            K_xh = np.asarray(K_xh)

        if mean:
            # Mean vector
            # FIXME: Ignoring the covariance of prior mu
            m_h = m(h)[0]
            if z is not None:
                m_h += K_xh.T.dot(z)
        else:
            m_h = None

        # Compute (co)variance matrix/vector
        if covariance:
            if covariance == 1:
                ## Compute variance vector
                k_h = k(h)[0]
                if k_sparse is not None:
                    k_h += k_sparse(h)[0]
                if U is not None:
                    if isinstance(K_xh, np.ndarray):
                        k_h -= np.einsum('i...,i...',
                                         K_xh,
                                         utils.chol_solve(U, K_xh))
                    else:
                        # TODO: This isn't very efficient way, but
                        # einsum doesn't work for sparse matrices..
                        # This may consume A LOT of memory for sparse
                        # matrices.
                        k_h -= np.asarray(K_xh.multiply(utils.chol_solve(U, K_xh))).sum(axis=0)
                if pseudoinputs is not None:
                    if isinstance(K_xh, np.ndarray):
                        k_h += np.einsum('i...,i...',
                                         K_xh,
                                         utils.chol_solve(U_lambda, K_xh))
                    else:
                        # TODO: see the sparse-matrix note above.
                        k_h += np.asarray(K_xh.multiply(utils.chol_solve(U_lambda, K_xh))).sum(axis=0)
                # Ensure non-negative variances
                k_h[k_h < 0] = 0
                return (m_h, k_h)
            elif covariance == 2:
                ## Compute full covariance matrix
                K_hh = k(h, h)[0]
                if k_sparse is not None:
                    K_hh += k_sparse(h)[0]
                if U is not None:
                    K_hh -= K_xh.T.dot(utils.chol_solve(U, K_xh))
                if pseudoinputs is not None:
                    K_hh += K_xh.T.dot(utils.chol_solve(U_lambda, K_xh))
                return (m_h, K_hh)
        else:
            return (m_h, None)

    return get_moments
# Constant function using GP mean protocol
class Constant(EF.Node):
    """Constant function node speaking the GP mean-function protocol."""

    def __init__(self, f, **kwargs):
        """Store the callable ``f`` and initialize the underlying node."""
        self.f = f
        EF.Node.__init__(self, dims=[(np.inf,)], **kwargs)

    def message_to_child(self, gradient=False):
        """Return a moment function evaluating ``f`` with no covariance."""
        def moments(x, gradient=False):
            value = [self.f(x), None]
            return (value, []) if gradient else value
        return moments
#class MultiDimensional(EF.NodeVariable):
# """ A multi-dimensional Gaussian process f(x). """
## class ToGaussian(EF.NodeVariable):
## """ Deterministic node which transform a Gaussian process into
## finite-dimensional Gaussian variable. """
## def __init__(self, f, x, **kwargs):
## EF.NodeVariable.__init__(self,
## f,
## x,
## plates=
## dims=
# Deterministic node for creating a set of GPs which can be used as a
# mean function to a general GP node.
class Multiple(EF.Node):
    """Deterministic node joining several GP mean-function nodes into one.

    The resulting node concatenates the means of its parent GPs so that it
    can serve as a multi-output mean function for a general GP node.
    """

    def __init__(self, GPs, **kwargs):
        # Ignore plates
        # NOTE(review): initializes via EF.NodeVariable.__init__ although the
        # class derives from EF.Node; this mirrors the GaussianProcess node in
        # the original module -- confirm NodeVariable is the intended base.
        EF.NodeVariable.__init__(self,
                                 *GPs,
                                 plates=(),
                                 dims=[(np.inf,), (np.inf,np.inf)],
                                 **kwargs)

    def message_to_parent(self, index):
        raise Exception("not implemented yet")

    def message_to_child(self, gradient=False):
        """Return a moment function concatenating the parents' means."""
        u = [parent.message_to_child() for parent in self.parents]
        def get_moments(xh, **kwargs):
            mh_all = []
            khh_all = []
            for i in range(len(self.parents)):
                xi = np.array(xh[i])
                # FIXME: We are ignoring the covariance of mu now..
                if gradient:
                    ((mh, khh), dm) = u[i](xi, **kwargs)
                else:
                    (mh, khh) = u[i](xi, **kwargs)
                mh_all = np.concatenate([mh_all, mh])
                # Fixed: `khh != None` is an element-wise comparison when khh
                # is an ndarray and breaks the truth test; use identity.
                if khh is not None:
                    print(khh)
                    raise Exception('Not implemented yet for covariances')
            # FIXME: Compute gradients!
            if gradient:
                return ([mh_all, khh_all], [])
            else:
                return [mh_all, khh_all]
        return get_moments
# Gaussian process distribution
class GaussianProcess(EF.Node):
    """Gaussian process distribution node.

    Parents are a mean function node ``m``, a covariance function node ``k``,
    an optional full-rank (sparse/noise) covariance function ``k_sparse`` and
    optional ``pseudoinputs`` enabling a sparse pseudo-input approximation.
    Lists of mean/covariance functions are wrapped into multi-output nodes.
    """
    def __init__(self, m, k, k_sparse=None, pseudoinputs=None, **kwargs):
        # Observed inputs/outputs; empty until observe() is called.
        self.x = np.array([])
        self.f = np.array([])
        ## self.x_obs = np.zeros((0,1))
        ## self.f_obs = np.zeros((0,))
        # NOTE(review): `!= None` is element-wise for ndarrays in modern
        # NumPy; here pseudoinputs is either None or an array -- confirm.
        if pseudoinputs != None:
            pseudoinputs = EF.NodeConstant([pseudoinputs],
                                           dims=[np.shape(pseudoinputs)])
        # By default, posterior == prior
        self.m = None #m
        self.k = None #k
        # Wrap lists of mean/covariance functions into single multi-output
        # function nodes so that their dimensions match.
        if isinstance(k, list) and isinstance(m, list):
            if len(k) != len(m):
                raise Exception('The number of mean and covariance functions must be equal.')
            k = CF.Multiple(k)
            m = Multiple(m)
        elif isinstance(k, list):
            D = len(k)
            k = CF.Multiple(k)
            m = Multiple(D*[m])
        elif isinstance(m, list):
            D = len(m)
            k = CF.Multiple(D*[k])
            m = Multiple(m)
        # Ignore plates
        # NOTE(review): calls EF.NodeVariable.__init__ although the class
        # derives from EF.Node -- confirm NodeVariable is the intended base.
        EF.NodeVariable.__init__(self,
                                 m,
                                 k,
                                 k_sparse,
                                 pseudoinputs,
                                 plates=(),
                                 dims=[(np.inf,), (np.inf,np.inf)],
                                 **kwargs)

    def __call__(self, x, covariance=None):
        """Evaluate posterior moments at inputs x.

        ``covariance=None`` returns the mean only, ``'vector'`` adds the
        marginal variance vector and ``'matrix'`` the full covariance.
        """
        if not covariance:
            return self.u(x, covariance=False)[0]
        elif covariance.lower() == 'vector':
            return self.u(x, covariance=1)
        elif covariance.lower() == 'matrix':
            return self.u(x, covariance=2)
        else:
            raise Exception("Unknown covariance type requested")

    def message_to_parent(self, index):
        """Message to the mean-function parent (index 0); others unimplemented."""
        if index == 0:
            k = self.parents[1].message_to_child()[0]
            K = k(self.x, self.x)
            # NOTE(review): self.mu is never assigned in this class; this
            # branch would raise AttributeError if exercised -- confirm.
            return [self.x,
                    self.mu,
                    K]
        if index == 1:
            raise Exception("not implemented yet")

    def message_to_child(self):
        """Return the posterior moment function for child nodes."""
        if self.observed:
            raise Exception("Observable GP should not have children.")
        return self.u

    def get_parameters(self):
        """Return the posterior moment function (set by update())."""
        return self.u

    def observe(self, x, f):
        """Fix the GP to observed function values f at inputs x."""
        self.observed = True
        self.x = x
        self.f = f
        ## if np.ndim(f) == 1:
        ##     self.f = np.asmatrix(f).T
        ## else:
        ##     self.f = np.asmatrix(f)

    # You might want:
    # - mean for x
    # - covariance (and mean) for x
    # - variance (and mean) for x
    # - i.e., mean and/or (co)variance for x
    # - covariance for x1 and x2

    def lower_bound_contribution(self, gradient=False):
        """Compute the VB lower-bound term <log p(f|m,k)> for the observed f.

        When ``gradient=True``, derivative messages are also sent to the
        parent nodes through the callback functions they provided.
        Returns -inf when a required covariance is not positive definite.
        """
        # Get moment functions from parents
        m = self.parents[0].message_to_child(gradient=gradient)
        k = self.parents[1].message_to_child(gradient=gradient)
        if self.parents[2]:
            k_sparse = self.parents[2].message_to_child(gradient=gradient)
        else:
            k_sparse = None
        if self.parents[3]:
            pseudoinputs = self.parents[3].message_to_child(gradient=gradient)
            #pseudoinputs = self.parents[3].message_to_child(gradient=gradient)[0]
        else:
            pseudoinputs = None
        ## m = self.parents[0].message_to_child(gradient=gradient)[0]
        ## k = self.parents[1].message_to_child(gradient=gradient)[0]

        # Compute the parameters (covariance matrices etc) using
        # parents' moment functions
        DKs_xx = []
        DKd_xx = []
        DKd_xp = []
        DKd_pp = []
        Dxp = []
        Dmu = []
        if gradient:
            # FIXME: We are ignoring the covariance of mu now..
            ((mu, _), Dmu) = m(self.x, gradient=True)
            ## if k_sparse:
            ##     ((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
            if pseudoinputs:
                ((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
                ((xp,), Dxp) = pseudoinputs
                ((Kd_pp,), DKd_pp) = k(xp,xp, gradient=True)
                ((Kd_xp,), DKd_xp) = k(self.x, xp, gradient=True)
            else:
                ((K_xx,), DKd_xx) = k(self.x, self.x, gradient=True)
                if k_sparse:
                    ((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
                    try:
                        K_xx += Ks_xx
                    except:
                        # In-place addition may fail when mixing sparse/dense.
                        K_xx = K_xx + Ks_xx
        else:
            # FIXME: We are ignoring the covariance of mu now..
            (mu, _) = m(self.x)
            ## if k_sparse:
            ##     (Ks_xx,) = k_sparse(self.x, self.x)
            if pseudoinputs:
                (Ks_xx,) = k_sparse(self.x, self.x)
                (xp,) = pseudoinputs
                (Kd_pp,) = k(xp, xp)
                (Kd_xp,) = k(self.x, xp)
            else:
                (K_xx,) = k(self.x, self.x)
                if k_sparse:
                    (Ks_xx,) = k_sparse(self.x, self.x)
                    try:
                        K_xx += Ks_xx
                    except:
                        # In-place addition may fail when mixing sparse/dense.
                        K_xx = K_xx + Ks_xx
        mu = mu[0]
        #K = K[0]

        # Log pdf
        if self.observed:
            ## Log pdf for directly observed GP
            f0 = self.f - mu
            #print('hereiam')
            #print(K)
            if pseudoinputs:
                ## Pseudo-input approximation
                # Decompose the full-rank sparse/noise covariance matrix
                try:
                    Us_xx = utils.cholesky(Ks_xx)
                except linalg.LinAlgError:
                    print('Noise/sparse covariance not positive definite')
                    return -np.inf
                # Use Woodbury-Sherman-Morrison formula with the
                # following notation:
                #
                # y2 = f0' * inv(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx) * f0
                #
                # z = Ks_xx \ f0
                # Lambda = Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp
                # nu = inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
                # rho = Kd_xp * inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
                #
                # y2 = f0' * z - z' * rho
                z = Us_xx.solve(f0)
                Lambda = Kd_pp + np.dot(Kd_xp.T,
                                        Us_xx.solve(Kd_xp))
                ## z = utils.chol_solve(Us_xx, f0)
                ## Lambda = Kd_pp + np.dot(Kd_xp.T,
                ##                         utils.chol_solve(Us_xx, Kd_xp))
                try:
                    U_Lambda = utils.cholesky(Lambda)
                    #U_Lambda = utils.chol(Lambda)
                except linalg.LinAlgError:
                    print('Lambda not positive definite')
                    return -np.inf
                nu = U_Lambda.solve(np.dot(Kd_xp.T, z))
                #nu = utils.chol_solve(U_Lambda, np.dot(Kd_xp.T, z))
                rho = np.dot(Kd_xp, nu)
                y2 = np.dot(f0, z) - np.dot(z, rho)

                # Use matrix determinant lemma
                #
                # det(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx)
                # = det(Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp)
                #   * det(inv(Kd_pp)) * det(Ks_xx)
                # = det(Lambda) * det(Ks_xx) / det(Kd_pp)
                try:
                    Ud_pp = utils.cholesky(Kd_pp)
                    #Ud_pp = utils.chol(Kd_pp)
                except linalg.LinAlgError:
                    print('Covariance of pseudo inputs not positive definite')
                    return -np.inf
                logdet = (U_Lambda.logdet()
                          + Us_xx.logdet()
                          - Ud_pp.logdet())
                ## logdet = (utils.logdet_chol(U_Lambda)
                ##           + utils.logdet_chol(Us_xx)
                ##           - utils.logdet_chol(Ud_pp))

                # Compute the log pdf
                # NOTE(review): `gaussian_logpdf` is unqualified here but the
                # exact branch below calls `utils.gaussian_logpdf`; this would
                # raise NameError if executed -- confirm intended name.
                L = gaussian_logpdf(y2,
                                    0,
                                    0,
                                    logdet,
                                    np.size(self.f))

                # Add the variational cost of the pseudo-input
                # approximation

                # Compute gradients
                for (dmu, func) in Dmu:
                    # Derivative w.r.t. mean vector
                    # NOTE(review): placeholder NaN derivative -- not yet
                    # implemented for the pseudo-input approximation.
                    d = np.nan
                    # Send the derivative message
                    func(d)
                for (dKs_xx, func) in DKs_xx:
                    # Compute derivative w.r.t. covariance matrix
                    d = np.nan
                    # Send the derivative message
                    func(d)
                for (dKd_xp, func) in DKd_xp:
                    # Compute derivative w.r.t. covariance matrix
                    d = np.nan
                    # Send the derivative message
                    func(d)
                V = Ud_pp.solve(Kd_xp.T)
                Z = Us_xx.solve(V.T)
                ## V = utils.chol_solve(Ud_pp, Kd_xp.T)
                ## Z = utils.chol_solve(Us_xx, V.T)
                for (dKd_pp, func) in DKd_pp:
                    # Compute derivative w.r.t. covariance matrix
                    d = (0.5 * np.trace(Ud_pp.solve(dKd_pp))
                         - 0.5 * np.trace(U_Lambda.solve(dKd_pp))
                         + np.dot(nu, np.dot(dKd_pp, nu))
                         + np.trace(np.dot(dKd_pp,
                                           np.dot(V,Z))))
                    ## d = (0.5 * np.trace(utils.chol_solve(Ud_pp, dKd_pp))
                    ##      - 0.5 * np.trace(utils.chol_solve(U_Lambda, dKd_pp))
                    ##      + np.dot(nu, np.dot(dKd_pp, nu))
                    ##      + np.trace(np.dot(dKd_pp,
                    ##                        np.dot(V,Z))))
                    # Send the derivative message
                    func(d)
                for (dxp, func) in Dxp:
                    # Compute derivative w.r.t. covariance matrix
                    d = np.nan
                    # Send the derivative message
                    func(d)
            else:
                ## Full exact (no pseudo approximations)
                try:
                    U = utils.cholesky(K_xx)
                    #U = utils.chol(K_xx)
                except linalg.LinAlgError:
                    print('non positive definite, return -inf')
                    return -np.inf
                z = U.solve(f0)
                #z = utils.chol_solve(U, f0)
                #print(K)
                L = utils.gaussian_logpdf(np.dot(f0, z),
                                          0,
                                          0,
                                          U.logdet(),
                                          ## utils.logdet_chol(U),
                                          np.size(self.f))

                for (dmu, func) in Dmu:
                    # Derivative w.r.t. mean vector
                    d = -np.sum(z)
                    # Send the derivative message
                    func(d)
                for (dK, func) in DKd_xx:
                    # Compute derivative w.r.t. covariance matrix
                    #
                    # TODO: trace+chol_solve should be handled better
                    # for sparse matrices. Use sparse-inverse!
                    d = 0.5 * (dK.dot(z).dot(z)
                               - U.trace_solve_gradient(dK))
                    ##        - np.trace(U.solve(dK)))
                    ## d = 0.5 * (dK.dot(z).dot(z)
                    ##            - np.trace(utils.chol_solve(U, dK)))
                    #print('derivate', d, dK)
                    ## d = 0.5 * (np.dot(z, np.dot(dK, z))
                    ##            - np.trace(utils.chol_solve(U, dK)))
                    #
                    # Send the derivative message
                    func(d)
                for (dK, func) in DKs_xx:
                    # Compute derivative w.r.t. covariance matrix
                    d = 0.5 * (dK.dot(z).dot(z)
                               - U.trace_solve_gradient(dK))
                    ##        - np.trace(U.solve(dK)))
                    ## d = 0.5 * (dK.dot(z).dot(z)
                    ##            - np.trace(utils.chol_solve(U, dK)))
                    ## d = 0.5 * (np.dot(z, np.dot(dK, z))
                    ##            - np.trace(utils.chol_solve(U, dK)))
                    # Send the derivative message
                    func(d)
        else:
            ## Log pdf for latent GP
            raise Exception('Not implemented yet')

        return L

        ## Let f1 be observed and f2 latent function values.
        # Compute <log p(f1,f2|m,k)>
        #L = gaussian_logpdf(sum_product(np.outer(self.f,self.f) + self.Cov,
        # Compute <log q(f2)>

    def update(self):
        """Refresh the posterior moment function ``self.u`` from messages."""
        # Messages from parents
        m = self.parents[0].message_to_child()
        k = self.parents[1].message_to_child()
        if self.parents[2]:
            k_sparse = self.parents[2].message_to_child()
        else:
            k_sparse = None
        if self.parents[3]:
            pseudoinputs = self.parents[3].message_to_child()[0]
        else:
            pseudoinputs = None
        ## m = self.parents[0].message_to_child()[0]
        ## k = self.parents[1].message_to_child()[0]
        if self.observed:
            # Observations of this node
            self.u = gp_posterior_moment_function(m,
                                                  k,
                                                  self.x,
                                                  self.f,
                                                  k_sparse=k_sparse,
                                                  pseudoinputs=pseudoinputs)
        else:
            x = np.array([])
            y = np.array([])
            # Messages from children
            for (child,index) in self.children:
                (msg, mask) = child.message_to_parent(index)
                # Ignoring masks and plates..
                # m[0] is the inputs
                x = np.concatenate((x, msg[0]), axis=-2)
                # m[1] is the observations
                y = np.concatenate((y, msg[1]))
                # m[2] is the covariance matrix
                # NOTE(review): V is used before it is ever assigned -- this
                # would raise NameError on the first iteration; confirm.
                V = linalg.block_diag(V, msg[2])
            # NOTE(review): gp_posterior_moment_function takes no
            # `covariance` keyword -- this call would raise TypeError.
            self.u = gp_posterior_moment_function(m, k, x, y, covariance=V)
            self.x = x
            self.f = y
# At least for now, simplify this GP node such that a GP is either
# observed or latent. If it is observed, it doesn't take messages from
# children, actually, it should not even have children!
## # Pseudo for GPFA:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_rq(magnitude=.., lengthscale=.., alpha=..)
## f = NodeGPSet(0, [k1,k2,k3]) # assumes block diagonality
## # f = NodeGPSet(0, [[k11,k12,k13],[k21,k22,k23],[k31,k32,k33]])
## X = GaussianFromGP(f, [ [[t0,0],[t0,1],[t0,2]], [t1,0],[t1,1],[t1,2], ..])
## ...
## # Construct a sum of GPs if interested only in the sum term
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k = gp_cov_sum(k1, k2)
## f = NodeGP(0, k)
## f.observe(x, y)
## f.update()
## (mp, kp) = f.get_parameters()
## # Construct a sum of GPs when interested also in the individual
## # GPs:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_delta(magnitude=theta3)
## f = NodeGPSum(0, [k1,k2,k3])
## x = np.array([1,2,3,4,5,6,7,8,9,10])
## y = np.sin(x[0]) + np.random.normal(0, 0.1, (10,))
## # Observe the sum (index 0)
## f.observe((0,x), y)
## # Inference
## f.update()
## (mp, kp) = f.get_parameters()
## # Mean of the sum
## mp[0](...)
## # Mean of the individual terms
## mp[1](...)
## mp[2](...)
## mp[3](...)
## # Covariance of the sum
## kp[0][0](..., ...)
## # Other covariances
## kp[1][1](..., ...)
## kp[2][2](..., ...)
## kp[3][3](..., ...)
## kp[1][2](..., ...)
## kp[1][3](..., ...)
## kp[2][3](..., ...)
| mit |
nmayorov/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
    # Our f_oneway must agree with scipy.stats.f_oneway.
    rng = np.random.RandomState(0)
    group1 = rng.randn(10, 3)
    group2 = 1 + rng.randn(10, 3)
    f_expected, p_expected = stats.f_oneway(group1, group2)
    f_actual, p_actual = f_oneway(group1, group2)
    assert_true(np.allclose(f_expected, f_actual))
    assert_true(np.allclose(p_expected, p_actual))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: it must not raise casting errors
    # with recent numpys.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    fint, pint = f_oneway(X, y)
    # Test that it gives the same result as with floats.
    # (np.float was removed in NumPy 1.24; the builtin float is equivalent.)
    f, p = f_oneway(X.astype(float), y)
    assert_array_almost_equal(f, fint, decimal=4)
    assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
    # The F test should yield meaningful scores and p-values on a simple
    # simulated classification problem, for dense and sparse inputs alike.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    scores, pvalues = f_classif(X, y)
    scores_sp, pvalues_sp = f_classif(sparse.csr_matrix(X), y)
    assert_true((scores > 0).all())
    assert_true((pvalues > 0).all())
    assert_true((pvalues < 1).all())
    assert_true((pvalues[:5] < 0.05).all())
    assert_true((pvalues[5:] > 1.e-4).all())
    assert_array_almost_equal(scores_sp, scores)
    assert_array_almost_equal(pvalues_sp, pvalues)
def test_f_regression():
    # The F test should yield meaningful scores and p-values on a simple
    # simulated regression problem.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0)
    scores, pvalues = f_regression(X, y)
    assert_true((scores > 0).all())
    assert_true((pvalues > 0).all())
    assert_true((pvalues < 1).all())
    assert_true((pvalues[:5] < 0.05).all())
    assert_true((pvalues[5:] > 1.e-4).all())
    # Without centering, the dense and sparse code paths must agree.
    scores, pvalues = f_regression(X, y, center=False)
    scores_sp, pvalues_sp = f_regression(sparse.csr_matrix(X), y, center=False)
    assert_array_almost_equal(scores_sp, scores)
    assert_array_almost_equal(pvalues_sp, pvalues)
def test_f_regression_input_dtype():
    # f_regression must return the same values for any numeric dtype.
    # (np.int/np.float were removed in NumPy 1.24; use the builtins.)
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    y = np.arange(10).astype(int)
    F1, pv1 = f_regression(X, y)
    F2, pv2 = f_regression(X, y.astype(float))
    assert_array_almost_equal(F1, F2, 5)
    assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
    # f_regression must adjust the degrees of freedom according to 'center'.
    # With centered variates there is a simple relationship between the
    # centered and uncentered F-scores.
    X = np.arange(-5, 6).reshape(-1, 1)  # X has zero mean
    n_samples = X.size
    Y = np.ones(n_samples)
    Y[::2] *= -1.
    Y[0] = 0.  # make the mean of Y null as well
    F_centered, _ = f_regression(X, Y, center=True)
    F_raw, _ = f_regression(X, Y, center=False)
    assert_array_almost_equal(F_centered * (n_samples - 1.) / (n_samples - 2.),
                              F_raw)
    assert_almost_equal(F_raw[0], 0.232558139)  # value from statsmodels OLS
def test_f_classif_multi_class():
    # The F test should behave sensibly on a simulated multi-class problem.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    scores, pvalues = f_classif(X, y)
    assert_true((scores > 0).all())
    assert_true((pvalues > 0).all())
    assert_true((pvalues < 1).all())
    assert_true((pvalues[:5] < 0.05).all())
    assert_true((pvalues[5:] > 1.e-4).all())
def test_select_percentile_classif():
    # SelectPercentile must pick exactly the informative features on an
    # easy classification problem.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    selector = SelectPercentile(f_classif, percentile=25)
    X_sel = selector.fit(X, y).transform(X)
    X_sel2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                     param=25).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel2)
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_percentile_classif_sparse():
    # Same as the dense percentile test, but on CSR input; also checks that
    # inverse_transform keeps sparsity and fills only the selected columns.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    X = sparse.csr_matrix(X)
    selector = SelectPercentile(f_classif, percentile=25)
    X_sel = selector.fit(X, y).transform(X)
    X_sel2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                     param=25).fit(X, y).transform(X)
    assert_array_equal(X_sel.toarray(), X_sel2.toarray())
    support = selector.get_support()
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(support, expected_support)

    X_inv = selector.inverse_transform(X_sel2)
    assert_true(sparse.issparse(X_inv))
    support_mask = safe_mask(X_inv, support)
    assert_equal(X_inv.shape, X.shape)
    assert_array_equal(X_inv[:, support_mask].toarray(), X_sel.toarray())
    # All non-selected columns must stay empty.
    assert_equal(X_inv.getnnz(), X_sel.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
    # SelectKBest must pick exactly the informative features on an easy
    # classification problem.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k=5)
    X_sel = selector.fit(X, y).transform(X)
    X_sel2 = GenericUnivariateSelect(
        f_classif, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel2)
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_kbest_all():
    # With k="all", every feature must be kept.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k='all')
    X_sel = selector.fit(X, y).transform(X)
    assert_array_equal(X, X_sel)
def test_select_kbest_zero():
    # With k=0, no feature may be selected and transform must warn.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k=0)
    selector.fit(X, y)
    assert_array_equal(selector.get_support(), np.zeros(10, dtype=bool))
    X_selected = assert_warns_message(UserWarning, 'No features were selected',
                                      selector.transform, X)
    assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
    # The fdr, fwe and fpr heuristics must all recover the informative
    # features on an easy classification problem.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    selector = SelectFwe(f_classif, alpha=0.01)
    X_sel = selector.fit(X, y).transform(X)
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    for mode in ['fdr', 'fpr', 'fwe']:
        X_sel2 = GenericUnivariateSelect(
            f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_sel, X_sel2)
        assert_array_almost_equal(selector.get_support(), expected_support)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    """Check that the fitted filter kept exactly the top-scoring features."""
    support = score_filter.get_support()
    kept_scores = np.sort(score_filter.scores_[support])
    best_scores = np.sort(score_filter.scores_)[-support.sum():]
    assert_array_equal(kept_scores, best_scores)
def test_select_percentile_regression():
    # SelectPercentile must pick the informative features on an easy
    # regression problem, and inverse_transform must restore their layout.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    selector = SelectPercentile(f_regression, percentile=25)
    X_sel = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    X_sel2 = GenericUnivariateSelect(
        f_regression, mode='percentile', param=25).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel2)
    support = selector.get_support()
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(support, expected_support)
    X_masked = X.copy()
    X_masked[:, np.logical_not(support)] = 0
    assert_array_equal(X_masked, selector.inverse_transform(X_sel))
    # inverse_transform must respect the input dtype.
    assert_array_equal(X_masked.astype(bool),
                       selector.inverse_transform(X_sel.astype(bool)))
def test_select_percentile_regression_full():
    # Asking for the 100th percentile must keep every feature.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    selector = SelectPercentile(f_regression, percentile=100)
    X_sel = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    X_sel2 = GenericUnivariateSelect(
        f_regression, mode='percentile', param=100).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel2)
    assert_array_equal(selector.get_support(), np.ones(20))
def test_invalid_percentile():
    # Percentiles outside [0, 100] must raise ValueError at fit time.
    X, y = make_regression(n_samples=10, n_features=20,
                           n_informative=2, shuffle=False, random_state=0)
    for bad_percentile in (-1, 101):
        assert_raises(ValueError,
                      SelectPercentile(percentile=bad_percentile).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='percentile',
                                              param=bad_percentile).fit, X, y)
def test_select_kbest_regression():
    # SelectKBest must pick the informative features on an easy noisy
    # regression problem.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)
    selector = SelectKBest(f_regression, k=5)
    X_sel = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    X_sel2 = GenericUnivariateSelect(
        f_regression, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel2)
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_heuristics_regression():
    # The fpr, fdr and fwe heuristics must keep the informative features
    # (and very few noise features) on an easy regression problem.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)
    univariate_filter = SelectFpr(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(
            f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
        assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
    # The fdr heuristic must empirically control the false discovery rate.

    def single_fdr(alpha, n_informative, random_state):
        # Observed FDR of one randomly generated regression problem.
        X, y = make_regression(n_samples=150, n_features=20,
                               n_informative=n_informative, shuffle=False,
                               random_state=random_state, noise=10)
        with warnings.catch_warnings(record=True):
            # Warnings can be raised when no features are selected
            # (low alpha or very noisy data).
            selector = SelectFdr(f_regression, alpha=alpha)
            X_sel = selector.fit(X, y).transform(X)
            X_sel2 = GenericUnivariateSelect(
                f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
            assert_array_equal(X_sel, X_sel2)
            support = selector.get_support()
            num_false_positives = np.sum(support[n_informative:] == 1)
            num_true_positives = np.sum(support[:n_informative] == 1)
        if num_false_positives == 0:
            return 0.
        return (num_false_positives /
                (num_true_positives + num_false_positives))

    for alpha in [0.001, 0.01, 0.1]:
        for n_informative in [1, 5, 10]:
            # As per Benjamini-Hochberg, the expected false discovery rate
            # should stay below alpha: FDR = E(FP / (TP + FP)) <= alpha
            false_discovery_rate = np.mean(
                [single_fdr(alpha, n_informative, random_state)
                 for random_state in range(30)])
            assert_greater_equal(alpha, false_discovery_rate)
            # The empirical false discovery rate should also grow with alpha.
            if false_discovery_rate != 0:
                assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
    # The fwe heuristic must keep the informative features (and at most one
    # noise feature) on an easy regression problem.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    univariate_filter = SelectFwe(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
    assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
    assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
    # SelectKBest must return exactly k features even when scores are tied.
    # Prior to 0.11 it could return more features than requested.
    candidates = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        return X[0], X[0]

    for X in candidates:
        for k in (1, 2):
            sel = SelectKBest(dummy_score, k=k)
            Xt = ignore_warnings(sel.fit_transform)([X], y)
            assert_equal(Xt.shape[1], k)
            assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
    # SelectPercentile must select the right n_features when scores are tied.
    candidates = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        return X[0], X[0]

    for X in candidates:
        for percentile, n_expected in ((34, 1), (67, 2)):
            sel = SelectPercentile(dummy_score, percentile=percentile)
            Xt = ignore_warnings(sel.fit_transform)([X], y)
            assert_equal(Xt.shape[1], n_expected)
            assert_best_scores_kept(sel)
def test_tied_pvalues():
    # k-best and percentile selection must cope with tied p-values from
    # chi2: these features share p-values but have different scores.
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]
    for perm in itertools.permutations((0, 1, 2)):
        X = X0[:, perm]
        for Xt in (SelectKBest(chi2, k=2).fit_transform(X, y),
                   SelectPercentile(chi2, percentile=67).fit_transform(X, y)):
            assert_equal(Xt.shape, (2, 2))
            assert_not_in(9998, Xt)
def test_tied_scores():
    # SelectKBest must use a stable sort when scores are tied.
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]
    for n_features in (1, 2, 3):
        selector = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        transformed = selector.transform([[0, 1, 2]])
        assert_array_equal(transformed[0], np.arange(3)[-n_features:])
def test_nans():
    # SelectKBest and SelectPercentile must tolerate NaN scores.  The first
    # feature has zero variance, which makes f_classif (ANOVA) return NaN.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    selectors = (SelectKBest(f_classif, 2),
                 SelectPercentile(f_classif, percentile=67))
    for selector in selectors:
        ignore_warnings(selector.fit)(X, y)
        assert_array_equal(selector.get_support(indices=True),
                           np.array([1, 2]))
def test_score_func_error():
    # Passing a non-callable score_func must raise TypeError at fit time
    # for every univariate selector class.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    selector_classes = (SelectKBest, SelectPercentile, SelectFwe,
                        SelectFdr, SelectFpr, GenericUnivariateSelect)
    for SelectFeatures in selector_classes:
        assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
    # A k outside the valid range [0, n_features] must be rejected, both
    # by SelectKBest directly and via GenericUnivariateSelect.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for bad_k in (-1, 4):
        assert_raises(ValueError, SelectKBest(k=bad_k).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='k_best',
                                              param=bad_k).fit, X, y)
def test_f_classif_constant_feature():
    # A feature that is constant across all samples must make f_classif
    # emit a UserWarning.
    X, y = make_classification(n_samples=10, n_features=5)
    X[:, 0] = 2.0  # force the first column to a single constant value
    assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
    # On random, uncorrelated data every strict univariate criterion should
    # reject all features; transform must then warn and return a (40, 0)
    # matrix.
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    y = rng.randint(0, 4, size=40)
    strict_specs = ((SelectFwe, dict(alpha=0.01)),
                    (SelectFdr, dict(alpha=0.01)),
                    (SelectFpr, dict(alpha=0.01)),
                    (SelectPercentile, dict(percentile=0)),
                    (SelectKBest, dict(k=0)))
    fitted = [cls(**kwargs).fit(X, y) for cls, kwargs in strict_specs]
    for selector in fitted:
        assert_array_equal(selector.get_support(), np.zeros(10))
        emptied = assert_warns_message(
            UserWarning, 'No features were selected', selector.transform, X)
        assert_equal(emptied.shape, (40, 0))
| bsd-3-clause |
openego/ego.io | egoio/db_tables/model_draft.py | 1 | 302053 | # coding: utf-8
from sqlalchemy import ARRAY, BigInteger, Boolean, CHAR, CheckConstraint, Column, Date, DateTime, Float, ForeignKey, ForeignKeyConstraint, Index, Integer, JSON, Numeric, SmallInteger, String, Table, Text, UniqueConstraint, text
from geoalchemy2.types import Geometry, Raster
from sqlalchemy.dialects.postgresql.hstore import HSTORE
from sqlalchemy.dialects.postgresql.base import OID
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
# Shared declarative base and metadata for every model_draft table mapping
# defined in this module.
Base = declarative_base()
metadata = Base.metadata
class AEgoDemandLaOsm(Base):
    """Declarative mapping for table 'a_ego_demand_la_osm' in schema 'model_draft'."""
    __tablename__ = 'a_ego_demand_la_osm'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.a_ego_demand_la_osm_id_seq'::regclass)"))
    area_ha = Column(Float(53))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class AaEgoDemandLaOsm(Base):
    """Declarative mapping for table 'aa_ego_demand_la_osm' in schema 'model_draft'."""
    __tablename__ = 'aa_ego_demand_la_osm'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.aa_ego_demand_la_osm_id_seq'::regclass)"))
    area_ha = Column(Float(53))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class BkgVg250201501011Sta(Base):
    """Declarative mapping for table 'bkg_vg250_20150101_1_sta' in schema 'model_draft'."""
    __tablename__ = 'bkg_vg250_20150101_1_sta'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.bkg_vg250_20150101_1_sta_id_seq'::regclass)"))
    geom = Column(Geometry('MULTIPOLYGON', 31467))
    ade = Column(BigInteger)
    gf = Column(BigInteger)
    bsg = Column(BigInteger)
    rs = Column(String(12))
    ags = Column(String(12))
    sdv_rs = Column(String(12))
    gen = Column(String(50))
    bez = Column(String(50))
    ibz = Column(BigInteger)
    bem = Column(String(75))
    nbd = Column(String(4))
    sn_l = Column(String(2))
    sn_r = Column(String(1))
    sn_k = Column(String(2))
    sn_v1 = Column(String(2))
    sn_v2 = Column(String(2))
    sn_g = Column(String(3))
    fk_s3 = Column(String(2))
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    wsk = Column(Date)
    debkg_id = Column(String(16))
class BkgVg250201601011Sta(Base):
    """Declarative mapping for table 'bkg_vg250_20160101_1_sta' in schema 'model_draft'."""
    __tablename__ = 'bkg_vg250_20160101_1_sta'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.bkg_vg250_20160101_1_sta_id_seq'::regclass)"))
    geom = Column(Geometry('MULTIPOLYGON', 31467))
    ade = Column(BigInteger)
    gf = Column(BigInteger)
    bsg = Column(BigInteger)
    rs = Column(String(12))
    ags = Column(String(12))
    sdv_rs = Column(String(12))
    gen = Column(String(50))
    bez = Column(String(50))
    ibz = Column(BigInteger)
    bem = Column(String(75))
    nbd = Column(String(4))
    sn_l = Column(String(2))
    sn_r = Column(String(1))
    sn_k = Column(String(2))
    sn_v1 = Column(String(2))
    sn_v2 = Column(String(2))
    sn_g = Column(String(3))
    fk_s3 = Column(String(2))
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    wsk = Column(Date)
    debkg_id = Column(String(16))
class BnetzaEegAnlagenstammdaten(Base):
    """Declarative mapping for table 'bnetza_eeg_anlagenstammdaten' in schema 'model_draft'.

    Attribute names prefixed with an underscore map columns whose database
    names start with a digit (the numbered registry fields).
    """
    __tablename__ = 'bnetza_eeg_anlagenstammdaten'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.bnetza_eeg_anlagenstammdaten_registerdaten_id_seq'::regclass)"))
    geom = Column(Geometry('POINT', 5652), index=True)
    Meldedatum = Column(String)
    Meldegrund = Column(String)
    Anlagennummer = Column(String)
    _1_8_EEG_Anlagenschlüssel = Column('1.8 EEG-Anlagenschl\xfcssel', String)
    _3_1_Genehmigungs_datum = Column('3.1 Genehmigungs-datum', String)
    _3_2_Genehmigungs_behörde = Column('3.2 Genehmigungs-beh\xf6rde', String)
    _3_3_Genehmigungs_Aktenzeichen = Column('3.3 Genehmigungs-Aktenzeichen', String)
    _3_4_Geplantes_Inbetriebnahme_datum = Column('3.4 Geplantes Inbetriebnahme-datum', String)
    _3_5_Errichtungs_frist = Column('3.5 Errichtungs-frist', String)
    _4_1_Energieträger = Column('4.1 Energietr\xe4ger', String)
    _4_2_Installierte_Leistung__kW_ = Column('4.2 Installierte Leistung [kW]', String)
    _4_2_1_Inst__Leistung_vor_Leistungs_änderung__ohne_Flexprämie_ = Column('4.2.1 Inst. Leistung vor Leistungs-\xe4nderung (ohne Flexpr\xe4mie)', String)
    _4_2_2_Inst__Leistung_nach_Leistungs_änderung__ohne_Flexprämie = Column('4.2.2 Inst. Leistung nach Leistungs-\xe4nderung (ohne Flexpr\xe4mie', String)
    _4_3_Tatsächliche_Inbetrieb_nahme = Column('4.3 Tats\xe4chliche Inbetrieb-nahme', String)
    _4_4_Datum_Leistungs_änderung = Column('4.4 Datum Leistungs-\xe4nderung', String)
    _4_5_Stilllegungs_datum = Column('4.5 Stilllegungs-datum', String)
    _4_6_Name_der_Anlage = Column('4.6 Name der Anlage', String)
    _4_7_Strasse_bzw__Flurstück = Column('4.7 Strasse bzw. Flurst\xfcck', String)
    _4_8_Haus_nummer = Column('4.8 Haus-nummer', String)
    _4_9_Postleit_zahl = Column('4.9 Postleit-zahl', String)
    _4_10_Ort_bzw__Gemarkung = Column('4.10 Ort bzw. Gemarkung', String)
    Gemeinde_schlüssel = Column('Gemeinde-schl\xfcssel', String)
    _4_11_Bundesland = Column('4.11 Bundesland', String)
    UTM_Zonenwert = Column('UTM-Zonenwert', Integer)
    UTM_East = Column('UTM-East', Float(53))
    UTM_North = Column('UTM-North', Float(53))
    _4_13_Zugehörigkeit_Anlagenpark = Column('4.13 Zugeh\xf6rigkeit Anlagenpark', String)
    _4_13_1__Name_des_Anlagenparks = Column('4.13.1 Name des Anlagenparks', String)
    _4_14_Spannungsebene = Column('4.14 Spannungsebene', String)
    _4_15_Netzanschlusspunkt = Column('4.15 Netzanschlusspunkt', String)
    Zählpunktbezeichnung = Column(String)
    _4_16_Name_des_Netzbetreibers = Column('4.16 Name des Netzbetreibers', String)
    _4_17_Fernsteuerbarkeit_durch_ = Column('4.17 Fernsteuerbarkeit durch:', String)
    _4_18_Gemeinsame_techn__Einrichtung = Column('4.18 Gemeinsame techn. Einrichtung', String)
    _4_19_Inanspruchnahme_finanzielle_Förderung = Column('4.19 Inanspruchnahme finanzielle F\xf6rderung', String)
    _4_20_Eigenverbrauch_geplant = Column('4.20 Eigenverbrauch geplant', String)
    _5_1_Eingesetzte_Biomasse = Column('5.1 Eingesetzte Biomasse', String)
    _5_2_Ausschließlich_Biomasse = Column('5.2 Ausschlie\xdflich Biomasse', String)
    _5_3_Flexprämie = Column('5.3 Flexpr\xe4mie', String)
    _5_4_Erstmalige_Inanspruchnahme_Flexprämie = Column('5.4 Erstmalige Inanspruchnahme Flexpr\xe4mie', String)
    _5_4_1_Leistungserhöhung_Flexprämie = Column('5.4.1 Leistungserh\xf6hung Flexpr\xe4mie', String)
    _5_4_2_Datum_Leistungserhöhung_Flexprämie = Column('5.4.2 Datum Leistungserh\xf6hung Flexpr\xe4mie', String)
    _5_4_3_Umfang_der_Leistungserhöhung__kW_ = Column('5.4.3 Umfang der Leistungserh\xf6hung [kW]', String)
    _5_5_Erstmalig_ausschließlich_Biomethan = Column('5.5 Erstmalig ausschlie\xdflich Biomethan', String)
    _5_6__5_8_in_alter_Version__Zustimmung_gesonderte_Veröffentlich = Column('5.6 (5.8 in alter Version) Zustimmung gesonderte Ver\xf6ffentlich', String)
    _6_1_KWK_Anlage = Column('6.1 KWK-Anlage', String)
    _6_2_Thermische_Leistung__kW_ = Column('6.2 Thermische Leistung [kW]', String)
    _6_3_Andere_Energieträger_vor_01_08_2014 = Column('6.3 Andere Energietr\xe4ger vor 01.08.2014', String)
    _6_4_Eingesetzte_andere_Energieträger_vor_01_08_2014 = Column('6.4 Eingesetzte andere Energietr\xe4ger vor 01.08.2014', String)
    _6_5_Erstmalige_Stromerzeugung = Column('6.5 Erstmalige Stromerzeugung', String)
    _7_1_Windanlagenhersteller = Column('7.1 Windanlagenhersteller', String)
    _7_2_Anlagentyp = Column('7.2 Anlagentyp', String)
    _7_3_Nabenhöhe__m_ = Column('7.3 Nabenh\xf6he [m]', Float(53))
    _7_4_Rotordurch_messer__m_ = Column('7.4 Rotordurch-messer [m]', Float(53))
    _7_5_Repowering = Column('7.5 Repowering', String)
    _7_6_Stilllegung_gemeldet = Column('7.6 Stilllegung gemeldet', String)
    _7_7_1_Mittlere_Windge_schwindigkeit__m_s_ = Column('7.7.1 Mittlere Windge-schwindigkeit [m/s]', Float(53))
    _7_7_2_Formparameter_Weibull_Verteilung = Column('7.7.2 Formparameter Weibull-Verteilung', Float(53))
    _7_7_3_Skalenparameter_Weibull_Verteilung = Column('7.7.3 Skalenparameter Weibull-Verteilung', Float(53))
    _7_7_4_Ertrags_einschätzung__kWh_ = Column('7.7.4 Ertrags-einsch\xe4tzung [kWh]', Float(53))
    _7_7_5_Ertragseinschätzung_Referenzertrag____ = Column('7.7.5 Ertragseinsch\xe4tzung/Referenzertrag [%]', Float(53))
    _7_8_1_Seelage = Column('7.8.1 Seelage', String)
    _7_8_2_Wassertiefe__m_ = Column('7.8.2 Wassertiefe [m]', Float(53))
    _7_8_3_Küstenentfernung__km_ = Column('7.8.3 K\xfcstenentfernung [km]', Float(53))
    _7_9_Pilotwindanlage = Column('7.9 Pilotwindanlage', String)
    _8_1_Ertüchtigung_Wasserkraftanlage = Column('8.1 Ert\xfcchtigung Wasserkraftanlage', String)
    _8_2_Art_der_Ertüchtigung = Column('8.2 Art der Ert\xfcchtigung', String)
    _8_3_Zulassungspflichtige_Maßnahme = Column('8.3 Zulassungspflichtige Ma\xdfnahme', String)
    _8_4__HöheLeistungs_steigerung____ = Column('8.4. H\xf6heLeistungs-steigerung [%]', Float(53))
    _8_5_Datum_der_Ertüchtigung = Column('8.5 Datum der Ert\xfcchtigung', String)
    _9_1_Zuschlagnummer_PV_Freifläche = Column('9.1 Zuschlagnummer PV-Freifl\xe4che', String)
    _9_2_Wie_viel_Fläche_wird_durch_die_PV_Freiflächenanlage_in_An = Column('9.2 Wie viel Fl\xe4che wird durch die PV-Freifl\xe4chenanlage in An', Float(53))
    _9_3_Wie_viel_der_PV_Freifläche_ist_davon_Ackerland___ha_ = Column('9.3 Wie viel der PV-Freifl\xe4che ist davon Ackerland? [ha]', Float(53))
class BnetzaEegAnlagenstammdatenWindClassification(Base):
    """Declarative mapping for table 'bnetza_eeg_anlagenstammdaten_wind_classification' in schema 'model_draft'."""
    __tablename__ = 'bnetza_eeg_anlagenstammdaten_wind_classification'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    version = Column(Text)
    meldegrund = Column(Text)
    anlagennummer = Column(String(14))
    _1_8_eeg_anlagenschlüssel = Column('1.8_eeg-anlagenschl\xfcssel', Text)
    _4_2_installierte_leistung = Column('4.2_installierte_leistung', Float(53))
    _7_1_windanlagenhersteller = Column('7.1_windanlagenhersteller', Text)
    _7_2_anlagentyp = Column('7.2_anlagentyp', Text)
    _7_3_nabenhöhe = Column('7.3_nabenh\xf6he', Float(53))
    _7_4_rotordurch_messer = Column('7.4_rotordurch-messer', Float(53))
    _7_5_repowering = Column('7.5_repowering', Text)
    _7_6_stilllegung_gemeldet = Column('7.6_stilllegung_gemeldet', Text)
    _7_7_1_mittlere_windgeschwindigkeit = Column('7.7.1_mittlere_windgeschwindigkeit', Float(53))
    _7_7_2_formparameter_weibull_verteilung = Column('7.7.2_formparameter_weibull-verteilung', Float(53))
    _7_7_3_skalenparameter_weibull_verteilung = Column('7.7.3_skalenparameter_weibull-verteilung', Float(53))
    _7_7_4_ertrags_einschätzung = Column('7.7.4_ertrags-einsch\xe4tzung', Float(53))
    _7_7_5_ertragseinschätzung_referenzertrag = Column('7.7.5_ertragseinsch\xe4tzung/referenzertrag', Float(53))
    _7_8_1_seelage = Column('7.8.1_seelage', Text)
    _7_8_2_wassertiefe = Column('7.8.2_wassertiefe', Text)
    _7_8_3_küstenentfernung = Column('7.8.3_k\xfcstenentfernung', Text)
    _7_9_pilotwindanlage = Column('7.9_pilotwindanlage', Text)
    wea_manufacturer = Column(Text)
    wea_power_class = Column(Float(53))
    wea_power_revised = Column(Float(53))
    wea_rotor_area = Column(Float(53))
    wea_specific_power = Column(Float(53))
    wea_type = Column(Text)
    wea_type_comment = Column(Text)
    geom = Column(Geometry('POINT', 3035), index=True)
class BnetzaEegAnlagenstammdatenWindLttc(Base):
    """Declarative mapping for table 'bnetza_eeg_anlagenstammdaten_wind_lttc' in schema 'model_draft'."""
    __tablename__ = 'bnetza_eeg_anlagenstammdaten_wind_lttc'
    __table_args__ = {'schema': 'model_draft'}
    lttc_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.bnetza_eeg_anlagenstammdaten_wind_lttc_lttc_id_seq'::regclass)"))
    version = Column(Text)
    wea_count = Column(Integer)
    lttc_power_sum = Column(Float(53))
    lttc_area_ha = Column(Float(53))
    wea_manufacturer = Column(Text)
    wea_power_class = Column(Float(53))
    wea_power_avg = Column(Float(53))
    wea_hubhight_avg = Column(Float(53))
    wea_rotor_avg = Column(Float(53))
    wea_rotor_area_avg = Column(Float(53))
    wea_specific_power = Column(Float(53))
    wea_type = Column(Text)
    wea_group = Column(Text)
    geom_centroid = Column(Geometry('POINT', 3035), index=True)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class BuergenDistrictLandUse(Base):
    """Declarative mapping for table 'buergen_district_land_use' in schema 'model_draft'."""
    __tablename__ = 'buergen_district_land_use'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    year = Column(Integer)
    total_area = Column(Integer)
    building_associated_open_space_area_total = Column(Integer)
    building_aos_residential_area = Column(Integer)
    building_aos_commercial_area = Column(Integer)
    industrial_without_exploitation_area = Column(Integer)
    exploitation_area = Column(String(50))
    recreational_area_total = Column(Integer)
    recreational_area_green_area = Column(Integer)
    transport_infrastructure_total_area = Column(Integer)
    street_avenue_public_square_area = Column(Integer)
    agricultural_area_total = Column(Integer)
    agricultural_area_moor = Column(String(50))
    agricultural_area_heath = Column(String(50))
    forest_area = Column(Integer)
    water_area = Column(Integer)
    other_area = Column(String(50))
    other_area_unuseable_area = Column(String(50))
    cemetery_area = Column(Integer)
    settlement_and_traffic_area = Column(Integer)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_district_land_use_id_seq'::regclass)"))
class BuergenDistrictTourism(Base):
    """Declarative mapping for table 'buergen_district_tourism' in schema 'model_draft'."""
    __tablename__ = 'buergen_district_tourism'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    year = Column(Integer)
    tourist_accommodations = Column(Integer)
    beds = Column(Integer)
    overnight_stays = Column(Integer)
    guest_arrivals = Column(Integer)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_district_tourism_id_seq'::regclass)"))
class BuergenGeoDistrict(Base):
    """Declarative mapping for table 'buergen_geo_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_geo_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    ags = Column(String(50))
    name = Column(String(50))
    bez = Column(String(50))
    nuts = Column(String(50))
    debkg_id = Column(String(50))
    geom = Column(Geometry('MULTIPOLYGON'))
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_geo_district_id_seq'::regclass)"))
class BuergenGeoFederalState(Base):
    """Declarative mapping for table 'buergen_geo_federal_state' in schema 'model_draft'."""
    __tablename__ = 'buergen_geo_federal_state'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    ags = Column(String(50))
    name = Column(String(50))
    nuts = Column(String(50))
    debkg_id = Column(Text)
    geom = Column(Geometry('MULTIPOLYGON'))
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_geo_federal_state_id_seq'::regclass)"))
class BuergenGeoMunicipality(Base):
    """Declarative mapping for table 'buergen_geo_municipality' in schema 'model_draft'."""
    __tablename__ = 'buergen_geo_municipality'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    ags = Column(String(50))
    name = Column(String(50))
    bez = Column(String(50))
    nuts = Column(String(50))
    debkg_id = Column(String(50))
    geom = Column(Geometry('MULTIPOLYGON'))
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_geo_municipality_id_seq'::regclass)"))
class BuergenGridexpIfcEnergyTransitionDistrict(Base):
    """Declarative mapping for table 'buergen_gridexp_ifc_energy_transition_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexp_ifc_energy_transition_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    qualitative_daten_vorhanden = Column(Boolean)
    energiekommune = Column(Text)
    hundert_prozent_ee_region = Column(Boolean)
    klimaschutzkonzept_nki_energiekonzept = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexp_ifc_energy_transition_district_id_seq'::regclass)"))
class BuergenGridexpIfcGeneralInfoDistrict(Base):
    """Declarative mapping for table 'buergen_gridexp_ifc_general_info_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexp_ifc_general_info_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    informationen_vorhanden = Column(Text)
    erklaerung_zum_netzaubau = Column(Text)
    initiative_vorhanden = Column(Text)
    quelle = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexp_ifc_general_info_district_id_seq'::regclass)"))
class BuergenGridexpIfcGeneralInfoFedstate(Base):
    """Declarative mapping for table 'buergen_gridexp_ifc_general_info_fedstate' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexp_ifc_general_info_fedstate'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    umfangreiche_informationen_website = Column(Text)
    erklaerung_netzausbauvorhaben_allgemein = Column(Text)
    aktive_netzausbau_initiativen = Column(Text)
    quellen = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexp_ifc_general_info_fedstate_id_seq'::regclass)"))
class BuergenGridexpIfcOperatorInfoMunicipality(Base):
    """Declarative mapping for table 'buergen_gridexp_ifc_operator_info_municipality' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexp_ifc_operator_info_municipality'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    netzbetreiber = Column(String(50))
    netzb_gibt_info_zu_hintergrund_von_energiewirtschaft_u_gesetzen = Column(Boolean)
    netzbetreiber_gibt_informationen_zum_vorgehen_beim_netzausbau = Column(Boolean)
    netzbetreiber_gibt_informationen_zu_konkreten_projekten = Column(Boolean)
    netzbetreiber_gibt_angebot_zu_aktuellen_projektinformationen = Column(Boolean)
    netzbetreiber_stellt_telefonisch_ansprechpartner_zu_projekt = Column(Boolean)
    netzbetreiber_stellt_buergersprechstunden_infomaerkte_bereit = Column(Text)
    netzbetreiber_stellt_staendige_ansprechpartner_vor_ort_bereit = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexp_ifc_operator_info_municipality_id_seq'::regclass)"))
class BuergenGridexpIfcParticipationDistrict(Base):
    """Declarative mapping for table 'buergen_gridexp_ifc_participation_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexp_ifc_participation_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    format_beteiligung_am_planungsprozess = Column(String(50))
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexp_ifc_participation_district_id_seq'::regclass)"))
class BuergenGridexpnIfpActionsDistrict(Base):
    """Declarative mapping for table 'buergen_gridexpn_ifp_actions_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexpn_ifp_actions_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    tatsaechliche_protestaktionen = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexpn_ifp_actions_district_id_seq'::regclass)"))
class BuergenGridexpnIfpReasonsDistrict(Base):
    """Declarative mapping for table 'buergen_gridexpn_ifp_reasons_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexpn_ifp_reasons_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    technische_planung_bauweise_trassenfuehrung = Column(Boolean)
    standort_entwicklung_der_region = Column(Boolean)
    verteilungsgerechtigkeit = Column(Boolean)
    verfahrensgerechtigkeit = Column(Boolean)
    bedarf_wird_infrage_gestellt = Column(Boolean)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexpn_ifp_reasons_district_id_seq'::regclass)"))
class BuergenGridexpnIfpScopeMunicipality(Base):
    """Declarative mapping for table 'buergen_gridexpn_ifp_scope_municipality' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexpn_ifp_scope_municipality'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    regional = Column(Text)
    lokal = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexpn_ifp_scope_municipality_id_seq'::regclass)"))
class BuergenGridexpnIfpStakeholderDistrict(Base):
    """Declarative mapping for table 'buergen_gridexpn_ifp_stakeholder_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexpn_ifp_stakeholder_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    anwohner = Column(Boolean)
    buergerinitative = Column(Boolean)
    ngo_verband = Column(Boolean)
    kommunalpolitik = Column(Boolean)
    wissenschaft = Column(Boolean)
    unternehme_sonstige = Column(Boolean)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexpn_ifp_stakeholder_district_id_seq'::regclass)"))
class BuergenGridexpnIfpStakeholderRegionalLvl(Base):
    """Declarative mapping for table 'buergen_gridexpn_ifp_stakeholder_regional_lvl' in schema 'model_draft'."""
    __tablename__ = 'buergen_gridexpn_ifp_stakeholder_regional_lvl'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    akteur = Column(String(50))
    akteur_bezeichnung = Column(Text)
    akteur_ebene = Column(String(50))
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_gridexpn_ifp_stakeholder_regional_lvl_id_seq'::regclass)"))
class BuergenWindexpnSocialAcceptanceAnalysisDistrict(Base):
    """Declarative mapping for table 'buergen_windexpn_social_acceptance_analysis_district' in schema 'model_draft'."""
    __tablename__ = 'buergen_windexpn_social_acceptance_analysis_district'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    qualitative_information_wind = Column(Text)
    quellen = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_windexpn_social_acceptance_analysis_district_id_seq'::regclass)"))
class BuergenWindexpnSocialAcceptanceAnalysisFedstate(Base):
    """Declarative mapping for table 'buergen_windexpn_social_acceptance_analysis_fedstate' in schema 'model_draft'."""
    __tablename__ = 'buergen_windexpn_social_acceptance_analysis_fedstate'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    gesellschaftliche_akzeptanz_wind_stromerzeugung_aus_ee = Column(Text)
    relevante_windregion_fuer_gesellschaftliche_akzeptanz = Column(Text)
    besonderheiten_umwelt_klima_energie = Column(Text)
    quellen = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_windexpn_social_acceptance_analysis_fedstate_id_seq'::regclass)"))
class BuergenWindexpnSocialAcceptanceAnalysisMunicipality(Base):
    """Declarative mapping for table 'buergen_windexpn_social_acceptance_analysis_municipality' in schema 'model_draft'."""
    __tablename__ = 'buergen_windexpn_social_acceptance_analysis_municipality'
    __table_args__ = {'schema': 'model_draft'}
    region_key = Column(String(50))
    name = Column(String(50))
    qualitative_information_wind = Column(Text)
    quelle = Column(Text)
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.buergen_windexpn_social_acceptance_analysis_municipality_id_seq'::regclass)"))
# Plain Table object (no primary key column declared here, so it is not
# mapped as a declarative class): model_draft.corr_mv_bus_results.
t_corr_mv_bus_results = Table(
    'corr_mv_bus_results', metadata,
    Column('name', Text),
    Column('control', Text),
    Column('type', Text),
    Column('v_nom', Float(53)),
    Column('v', ARRAY(Float(precision=53))),
    Column('mv_grid', Integer),
    Column('result_id', Integer),
    Column('geom', Geometry('POINT', 4326)),
    Column('v_ang', ARRAY(Float(precision=53))),
    Column('p', ARRAY(Float(precision=53))),
    Column('q', ARRAY(Float(precision=53))),
    schema='model_draft'
)
# Plain Table object (not mapped as a declarative class):
# model_draft.corr_mv_lines_results.
t_corr_mv_lines_results = Table(
    'corr_mv_lines_results', metadata,
    Column('name', Text),
    Column('bus0', Text),
    Column('bus1', Text),
    Column('s_nom', Float(53)),
    Column('s', ARRAY(Float(precision=53))),
    Column('v_nom', Float(53)),
    Column('mv_grid', Integer),
    Column('result_id', Integer),
    Column('geom', Geometry('LINESTRING', 4326)),
    Column('x', Float(53)),
    Column('r', Float(53)),
    Column('length', Float(53)),
    schema='model_draft'
)
class CorrVisHvBus(Base):
    """Declarative mapping for table 'corr_vis_hv_bus' in schema 'model_draft'."""
    __tablename__ = 'corr_vis_hv_bus'
    __table_args__ = {'schema': 'model_draft'}
    bus_id = Column(BigInteger)
    v_nom = Column(Float(53))
    geom = Column(Geometry('POINT', 4326))
    vis_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.corr_vis_hv_bus_vis_id_seq'::regclass)"))
class CorrVisHvLine(Base):
    """Declarative mapping for table 'corr_vis_hv_lines' in schema 'model_draft'."""
    __tablename__ = 'corr_vis_hv_lines'
    __table_args__ = {'schema': 'model_draft'}
    line_id = Column(BigInteger)
    v_nom = Column(Float(53))
    s_nom = Column(Numeric)
    topo = Column(Geometry('LINESTRING', 4326))
    cables = Column(Integer)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    result_id = Column(BigInteger)
    s_rel_max = Column(Float(53))
    rel_time_over = Column(Float(53))
    s_rel = Column(Float(53))
    snapshot = Column(Integer)
    vis_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.corr_vis_hv_lines_vis_id_seq'::regclass)"))
    max_srel = Column(Float(53))
class CorrVisMvBus(Base):
    """Declarative mapping for table 'corr_vis_mv_bus' in schema 'model_draft'."""
    __tablename__ = 'corr_vis_mv_bus'
    __table_args__ = {'schema': 'model_draft'}
    name = Column(Text)
    type = Column(Text)
    v_nom = Column(Float(53))
    mv_grid = Column(Integer)
    geom = Column(Geometry('POINT', 4326))
    result_id = Column(Integer)
    vis_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.corr_vis_mv_bus_vis_id_seq'::regclass)"))
class CorrVisMvLine(Base):
    """Declarative mapping for table 'corr_vis_mv_lines' in schema 'model_draft'."""
    __tablename__ = 'corr_vis_mv_lines'
    __table_args__ = {'schema': 'model_draft'}
    name = Column(Text)
    v_nom = Column(Float(53))
    s_nom = Column(Float(53))
    mv_grid = Column(Integer)
    geom = Column(Geometry('LINESTRING', 4326))
    result_id = Column(Integer)
    s_rel_max = Column(Float(53))
    rel_time_over = Column(Float(53))
    s_rel = Column(Float(53))
    snapshot = Column(Integer)
    vis_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.corr_vis_mv_lines_vis_id_seq'::regclass)"))
    max_srel = Column(Float(53))
class DataTypeTest(Base):
    """Declarative mapping for table 'data_type_tests' in schema 'model_draft'."""
    __tablename__ = 'data_type_tests'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.data_type_tests_id_seq'::regclass)"))
    year = Column(Integer)
    x_r = Column(Float)
    x_dp = Column(Float(53))
    x_f = Column(Float(53))
    x_n = Column(Numeric)
    x_d = Column(Numeric)
class DeaGermanyPerLoadArea(Base):
    """Declarative mapping for table 'dea_germany_per_load_area' in schema 'model_draft'."""
    __tablename__ = 'dea_germany_per_load_area'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    subst_id = Column(Integer)
    lv_dea_cnt = Column(Integer)
    lv_dea_capacity = Column(Numeric)
class DestatisZensusPopulationPerHaInside(Base):
    """Declarative mapping for table 'destatis_zensus_population_per_ha_inside' in schema 'model_draft'."""
    __tablename__ = 'destatis_zensus_population_per_ha_inside'
    __table_args__ = {'schema': 'model_draft'}
    gid = Column(Integer, primary_key=True)
    inside_borders = Column(Boolean)
# Plain Table object (materialized view, not mapped as a declarative class):
# model_draft.destatis_zensus_population_per_ha_invg_mview.
t_destatis_zensus_population_per_ha_invg_mview = Table(
    'destatis_zensus_population_per_ha_invg_mview', metadata,
    Column('gid', Integer, unique=True),
    Column('population', Numeric(10, 0)),
    Column('inside_borders', Boolean),
    Column('geom_point', Geometry('POINT', 3035), index=True),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class DestatisZensusPopulationPerHaMvgdla(Base):
    """Declarative mapping for table 'destatis_zensus_population_per_ha_mvgdla' in schema 'model_draft'."""
    __tablename__ = 'destatis_zensus_population_per_ha_mvgdla'
    __table_args__ = {'schema': 'model_draft'}
    gid = Column(Integer, primary_key=True)
    population = Column(Integer)
    inside = Column(Boolean)
    geom_point = Column(Geometry('POINT', 3035), index=True)
# Plain Table object (materialized view, not mapped as a declarative class):
# model_draft.destatis_zensus_population_per_ha_outvg_mview.
t_destatis_zensus_population_per_ha_outvg_mview = Table(
    'destatis_zensus_population_per_ha_outvg_mview', metadata,
    Column('gid', Integer, unique=True),
    Column('population', Numeric(10, 0)),
    Column('inside_borders', Boolean),
    Column('geom_point', Geometry('POINT', 3035), index=True),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class EgoBoundariesBkgVg2506GemClean(Base):
    """Declarative mapping for table 'ego_boundaries_bkg_vg250_6_gem_clean' in schema 'model_draft'."""
    __tablename__ = 'ego_boundaries_bkg_vg250_6_gem_clean'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_political_boundary_bkg_vg250_6_gem_clean_id_seq'::regclass)"))
    old_id = Column(Integer)
    gen = Column(Text)
    bez = Column(Text)
    bem = Column(Text)
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    area_ha = Column(Numeric)
    count_hole = Column(Integer)
    path = Column(ARRAY(Integer()))
    is_hole = Column(Boolean)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoBoundariesHvmvSubstPerGem(Base):
    """Declarative mapping for table 'ego_boundaries_hvmv_subst_per_gem' in schema 'model_draft'."""
    __tablename__ = 'ego_boundaries_hvmv_subst_per_gem'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    old_id = Column(Integer)
    gen = Column(Text)
    bez = Column(Text)
    bem = Column(Text)
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    area_ha = Column(Numeric)
    count_hole = Column(Integer)
    path = Column(ARRAY(Integer()))
    is_hole = Column(Boolean)
    geom = Column(Geometry('POLYGON', 3035), index=True)
    subst_sum = Column(Integer)
    subst_type = Column(Integer)
# Plain Table object (materialized view, not mapped as a declarative class):
# model_draft.ego_boundaries_hvmv_subst_per_gem_1_mview.
t_ego_boundaries_hvmv_subst_per_gem_1_mview = Table(
    'ego_boundaries_hvmv_subst_per_gem_1_mview', metadata,
    Column('id', Integer, unique=True),
    Column('gen', Text),
    Column('bez', Text),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    schema='model_draft'
)
# Plain Table object (materialized view, not mapped as a declarative class):
# model_draft.ego_boundaries_hvmv_subst_per_gem_2_mview.
t_ego_boundaries_hvmv_subst_per_gem_2_mview = Table(
    'ego_boundaries_hvmv_subst_per_gem_2_mview', metadata,
    Column('id', Integer, unique=True),
    Column('gen', Text),
    Column('bez', Text),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    schema='model_draft'
)
# Plain Table object (materialized view, not mapped as a declarative class):
# model_draft.ego_boundaries_hvmv_subst_per_gem_3_mview.
t_ego_boundaries_hvmv_subst_per_gem_3_mview = Table(
    'ego_boundaries_hvmv_subst_per_gem_3_mview', metadata,
    Column('id', Integer, unique=True),
    Column('gen', Text),
    Column('bez', Text),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    schema='model_draft'
)
class EgoBoundariesHvmvSubstPerGem3Nn(Base):
    """Declarative mapping for table 'ego_boundaries_hvmv_subst_per_gem_3_nn' in schema 'model_draft'."""
    __tablename__ = 'ego_boundaries_hvmv_subst_per_gem_3_nn'
    __table_args__ = {'schema': 'model_draft'}
    mun_id = Column(Integer, primary_key=True)
    mun_ags_0 = Column(String(12))
    subst_ags_0 = Column(Text)
    subst_id = Column(Integer)
    subst_type = Column(Integer)
    geom_sub = Column(Geometry('POINT', 3035), index=True)
    distance = Column(Float(53))
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
# Plain Table object (not mapped as a declarative class):
# model_draft.ego_boundaries_hvmv_subst_per_gem_3_nn_line.
t_ego_boundaries_hvmv_subst_per_gem_3_nn_line = Table(
    'ego_boundaries_hvmv_subst_per_gem_3_nn_line', metadata,
    Column('id', BigInteger, unique=True),
    Column('nn_id', Integer),
    Column('subst_id', Integer),
    Column('geom_centre', Geometry('POINT', 3035), index=True),
    Column('geom', Geometry('LINESTRING', 3035), index=True),
    schema='model_draft'
)
# Plain Table object (not mapped as a declarative class):
# model_draft.ego_boundaries_hvmv_subst_per_gem_3_nn_union.
t_ego_boundaries_hvmv_subst_per_gem_3_nn_union = Table(
    'ego_boundaries_hvmv_subst_per_gem_3_nn_union', metadata,
    Column('subst_id', Integer, unique=True),
    Column('subst_type', Integer),
    Column('geom', Geometry('MULTIPOLYGON', 3035), index=True),
    schema='model_draft'
)
# Plain Table object (not mapped as a declarative class):
# model_draft.ego_conv_powerplant_costdat_gid.
t_ego_conv_powerplant_costdat_gid = Table(
    'ego_conv_powerplant_costdat_gid', metadata,
    Column('id', Integer),
    Column('coastdat_gid', BigInteger),
    schema='model_draft'
)
class EgoDataProcessingCleanRun(Base):
    """Declarative mapping for table 'ego_data_processing_clean_run' in schema 'model_draft'."""
    __tablename__ = 'ego_data_processing_clean_run'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_data_processing_clean_run_id_seq'::regclass)"))
    version = Column(Text)
    schema_name = Column(Text)
    table_name = Column(Text)
    script_name = Column(Text)
    entries = Column(Integer)
    status = Column(Text)
    timestamp = Column(DateTime)
    user_name = Column(Text)
class EgoDataProcessingResult(Base):
    """Declarative mapping for table 'ego_data_processing_results' in schema 'model_draft'."""
    __tablename__ = 'ego_data_processing_results'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_data_processing_results_id_seq'::regclass)"))
    version = Column(Text)
    schema_name = Column(Text)
    table_name = Column(Text)
    description = Column(Text)
    result = Column(Integer)
    unit = Column(Text)
    timestamp = Column(DateTime)
class EgoDataProcessingResultsMvgd(Base):
    """Processing statistics per MV grid district (keyed by substation id)."""

    __tablename__ = 'ego_data_processing_results_mvgd'
    __table_args__ = {'schema': 'model_draft'}

    subst_id = Column(Integer, primary_key=True)
    # type1..type3 with their counts -- semantics defined by the generating
    # script, not visible here; confirm against the upstream SQL.
    type1 = Column(Integer)
    type1_cnt = Column(Integer)
    type2 = Column(Integer)
    type2_cnt = Column(Integer)
    type3 = Column(Integer)
    type3_cnt = Column(Integer)
    gem = Column(Integer)
    gem_clean = Column(Integer)
    la_count = Column(Integer)
    area_ha = Column(Numeric(10, 1))
    la_area = Column(Numeric(10, 1))
    free_area = Column(Numeric(10, 1))
    area_share = Column(Numeric(4, 1))
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
    timestamp = Column(DateTime)
    compound = Column(Text)
    # NOTE: attribute name shadows no builtin, but "group" is a SQL keyword;
    # SQLAlchemy quotes it automatically when emitting DDL/DML.
    group = Column(CHAR(1))
    consumption = Column(Numeric)
    consumption_per_area = Column(Numeric)
class EgoDeaAgriculturalSectorPerGridDistrict(Base):
    """Agricultural-sector area (ha) per MV grid district."""

    __tablename__ = 'ego_dea_agricultural_sector_per_grid_district'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_dea_agricultural_sector_per_grid_district_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoDeaAllocation(Base):
    """Allocation of a decentralised energy asset (DEA) to a grid location.

    Holds the original point geometry plus the relocated point (`geom_new`)
    and the connecting line (`geom_line`) produced by the allocation step.
    """

    __tablename__ = 'ego_dea_allocation'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(BigInteger, primary_key=True)
    sort = Column(Integer)
    electrical_capacity = Column(Numeric)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    voltage_level = Column(String)
    postcode = Column(String)
    subst_id = Column(Integer)
    source = Column(String)
    la_id = Column(Integer)
    flag = Column(String)
    geom = Column(Geometry('POINT', 3035), index=True)
    geom_line = Column(Geometry('LINESTRING', 3035), index=True)
    geom_new = Column(Geometry('POINT', 3035), index=True)
# Reflected materialized views of the DEA allocation pipeline. Naming scheme
# (inferred from the view names -- confirm against the generating SQL):
#   m1_1 .. m5 : allocation methods/stages; "_a" = allocated subset,
#   "_rest"    = remainder not yet allocated, "out" = final output.
# The "_a"/"_rest" views share one column set; the full per-method views and
# the "out" view additionally carry sort/postcode/source/la_id and the
# relocated geometries (geom_line, geom_new).
t_ego_dea_allocation_m1_1_a_mview = Table(
    'ego_dea_allocation_m1_1_a_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m1_1_mview = Table(
    'ego_dea_allocation_m1_1_mview', metadata,
    Column('id', BigInteger),
    Column('sort', Integer),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('postcode', String),
    Column('subst_id', Integer),
    Column('source', String),
    Column('la_id', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)

t_ego_dea_allocation_m1_1_rest_mview = Table(
    'ego_dea_allocation_m1_1_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m1_2_a_mview = Table(
    'ego_dea_allocation_m1_2_a_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m1_2_mview = Table(
    'ego_dea_allocation_m1_2_mview', metadata,
    Column('id', BigInteger),
    Column('sort', Integer),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('postcode', String),
    Column('subst_id', Integer),
    Column('source', String),
    Column('la_id', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)

t_ego_dea_allocation_m1_2_rest_mview = Table(
    'ego_dea_allocation_m1_2_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m2_a_mview = Table(
    'ego_dea_allocation_m2_a_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m2_mview = Table(
    'ego_dea_allocation_m2_mview', metadata,
    Column('id', BigInteger),
    Column('sort', Integer),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('postcode', String),
    Column('subst_id', Integer),
    Column('source', String),
    Column('la_id', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)

t_ego_dea_allocation_m2_rest_mview = Table(
    'ego_dea_allocation_m2_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m3_a_mview = Table(
    'ego_dea_allocation_m3_a_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m3_mview = Table(
    'ego_dea_allocation_m3_mview', metadata,
    Column('id', BigInteger),
    Column('sort', Integer),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('postcode', String),
    Column('subst_id', Integer),
    Column('source', String),
    Column('la_id', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)

t_ego_dea_allocation_m3_rest_mview = Table(
    'ego_dea_allocation_m3_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m4_a_mview = Table(
    'ego_dea_allocation_m4_a_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m4_mview = Table(
    'ego_dea_allocation_m4_mview', metadata,
    Column('id', BigInteger),
    Column('sort', Integer),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('postcode', String),
    Column('subst_id', Integer),
    Column('source', String),
    Column('la_id', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)

t_ego_dea_allocation_m4_rest_mview = Table(
    'ego_dea_allocation_m4_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m5_a_mview = Table(
    'ego_dea_allocation_m5_a_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_m5_mview = Table(
    'ego_dea_allocation_m5_mview', metadata,
    Column('id', BigInteger),
    Column('sort', Integer),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('postcode', String),
    Column('subst_id', Integer),
    Column('source', String),
    Column('la_id', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)

t_ego_dea_allocation_m5_rest_mview = Table(
    'ego_dea_allocation_m5_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('subst_id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('flag', String),
    schema='model_draft'
)

t_ego_dea_allocation_out_mview = Table(
    'ego_dea_allocation_out_mview', metadata,
    Column('id', BigInteger),
    Column('sort', Integer),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', String),
    Column('postcode', String),
    Column('subst_id', Integer),
    Column('source', String),
    Column('la_id', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
class EgoDeaPerGenerationTypeAndVoltageLevel(Base):
    """Aggregated DEA capacity/count per generation type and voltage level."""

    __tablename__ = 'ego_dea_per_generation_type_and_voltage_level'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(BigInteger, primary_key=True)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    voltage_level = Column(String)
    capacity = Column(Numeric)
    count = Column(BigInteger)
class EgoDeaPerGridDistrict(Base):
    """LV/MV DEA counts and capacities aggregated per MV grid district."""

    __tablename__ = 'ego_dea_per_grid_district'
    __table_args__ = {'schema': 'model_draft'}

    subst_id = Column(Integer, primary_key=True)
    lv_dea_cnt = Column(Integer)
    lv_dea_capacity = Column(Numeric)
    mv_dea_cnt = Column(Integer)
    mv_dea_capacity = Column(Numeric)
class EgoDeaPerLoadArea(Base):
    """LV DEA counts and capacities aggregated per load area."""

    __tablename__ = 'ego_dea_per_load_area'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True)
    subst_id = Column(Integer)
    lv_dea_cnt = Column(Integer)
    lv_dea_capacity = Column(Numeric)
class EgoDeaPerMethod(Base):
    """DEA capacity/count aggregated per allocation method (keyed by name)."""

    __tablename__ = 'ego_dea_per_method'
    __table_args__ = {'schema': 'model_draft'}

    name = Column(Text, primary_key=True)
    capacity = Column(Numeric)
    count = Column(BigInteger)
class EgoDemandHvLargescaleconsumer(Base):
    """Large-scale HV consumer site with consumption, peak load and geometry."""

    __tablename__ = 'ego_demand_hv_largescaleconsumer'
    __table_args__ = {'schema': 'model_draft'}

    polygon_id = Column(Integer, primary_key=True)
    area_ha = Column(Float(53))
    powerplant_id = Column(Integer)
    voltage_level = Column(SmallInteger)
    subst_id = Column(Integer)
    otg_id = Column(Integer)
    un_id = Column(Integer)
    consumption = Column(Numeric)
    peak_load = Column(Numeric)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
    geom_centre = Column(Geometry('POINT', 3035), index=True)
class EgoDemandHvmvDemand(Base):
    """HV/MV demand time series: active/reactive power set-points as arrays."""

    __tablename__ = 'ego_demand_hvmv_demand'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_hvmv_demand_id_seq'::regclass)"))
    # Per-timestep set-points stored as Postgres float arrays.
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
class EgoDemandLaBufferbug(Base):
    """Debug table for load-area buffering artefacts ("bufferbug")."""

    __tablename__ = 'ego_demand_la_bufferbug'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True)
    oid = Column(Integer)
    comment = Column(Text)
    part = Column(Text)
    geom = Column(Geometry('POLYGON', 3035))
class EgoDemandLaOsm(Base):
    """Load-area polygon derived from OSM data, with its area in hectares."""

    __tablename__ = 'ego_demand_la_osm'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_la_osm_id_seq'::regclass)"))
    area_ha = Column(Float(53))
    geom = Column(Geometry('POLYGON', 3035), index=True)
# NOTE(review): class name "EgoDemandLaZensu" is a codegen singularisation of
# table 'ego_demand_la_zensus'; renaming would break existing importers.
class EgoDemandLaZensu(Base):
    """Census (Zensus) cell with population and load-area membership flag."""

    __tablename__ = 'ego_demand_la_zensus'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_la_zensus_id_seq'::regclass)"))
    gid = Column(Integer)
    population = Column(Integer)
    inside_la = Column(Boolean)
    geom_point = Column(Geometry('POINT', 3035), index=True)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoDemandLaZensusCluster(Base):
    """Cluster of census cells: summed population, area and derived geometries."""

    __tablename__ = 'ego_demand_la_zensus_cluster'
    __table_args__ = {'schema': 'model_draft'}

    cid = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_la_zensus_cluster_cid_seq'::regclass)"))
    zensus_sum = Column(Integer)
    area_ha = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
    geom_buffer = Column(Geometry('POLYGON', 3035))
    geom_centroid = Column(Geometry('POINT', 3035), index=True)
    geom_surfacepoint = Column(Geometry('POINT', 3035), index=True)
class EgoDemandLoadCollect(Base):
    """Intermediate collected load polygon (load-area construction step)."""

    __tablename__ = 'ego_demand_load_collect'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_load_collect_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoDemandLoadCollectBuffer100(Base):
    """Collected load polygons buffered (presumably by 100 m -- confirm)."""

    __tablename__ = 'ego_demand_load_collect_buffer100'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_load_collect_buffer100_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoDemandLoadMelt(Base):
    """Merged ("melted") load polygon (load-area construction step)."""

    __tablename__ = 'ego_demand_load_melt'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_load_melt_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoDemandLoadMelt99(Base):
    """Variant of the melted load polygons ("99" -- meaning set upstream)."""

    __tablename__ = 'ego_demand_load_melt_99'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_load_melt_99_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
# Reflected QA materialized views flagging invalid geometries from the
# load-melt step (error location/reason, plus a fixed-geometry variant).
t_ego_demand_load_melt_error_2_geom_mview = Table(
    'ego_demand_load_melt_error_2_geom_mview', metadata,
    Column('id', Integer, unique=True),
    Column('error', Boolean),
    Column('error_reason', String),
    Column('error_location', Geometry('POINT', 3035)),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)

t_ego_demand_load_melt_error_geom_fix_mview = Table(
    'ego_demand_load_melt_error_geom_fix_mview', metadata,
    Column('id', Integer, unique=True),
    Column('error', Boolean),
    Column('geom_type', Text),
    Column('area', Float(53)),
    Column('geom_buffer', Geometry('POLYGON', 3035)),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)

t_ego_demand_load_melt_error_geom_mview = Table(
    'ego_demand_load_melt_error_geom_mview', metadata,
    Column('id', Integer, unique=True),
    Column('error', Boolean),
    Column('error_reason', String),
    Column('error_location', Geometry('POINT', 3035)),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class EgoDemandLoadarea(Base):
    """Load area with census/IOER statistics, per-sector areas, shares,
    counts, consumption, peak loads and several representative geometries.
    """

    __tablename__ = 'ego_demand_loadarea'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_loadarea_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    # Administrative codes (NUTS region; rs_0/ags_0 are German regional keys).
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    otg_id = Column(Integer)
    un_id = Column(Integer)
    # Census (Zensus) and IOER raster statistics for the area.
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Numeric)
    ioer_sum = Column(Numeric)
    ioer_count = Column(Integer)
    ioer_density = Column(Numeric)
    # Per-sector breakdown: area, share, count, consumption, peak load.
    sector_area_residential = Column(Numeric)
    sector_area_retail = Column(Numeric)
    sector_area_industrial = Column(Numeric)
    sector_area_agricultural = Column(Numeric)
    sector_area_sum = Column(Numeric)
    sector_share_residential = Column(Numeric)
    sector_share_retail = Column(Numeric)
    sector_share_industrial = Column(Numeric)
    sector_share_agricultural = Column(Numeric)
    sector_share_sum = Column(Numeric)
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    sector_consumption_residential = Column(Float(53))
    sector_consumption_retail = Column(Float(53))
    sector_consumption_industrial = Column(Float(53))
    sector_consumption_agricultural = Column(Float(53))
    sector_consumption_sum = Column(Float(53))
    sector_peakload_retail = Column(Float(53))
    sector_peakload_residential = Column(Float(53))
    sector_peakload_industrial = Column(Float(53))
    sector_peakload_agricultural = Column(Float(53))
    geom_centroid = Column(Geometry('POINT', 3035), index=True)
    geom_surfacepoint = Column(Geometry('POINT', 3035), index=True)
    geom_centre = Column(Geometry('POINT', 3035), index=True)
    geom = Column(Geometry('POLYGON', 3035), index=True)
# Reflected QA view: load areas lacking an AGS administrative code.
t_ego_demand_loadarea_error_noags_mview = Table(
    'ego_demand_loadarea_error_noags_mview', metadata,
    Column('id', Integer, unique=True),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class EgoDemandLoadareaPeakLoad(Base):
    """Per-sector peak load for a load area."""

    __tablename__ = 'ego_demand_loadarea_peak_load'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(BigInteger, primary_key=True, index=True, server_default=text("nextval('model_draft.ego_demand_loadarea_peak_load_id_seq'::regclass)"))
    retail = Column(Float(53))
    residential = Column(Float(53))
    industrial = Column(Float(53))
    agricultural = Column(Float(53))
# Reflected QA view: load areas smaller than 100 m^2 (per the view name).
t_ego_demand_loadarea_smaller100m2_mview = Table(
    'ego_demand_loadarea_smaller100m2_mview', metadata,
    Column('id', Integer, unique=True),
    Column('area_ha', Numeric),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class EgoDemandLoadareaTemp(Base):
    """Temporary/staging variant of EgoDemandLoadarea (no peak-load columns,
    plain integer primary key, geometry columns without indexes).
    """

    __tablename__ = 'ego_demand_loadarea_temp'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True)
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    otg_id = Column(Integer)
    un_id = Column(Integer)
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Numeric)
    ioer_sum = Column(Numeric)
    ioer_count = Column(Integer)
    ioer_density = Column(Numeric)
    sector_area_residential = Column(Numeric)
    sector_area_retail = Column(Numeric)
    sector_area_industrial = Column(Numeric)
    sector_area_agricultural = Column(Numeric)
    sector_area_sum = Column(Numeric)
    sector_share_residential = Column(Numeric)
    sector_share_retail = Column(Numeric)
    sector_share_industrial = Column(Numeric)
    sector_share_agricultural = Column(Numeric)
    sector_share_sum = Column(Numeric)
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    sector_consumption_residential = Column(Float(53))
    sector_consumption_retail = Column(Float(53))
    sector_consumption_industrial = Column(Float(53))
    sector_consumption_agricultural = Column(Float(53))
    sector_consumption_sum = Column(Float(53))
    geom_centroid = Column(Geometry('POINT', 3035))
    geom_surfacepoint = Column(Geometry('POINT', 3035))
    geom_centre = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035))
class EgoDemandLoadareaVoi(Base):
    """Voronoi-variant ("voi" -- presumably Voronoi, confirm upstream) of the
    load-area table; consumption columns are Numeric rather than Float here.
    """

    __tablename__ = 'ego_demand_loadarea_voi'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_loadarea_voi_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    otg_id = Column(Integer)
    un_id = Column(Integer)
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Numeric)
    ioer_sum = Column(Numeric)
    ioer_count = Column(Integer)
    ioer_density = Column(Numeric)
    sector_area_residential = Column(Numeric)
    sector_area_retail = Column(Numeric)
    sector_area_industrial = Column(Numeric)
    sector_area_agricultural = Column(Numeric)
    sector_area_sum = Column(Numeric)
    sector_share_residential = Column(Numeric)
    sector_share_retail = Column(Numeric)
    sector_share_industrial = Column(Numeric)
    sector_share_agricultural = Column(Numeric)
    sector_share_sum = Column(Numeric)
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    sector_consumption_residential = Column(Numeric)
    sector_consumption_retail = Column(Numeric)
    sector_consumption_industrial = Column(Numeric)
    sector_consumption_agricultural = Column(Numeric)
    sector_consumption_sum = Column(Numeric)
    geom_centroid = Column(Geometry('POINT', 3035), index=True)
    geom_surfacepoint = Column(Geometry('POINT', 3035), index=True)
    geom_centre = Column(Geometry('POINT', 3035), index=True)
    geom = Column(Geometry('POLYGON', 3035), index=True)
# Reflected QA views for the "voi" load-area variant: missing AGS codes and
# areas smaller than 100 m^2.
t_ego_demand_loadarea_voi_error_noags_mview = Table(
    'ego_demand_loadarea_voi_error_noags_mview', metadata,
    Column('id', Integer, unique=True),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)

t_ego_demand_loadarea_voi_smaller100m2_mview = Table(
    'ego_demand_loadarea_voi_smaller100m2_mview', metadata,
    Column('id', Integer, unique=True),
    Column('area_ha', Numeric),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class EgoDemandLoad(Base):
    """Demand load geometry linked to small/large-scale consumer ids."""

    __tablename__ = 'ego_demand_loads'
    __table_args__ = {'schema': 'model_draft'}

    un_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_demand_loads_un_id_seq'::regclass)"))
    ssc_id = Column(Integer)
    lsc_id = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoDemandPerDistrict(Base):
    """Electricity consumption and sector areas per district (EU-coded)."""

    __tablename__ = 'ego_demand_per_district'
    __table_args__ = {'schema': 'model_draft'}

    eu_code = Column(String(7), primary_key=True)
    district = Column(String)
    elec_consumption_industry = Column(Float(53))
    elec_consumption_tertiary_sector = Column(Float(53))
    area_industry = Column(Float(53))
    consumption_per_area_industry = Column(Float(53))
    area_retail = Column(Float(53))
    area_agriculture = Column(Float(53))
    area_tertiary_sector = Column(Float(53))
class EgoDemandPerGva(Base):
    """Electricity consumption per federal state (GVA-based, EU-coded)."""

    __tablename__ = 'ego_demand_per_gva'
    __table_args__ = {'schema': 'model_draft'}

    eu_code = Column(String(7), primary_key=True)
    federal_states = Column(String)
    elec_consumption_industry = Column(Float(53))
    elec_consumption_tertiary_sector = Column(Float(53))
# Reflected test copy of ego_demand_per_gva (same columns, no primary key).
t_ego_demand_per_gva_test = Table(
    'ego_demand_per_gva_test', metadata,
    Column('eu_code', String(7)),
    Column('federal_states', String),
    Column('elec_consumption_industry', Float(53)),
    Column('elec_consumption_tertiary_sector', Float(53)),
    schema='model_draft'
)
class EgoDeuLoadsOsm(Base):
    """Germany-wide load polygon derived from OSM, with area in hectares."""

    __tablename__ = 'ego_deu_loads_osm'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_deu_loads_osm_id_seq'::regclass)"))
    area_ha = Column(Float(53))
    geom = Column(Geometry('POLYGON', 3035), index=True)
# Reflected views: renewable power-plant register subset (note the original
# register geometry is WGS84/SRID 4326 while derived REA geometries use 3035),
# and the scenario-log version view.
t_ego_dp_res_powerplant_vg_enavan_mview = Table(
    'ego_dp_res_powerplant_vg_enavan_mview', metadata,
    Column('version', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    schema='model_draft'
)

t_ego_dp_scenario_log_version_view = Table(
    'ego_dp_scenario_log_version_view', metadata,
    Column('id', Integer),
    Column('version', Text),
    Column('io', Text),
    Column('schema_name', Text),
    Column('table_name', Text),
    Column('script_name', Text),
    Column('entries', Integer),
    Column('status', Text),
    Column('user_name', Text),
    Column('timestamp', DateTime),
    Column('meta_data', Text),
    schema='model_draft'
)
class EgoDpSupplyConvPowerplant(Base):
    """Conventional power plant (data-processing supply register).

    Composite primary key (preversion, id, scenario) versions each plant per
    data-processing run and scenario.
    """

    __tablename__ = 'ego_dp_supply_conv_powerplant'
    __table_args__ = {'schema': 'model_draft'}

    preversion = Column(Text, primary_key=True, nullable=False)
    id = Column(Integer, primary_key=True, nullable=False)
    bnetza_id = Column(Text)
    company = Column(Text)
    name = Column(Text)
    postcode = Column(Text)
    city = Column(Text)
    street = Column(Text)
    state = Column(Text)
    block = Column(Text)
    # Commissioning year both as raw source text and parsed number.
    commissioned_original = Column(Text)
    commissioned = Column(Float(53))
    retrofit = Column(Float(53))
    shutdown = Column(Float(53))
    status = Column(Text)
    fuel = Column(Text)
    technology = Column(Text)
    type = Column(Text)
    eeg = Column(Text)
    chp = Column(Text)
    capacity = Column(Float(53))
    capacity_uba = Column(Float(53))
    chp_capacity_uba = Column(Float(53))
    efficiency_data = Column(Float(53))
    efficiency_estimate = Column(Float(53))
    network_node = Column(Text)
    voltage = Column(Text)
    network_operator = Column(Text)
    name_uba = Column(Text)
    lat = Column(Float(53))
    lon = Column(Float(53))
    comment = Column(Text)
    # Source register coordinates are WGS84 (SRID 4326).
    geom = Column(Geometry('POINT', 4326), index=True)
    voltage_level = Column(SmallInteger)
    subst_id = Column(BigInteger)
    otg_id = Column(BigInteger)
    un_id = Column(BigInteger)
    la_id = Column(Integer)
    scenario = Column(Text, primary_key=True, nullable=False)
    flag = Column(Text)
    nuts = Column(String)
class EgoDpSupplyResPowerplant(Base):
    """Renewable power plant (data-processing supply register).

    Composite primary key (preversion, id, scenario); register geometry is
    WGS84 (4326) while derived REA geometries use ETRS89/LAEA (3035).
    """

    __tablename__ = 'ego_dp_supply_res_powerplant'
    __table_args__ = {'schema': 'model_draft'}

    preversion = Column(Text, primary_key=True, nullable=False)
    id = Column(BigInteger, primary_key=True, nullable=False)
    start_up_date = Column(DateTime)
    electrical_capacity = Column(Numeric)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    thermal_capacity = Column(Numeric)
    city = Column(String)
    postcode = Column(String)
    address = Column(String)
    lon = Column(Numeric)
    lat = Column(Numeric)
    gps_accuracy = Column(String)
    validation = Column(String)
    notification_reason = Column(String)
    eeg_id = Column(String)
    tso = Column(Float(53))
    tso_eic = Column(String)
    dso_id = Column(String)
    dso = Column(String)
    voltage_level_var = Column(String)
    network_node = Column(String)
    power_plant_id = Column(String)
    source = Column(String)
    comment = Column(String)
    geom = Column(Geometry('POINT', 4326), index=True)
    subst_id = Column(BigInteger)
    otg_id = Column(BigInteger)
    un_id = Column(BigInteger)
    voltage_level = Column(SmallInteger)
    scenario = Column(String, primary_key=True, nullable=False)
    flag = Column(String)
    nuts = Column(String)
    w_id = Column(BigInteger)
    la_id = Column(Integer)
    mvlv_subst_id = Column(Integer)
    rea_sort = Column(Integer)
    rea_flag = Column(String)
    rea_geom_line = Column(Geometry('LINESTRING', 3035))
    rea_geom_new = Column(Geometry('POINT', 3035), index=True)
# Reflected output view mirroring EgoDpSupplyResPowerplant's columns.
t_ego_dp_supply_res_powerplant_out_mview = Table(
    'ego_dp_supply_res_powerplant_out_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
class EgoDpSupplyResPowerplantV030(Base):
    """Frozen v0.3.0 snapshot of the renewable power-plant register.

    Unlike EgoDpSupplyResPowerplant, `preversion` is not part of the key and
    `scenario` defaults to the literal 'none'.
    """

    __tablename__ = 'ego_dp_supply_res_powerplant_v030'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(BigInteger, primary_key=True, nullable=False)
    start_up_date = Column(DateTime)
    electrical_capacity = Column(Numeric)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    thermal_capacity = Column(Numeric)
    city = Column(String)
    postcode = Column(String)
    address = Column(String)
    lon = Column(Numeric)
    lat = Column(Numeric)
    gps_accuracy = Column(String)
    validation = Column(String)
    notification_reason = Column(String)
    eeg_id = Column(String)
    tso = Column(Float(53))
    tso_eic = Column(String)
    dso_id = Column(String)
    dso = Column(String)
    voltage_level_var = Column(String)
    network_node = Column(String)
    power_plant_id = Column(String)
    source = Column(String)
    comment = Column(String)
    geom = Column(Geometry('POINT', 4326))
    subst_id = Column(BigInteger)
    otg_id = Column(BigInteger)
    un_id = Column(BigInteger)
    voltage_level = Column(SmallInteger)
    la_id = Column(Integer)
    mvlv_subst_id = Column(Integer)
    rea_sort = Column(Integer)
    rea_flag = Column(String)
    rea_geom_line = Column(Geometry('LINESTRING', 3035))
    rea_geom_new = Column(Geometry('POINT', 3035))
    preversion = Column(Text)
    flag = Column(String)
    scenario = Column(String, primary_key=True, nullable=False, server_default=text("'none'::character varying"))
    nuts = Column(String)
    w_id = Column(BigInteger)
class EgoGridDing0HvmvTransformer(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_hvmv_transformer``.

    One row per HV/MV transformer exported by a grid run (keyed to the run
    via ``run_id``). ``geom`` is a POINT in EPSG:4326; ``id`` is sequence-backed.
    """
    __tablename__ = 'ego_grid_ding0_hvmv_transformer'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_hvmv_transformer_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    name = Column(String(100))
    voltage_op = Column(Float)
    s_nom = Column(Float)
    x = Column(Float)
    r = Column(Float)
class EgoGridDing0Line(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_line``.

    Grid edge between two nodes (``node1``/``node2``) with electrical
    parameters (``u_n``, ``c``, ``l``, ``r``, ``i_max_th`` — presumably
    per-length line constants; confirm units against the producer).
    ``geom`` is a LINESTRING in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_line'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_line_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    edge_name = Column(String(100))
    grid_name = Column(String(100))
    node1 = Column(String(100))
    node2 = Column(String(100))
    type_kind = Column(String(20))
    type_name = Column(String(30))
    length = Column(Float)
    u_n = Column(Float)
    c = Column(Float)
    l = Column(Float)
    r = Column(Float)
    i_max_th = Column(Float)
    geom = Column(Geometry('LINESTRING', 4326), index=True)
class EgoGridDing0LvBranchtee(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_lv_branchtee``.

    LV branch-tee node of a run; POINT geometry in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_lv_branchtee'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_lv_branchtee_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    name = Column(String(100))
class EgoGridDing0LvGenerator(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_lv_generator``.

    Generator attached to an LV grid (``lv_grid_id``); carries type/subtype,
    voltage level, nominal capacity, an optional weather cell reference and
    an aggregation flag. ``geom`` is a POINT in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_lv_generator'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_lv_generator_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    la_id = Column(BigInteger)
    name = Column(String(100))
    lv_grid_id = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    type = Column(String(22))
    subtype = Column(String(22))
    v_level = Column(Integer)
    nominal_capacity = Column(Float)
    weather_cell_id = Column(BigInteger)
    is_aggregated = Column(Boolean)
class EgoGridDing0LvGrid(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_lv_grid``.

    LV grid area with population count and nominal voltage;
    ``geom`` is a MULTIPOLYGON in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_lv_grid'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_lv_grid_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    name = Column(String(100))
    geom = Column(Geometry('MULTIPOLYGON', 4326), index=True)
    population = Column(BigInteger)
    voltage_nom = Column(Float)
class EgoGridDing0LvLoad(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_lv_load``.

    Load connected to an LV grid; ``consumption`` is stored as a string
    (presumably serialized per-sector values — confirm against producer).
    ``geom`` is a POINT in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_lv_load'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_lv_load_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    name = Column(String(100))
    lv_grid_id = Column(Integer)
    geom = Column(Geometry('POINT', 4326), index=True)
    consumption = Column(String(100))
class EgoGridDing0LvStation(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_lv_station``.

    LV station node of a run; POINT geometry in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_lv_station'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_lv_station_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    name = Column(String(100))
class EgoGridDing0MvBranchtee(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mv_branchtee``.

    MV branch-tee node of a run; POINT geometry in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_mv_branchtee'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mv_branchtee_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    name = Column(String(100))
class EgoGridDing0MvCircuitbreaker(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mv_circuitbreaker``.

    MV circuit breaker with a short ``status`` string (max 10 chars);
    POINT geometry in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_mv_circuitbreaker'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mv_circuitbreaker_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    name = Column(String(100))
    status = Column(String(10))
class EgoGridDing0MvGenerator(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mv_generator``.

    MV-connected generator; structurally parallel to
    :class:`EgoGridDing0LvGenerator` but without ``la_id``/``lv_grid_id``.
    ``geom`` is a POINT in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_mv_generator'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mv_generator_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    name = Column(String(100))
    geom = Column(Geometry('POINT', 4326), index=True)
    type = Column(String(22))
    subtype = Column(String(22))
    v_level = Column(Integer)
    nominal_capacity = Column(Float)
    weather_cell_id = Column(BigInteger)
    is_aggregated = Column(Boolean)
class EgoGridDing0MvGrid(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mv_grid``.

    MV grid area with population and nominal voltage;
    ``geom`` is a MULTIPOLYGON in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_mv_grid'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mv_grid_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('MULTIPOLYGON', 4326), index=True)
    name = Column(String(100))
    population = Column(BigInteger)
    voltage_nom = Column(Float)
class EgoGridDing0MvLoad(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mv_load``.

    MV load. NOTE(review): ``geom`` is declared with SRID 4326 but no
    geometry *type* (unlike the sibling tables) — the DB column apparently
    allows mixed geometry types; confirm before assuming POINT.
    """
    __tablename__ = 'ego_grid_ding0_mv_load'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mv_load_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    name = Column(String(100))
    geom = Column(Geometry(srid=4326), index=True)
    is_aggregated = Column(Boolean)
    consumption = Column(String(100))
class EgoGridDing0MvStation(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mv_station``.

    MV station node; POINT geometry in EPSG:4326. Primary key is
    BigInteger here (the sibling station tables use Integer).
    """
    __tablename__ = 'ego_grid_ding0_mv_station'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mv_station_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    name = Column(String(100))
class EgoGridDing0MvlvMapping(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mvlv_mapping``.

    Association row linking an LV grid (id + name) to its MV grid
    (id + name) for a given run. No geometry columns.
    """
    __tablename__ = 'ego_grid_ding0_mvlv_mapping'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mvlv_mapping_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    lv_grid_id = Column(BigInteger)
    lv_grid_name = Column(String(100))
    mv_grid_id = Column(BigInteger)
    mv_grid_name = Column(String(100))
class EgoGridDing0MvlvTransformer(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_mvlv_transformer``.

    MV/LV transformer; same column layout as
    :class:`EgoGridDing0HvmvTransformer`. POINT geometry in EPSG:4326.
    """
    __tablename__ = 'ego_grid_ding0_mvlv_transformer'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_mvlv_transformer_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False)
    id_db = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
    name = Column(String(100))
    voltage_op = Column(Float)
    s_nom = Column(Float)
    x = Column(Float)
    r = Column(Float)
class EgoGridDing0Versioning(Base):
    """ORM mapping of ``model_draft.ego_grid_ding0_versioning``.

    Run registry: one row per ``run_id`` (unique) with a free-text
    description; the other ``ego_grid_ding0_*`` tables reference it
    by ``run_id``.
    """
    __tablename__ = 'ego_grid_ding0_versioning'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_ding0_versioning_id_seq'::regclass)"))
    run_id = Column(BigInteger, nullable=False, unique=True)
    description = Column(String(3000))
class EgoGridEhvSubstation(Base):
    """ORM mapping of ``model_draft.ego_grid_ehv_substation``.

    EHV substation derived from OSM data (``osm_id``/``osm_www``).
    NOTE: the primary key is ``osm_id`` (Text); ``subst_id`` is only a
    unique, sequence-backed surrogate. ``point`` is a POINT in EPSG:4326,
    ``polygon`` an untyped geometry.
    """
    __tablename__ = 'ego_grid_ehv_substation'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, nullable=False, unique=True, server_default=text("nextval('model_draft.ego_grid_ehv_substation_subst_id_seq'::regclass)"))
    lon = Column(Float(53), nullable=False)
    lat = Column(Float(53), nullable=False)
    point = Column(Geometry('POINT', 4326), nullable=False)
    polygon = Column(Geometry, nullable=False)
    voltage = Column(Text)
    power_type = Column(Text)
    substation = Column(Text)
    osm_id = Column(Text, primary_key=True)
    osm_www = Column(Text, nullable=False)
    frequency = Column(Text)
    subst_name = Column(Text)
    ref = Column(Text)
    operator = Column(Text)
    dbahn = Column(Text)
    status = Column(SmallInteger, nullable=False)
    otg_id = Column(BigInteger)
class EgoGridEhvSubstationVoronoi(EgoGridEhvSubstation):
    """ORM mapping of ``model_draft.ego_grid_ehv_substation_voronoi``.

    Subclasses :class:`EgoGridEhvSubstation`: the code-generator inferred
    joined-table inheritance from the ``subst_id`` foreign key back to the
    substation table (its PK here). Adds the Voronoi ``geom`` POLYGON in
    EPSG:4326. NOTE(review): verify the inheritance relationship is
    intended rather than a plain 1:1 link.
    """
    __tablename__ = 'ego_grid_ehv_substation_voronoi'
    __table_args__ = {'schema': 'model_draft'}
    geom = Column(Geometry('POLYGON', 4326), index=True)
    subst_id = Column(ForeignKey('model_draft.ego_grid_ehv_substation.subst_id'), primary_key=True)
# Reflected table (no primary key detected, so no mapped class): buses of
# electrically neighbouring countries per scenario. ``scn_name`` defaults to
# 'Status Quo'; ``geom`` is a POINT in EPSG:4326.
t_ego_grid_hv_electrical_neighbours_bus = Table(
    'ego_grid_hv_electrical_neighbours_bus', metadata,
    Column('scn_name', String, nullable=False, server_default=text("'Status Quo'::character varying")),
    Column('bus_id', BigInteger),
    Column('central_bus', Boolean, server_default=text("false")),
    Column('v_nom', Float(53)),
    Column('cntr_id', Text),
    Column('current_type', Text, server_default=text("'AC'::text")),
    Column('v_mag_pu_min', Float(53), server_default=text("0")),
    Column('v_mag_pu_max', Float(53)),
    Column('geom', Geometry('POINT', 4326)),
    schema='model_draft'
)
class EgoGridHvElectricalNeighboursLine(Base):
    """ORM mapping of ``model_draft.ego_grid_hv_electrical_neighbours_line``.

    Cross-border line between buses ``bus0``/``bus1`` (countries
    ``cntr_id_1``/``cntr_id_2``). Composite PK ``(scn_name, line_id)`` with
    ``scn_name`` defaulting to 'Status Quo'; electrical parameters default
    to 0 server-side. ``geom`` is MULTILINESTRING, ``topo`` LINESTRING,
    both EPSG:4326.
    """
    __tablename__ = 'ego_grid_hv_electrical_neighbours_line'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    line_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    cntr_id_1 = Column(Text)
    cntr_id_2 = Column(Text)
    v_nom = Column(BigInteger)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Numeric, server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    length = Column(Float(53))
    cables = Column(Integer)
    frequency = Column(Numeric)
    terrain_factor = Column(Float(53), server_default=text("1"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
# Reflected table (no PK detected): DC-link-style connections between
# neighbouring-country buses; same scenario keying convention as the
# line/bus tables above. Geometries in EPSG:4326.
t_ego_grid_hv_electrical_neighbours_link = Table(
    'ego_grid_hv_electrical_neighbours_link', metadata,
    Column('scn_name', String, nullable=False, server_default=text("'Status Quo'::character varying")),
    Column('link_id', BigInteger, nullable=False),
    Column('bus0', BigInteger),
    Column('bus1', BigInteger),
    Column('cntr_id_1', String),
    Column('cntr_id_2', String),
    Column('v_nom', BigInteger),
    Column('efficiency', Float(53), server_default=text("1")),
    Column('p_nom', Numeric, server_default=text("0")),
    Column('p_nom_extendable', Boolean, server_default=text("false")),
    Column('p_nom_min', Float(53), server_default=text("0")),
    Column('p_min_pu', Float(53)),
    Column('p_max_pu', Float(53)),
    Column('p_nom_max', Float(53)),
    Column('capital_cost', Float(53)),
    Column('length', Float(53)),
    Column('terrain_factor', Float(53), server_default=text("1")),
    Column('geom', Geometry('MULTILINESTRING', 4326)),
    Column('topo', Geometry('LINESTRING', 4326)),
    schema='model_draft'
)
class EgoGridHvElectricalNeighboursTransformer(Base):
    """ORM mapping of ``model_draft.ego_grid_hv_electrical_neighbours_transformer``.

    Transformer between neighbour buses; composite PK ``(scn_name,
    trafo_id)``. Carries impedances with 0 server defaults plus the extra
    ``v1``/``v2``/``s1``/``s2``/``s_min`` rating columns. Geometries:
    MULTILINESTRING ``geom``, POINT ``geom_point``, LINESTRING ``topo``,
    all EPSG:4326.
    """
    __tablename__ = 'ego_grid_hv_electrical_neighbours_transformer'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    trafo_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    cntr_id = Column(Text)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Float(53), server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    tap_ratio = Column(Float(53))
    phase_shift = Column(Float(53))
    capital_cost = Column(Float(53), server_default=text("0"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    geom_point = Column(Geometry('POINT', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
    v1 = Column(Float(53), server_default=text("0"))
    v2 = Column(Float(53), server_default=text("0"))
    s1 = Column(Float(53), server_default=text("0"))
    s2 = Column(Float(53), server_default=text("0"))
    s_min = Column(Float(53), server_default=text("0"))
class EgoGridHvmvSubstation(Base):
    """ORM mapping of ``model_draft.ego_grid_hvmv_substation``.

    HV/MV substation from OSM; same layout as :class:`EgoGridEhvSubstation`
    plus ``ags_0`` (municipality key, presumably) and an extra ``geom``
    POINT in EPSG:3035. NOTE: PK is ``osm_id`` — ``subst_id`` is a
    sequence-backed surrogate that is not even declared unique here.
    """
    __tablename__ = 'ego_grid_hvmv_substation'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, nullable=False, server_default=text("nextval('model_draft.ego_grid_hvmv_substation_subst_id_seq'::regclass)"))
    lon = Column(Float(53), nullable=False)
    lat = Column(Float(53), nullable=False)
    point = Column(Geometry('POINT', 4326), nullable=False)
    polygon = Column(Geometry, nullable=False)
    voltage = Column(Text)
    power_type = Column(Text)
    substation = Column(Text)
    osm_id = Column(Text, primary_key=True)
    osm_www = Column(Text, nullable=False)
    frequency = Column(Text)
    subst_name = Column(Text)
    ref = Column(Text)
    operator = Column(Text)
    dbahn = Column(Text)
    status = Column(SmallInteger, nullable=False)
    otg_id = Column(BigInteger)
    ags_0 = Column(Text)
    geom = Column(Geometry('POINT', 3035), index=True)
class EgoGridHvmvSubstationDummy(Base):
    """ORM mapping of ``model_draft.ego_grid_hvmv_substation_dummy``.

    Minimal substation stand-in: just ``subst_id`` (PK), a name, and a
    POINT geometry in EPSG:3035.
    """
    __tablename__ = 'ego_grid_hvmv_substation_dummy'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    subst_name = Column(Text)
    geom = Column(Geometry('POINT', 3035), index=True)
# Reflected materialized view (read-only by convention): substations joined
# with municipality info; ``subst_id`` is unique. POINT geometry in EPSG:3035.
t_ego_grid_hvmv_substation_mun_2_mview = Table(
    'ego_grid_hvmv_substation_mun_2_mview', metadata,
    Column('subst_id', Integer, unique=True),
    Column('subst_name', Text),
    Column('subst_type', Integer),
    Column('geom', Geometry('POINT', 3035)),
    schema='model_draft'
)
class EgoGridHvmvSubstationV030(Base):
    """ORM mapping of ``model_draft.ego_grid_hvmv_substation_v030``.

    Versioned (v0.3.0, per table name) snapshot of
    :class:`EgoGridHvmvSubstation` — identical column layout except the
    EPSG:3035 ``geom`` is not indexed here. PK is ``osm_id``.
    """
    __tablename__ = 'ego_grid_hvmv_substation_v030'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, nullable=False, server_default=text("nextval('model_draft.ego_grid_hvmv_substation_v030_subst_id_seq'::regclass)"))
    lon = Column(Float(53), nullable=False)
    lat = Column(Float(53), nullable=False)
    point = Column(Geometry('POINT', 4326), nullable=False)
    polygon = Column(Geometry, nullable=False)
    voltage = Column(Text)
    power_type = Column(Text)
    substation = Column(Text)
    osm_id = Column(Text, primary_key=True)
    osm_www = Column(Text, nullable=False)
    frequency = Column(Text)
    subst_name = Column(Text)
    ref = Column(Text)
    operator = Column(Text)
    dbahn = Column(Text)
    status = Column(SmallInteger, nullable=False)
    otg_id = Column(BigInteger)
    ags_0 = Column(Text)
    geom = Column(Geometry('POINT', 3035))
class EgoGridHvmvSubstationVoronoi(Base):
    """ORM mapping of ``model_draft.ego_grid_hvmv_substation_voronoi``.

    Voronoi cell per substation (``subst_id``; ``subst_sum`` is a count —
    presumably substations per cell, confirm against producing SQL).
    POLYGON geometry in EPSG:3035.
    """
    __tablename__ = 'ego_grid_hvmv_substation_voronoi'
    __table_args__ = {'schema': 'model_draft'}
    geom = Column(Geometry('POLYGON', 3035), index=True)
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_hvmv_substation_voronoi_id_seq'::regclass)"))
    subst_id = Column(Integer)
    subst_sum = Column(Integer)
class EgoGridHvmvSubstationVoronoiCut(Base):
    """ORM mapping of ``model_draft.ego_grid_hvmv_substation_voronoi_cut``.

    Voronoi cells intersected ("cut") with municipalities (``mun_id``,
    ``ags_0``); keeps the source cell id (``voi_id``) and the substation
    position ``geom_sub``. POLYGON/POINT geometries in EPSG:3035.
    """
    __tablename__ = 'ego_grid_hvmv_substation_voronoi_cut'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_hvmv_substation_voronoi_cut_id_seq'::regclass)"))
    subst_id = Column(Integer)
    mun_id = Column(Integer)
    voi_id = Column(Integer)
    ags_0 = Column(String(12))
    subst_type = Column(Integer)
    subst_sum = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
    geom_sub = Column(Geometry('POINT', 3035), index=True)
# --- Reflected materialized views over ego_grid_hvmv_substation_voronoi_cut.
# The *_0subst_* views cover cut pieces without a substation and their
# nearest-neighbour assignment; *_1subst_mview covers pieces with exactly one
# substation (per the 0subst/1subst naming; derived SQL not visible here).
# All geometries are EPSG:3035.

# Cut pieces with zero substations.
t_ego_grid_hvmv_substation_voronoi_cut_0subst_mview = Table(
    'ego_grid_hvmv_substation_voronoi_cut_0subst_mview', metadata,
    Column('id', Integer, unique=True),
    Column('subst_id', Integer),
    Column('mun_id', Integer),
    Column('voi_id', Integer),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
# Connector LINESTRINGs from piece centre to the assigned substation.
t_ego_grid_hvmv_substation_voronoi_cut_0subst_nn_line_mview = Table(
    'ego_grid_hvmv_substation_voronoi_cut_0subst_nn_line_mview', metadata,
    Column('id', BigInteger, unique=True),
    Column('voi_id', Integer),
    Column('subst_id', Integer),
    Column('geom_centre', Geometry('POINT', 3035), index=True),
    Column('geom', Geometry('LINESTRING', 3035), index=True),
    schema='model_draft'
)
# Nearest-neighbour pairing of a substation-less piece with a substation cell.
t_ego_grid_hvmv_substation_voronoi_cut_0subst_nn_mview = Table(
    'ego_grid_hvmv_substation_voronoi_cut_0subst_nn_mview', metadata,
    Column('voi_id', Integer, unique=True),
    Column('voi_ags_0', String(12)),
    Column('geom_voi', Geometry('POLYGON', 3035), index=True),
    Column('subst_id', Integer),
    Column('ags_0', String(12)),
    Column('geom_sub', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
# Union of nearest-neighbour-assigned pieces per substation.
t_ego_grid_hvmv_substation_voronoi_cut_0subst_nn_union_mview = Table(
    'ego_grid_hvmv_substation_voronoi_cut_0subst_nn_union_mview', metadata,
    Column('subst_id', Integer, unique=True),
    Column('geom', Geometry('MULTIPOLYGON', 3035), index=True),
    schema='model_draft'
)
# Cut pieces containing exactly one substation, incl. its POINT position.
t_ego_grid_hvmv_substation_voronoi_cut_1subst_mview = Table(
    'ego_grid_hvmv_substation_voronoi_cut_1subst_mview', metadata,
    Column('id', Integer, unique=True),
    Column('subst_id', Integer),
    Column('mun_id', Integer),
    Column('voi_id', Integer),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('subst_sum', Integer),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    Column('geom_sub', Geometry('POINT', 3035)),
    schema='model_draft'
)
class EgoGridHvmvSubstationVoronoiCutNnCollect(Base):
    """ORM mapping of ``model_draft.ego_grid_hvmv_substation_voronoi_cut_nn_collect``.

    Collected (multi-)polygons per substation after nearest-neighbour
    assignment; MULTIPOLYGON geometry in EPSG:3035.
    """
    __tablename__ = 'ego_grid_hvmv_substation_voronoi_cut_nn_collect'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_hvmv_substation_voronoi_cut_nn_collect_id_seq'::regclass)"))
    subst_id = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
# Reflected materialized view: one MULTIPOLYGON per substation after the
# nearest-neighbour collection step (EPSG:3035).
t_ego_grid_hvmv_substation_voronoi_cut_nn_mview = Table(
    'ego_grid_hvmv_substation_voronoi_cut_nn_mview', metadata,
    Column('subst_id', Integer, unique=True),
    Column('geom', Geometry('MULTIPOLYGON', 3035), index=True),
    schema='model_draft'
)
# Reflected materialized view over the raw Voronoi table (see
# EgoGridHvmvSubstationVoronoi); ``id`` is unique.
t_ego_grid_hvmv_substation_voronoi_mview = Table(
    'ego_grid_hvmv_substation_voronoi_mview', metadata,
    Column('id', Integer, unique=True),
    Column('subst_id', Integer),
    Column('subst_sum', Integer),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class EgoGridLineExpansionCost(Base):
    """ORM mapping of ``model_draft.ego_grid_line_expansion_costs``.

    Cost-assumption lookup per voltage level / component / measure, with
    the unit, a source reference, and a PyPSA-ready capital-cost value.
    No geometry columns.
    """
    __tablename__ = 'ego_grid_line_expansion_costs'
    __table_args__ = {'schema': 'model_draft'}
    cost_id = Column(BigInteger, primary_key=True)
    voltage_level = Column(Text)
    component = Column(Text)
    measure = Column(Text)
    investment_cost = Column(Float(53))
    unit = Column(Text)
    comment = Column(Text)
    source = Column(Text)
    capital_costs_pypsa = Column(Float(53))
class EgoGridLvBuildingConn(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_building_conn``.

    Connection from a building to a street segment within a load area
    (``la_id``): connector LINESTRING in EPSG:3035 plus its length
    (``distance``; units not stated — presumably metres given EPSG:3035).
    """
    __tablename__ = 'ego_grid_lv_building_conn'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_building_conn_id_seq'::regclass)"))
    building_id = Column(Integer)
    street_id = Column(Integer)
    la_id = Column(Integer)
    geom_line = Column(Geometry('LINESTRING', 3035), index=True)
    distance = Column(Float(53))
class EgoGridLvBuilding(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_buildings``.

    Building footprint (POLYGON, EPSG:3035) tagged with its load area.
    """
    __tablename__ = 'ego_grid_lv_buildings'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_buildings_id_seq'::regclass)"))
    polygon_id = Column(Integer)
    la_id = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridLvCandidatepoint(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_candidatepoints``.

    Candidate site (POINT, EPSG:3035) scored with neighbourhood metrics;
    the numeric suffixes (50/100/250) presumably denote search radii in
    metres — confirm against the producing script.
    """
    __tablename__ = 'ego_grid_lv_candidatepoints'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_candidatepoints_id_seq'::regclass)"))
    geom = Column(Geometry('POINT', 3035))
    la_id = Column(Integer)
    pop50 = Column(Integer)
    pop100 = Column(Integer)
    diststreet = Column(Integer)
    distcrossroad = Column(Integer)
    buildingsnr50 = Column(Integer)
    buildingsarea100 = Column(Integer)
    buildingsarea250 = Column(Integer)
class EgoGridLvGriddistrict(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict``.

    LV grid district: area, census population, per-sector
    (residential/retail/industrial/agricultural) area, share, count,
    consumption and peak-load statistics, plus the district
    MULTIPOLYGON in EPSG:3035. ``nn`` flags nearest-neighbour-assigned
    districts (per the *_cut_nn tables below).
    """
    __tablename__ = 'ego_grid_lv_griddistrict'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    mvlv_subst_id = Column(Integer)
    subst_id = Column(Integer)
    la_id = Column(Integer)
    nn = Column(Boolean)
    subst_cnt = Column(Integer)
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Float(53))
    population_density = Column(Float(53))
    area_ha = Column(Float(53))
    sector_area_residential = Column(Float(53))
    sector_area_retail = Column(Float(53))
    sector_area_industrial = Column(Float(53))
    sector_area_agricultural = Column(Float(53))
    sector_area_sum = Column(Float(53))
    sector_share_residential = Column(Float(53))
    sector_share_retail = Column(Float(53))
    sector_share_industrial = Column(Float(53))
    sector_share_agricultural = Column(Float(53))
    sector_share_sum = Column(Float(53))
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    sector_consumption_residential = Column(Float(53))
    sector_consumption_retail = Column(Float(53))
    sector_consumption_industrial = Column(Float(53))
    sector_consumption_agricultural = Column(Float(53))
    sector_consumption_sum = Column(Float(53))
    sector_peakload_residential = Column(Float(53))
    sector_peakload_retail = Column(Float(53))
    sector_peakload_industrial = Column(Float(53))
    sector_peakload_agricultural = Column(Float(53))
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridLvGriddistrictCut(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict_cut``.

    Intermediate cut pieces of LV grid districts with their substation
    count; POLYGON geometry in EPSG:3035.
    """
    __tablename__ = 'ego_grid_lv_griddistrict_cut'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_griddistrict_cut_id_seq'::regclass)"))
    la_id = Column(Integer)
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridLvGriddistrictCut0subst(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict_cut_0subst``.

    Cut pieces with zero substations (same layout as the parent cut table,
    without a sequence default on ``id``).
    """
    __tablename__ = 'ego_grid_lv_griddistrict_cut_0subst'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    la_id = Column(Integer)
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridLvGriddistrictCut1subst(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict_cut_1subst``.

    Cut pieces containing exactly one substation; layout mirrors
    :class:`EgoGridLvGriddistrictCut0subst`.
    """
    __tablename__ = 'ego_grid_lv_griddistrict_cut_1subst'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    la_id = Column(Integer)
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridLvGriddistrictCutNn(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict_cut_nn``.

    Nearest-neighbour pairing of cut pieces (``a_id`` → ``b_id``) with the
    assigned substation, the piece POLYGON, the connector LINESTRING and
    the pair distance. Geometries in EPSG:3035.
    """
    __tablename__ = 'ego_grid_lv_griddistrict_cut_nn'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_griddistrict_cut_nn_id_seq'::regclass)"))
    a_id = Column(Integer)
    b_id = Column(Integer)
    subst_id = Column(Integer)
    la_id = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
    geom_line = Column(Geometry('LINESTRING', 3035), index=True)
    distance = Column(Float(53))
class EgoGridLvGriddistrictCutNnCollect(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict_cut_nn_collect``.

    Collected MULTIPOLYGONs (EPSG:3035) per MV/LV substation after the
    nearest-neighbour step; ``nn`` marks NN-derived rows.
    """
    __tablename__ = 'ego_grid_lv_griddistrict_cut_nn_collect'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    mvlv_subst_id = Column(Integer)
    subst_id = Column(Integer)
    la_id = Column(Integer)
    nn = Column(Boolean)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridLvGriddistrictCutXsubst(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict_cut_xsubst``.

    Cut pieces with multiple substations ("x" per the 0/1/x naming of the
    sibling tables); layout mirrors the other *_cut_* tables.
    """
    __tablename__ = 'ego_grid_lv_griddistrict_cut_xsubst'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    la_id = Column(Integer)
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridLvGriddistrictPaper(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrict_paper``.

    Point-level working table (POINT, EPSG:3035) with secondary-substation
    ("ONT") ids and counts — presumably produced for a publication, per
    the table name; exact semantics of ``merge_id`` not visible here.
    """
    __tablename__ = 'ego_grid_lv_griddistrict_paper'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    geom = Column(Geometry('POINT', 3035), index=True)
    la_id = Column(Integer)
    ont_count = Column(Integer)
    ont_id = Column(Integer)
    merge_id = Column(Integer)
    mvlv_subst_id = Column(Integer)
class EgoGridLvGriddistrictsvoronoi(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrictsvoronoi``.

    Voronoi POLYGON (EPSG:3035) per substation for LV grid districts.
    """
    __tablename__ = 'ego_grid_lv_griddistrictsvoronoi'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_griddistrictsvoronoi_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
    subst_id = Column(Integer)
class EgoGridLvGriddistrictwithoutpop(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_griddistrictwithoutpop``.

    LV grid district POLYGONs (EPSG:3035) without population, referencing
    their load area via ``load_area_id``.
    """
    __tablename__ = 'ego_grid_lv_griddistrictwithoutpop'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_griddistrictwithoutpop_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
    load_area_id = Column(Integer)
class EgoGridLvLoadareaRest(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_loadarea_rest``.

    Residual load-area pieces: POLYGON plus a representative POINT
    (``geom_point``), both EPSG:3035.
    """
    __tablename__ = 'ego_grid_lv_loadarea_rest'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_loadarea_rest_id_seq'::regclass)"))
    la_id = Column(Integer)
    geom_point = Column(Geometry('POINT', 3035), index=True)
    geom = Column(Geometry('POLYGON', 3035), index=True)
# Reflected table (no PK detected): versioned load areas with census
# statistics and an ONT (secondary substation) count; POLYGON in EPSG:3035.
t_ego_grid_lv_loadareas = Table(
    'ego_grid_lv_loadareas', metadata,
    Column('version', Text),
    Column('id', Integer),
    Column('area_ha', Numeric),
    Column('zensus_sum', Integer),
    Column('zensus_count', Integer),
    Column('zensus_density', Numeric),
    Column('geom', Geometry('POLYGON', 3035)),
    Column('ontnumber', Numeric),
    schema='model_draft'
)
# Reflected table: bare ONT positions (POINT, EPSG:3035) — single untyped
# geometry column, no key.
t_ego_grid_lv_ons = Table(
    'ego_grid_lv_ons', metadata,
    Column('geom', Geometry('POINT', 3035)),
    schema='model_draft'
)
class EgoGridLvStreet(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_streets``.

    Street segment (LINESTRING, EPSG:3035) within a load area.
    """
    __tablename__ = 'ego_grid_lv_streets'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_streets_id_seq'::regclass)"))
    line_id = Column(Integer)
    la_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
class EgoGridLvTestGriddistrict(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_test_griddistrict``.

    Test/scratch variant of the LV grid-district table (MULTIPOLYGON,
    EPSG:3035); column order reflects the physical table, with the
    sequence-backed PK declared last.
    """
    __tablename__ = 'ego_grid_lv_test_griddistrict'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 3035))
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_test_griddistrict_id_seq'::regclass)"))
class EgoGridLvTestStreet(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_test_streets``.

    Test/scratch street table keyed by a sequence-backed ``id`` (declared
    last, mirroring physical column order); ``geom`` is an untyped
    geometry and ``osm_id`` links back to OSM.
    """
    __tablename__ = 'ego_grid_lv_test_streets'
    __table_args__ = {'schema': 'model_draft'}
    osm_id = Column(BigInteger)
    geom = Column(Geometry)
    gid = Column(Integer)
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_test_streets_id_seq'::regclass)"))
class EgoGridLvZensusdaten(Base):
    """ORM mapping of ``model_draft.ego_grid_lv_zensusdaten``.

    Census ("Zensus") population per cell: POLYGON cell plus its POINT
    (both EPSG:3035); ``population`` is an integer-valued Numeric(10, 0).
    """
    __tablename__ = 'ego_grid_lv_zensusdaten'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_lv_zensusdaten_id_seq'::regclass)"))
    la_id = Column(Integer)
    population = Column(Numeric(10, 0))
    geom_point = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035))
class EgoGridMvGriddistrict(Base):
    """ORM mapping of ``model_draft.ego_grid_mv_griddistrict``.

    MV grid district keyed by ``subst_id``: substation type counts
    (type1..type3), census/population statistics, load-area and DEA
    (decentralized generation) aggregates split by LV/MV, and the district
    MULTIPOLYGON in EPSG:3035. ``group`` intentionally mirrors the DB
    column name (it is not a reserved word in Python).
    """
    __tablename__ = 'ego_grid_mv_griddistrict'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    subst_sum = Column(Integer)
    type1 = Column(Integer)
    type1_cnt = Column(Integer)
    type2 = Column(Integer)
    type2_cnt = Column(Integer)
    type3 = Column(Integer)
    type3_cnt = Column(Integer)
    group = Column(CHAR(1))
    gem = Column(Integer)
    gem_clean = Column(Integer)
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Numeric)
    population_density = Column(Numeric)
    la_count = Column(Integer)
    area_ha = Column(Numeric)
    la_area = Column(Numeric(10, 1))
    free_area = Column(Numeric(10, 1))
    area_share = Column(Numeric(4, 1))
    consumption = Column(Numeric)
    consumption_per_area = Column(Numeric)
    dea_cnt = Column(Integer)
    dea_capacity = Column(Numeric)
    lv_dea_cnt = Column(Integer)
    lv_dea_capacity = Column(Numeric)
    mv_dea_cnt = Column(Integer)
    mv_dea_capacity = Column(Numeric)
    geom_type = Column(Text)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
# --- Reflected scenario-year snapshots of ego_grid_mv_griddistrict
# (no PK detected, hence plain Table objects). Column layout is identical
# to EgoGridMvGriddistrict; see that class for column meaning.

# Scenario-year 2035 snapshot.
t_ego_grid_mv_griddistrict_2035 = Table(
    'ego_grid_mv_griddistrict_2035', metadata,
    Column('subst_id', Integer),
    Column('subst_sum', Integer),
    Column('type1', Integer),
    Column('type1_cnt', Integer),
    Column('type2', Integer),
    Column('type2_cnt', Integer),
    Column('type3', Integer),
    Column('type3_cnt', Integer),
    Column('group', CHAR(1)),
    Column('gem', Integer),
    Column('gem_clean', Integer),
    Column('zensus_sum', Integer),
    Column('zensus_count', Integer),
    Column('zensus_density', Numeric),
    Column('population_density', Numeric),
    Column('la_count', Integer),
    Column('area_ha', Numeric),
    Column('la_area', Numeric(10, 1)),
    Column('free_area', Numeric(10, 1)),
    Column('area_share', Numeric(4, 1)),
    Column('consumption', Numeric),
    Column('consumption_per_area', Numeric),
    Column('dea_cnt', Integer),
    Column('dea_capacity', Numeric),
    Column('lv_dea_cnt', Integer),
    Column('lv_dea_capacity', Numeric),
    Column('mv_dea_cnt', Integer),
    Column('mv_dea_capacity', Numeric),
    Column('geom_type', Text),
    Column('geom', Geometry('MULTIPOLYGON', 3035)),
    schema='model_draft'
)
# Scenario-year 2050 snapshot (same layout).
t_ego_grid_mv_griddistrict_2050 = Table(
    'ego_grid_mv_griddistrict_2050', metadata,
    Column('subst_id', Integer),
    Column('subst_sum', Integer),
    Column('type1', Integer),
    Column('type1_cnt', Integer),
    Column('type2', Integer),
    Column('type2_cnt', Integer),
    Column('type3', Integer),
    Column('type3_cnt', Integer),
    Column('group', CHAR(1)),
    Column('gem', Integer),
    Column('gem_clean', Integer),
    Column('zensus_sum', Integer),
    Column('zensus_count', Integer),
    Column('zensus_density', Numeric),
    Column('population_density', Numeric),
    Column('la_count', Integer),
    Column('area_ha', Numeric),
    Column('la_area', Numeric(10, 1)),
    Column('free_area', Numeric(10, 1)),
    Column('area_share', Numeric(4, 1)),
    Column('consumption', Numeric),
    Column('consumption_per_area', Numeric),
    Column('dea_cnt', Integer),
    Column('dea_capacity', Numeric),
    Column('lv_dea_cnt', Integer),
    Column('lv_dea_capacity', Numeric),
    Column('mv_dea_cnt', Integer),
    Column('mv_dea_capacity', Numeric),
    Column('geom_type', Text),
    Column('geom', Geometry('MULTIPOLYGON', 3035)),
    schema='model_draft'
)
class EgoGridMvGriddistrictCollect(Base):
    """ORM mapping of ``model_draft.ego_grid_mv_griddistrict_collect``.

    Collected MV districts with the substation metadata (name, ``ags_0``,
    type, POINT position ``geom_sub``) and the district MULTIPOLYGON;
    geometries in EPSG:3035.
    """
    __tablename__ = 'ego_grid_mv_griddistrict_collect'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_griddistrict_collect_id_seq'::regclass)"))
    subst_id = Column(Integer)
    subst_name = Column(Text)
    ags_0 = Column(String(12))
    geom_sub = Column(Geometry('POINT', 3035), index=True)
    subst_sum = Column(Integer)
    subst_type = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvGriddistrictDump(Base):
    """ORM mapping of ``model_draft.ego_grid_mv_griddistrict_dump``.

    Single-polygon pieces of MV districts (a PostGIS ST_Dump-style
    decomposition, per the table name) with a representative POINT;
    geometries in EPSG:3035. Note the ``_id_seq1`` sequence name.
    """
    __tablename__ = 'ego_grid_mv_griddistrict_dump'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_griddistrict_dump_id_seq1'::regclass)"))
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom_point = Column(Geometry('POINT', 3035), index=True)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridMvGriddistrictDump0sub(Base):
    """ORM mapping of ``model_draft.ego_grid_mv_griddistrict_dump_0sub``.

    Dump pieces containing zero substations; layout matches the parent
    dump table (``geom_point`` unindexed here).
    """
    __tablename__ = 'ego_grid_mv_griddistrict_dump_0sub'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_griddistrict_dump_0sub_id_seq'::regclass)"))
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom_point = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridMvGriddistrictDump1sub(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_dump_1sub
    (dump polygons with exactly one substation, per subst_cnt)."""
    __tablename__ = 'ego_grid_mv_griddistrict_dump_1sub'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_griddistrict_dump_1sub_id_seq'::regclass)"))
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridMvGriddistrictDumpNn(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_dump_nn.

    Pairs a polygon (a_*) with another polygon (b_*) and their distance;
    presumably a nearest-neighbour join result -- confirm against the query
    that fills this table.
    """
    __tablename__ = 'ego_grid_mv_griddistrict_dump_nn'
    __table_args__ = {'schema': 'model_draft'}
    a_id = Column(Integer, primary_key=True)
    a_geom_point = Column(Geometry('POINT', 3035))
    a_geom = Column(Geometry('POLYGON', 3035))
    b_id = Column(Integer)
    subst_id = Column(Integer)
    b_subst_cnt = Column(Integer)
    b_geom = Column(Geometry('POLYGON', 3035))
    distance = Column(Float(53))
class EgoGridMvGriddistrictDumpNnCollect(Base):
    """Declarative mapping of
    model_draft.ego_grid_mv_griddistrict_dump_nn_collect."""
    __tablename__ = 'ego_grid_mv_griddistrict_dump_nn_collect'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_griddistrict_dump_nn_collect_id_seq'::regclass)"))
    subst_id = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridMvGriddistrictDumpNnCollectUnion(Base):
    """Declarative mapping of
    model_draft.ego_grid_mv_griddistrict_dump_nn_collect_union
    (one multipolygon per substation)."""
    __tablename__ = 'ego_grid_mv_griddistrict_dump_nn_collect_union'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvGriddistrictDumpNnLine(Base):
    """Declarative mapping of
    model_draft.ego_grid_mv_griddistrict_dump_nn_line
    (linestrings linking a_id geometries to substations)."""
    __tablename__ = 'ego_grid_mv_griddistrict_dump_nn_line'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_griddistrict_dump_nn_line_id_seq'::regclass)"))
    a_id = Column(Integer)
    subst_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
class EgoGridMvGriddistrictDumpNnUnion(Base):
    """Declarative mapping of
    model_draft.ego_grid_mv_griddistrict_dump_nn_union
    (one multipolygon per substation)."""
    __tablename__ = 'ego_grid_mv_griddistrict_dump_nn_union'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvGriddistrictNew(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_new
    (one multipolygon per substation)."""
    __tablename__ = 'ego_grid_mv_griddistrict_new'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvGriddistrictNewDump(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_new_dump."""
    __tablename__ = 'ego_grid_mv_griddistrict_new_dump'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_griddistrict_new_dump_id_seq'::regclass)"))
    subst_id = Column(Integer)
    subst_cnt = Column(Integer)
    geom_point = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridMvGriddistrictType1(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_type1.

    Same column layout as the type2/type3 siblings; presumably one table
    per substation type -- confirm in the producing pipeline.
    """
    __tablename__ = 'ego_grid_mv_griddistrict_type1'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    subst_name = Column(Text)
    ags_0 = Column(String(12))
    geom_sub = Column(Geometry('POINT', 3035), index=True)
    subst_sum = Column(Integer)
    subst_type = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvGriddistrictType2(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_type2
    (same layout as the type1/type3 siblings)."""
    __tablename__ = 'ego_grid_mv_griddistrict_type2'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    subst_name = Column(Text)
    ags_0 = Column(String(12))
    geom_sub = Column(Geometry('POINT', 3035), index=True)
    subst_sum = Column(Integer)
    subst_type = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvGriddistrictType3(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_type3
    (same layout as the type1/type2 siblings)."""
    __tablename__ = 'ego_grid_mv_griddistrict_type3'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    subst_name = Column(Text)
    ags_0 = Column(String(12))
    geom_sub = Column(Geometry('POINT', 3035), index=True)
    subst_sum = Column(Integer)
    subst_type = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvGriddistrictUnion(Base):
    """Declarative mapping of model_draft.ego_grid_mv_griddistrict_union
    (one multipolygon per substation)."""
    __tablename__ = 'ego_grid_mv_griddistrict_union'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoGridMvVisualizationBranch(Base):
    """Declarative mapping of model_draft.ego_grid_mv_visualization_branches
    (MV branch geometries in WGS84 / SRID 4326 for visualization)."""
    __tablename__ = 'ego_grid_mv_visualization_branches'
    __table_args__ = {'schema': 'model_draft'}
    branch_id = Column(String(25), primary_key=True)
    grid_id = Column(Integer)
    type_name = Column(String(25))
    type_kind = Column(String(5))
    type_v_nom = Column(Integer)
    type_s_nom = Column(Float(53))
    length = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 4326), index=True)
    s_res0 = Column(Float(53))
    s_res1 = Column(Float(53))
class EgoGridMvVisualizationBranchesJa(Base):
    """Declarative mapping of
    model_draft.ego_grid_mv_visualization_branches_ja
    (same layout as the non-_ja branches table)."""
    __tablename__ = 'ego_grid_mv_visualization_branches_ja'
    __table_args__ = {'schema': 'model_draft'}
    branch_id = Column(String(25), primary_key=True)
    grid_id = Column(Integer)
    type_name = Column(String(25))
    type_kind = Column(String(5))
    type_v_nom = Column(Integer)
    type_s_nom = Column(Float(53))
    length = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 4326), index=True)
    s_res0 = Column(Float(53))
    s_res1 = Column(Float(53))
class EgoGridMvVisualizationBunch(Base):
    """Declarative mapping of model_draft.ego_grid_mv_visualization_bunch
    (per-grid bundles of visualization geometries, SRID 4326)."""
    __tablename__ = 'ego_grid_mv_visualization_bunch'
    __table_args__ = {'schema': 'model_draft'}
    grid_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_visualization_bunch_grid_id_seq'::regclass)"))
    geom_mv_station = Column(Geometry('POINT', 4326), index=True)
    geom_mv_cable_dists = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_circuit_breakers = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_lv_load_area_centres = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_lv_stations = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_generators = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_lines = Column(Geometry('MULTILINESTRING', 4326), index=True)
class EgoGridMvVisualizationBunchJa(Base):
    """Declarative mapping of model_draft.ego_grid_mv_visualization_bunch_ja
    (same layout as ego_grid_mv_visualization_bunch)."""
    __tablename__ = 'ego_grid_mv_visualization_bunch_ja'
    __table_args__ = {'schema': 'model_draft'}
    # NOTE(review): this default reuses the non-_ja sequence
    # (ego_grid_mv_visualization_bunch_grid_id_seq); confirm both tables are
    # intended to share one id space.
    grid_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_visualization_bunch_grid_id_seq'::regclass)"))
    geom_mv_station = Column(Geometry('POINT', 4326), index=True)
    geom_mv_cable_dists = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_circuit_breakers = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_lv_load_area_centres = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_lv_stations = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_generators = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_lines = Column(Geometry('MULTILINESTRING', 4326), index=True)
class EgoGridMvVisualizationBunchPaper1(Base):
    """Declarative mapping of
    model_draft.ego_grid_mv_visualization_bunch_paper1
    (same layout as ego_grid_mv_visualization_bunch)."""
    __tablename__ = 'ego_grid_mv_visualization_bunch_paper1'
    __table_args__ = {'schema': 'model_draft'}
    grid_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mv_visualization_bunch_paper1_grid_id_seq'::regclass)"))
    geom_mv_station = Column(Geometry('POINT', 4326), index=True)
    geom_mv_cable_dists = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_circuit_breakers = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_lv_load_area_centres = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_lv_stations = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_generators = Column(Geometry('MULTIPOINT', 4326), index=True)
    geom_mv_lines = Column(Geometry('MULTILINESTRING', 4326), index=True)
class EgoGridMvVisualizationNode(Base):
    """Declarative mapping of model_draft.ego_grid_mv_visualization_nodes
    (MV node points in SRID 4326 for visualization)."""
    __tablename__ = 'ego_grid_mv_visualization_nodes'
    __table_args__ = {'schema': 'model_draft'}
    node_id = Column(String(100), primary_key=True)
    grid_id = Column(Integer)
    v_nom = Column(Integer)
    geom = Column(Geometry('POINT', 4326), index=True)
    v_res0 = Column(Float(53))
    v_res1 = Column(Float(53))
class EgoGridMvVisualizationNodesJa(Base):
    """Declarative mapping of model_draft.ego_grid_mv_visualization_nodes_ja
    (same layout as the non-_ja nodes table)."""
    __tablename__ = 'ego_grid_mv_visualization_nodes_ja'
    __table_args__ = {'schema': 'model_draft'}
    node_id = Column(String(100), primary_key=True)
    grid_id = Column(Integer)
    v_nom = Column(Integer)
    geom = Column(Geometry('POINT', 4326), index=True)
    v_res0 = Column(Float(53))
    v_res1 = Column(Float(53))
# Core Table (no ORM class; presumably the table has no primary key).
# Reference points with population / street-distance / building statistics
# within several radii (50-1000 m). The table name spelling
# 'referenceontpoints' is kept as-is -- it must match the DB object.
t_ego_grid_mvlv_referenceontpoints = Table(
    'ego_grid_mvlv_referenceontpoints', metadata,
    Column('id', Integer),
    Column('geom', Geometry('POINT', 3035)),
    Column('pop50', Integer),
    Column('pop100', Integer),
    Column('pop250', Integer),
    Column('pop500', Integer),
    Column('pop1000', Integer),
    Column('diststreet', Integer),
    Column('distcrossroad', Integer),
    Column('buildingsnr50', Integer),
    Column('buildingsnr100', Integer),
    Column('buildingsnr250', Integer),
    Column('buildingsnr500', Integer),
    Column('buildingsnr1000', Integer),
    Column('buildingsarea50', Integer),
    Column('buildingsarea100', Integer),
    Column('buildingsarea250', Integer),
    Column('buildingsarea500', Integer),
    Column('buildingsarea1000', Integer),
    schema='model_draft'
)
class EgoGridMvlvSubstation(Base):
    """Declarative mapping of model_draft.ego_grid_mvlv_substation
    (MV/LV substations located within load areas)."""
    __tablename__ = 'ego_grid_mvlv_substation'
    __table_args__ = {'schema': 'model_draft'}
    mvlv_subst_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mvlv_substation_mvlv_subst_id_seq'::regclass)"))
    la_id = Column(Integer)
    subst_id = Column(Integer)
    geom = Column(Geometry('POINT', 3035), index=True)
    is_dummy = Column(Boolean)
class EgoGridMvlvSubstationPaper(Base):
    """Declarative mapping of model_draft.ego_grid_mvlv_substation_paper
    (same layout as ego_grid_mvlv_substation)."""
    __tablename__ = 'ego_grid_mvlv_substation_paper'
    __table_args__ = {'schema': 'model_draft'}
    mvlv_subst_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mvlv_substation_paper_mvlv_subst_id_seq'::regclass)"))
    la_id = Column(Integer)
    subst_id = Column(Integer)
    geom = Column(Geometry('POINT', 3035), index=True)
    is_dummy = Column(Boolean)
class EgoGridMvlvSubstationVoronoi(Base):
    """Declarative mapping of model_draft.ego_grid_mvlv_substation_voronoi
    (Voronoi polygons around substations, SRID 3035)."""
    __tablename__ = 'ego_grid_mvlv_substation_voronoi'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_grid_mvlv_substation_voronoi_id_seq'::regclass)"))
    subst_id = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoGridPfHvBus(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_bus.

    HV power-flow buses; composite primary key (scn_name, bus_id) so the
    same bus can exist in several scenarios.
    """
    __tablename__ = 'ego_grid_pf_hv_bus'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    bus_id = Column(BigInteger, primary_key=True, nullable=False)
    v_nom = Column(Float(53))
    current_type = Column(Text, server_default=text("'AC'::text"))
    v_mag_pu_min = Column(Float(53), server_default=text("0"))
    v_mag_pu_max = Column(Float(53))
    geom = Column(Geometry('POINT', 4326))
class EgoGridPfHvBusV030(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_bus_v030
    (versioned copy of ego_grid_pf_hv_bus; identical layout)."""
    __tablename__ = 'ego_grid_pf_hv_bus_v030'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    bus_id = Column(BigInteger, primary_key=True, nullable=False)
    v_nom = Column(Float(53))
    current_type = Column(Text, server_default=text("'AC'::text"))
    v_mag_pu_min = Column(Float(53), server_default=text("0"))
    v_mag_pu_max = Column(Float(53))
    geom = Column(Geometry('POINT', 4326))
class EgoGridPfHvBusmap(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_busmap
    (bus0 -> bus1 mapping per scenario/version, with path length)."""
    __tablename__ = 'ego_grid_pf_hv_busmap'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(Text, primary_key=True, nullable=False)
    version = Column(Text, primary_key=True, nullable=False)
    bus0 = Column(Text, primary_key=True, nullable=False)
    bus1 = Column(Text, primary_key=True, nullable=False)
    path_length = Column(Numeric)
class EgoGridPfHvDataCheck(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_data_check.

    Data-quality check results; note test_id is sequence-backed but NOT part
    of the (version, scn_name, test) primary key.
    """
    __tablename__ = 'ego_grid_pf_hv_data_check'
    __table_args__ = {'schema': 'model_draft'}
    test_id = Column(Integer, nullable=False, server_default=text("nextval('model_draft.ego_grid_pf_hv_data_check_test_id_seq'::regclass)"))
    version = Column(String, primary_key=True, nullable=False)
    scn_name = Column(String, primary_key=True, nullable=False)
    test = Column(String, primary_key=True, nullable=False)
    table_name = Column(String)
    count = Column(Integer)
class EgoGridPfHvExtensionBus(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_extension_bus
    (HV buses added by grid-extension projects)."""
    __tablename__ = 'ego_grid_pf_hv_extension_bus'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    bus_id = Column(BigInteger, primary_key=True, nullable=False)
    v_nom = Column(Float(53))
    current_type = Column(Text, server_default=text("'AC'::text"))
    v_mag_pu_min = Column(Float(53), server_default=text("0"))
    v_mag_pu_max = Column(Float(53))
    geom = Column(Geometry('POINT', 4326), index=True)
    project = Column(String)
    bus_name = Column(String)
class EgoGridPfHvExtensionLine(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_extension_line
    (HV lines added by grid-extension projects; electrical parameters
    mirror ego_grid_pf_hv_line plus project metadata)."""
    __tablename__ = 'ego_grid_pf_hv_extension_line'
    __table_args__ = {'schema': 'model_draft'}
    # NOTE(review): unlike the sibling extension tables, scn_name is not
    # declared primary_key here -- confirm this is intentional.
    scn_name = Column(String, nullable=False, server_default=text("'extension_nep2035_confirmed'::character varying"))
    line_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Numeric, server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    length = Column(Float(53))
    cables = Column(Integer)
    frequency = Column(Numeric)
    terrain_factor = Column(Float(53), server_default=text("1"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
    v_nom = Column(BigInteger)
    project = Column(String)
    project_id = Column(BigInteger)
    # Bug fix: the original `segment` and `cable` lines ended with stray
    # trailing commas, turning each attribute into a 1-tuple
    # `(Column(...),)`. SQLAlchemy's declarative machinery only maps Column
    # objects, so both columns were silently missing from the model.
    segment = Column(BigInteger)
    cable = Column(Boolean, server_default=text("false"))
    nova = Column(String)
class EgoGridPfHvExtensionLink(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_extension_link
    (controllable links added by grid-extension projects)."""
    __tablename__ = 'ego_grid_pf_hv_extension_link'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'extension_nep2035_confirmed'::character varying"))
    link_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    efficiency = Column(Float(53), server_default=text("1"))
    p_nom = Column(Numeric, server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    marginal_cost = Column(Float(53))
    length = Column(Float(53))
    terrain_factor = Column(Float(53), server_default=text("1"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
    project = Column(Text)
    project_id = Column(BigInteger)
    segment = Column(String)
    v_nom = Column(BigInteger)
class EgoGridPfHvExtensionLoad(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_extension_load."""
    __tablename__ = 'ego_grid_pf_hv_extension_load'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'BE and NO'::character varying"))
    load_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    # Default -1: load convention, power drawn from the bus counts negative.
    sign = Column(Float(53), server_default=text("'-1'::integer"))
    e_annual = Column(Float(53))
class EgoGridPfHvExtensionSource(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_extension_source
    (energy sources with CO2 emission factors)."""
    __tablename__ = 'ego_grid_pf_hv_extension_source'
    __table_args__ = {'schema': 'model_draft'}
    source_id = Column(BigInteger,primary_key=True, nullable=False)
    name = Column(Text)
    co2_emissions = Column(Float(53))
    commentary = Column(Text)
class EgoGridPfHvExtensionStorage(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_extension_storage
    (storage units added by grid-extension projects)."""
    __tablename__ = 'ego_grid_pf_hv_extension_storage'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    storage_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text, server_default=text("'flexible'::text"))
    control = Column(Text, server_default=text("'PQ'::text"))
    p_nom = Column(Float(53), server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    p_min_pu_fixed = Column(Float(53), server_default=text("0"))
    p_max_pu_fixed = Column(Float(53), server_default=text("1"))
    sign = Column(Float(53), server_default=text("1"))
    source = Column(BigInteger)
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
    soc_initial = Column(Float(53))
    soc_cyclic = Column(Boolean, server_default=text("false"))
    max_hours = Column(Float(53))
    efficiency_store = Column(Float(53))
    efficiency_dispatch = Column(Float(53))
    standing_loss = Column(Float(53))
class EgoGridPfHvExtensionStoragePqSet(Base):
    """Declarative mapping of
    model_draft.ego_grid_pf_hv_extension_storage_pq_set
    (per-timestep setpoint arrays for extension storages)."""
    __tablename__ = 'ego_grid_pf_hv_extension_storage_pq_set'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    storage_id = Column(BigInteger, primary_key=True, nullable=False)
    temp_id = Column(Integer, primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p_min_pu = Column(ARRAY(Float(precision=53)))
    p_max_pu = Column(ARRAY(Float(precision=53)))
    soc_set = Column(ARRAY(Float(precision=53)))
    inflow = Column(ARRAY(Float(precision=53)))
class EgoGridPfHvExtensionTempResolution(Base):
    """Declarative mapping of
    model_draft.ego_grid_pf_hv_extension_temp_resolution
    (temporal resolution metadata keyed by temp_id)."""
    __tablename__ = 'ego_grid_pf_hv_extension_temp_resolution'
    __table_args__ = {'schema': 'model_draft'}
    temp_id = Column(BigInteger, primary_key=True, nullable=False)
    timesteps = Column(BigInteger)
    resolution = Column(Text)
    start_time = Column(DateTime)
class EgoGridPfHvExtensionTransformer(Base):
    """Declarative mapping of
    model_draft.ego_grid_pf_hv_extension_transformer
    (transformers added by grid-extension projects)."""
    __tablename__ = 'ego_grid_pf_hv_extension_transformer'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'extension_nep2035_confirmed'::character varying"))
    trafo_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Float(53), server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    tap_ratio = Column(Float(53))
    phase_shift = Column(Float(53))
    capital_cost = Column(Float(53), server_default=text("0"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
    project = Column(String)
    v0 = Column(Float(53), server_default=text("0"))
    v1 = Column(Float(53), server_default=text("0"))
    s0 = Column(Float(53), server_default=text("0"))
    s1 = Column(Float(53), server_default=text("0"))
    s_min = Column(Float(53), server_default=text("0"))
class EgoGridPfHvLine(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_line
    (HV power-flow lines; composite PK (scn_name, line_id))."""
    __tablename__ = 'ego_grid_pf_hv_line'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    line_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Numeric, server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    length = Column(Float(53))
    cables = Column(Integer)
    frequency = Column(Numeric)
    terrain_factor = Column(Float(53), server_default=text("1"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class EgoGridPfHvLink(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_link
    (controllable HV links; composite PK (scn_name, link_id))."""
    __tablename__ = 'ego_grid_pf_hv_link'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    link_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    efficiency = Column(Float(53), server_default=text("1"))
    marginal_cost = Column(Float(53), server_default=text("0"))
    p_nom = Column(Numeric, server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    length = Column(Float(53))
    terrain_factor = Column(Float(53), server_default=text("1"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class EgoGridPfHvLoad(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_load."""
    __tablename__ = 'ego_grid_pf_hv_load'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    load_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    # Default -1: load convention, power drawn from the bus counts negative.
    sign = Column(Float(53), server_default=text("'-1'::integer"))
    e_annual = Column(Float(53))
# Core Table (no ORM class; presumably the table has no primary key).
# NEP 2035 scenario buses; same columns as ego_grid_pf_hv_bus.
t_ego_grid_pf_hv_nep2035_bus = Table(
    'ego_grid_pf_hv_nep2035_bus', metadata,
    Column('scn_name', String, nullable=False, server_default=text("'Exogene Netzszenarien'::character varying")),
    Column('bus_id', BigInteger, nullable=False),
    Column('v_nom', Float(53)),
    Column('current_type', Text, server_default=text("'AC'::text")),
    Column('v_mag_pu_min', Float(53), server_default=text("0")),
    Column('v_mag_pu_max', Float(53)),
    Column('geom', Geometry('POINT', 4326)),
    schema='model_draft'
)
class EgoGridPfHvNep2035Link(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_nep2035_link
    (links of the NEP 2035 scenario)."""
    __tablename__ = 'ego_grid_pf_hv_nep2035_link'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Exogene Netzszenarien'::character varying"))
    link_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    # NOTE(review): 'efficency' is misspelled and typed BigInteger (the
    # sibling link tables use Float 'efficiency'); renaming would change the
    # mapped DB column, so it is kept as-is -- fix in the DB first.
    efficency = Column(BigInteger, server_default=text("1"))
    p_nom = Column(Numeric, server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    length = Column(Float(53))
    terrain_factor = Column(Float(53), server_default=text("1"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class EgoGridPfHvResultBus(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_bus
    (static bus data of a power-flow result run, keyed by result_id)."""
    __tablename__ = 'ego_grid_pf_hv_result_bus'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    bus_id = Column(BigInteger, primary_key=True, nullable=False)
    x = Column(Float(53))
    y = Column(Float(53))
    v_nom = Column(Float(53))
    current_type = Column(Text)
    v_mag_pu_min = Column(Float(53))
    v_mag_pu_max = Column(Float(53))
    geom = Column(Geometry('POINT', 4326))
class EgoGridPfHvResultBusT(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_bus_t
    (time-series bus results, one array element per snapshot)."""
    __tablename__ = 'ego_grid_pf_hv_result_bus_t'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    bus_id = Column(BigInteger, primary_key=True, nullable=False)
    v_mag_pu_set = Column(ARRAY(Float(precision=53)))
    p = Column(ARRAY(Float(precision=53)))
    q = Column(ARRAY(Float(precision=53)))
    v_mag_pu = Column(ARRAY(Float(precision=53)))
    v_ang = Column(ARRAY(Float(precision=53)))
    marginal_price = Column(ARRAY(Float(precision=53)))
class EgoGridPfHvResultGenerator(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_generator
    (static generator data of a result run)."""
    __tablename__ = 'ego_grid_pf_hv_result_generator'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    generator_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text)
    control = Column(Text)
    p_nom = Column(Float(53))
    p_nom_extendable = Column(Boolean)
    p_nom_min = Column(Float(53))
    p_nom_max = Column(Float(53))
    p_min_pu_fixed = Column(Float(53))
    p_max_pu_fixed = Column(Float(53))
    sign = Column(Float(53))
    source = Column(BigInteger)
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
    p_nom_opt = Column(Float(53))
class EgoGridPfHvResultGeneratorT(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_generator_t
    (time-series generator results)."""
    __tablename__ = 'ego_grid_pf_hv_result_generator_t'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    generator_id = Column(BigInteger, primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p_min_pu = Column(ARRAY(Float(precision=53)))
    p_max_pu = Column(ARRAY(Float(precision=53)))
    p = Column(ARRAY(Float(precision=53)))
    q = Column(ARRAY(Float(precision=53)))
    status = Column(ARRAY(BigInteger()))
class EgoGridPfHvResultLine(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_line
    (static line data of a result run, incl. per-unit values and the
    optimized rating s_nom_opt)."""
    __tablename__ = 'ego_grid_pf_hv_result_line'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    line_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    x = Column(Numeric)
    r = Column(Numeric)
    g = Column(Numeric)
    b = Column(Numeric)
    s_nom = Column(Numeric)
    s_nom_extendable = Column(Boolean)
    s_nom_min = Column(Float(53))
    s_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    length = Column(Float(53))
    cables = Column(Integer)
    frequency = Column(Numeric)
    terrain_factor = Column(Float(53), server_default=text("1"))
    x_pu = Column(Numeric)
    r_pu = Column(Numeric)
    g_pu = Column(Numeric)
    b_pu = Column(Numeric)
    s_nom_opt = Column(Numeric)
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class EgoGridPfHvResultLineT(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_line_t
    (time-series line flows at both ends, p0/q0 and p1/q1)."""
    __tablename__ = 'ego_grid_pf_hv_result_line_t'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    line_id = Column(BigInteger, primary_key=True, nullable=False)
    p0 = Column(ARRAY(Float(precision=53)))
    q0 = Column(ARRAY(Float(precision=53)))
    p1 = Column(ARRAY(Float(precision=53)))
    q1 = Column(ARRAY(Float(precision=53)))
class EgoGridPfHvResultLoad(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_load."""
    __tablename__ = 'ego_grid_pf_hv_result_load'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    load_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    sign = Column(Float(53))
    e_annual = Column(Float(53))
class EgoGridPfHvResultLoadT(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_load_t
    (time-series load results)."""
    __tablename__ = 'ego_grid_pf_hv_result_load_t'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    load_id = Column(BigInteger, primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p = Column(ARRAY(Float(precision=53)))
    q = Column(ARRAY(Float(precision=53)))
class EgoGridPfHvResultMeta(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_meta
    (metadata of a result run: scenario, solver, snapshot range, settings)."""
    __tablename__ = 'ego_grid_pf_hv_result_meta'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True)
    scn_name = Column(String)
    calc_date = Column(DateTime)
    user_name = Column(String)
    method = Column(String)
    start_snapshot = Column(Integer)
    end_snapshot = Column(Integer)
    snapshots = Column(ARRAY(DateTime()))
    solver = Column(String)
    # Solver/run configuration serialized as JSON.
    settings = Column(JSON)
    safe_results = Column(Boolean, server_default=text("false"))
class EgoGridPfHvResultStorage(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_storage
    (static storage data of a result run, incl. optimized p_nom_opt)."""
    __tablename__ = 'ego_grid_pf_hv_result_storage'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    storage_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text)
    control = Column(Text)
    p_nom = Column(Float(53))
    p_nom_extendable = Column(Boolean)
    p_nom_min = Column(Float(53))
    p_nom_max = Column(Float(53))
    p_min_pu_fixed = Column(Float(53))
    p_max_pu_fixed = Column(Float(53))
    sign = Column(Float(53))
    source = Column(BigInteger)
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
    soc_initial = Column(Float(53))
    soc_cyclic = Column(Boolean, server_default=text("false"))
    max_hours = Column(Float(53))
    efficiency_store = Column(Float(53))
    efficiency_dispatch = Column(Float(53))
    standing_loss = Column(Float(53))
    p_nom_opt = Column(Float(53))
class EgoGridPfHvResultStorageT(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_storage_t
    (time-series storage results incl. state of charge and spill)."""
    __tablename__ = 'ego_grid_pf_hv_result_storage_t'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    storage_id = Column(BigInteger, primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p_min_pu = Column(ARRAY(Float(precision=53)))
    p_max_pu = Column(ARRAY(Float(precision=53)))
    soc_set = Column(ARRAY(Float(precision=53)))
    inflow = Column(ARRAY(Float(precision=53)))
    p = Column(ARRAY(Float(precision=53)))
    q = Column(ARRAY(Float(precision=53)))
    state_of_charge = Column(ARRAY(Float(precision=53)))
    spill = Column(ARRAY(Float(precision=53)))
class EgoGridPfHvResultTransformer(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_transformer
    (static transformer data of a result run)."""
    __tablename__ = 'ego_grid_pf_hv_result_transformer'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    trafo_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    x = Column(Numeric)
    r = Column(Numeric)
    g = Column(Numeric)
    b = Column(Numeric)
    s_nom = Column(Numeric)
    s_nom_extendable = Column(Boolean)
    s_nom_min = Column(Float(53))
    s_nom_max = Column(Float(53))
    tap_ratio = Column(Float(53))
    phase_shift = Column(Float(53))
    capital_cost = Column(Float(53))
    x_pu = Column(Numeric)
    r_pu = Column(Numeric)
    g_pu = Column(Numeric)
    b_pu = Column(Numeric)
    s_nom_opt = Column(Numeric)
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class EgoGridPfHvResultTransformerT(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_result_transformer_t
    (time-series transformer flows at both ends)."""
    __tablename__ = 'ego_grid_pf_hv_result_transformer_t'
    __table_args__ = {'schema': 'model_draft'}
    result_id = Column(BigInteger, primary_key=True, nullable=False)
    trafo_id = Column(BigInteger, primary_key=True, nullable=False)
    p0 = Column(ARRAY(Float(precision=53)))
    q0 = Column(ARRAY(Float(precision=53)))
    p1 = Column(ARRAY(Float(precision=53)))
    q1 = Column(ARRAY(Float(precision=53)))
class EgoGridPfHvScenarioSetting(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_scenario_settings
    (per-scenario references to the component tables used)."""
    __tablename__ = 'ego_grid_pf_hv_scenario_settings'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, server_default=text("'Status Quo'::character varying"))
    bus = Column(String)
    bus_v_mag_set = Column(String)
    generator = Column(String)
    generator_pq_set = Column(String)
    line = Column(String)
    load = Column(String)
    load_pq_set = Column(String)
    storage = Column(String)
    storage_pq_set = Column(String)
    temp_resolution = Column(String)
    transformer = Column(String)
class EgoGridPfHvSource(Base):
    """Declarative mapping of model_draft.ego_grid_pf_hv_source
    (energy sources with CO2 emission factors)."""
    __tablename__ = 'ego_grid_pf_hv_source'
    __table_args__ = {'schema': 'model_draft'}
    source_id = Column(BigInteger, primary_key=True)
    name = Column(Text)
    co2_emissions = Column(Float(53))
    commentary = Column(Text)
# Core Table (no ORM class; presumably the table has no primary key).
# Storage units; same columns as ego_grid_pf_hv_extension_storage.
t_ego_grid_pf_hv_storage_eins = Table(
    'ego_grid_pf_hv_storage_eins', metadata,
    Column('scn_name', String, nullable=False, server_default=text("'Status Quo'::character varying")),
    Column('storage_id', BigInteger, nullable=False),
    Column('bus', BigInteger),
    Column('dispatch', Text, server_default=text("'flexible'::text")),
    Column('control', Text, server_default=text("'PQ'::text")),
    Column('p_nom', Float(53), server_default=text("0")),
    Column('p_nom_extendable', Boolean, server_default=text("false")),
    Column('p_nom_min', Float(53), server_default=text("0")),
    Column('p_nom_max', Float(53)),
    Column('p_min_pu_fixed', Float(53), server_default=text("0")),
    Column('p_max_pu_fixed', Float(53), server_default=text("1")),
    Column('sign', Float(53), server_default=text("1")),
    Column('source', BigInteger),
    Column('marginal_cost', Float(53)),
    Column('capital_cost', Float(53)),
    Column('efficiency', Float(53)),
    Column('soc_initial', Float(53)),
    Column('soc_cyclic', Boolean, server_default=text("false")),
    Column('max_hours', Float(53)),
    Column('efficiency_store', Float(53)),
    Column('efficiency_dispatch', Float(53)),
    Column('standing_loss', Float(53)),
    schema='model_draft'
)
class EgoGridPfHvTempResolution(Base):
    """Mapped table ``model_draft.ego_grid_pf_hv_temp_resolution``.

    Temporal-resolution descriptor for HV time series: number of
    ``timesteps``, a textual ``resolution`` label and the series start time.
    """
    __tablename__ = 'ego_grid_pf_hv_temp_resolution'
    __table_args__ = {'schema': 'model_draft'}

    temp_id = Column(BigInteger, primary_key=True)
    timesteps = Column(BigInteger, nullable=False)
    resolution = Column(Text)
    start_time = Column(DateTime)
class EgoGridPfHvTransformer(Base):
    """Mapped table ``model_draft.ego_grid_pf_hv_transformer``.

    HV transformer branch per scenario (composite PK ``scn_name`` +
    ``trafo_id``) with electrical parameters (x, r, g, b, s_nom, ...) and
    two geometry columns (EPSG:4326).
    """
    __tablename__ = 'ego_grid_pf_hv_transformer'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    trafo_id = Column(BigInteger, primary_key=True, nullable=False)
    # Endpoint bus ids; indexed for join/lookup performance.
    bus0 = Column(BigInteger, index=True)
    bus1 = Column(BigInteger, index=True)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Float(53), server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    tap_ratio = Column(Float(53))
    phase_shift = Column(Float(53))
    capital_cost = Column(Float(53), server_default=text("0"))
    # Full routed geometry vs. simplified topology line.
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class EgoGridPfMvBus(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_bus``.

    MV power-flow bus; note MV ids are strings (vs. BigInteger on the HV
    side) and the PK is composite (``bus_id`` + ``scn_name``).
    """
    __tablename__ = 'ego_grid_pf_mv_bus'
    __table_args__ = {'schema': 'model_draft'}

    bus_id = Column(String(25), primary_key=True, nullable=False)
    v_nom = Column(Float(53))
    v_mag_pu_min = Column(Float(53), server_default=text("0"))
    v_mag_pu_max = Column(Float(53))
    geom = Column(Geometry('POINT', 4326))
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    grid_id = Column(Integer)
class EgoGridPfMvBusVMagSet(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_bus_v_mag_set``.

    Voltage-magnitude set-point time series per MV bus, stored as a
    float array (one entry per timestep of ``temp_id`` -- TODO confirm).
    """
    __tablename__ = 'ego_grid_pf_mv_bus_v_mag_set'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    bus_id = Column(String(25), primary_key=True, nullable=False)
    temp_id = Column(Integer, primary_key=True, nullable=False)
    v_mag_pu_set = Column(ARRAY(Float(precision=53)))
    grid_id = Column(Integer)
class EgoGridPfMvGenerator(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_generator``.

    MV generator per scenario: attached bus, control mode (default 'PQ'),
    nominal power and fixed per-unit dispatch limits.
    """
    __tablename__ = 'ego_grid_pf_mv_generator'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    generator_id = Column(String(25), primary_key=True, nullable=False)
    bus = Column(String(25))
    control = Column(Text, server_default=text("'PQ'::text"))
    p_nom = Column(Float(53), server_default=text("0"))
    p_min_pu_fixed = Column(Float(53), server_default=text("0"))
    p_max_pu_fixed = Column(Float(53), server_default=text("1"))
    sign = Column(Float(53), server_default=text("1"))
    grid_id = Column(Integer)
class EgoGridPfMvGeneratorPqSet(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_generator_pq_set``.

    Active/reactive power set-point time series (float arrays) per MV
    generator, scenario and temporal resolution.
    """
    __tablename__ = 'ego_grid_pf_mv_generator_pq_set'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    generator_id = Column(String(25), primary_key=True, nullable=False)
    temp_id = Column(Integer, primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p_min_pu = Column(ARRAY(Float(precision=53)))
    p_max_pu = Column(ARRAY(Float(precision=53)))
    grid_id = Column(Integer)
class EgoGridPfMvLine(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_line``.

    MV line branch per scenario with impedance parameters, rating
    ``s_nom``, physical ``length``/``cables`` and a LINESTRING geometry.
    """
    __tablename__ = 'ego_grid_pf_mv_line'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    line_id = Column(String(25), primary_key=True, nullable=False)
    bus0 = Column(String(25))
    bus1 = Column(String(25))
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Numeric, server_default=text("0"))
    length = Column(Float(53))
    cables = Column(Integer)
    geom = Column(Geometry('LINESTRING', 4326))
    grid_id = Column(Integer)
class EgoGridPfMvLoad(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_load``.

    MV load attached to a bus per scenario.
    """
    __tablename__ = 'ego_grid_pf_mv_load'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    load_id = Column(String(25), primary_key=True, nullable=False)
    bus = Column(String(25))
    # DB default is the SQL literal '-1'::integer -- presumably the sign
    # convention marking loads as consumption; verify against the DB DDL.
    sign = Column(Float(53), server_default=text("'-1'::integer"))
    grid_id = Column(Integer)
class EgoGridPfMvLoadPqSet(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_load_pq_set``.

    Active/reactive power time series (float arrays) per MV load,
    scenario and temporal resolution.
    """
    __tablename__ = 'ego_grid_pf_mv_load_pq_set'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    load_id = Column(String(25), primary_key=True, nullable=False)
    temp_id = Column(Integer, primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    grid_id = Column(Integer)
class EgoGridPfMvResBus(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_res_bus``.

    Power-flow *result* per MV bus: voltage-magnitude time series
    (``res`` tables hold solver output -- TODO confirm with the writer).
    """
    __tablename__ = 'ego_grid_pf_mv_res_bus'
    __table_args__ = {'schema': 'model_draft'}

    bus_id = Column(String(25), primary_key=True)
    v_mag_pu = Column(ARRAY(Float(precision=53)))
class EgoGridPfMvResLine(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_res_line``.

    Power-flow result per MV line: P/Q time series at both line ends
    (suffix 0/1 presumably matches ``bus0``/``bus1`` -- verify).
    """
    __tablename__ = 'ego_grid_pf_mv_res_line'
    __table_args__ = {'schema': 'model_draft'}

    line_id = Column(String(25), primary_key=True)
    p0 = Column(ARRAY(Float(precision=53)))
    p1 = Column(ARRAY(Float(precision=53)))
    q0 = Column(ARRAY(Float(precision=53)))
    q1 = Column(ARRAY(Float(precision=53)))
class EgoGridPfMvResTransformer(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_res_transformer``.

    Power-flow result per MV transformer: P/Q time series at both sides.
    """
    __tablename__ = 'ego_grid_pf_mv_res_transformer'
    __table_args__ = {'schema': 'model_draft'}

    trafo_id = Column(String(25), primary_key=True)
    p0 = Column(ARRAY(Float(precision=53)))
    p1 = Column(ARRAY(Float(precision=53)))
    q0 = Column(ARRAY(Float(precision=53)))
    q1 = Column(ARRAY(Float(precision=53)))
class EgoGridPfMvScenarioSetting(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_scenario_settings``.

    MV counterpart of the HV scenario-settings table: one row per
    scenario, one string column per grid component.
    """
    __tablename__ = 'ego_grid_pf_mv_scenario_settings'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, server_default=text("'Status Quo'::character varying"))
    bus = Column(String)
    bus_v_mag_set = Column(String)
    generator = Column(String)
    generator_pq_set = Column(String)
    line = Column(String)
    load = Column(String)
    load_pq_set = Column(String)
    storage = Column(String)
    storage_pq_set = Column(String)
    temp_resolution = Column(String)
    transformer = Column(String)
class EgoGridPfMvSource(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_source``.

    MV energy-source lookup (string PK, unlike the BigInteger HV variant).
    """
    __tablename__ = 'ego_grid_pf_mv_source'
    __table_args__ = {'schema': 'model_draft'}

    source_id = Column(String(25), primary_key=True)
    name = Column(Text)
    co2_emissions = Column(Float(53))
    commentary = Column(Text)
class EgoGridPfMvTempResolution(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_temp_resolution``.

    Temporal-resolution descriptor for MV time series (adds ``grid_id``
    compared to the HV variant).
    """
    __tablename__ = 'ego_grid_pf_mv_temp_resolution'
    __table_args__ = {'schema': 'model_draft'}

    temp_id = Column(BigInteger, primary_key=True)
    timesteps = Column(BigInteger, nullable=False)
    resolution = Column(Text)
    start_time = Column(DateTime)
    grid_id = Column(Integer)
class EgoGridPfMvTransformer(Base):
    """Mapped table ``model_draft.ego_grid_pf_mv_transformer``.

    MV transformer branch per scenario; geometry is a POINT here (the
    station location), unlike the HV transformer's line geometries.
    """
    __tablename__ = 'ego_grid_pf_mv_transformer'
    __table_args__ = {'schema': 'model_draft'}

    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    trafo_id = Column(String(25), primary_key=True, nullable=False)
    bus0 = Column(String(25))
    bus1 = Column(String(25))
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Float(53), server_default=text("0"))
    tap_ratio = Column(Float(53))
    geom = Column(Geometry('POINT', 4326))
    grid_id = Column(Integer)
class EgoGridPpEntsoeBus(Base):
    """Mapped table ``model_draft.ego_grid_pp_entsoe_bus``.

    Bus from the ENTSO-E grid data set; ``geom`` declares no SRID
    (unspecified in the DB, unlike most tables in this schema).
    """
    __tablename__ = 'ego_grid_pp_entsoe_bus'
    __table_args__ = {'schema': 'model_draft'}

    bus_id = Column(BigInteger, primary_key=True)
    station_id = Column(BigInteger)
    voltage = Column(Float(53))
    dc = Column(Boolean)
    symbol = Column(String)
    country = Column(Text)
    under_construction = Column(Boolean)
    geom = Column(Geometry('POINT'))
class EgoGridPpEntsoeLine(Base):
    """Mapped table ``model_draft.ego_grid_pp_entsoe_line``.

    Transmission line/link from the ENTSO-E grid data set.
    """
    __tablename__ = 'ego_grid_pp_entsoe_line'
    __table_args__ = {'schema': 'model_draft'}

    link_id = Column(BigInteger, primary_key=True)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    voltage = Column(Float(53))
    # NOTE(review): 'circiuts' looks like a typo for 'circuits', presumably
    # mirroring the actual DB column name -- do not rename without a migration.
    circiuts = Column(BigInteger)
    dc = Column(Boolean)
    underground = Column(Boolean)
    under_construction = Column(Boolean)
    country1 = Column(String)
    country2 = Column(String)
    geom = Column(Geometry('LINESTRING'))
class EgoHvmvSubstation(Base):
    """Mapped table ``model_draft.ego_hvmv_substation``.

    HV/MV substation with OSM-derived attributes; versioned via the
    composite PK (``version`` + ``subst_id``). Carries the location three
    ways: lat/lon floats, ``point`` in EPSG:4326 and ``geom`` in EPSG:3035.
    """
    __tablename__ = 'ego_hvmv_substation'
    __table_args__ = {'schema': 'model_draft'}

    version = Column(Text, primary_key=True, nullable=False)
    subst_id = Column(SmallInteger, primary_key=True, nullable=False)
    subst_name = Column(Text)
    ags_0 = Column(Text)
    voltage = Column(Text)
    power_type = Column(Text)
    substation = Column(Text)
    osm_id = Column(Text)
    osm_www = Column(Text)
    frequency = Column(Text)
    ref = Column(Text)
    operator = Column(Text)
    dbahn = Column(Text)
    status = Column(SmallInteger)
    otg_id = Column(BigInteger)
    lat = Column(Float(53))
    lon = Column(Float(53))
    point = Column(Geometry('POINT', 4326))
    # Untyped geometry: the DB column accepts any geometry type/SRID.
    polygon = Column(Geometry)
    geom = Column(Geometry('POINT', 3035))
class EgoLanduseIndustry(Base):
    """Mapped table ``model_draft.ego_landuse_industry``.

    Industrial land-use polygon (OSM-derived, HSTORE ``tags``) with three
    indexed representative points and demand figures
    (``consumption``/``peak_load``; units not stated -- verify).
    """
    __tablename__ = 'ego_landuse_industry'
    __table_args__ = {'schema': 'model_draft'}

    gid = Column(Integer, primary_key=True)
    osm_id = Column(Integer)
    name = Column(Text)
    sector = Column(Integer)
    area_ha = Column(Float(53))
    tags = Column(HSTORE(Text()))
    vg250 = Column(Text)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
    geom_centroid = Column(Geometry('POINT', 3035), index=True)
    geom_surfacepoint = Column(Geometry('POINT', 3035), index=True)
    geom_centre = Column(Geometry('POINT', 3035), index=True)
    nuts = Column(String(5))
    consumption = Column(Numeric)
    peak_load = Column(Numeric)
class EgoLattice1km(Base):
    """Mapped table ``model_draft.ego_lattice_1km``.

    1 km point lattice (EPSG:3035) tagged with a substation id.
    """
    __tablename__ = 'ego_lattice_1km'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True)
    geom = Column(Geometry('POINT', 3035))
    subst_id = Column(BigInteger)
class EgoLattice2km(Base):
    """Mapped table ``model_draft.ego_lattice_2km``.

    2 km point lattice (EPSG:3035) tagged with a substation id.
    """
    __tablename__ = 'ego_lattice_2km'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True)
    geom = Column(Geometry('POINT', 3035))
    subst_id = Column(BigInteger)
class EgoLattice2pt5km(Base):
    """Mapped table ``model_draft.ego_lattice_2pt5km``.

    2.5 km point lattice (EPSG:3035) tagged with a substation id.
    """
    __tablename__ = 'ego_lattice_2pt5km'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True)
    geom = Column(Geometry('POINT', 3035))
    subst_id = Column(BigInteger)
class EgoLattice360mLv(Base):
    """Mapped table ``model_draft.ego_lattice_360m_lv``.

    360 m polygon lattice for the LV level; PK is fed by a DB sequence.
    """
    __tablename__ = 'ego_lattice_360m_lv'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_lattice_360m_lv_id_seq'::regclass)"))
    la_id = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoLattice500m(Base):
    """Mapped table ``model_draft.ego_lattice_500m``.

    500 m lattice cell: bounding polygon ``geom_box`` plus its point
    ``geom`` (both EPSG:3035, both spatially indexed).
    """
    __tablename__ = 'ego_lattice_500m'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_lattice_500m_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_type = Column(Text)
    geom_box = Column(Geometry('POLYGON', 3035), index=True)
    geom = Column(Geometry('POINT', 3035), index=True)
# Core ``Table`` for ``model_draft.ego_lattice_500m_la_mview`` -- judging by
# the ``_mview`` suffix, a materialized view over the 500 m lattice (no PK,
# hence no ORM class). TODO confirm the view definition in the DB.
t_ego_lattice_500m_la_mview = Table(
    'ego_lattice_500m_la_mview', metadata,
    Column('id', Integer),
    Column('subst_id', Integer),
    Column('area_type', Text),
    Column('geom_box', Geometry('POLYGON', 3035)),
    Column('geom', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# Core ``Table`` for ``model_draft.ego_lattice_500m_out_mview`` -- same
# column set as the other 500 m lattice mviews; presumably a filtered
# materialized view of ``ego_lattice_500m`` (verify in the DB).
t_ego_lattice_500m_out_mview = Table(
    'ego_lattice_500m_out_mview', metadata,
    Column('id', Integer),
    Column('subst_id', Integer),
    Column('area_type', Text),
    Column('geom_box', Geometry('POLYGON', 3035)),
    Column('geom', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# Core ``Table`` for ``model_draft.ego_lattice_500m_wpa_mview`` -- another
# filtered materialized view of the 500 m lattice (verify in the DB).
t_ego_lattice_500m_wpa_mview = Table(
    'ego_lattice_500m_wpa_mview', metadata,
    Column('id', Integer),
    Column('subst_id', Integer),
    Column('area_type', Text),
    Column('geom_box', Geometry('POLYGON', 3035)),
    Column('geom', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# Core ``Table`` for ``model_draft.ego_lattice_500m_x_mview`` -- another
# filtered materialized view of the 500 m lattice (verify in the DB).
t_ego_lattice_500m_x_mview = Table(
    'ego_lattice_500m_x_mview', metadata,
    Column('id', Integer),
    Column('subst_id', Integer),
    Column('area_type', Text),
    Column('geom_box', Geometry('POLYGON', 3035)),
    Column('geom', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
class EgoLattice50m(Base):
    """Mapped table ``model_draft.ego_lattice_50m``.

    50 m lattice cell, same column layout as ``EgoLattice500m``.
    """
    __tablename__ = 'ego_lattice_50m'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_lattice_50m_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_type = Column(Text)
    geom_box = Column(Geometry('POLYGON', 3035), index=True)
    geom = Column(Geometry('POINT', 3035), index=True)
# Core ``Table`` for ``model_draft.ego_lattice_50m_la_mview`` -- presumably a
# materialized view of the 50 m lattice (no PK, hence no ORM class).
t_ego_lattice_50m_la_mview = Table(
    'ego_lattice_50m_la_mview', metadata,
    Column('id', Integer),
    Column('subst_id', Integer),
    Column('area_type', Text),
    Column('geom_box', Geometry('POLYGON', 3035)),
    Column('geom', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
class EgoLoadarea(Base):
    """Mapped table ``model_draft.ego_loadarea``.

    Versioned load area (composite PK ``version`` + ``id``) with census
    (``zensus_*``) and IOER density statistics plus per-sector area,
    share, count and consumption figures for the residential, retail,
    industrial and agricultural sectors, and four geometry columns
    (polygon + three representative points, all EPSG:3035).
    """
    __tablename__ = 'ego_loadarea'
    __table_args__ = {'schema': 'model_draft'}

    version = Column(Text, primary_key=True, nullable=False)
    id = Column(Integer, primary_key=True, nullable=False)
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    otg_id = Column(Integer)
    un_id = Column(Integer)
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Numeric)
    ioer_sum = Column(Numeric)
    ioer_count = Column(Integer)
    ioer_density = Column(Numeric)
    sector_area_residential = Column(Numeric)
    sector_area_retail = Column(Numeric)
    sector_area_industrial = Column(Numeric)
    sector_area_agricultural = Column(Numeric)
    sector_area_sum = Column(Numeric)
    sector_share_residential = Column(Numeric)
    sector_share_retail = Column(Numeric)
    sector_share_industrial = Column(Numeric)
    sector_share_agricultural = Column(Numeric)
    sector_share_sum = Column(Numeric)
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    sector_consumption_residential = Column(Numeric)
    sector_consumption_retail = Column(Numeric)
    sector_consumption_industrial = Column(Numeric)
    sector_consumption_agricultural = Column(Numeric)
    sector_consumption_sum = Column(Numeric)
    geom_centroid = Column(Geometry('POINT', 3035))
    geom_surfacepoint = Column(Geometry('POINT', 3035))
    geom_centre = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035))
class EgoMvGriddistrict(Base):
    """Mapped table ``model_draft.ego_mv_griddistrict``.

    Versioned MV grid district polygon, keyed by substation
    (composite PK ``version`` + ``subst_id``; the latter sequence-backed).
    """
    __tablename__ = 'ego_mv_griddistrict'
    __table_args__ = {'schema': 'model_draft'}

    version = Column(Text, primary_key=True, nullable=False)
    subst_id = Column(Integer, primary_key=True, nullable=False, server_default=text("nextval('model_draft.ego_mv_griddistrict_subst_id_seq'::regclass)"))
    subst_sum = Column(Text)
    area_ha = Column(Numeric)
    geom_type = Column(Text)
    geom = Column(Geometry('MULTIPOLYGON', 3035))
class EgoNeighboursOffshorePoint(Base):
    """Mapped table ``model_draft.ego_neighbours_offshore_point``.

    One offshore point per neighbouring country (PK ``cntr_id``), linked
    to a coastdat weather cell id.
    """
    __tablename__ = 'ego_neighbours_offshore_point'
    __table_args__ = {'schema': 'model_draft'}

    cntr_id = Column(Text, primary_key=True)
    coastdat_id = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326))
class EgoOsmAgriculturePerMvgd(Base):
    """Mapped table ``model_draft.ego_osm_agriculture_per_mvgd``.

    OSM-derived agricultural polygon clipped per MV grid district
    (``subst_id``) with its area in hectares.
    """
    __tablename__ = 'ego_osm_agriculture_per_mvgd'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_agriculture_per_mvgd_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoOsmDeuStreetStreetcrossing(Base):
    """Mapped table ``model_draft.ego_osm_deu_street_streetcrossing``.

    Street-crossing geometry extracted from OSM (untyped geometry column:
    type/SRID not constrained in the DB).
    """
    __tablename__ = 'ego_osm_deu_street_streetcrossing'
    __table_args__ = {'schema': 'model_draft'}

    geom = Column(Geometry, index=True)
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_deu_street_streetcrossing_id_seq'::regclass)"))
class EgoOsmSectorPerGriddistrict1Residential(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_griddistrict_1_residential``.

    Residential sector polygon per MV grid district (sector 1 of 4).
    """
    __tablename__ = 'ego_osm_sector_per_griddistrict_1_residential'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_griddistrict_1_residential_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoOsmSectorPerGriddistrict2Retail(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_griddistrict_2_retail``.

    Retail sector polygon per MV grid district (sector 2 of 4).
    """
    __tablename__ = 'ego_osm_sector_per_griddistrict_2_retail'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_griddistrict_2_retail_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoOsmSectorPerGriddistrict3Industrial(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_griddistrict_3_industrial``.

    Industrial sector polygon per MV grid district (sector 3 of 4).
    """
    __tablename__ = 'ego_osm_sector_per_griddistrict_3_industrial'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_griddistrict_3_industrial_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoOsmSectorPerGriddistrict4Agricultural(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_griddistrict_4_agricultural``.

    Agricultural sector polygon per MV grid district (sector 4 of 4);
    unlike sectors 1-3 this one also stores ``subst_id`` and ``area_ha``.
    """
    __tablename__ = 'ego_osm_sector_per_griddistrict_4_agricultural'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_griddistrict_4_agricultural_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
    subst_id = Column(Integer)
    area_ha = Column(Float(53))
class EgoOsmSectorPerLvgd1Residential(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_lvgd_1_residential``.

    Residential sector polygon per LV grid district (sector 1 of 4).
    """
    __tablename__ = 'ego_osm_sector_per_lvgd_1_residential'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_lvgd_1_residential_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoOsmSectorPerLvgd2Retail(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_lvgd_2_retail``.

    Retail sector polygon per LV grid district (sector 2 of 4).
    """
    __tablename__ = 'ego_osm_sector_per_lvgd_2_retail'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_lvgd_2_retail_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoOsmSectorPerLvgd3Industrial(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_lvgd_3_industrial``.

    Industrial sector polygon per LV grid district (sector 3 of 4).
    """
    __tablename__ = 'ego_osm_sector_per_lvgd_3_industrial'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_lvgd_3_industrial_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoOsmSectorPerLvgd4Agricultural(Base):
    """Mapped table ``model_draft.ego_osm_sector_per_lvgd_4_agricultural``.

    Agricultural sector polygon per LV grid district (sector 4 of 4).
    """
    __tablename__ = 'ego_osm_sector_per_lvgd_4_agricultural'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_osm_sector_per_lvgd_4_agricultural_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035), index=True)
# Core ``Table`` for the materialized view (per the ``_mview`` suffix --
# verify) of municipality (VG250 'gem') polygons with hole bookkeeping:
# ``path``/``is_hole``/``count_hole`` presumably come from decomposing
# multipolygon rings. ``id`` is unique but not a PK, hence no ORM class.
t_ego_political_boundary_bkg_vg250_6_gem_hole_mview = Table(
    'ego_political_boundary_bkg_vg250_6_gem_hole_mview', metadata,
    Column('id', Integer, unique=True),
    Column('old_id', Integer),
    Column('gen', Text),
    Column('bez', Text),
    Column('bem', Text),
    Column('nuts', String(5)),
    Column('rs_0', String(12)),
    Column('ags_0', String(12)),
    Column('area_ha', Numeric),
    Column('count_hole', Integer),
    Column('path', ARRAY(Integer())),
    Column('is_hole', Boolean),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
class EgoPoliticalBoundaryHvmvSubstPerGem(Base):
    """Mapped table ``model_draft.ego_political_boundary_hvmv_subst_per_gem``.

    Municipality polygon joined with HV/MV substation statistics
    (``subst_sum``/``subst_type``); shares the hole-bookkeeping columns of
    the vg250 gem-hole view above.
    """
    __tablename__ = 'ego_political_boundary_hvmv_subst_per_gem'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True)
    old_id = Column(Integer)
    gen = Column(Text)
    bez = Column(Text)
    bem = Column(Text)
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    area_ha = Column(Numeric)
    count_hole = Column(Integer)
    path = Column(ARRAY(Integer()))
    is_hole = Column(Boolean)
    geom = Column(Geometry('POLYGON', 3035), index=True)
    subst_sum = Column(Integer)
    subst_type = Column(Integer)
# Core ``Table`` for a materialized view (``_mview`` -- verify) slicing the
# hvmv_subst_per_gem table; the ``_1``/``_2``/``_3`` variants presumably
# filter by ``subst_type``. No PK, hence no ORM class.
t_ego_political_boundary_hvmv_subst_per_gem_1_mview = Table(
    'ego_political_boundary_hvmv_subst_per_gem_1_mview', metadata,
    Column('id', Integer, unique=True),
    Column('gen', Text),
    Column('bez', Text),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    schema='model_draft'
)
# Core ``Table`` for the second hvmv_subst_per_gem materialized view
# (same column set as the ``_1`` variant; see note there).
t_ego_political_boundary_hvmv_subst_per_gem_2_mview = Table(
    'ego_political_boundary_hvmv_subst_per_gem_2_mview', metadata,
    Column('id', Integer, unique=True),
    Column('gen', Text),
    Column('bez', Text),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    schema='model_draft'
)
# Core ``Table`` for the third hvmv_subst_per_gem materialized view
# (same column set as the ``_1`` variant; see note there).
t_ego_political_boundary_hvmv_subst_per_gem_3_mview = Table(
    'ego_political_boundary_hvmv_subst_per_gem_3_mview', metadata,
    Column('id', Integer, unique=True),
    Column('gen', Text),
    Column('bez', Text),
    Column('ags_0', String(12)),
    Column('subst_type', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    schema='model_draft'
)
class EgoPoliticalBoundaryHvmvSubstPerGem3Nn(Base):
    """Mapped table ``model_draft.ego_political_boundary_hvmv_subst_per_gem_3_nn``.

    Nearest-neighbour ('nn' -- presumably) assignment of a substation to
    each municipality: substation point, ``distance`` (units unstated --
    verify) and the municipality multipolygon.
    """
    __tablename__ = 'ego_political_boundary_hvmv_subst_per_gem_3_nn'
    __table_args__ = {'schema': 'model_draft'}

    mun_id = Column(Integer, primary_key=True)
    mun_ags_0 = Column(String(12))
    subst_ags_0 = Column(Text)
    subst_id = Column(Integer)
    subst_type = Column(Integer)
    geom_sub = Column(Geometry('POINT', 3035), index=True)
    distance = Column(Float(53))
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
# Core ``Table`` holding connector LINESTRINGs for the nearest-neighbour
# substation assignment (``id`` unique but no PK, hence no ORM class).
t_ego_political_boundary_hvmv_subst_per_gem_3_nn_line = Table(
    'ego_political_boundary_hvmv_subst_per_gem_3_nn_line', metadata,
    Column('id', BigInteger, unique=True),
    Column('nn_id', Integer),
    Column('subst_id', Integer),
    Column('geom_centre', Geometry('POINT', 3035), index=True),
    Column('geom', Geometry('LINESTRING', 3035), index=True),
    schema='model_draft'
)
# Core ``Table`` with the per-substation union of nearest-neighbour
# municipality polygons (one MULTIPOLYGON per unique ``subst_id``).
t_ego_political_boundary_hvmv_subst_per_gem_3_nn_union = Table(
    'ego_political_boundary_hvmv_subst_per_gem_3_nn_union', metadata,
    Column('subst_id', Integer, unique=True),
    Column('subst_type', Integer),
    Column('geom', Geometry('MULTIPOLYGON', 3035), index=True),
    schema='model_draft'
)
# NOTE(review): class name 'EgoPowerClas' (missing 's') is how the code
# generator singularized 'ego_power_class'; renaming it would break callers.
class EgoPowerClas(Base):
    """Mapped table ``model_draft.ego_power_class``.

    Wind-turbine power class: capacity band (``lower_limit``/``upper_limit``),
    turbine type ``wea``, hub height and rotor diameter (units unstated --
    presumably kW and metres; verify).
    """
    __tablename__ = 'ego_power_class'
    __table_args__ = {'schema': 'model_draft'}

    power_class_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_power_class_power_class_id_seq'::regclass)"))
    lower_limit = Column(Float(53))
    upper_limit = Column(Float(53))
    wea = Column(Text)
    h_hub = Column(Float(53))
    d_rotor = Column(Float(53))
class EgoRenewableFeedin(Base):
    """Mapped table ``model_draft.ego_renewable_feedin``.

    Renewable feed-in time series (float array) keyed by weather scenario,
    weather cell ``w_id``, source, weather year and power class.
    """
    __tablename__ = 'ego_renewable_feedin'
    __table_args__ = {'schema': 'model_draft'}

    weather_scenario_id = Column(Integer, primary_key=True, nullable=False)
    w_id = Column(Integer, primary_key=True, nullable=False)
    source = Column(Text, primary_key=True, nullable=False)
    weather_year = Column(Integer, primary_key=True, nullable=False)
    power_class = Column(Integer, primary_key=True, nullable=False)
    feedin = Column(ARRAY(Float(precision=53)))
class EgoRenewableFeedinV031(Base):
    """Mapped table ``model_draft.ego_renewable_feedin_v031``.

    Version-pinned ('v031' -- presumably data-release 0.3.1; verify) copy
    of ``ego_renewable_feedin`` with an identical column layout.
    """
    __tablename__ = 'ego_renewable_feedin_v031'
    __table_args__ = {'schema': 'model_draft'}

    weather_scenario_id = Column(Integer, primary_key=True, nullable=False)
    w_id = Column(Integer, primary_key=True, nullable=False)
    source = Column(Text, primary_key=True, nullable=False)
    weather_year = Column(Integer, primary_key=True, nullable=False)
    power_class = Column(Integer, primary_key=True, nullable=False)
    feedin = Column(ARRAY(Float(precision=53)))
# Core ``Table`` for a materialized view (``_mview`` -- verify) of renewable
# power-plant register entries: capacities, location (lon/lat plus a 4326
# POINT), EEG/TSO/DSO identifiers and data-provenance columns.
t_ego_renewable_powerplant_eaa_mview = Table(
    'ego_renewable_powerplant_eaa_mview', metadata,
    Column('id', BigInteger, unique=True),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    schema='model_draft'
)
# Core ``Table`` for a materialized view aggregating feed-in per hour,
# generation type and scenario (renpassGIS output -- presumably; verify).
t_ego_renpassgis_simple_feedin_mview = Table(
    'ego_renpassgis_simple_feedin_mview', metadata,
    Column('hour', BigInteger),
    Column('generation_type', Text),
    Column('scenario', Text),
    Column('total_cap_feedin', Numeric),
    schema='model_draft'
)
# Core ``Table`` mapping renewable power plants to coastdat weather cells
# (plain id-to-id link table; no PK, hence no ORM class).
t_ego_res_powerplant_costdat_gid = Table(
    'ego_res_powerplant_costdat_gid', metadata,
    Column('id', Integer),
    Column('coastdat_gid', BigInteger),
    schema='model_draft'
)
class EgoScenario(Base):
    """Mapped table ``model_draft.ego_scenario``.

    Scenario/version registry (composite PK ``id`` + ``version``) with a
    release flag and creation timestamp.
    """
    __tablename__ = 'ego_scenario'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, nullable=False, server_default=text("nextval('model_draft.ego_scenario_id_seq'::regclass)"))
    model = Column(Text)
    version = Column(Text, primary_key=True, nullable=False)
    version_name = Column(Text)
    release = Column(Boolean)
    comment = Column(Text)
    timestamp = Column(DateTime)
class EgoScenarioInput(Base):
    """Mapped table ``model_draft.ego_scenario_input``.

    Provenance record for a scenario input table: its Postgres ``oid``,
    database/schema/table coordinates and metadata fields. The attribute
    ``meta_data`` avoids clashing with the reserved SQLAlchemy name
    ``metadata`` -- presumably the DB column is still named accordingly;
    verify with the DDL.
    """
    __tablename__ = 'ego_scenario_input'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_scenario_input_id_seq'::regclass)"))
    version = Column(Text)
    oid = Column(OID)
    database = Column(String)
    table_schema = Column(String)
    table_name = Column(String)
    path = Column(Text)
    metadata_title = Column(Text)
    metadata_reference_date = Column(Text)
    meta_data = Column(Text)
class EgoScenarioOverview(Base):
    """Mapped table ``model_draft.ego_scenario_overview``.

    Per-scenario summary row (name, version, a count ``cnt`` of
    unspecified items -- verify what is counted).
    """
    __tablename__ = 'ego_scenario_overview'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_scenario_overview_id_seq'::regclass)"))
    name = Column(Text)
    version = Column(Text)
    cnt = Column(Integer)
class EgoSimpleFeedinFull(Base):
    """Mapped table ``model_draft.ego_simple_feedin_full``.

    Hourly feed-in per coastdat cell, substation, generation type and
    scenario (five-column composite PK). The only class in this chunk
    with an explicit composite ``Index`` in ``__table_args__``.
    """
    __tablename__ = 'ego_simple_feedin_full'
    __table_args__ = (
        Index('ego_simple_feedin_full_idx', 'scenario', 'sub_id'),
        {'schema': 'model_draft'}
    )

    hour = Column(BigInteger, primary_key=True, nullable=False)
    coastdat_id = Column(BigInteger, primary_key=True, nullable=False)
    sub_id = Column(BigInteger, primary_key=True, nullable=False)
    generation_type = Column(Text, primary_key=True, nullable=False)
    # High-precision per-unit values (23 digits, 20 after the point).
    feedin = Column(Numeric(23, 20))
    scenario = Column(Text, primary_key=True, nullable=False)
    weighted_feedin = Column(Numeric(23, 20))
class EgoSmallChpPlantGermany(Base):
    """Mapped table ``model_draft.ego_small_chp_plant_germany``.

    Small CHP plant register entry; column layout matches the renewable
    power-plant mview above (capacities, address, EEG/TSO/DSO ids,
    provenance, EPSG:4326 point).
    """
    __tablename__ = 'ego_small_chp_plant_germany'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(BigInteger, primary_key=True)
    start_up_date = Column(DateTime)
    electrical_capacity = Column(Numeric)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    thermal_capacity = Column(Numeric)
    city = Column(String)
    postcode = Column(String)
    address = Column(String)
    lon = Column(Numeric)
    lat = Column(Numeric)
    gps_accuracy = Column(String)
    validation = Column(String)
    notification_reason = Column(String)
    eeg_id = Column(String)
    tso = Column(Float(53))
    tso_eic = Column(String)
    dso_id = Column(String)
    dso = Column(String)
    voltage_level = Column(String)
    network_node = Column(String)
    power_plant_id = Column(String)
    source = Column(String)
    comment = Column(String)
    geom = Column(Geometry('POINT', 4326))
class EgoSocialZensusLoad(Base):
    """Mapped table ``model_draft.ego_social_zensus_load``.

    Census (Zensus) population cell with an ``inside_la`` flag marking
    membership in a load area, plus point and polygon geometries.
    """
    __tablename__ = 'ego_social_zensus_load'
    __table_args__ = {'schema': 'model_draft'}

    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_social_zensus_load_id_seq'::regclass)"))
    gid = Column(Integer)
    population = Column(Integer)
    inside_la = Column(Boolean)
    geom_point = Column(Geometry('POINT', 3035), index=True)
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoSocialZensusLoadCluster(Base):
    """Mapped table ``model_draft.ego_social_zensus_load_cluster``.

    Cluster of census load cells: aggregated population ``zensus_sum``,
    area, the cluster polygon, a buffered polygon and two representative
    points (all EPSG:3035).
    """
    __tablename__ = 'ego_social_zensus_load_cluster'
    __table_args__ = {'schema': 'model_draft'}

    cid = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_social_zensus_load_cluster_cid_seq'::regclass)"))
    zensus_sum = Column(Integer)
    area_ha = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035), index=True)
    geom_buffer = Column(Geometry('POLYGON', 3035))
    geom_centroid = Column(Geometry('POINT', 3035), index=True)
    geom_surfacepoint = Column(Geometry('POINT', 3035), index=True)
# Core ``Table`` for a materialized view of census population aggregated
# per load area ('la' -- presumably; verify the view definition).
t_ego_society_zensus_per_la_mview = Table(
    'ego_society_zensus_per_la_mview', metadata,
    Column('name', Text),
    Column('sum', Numeric),
    Column('census_count', BigInteger),
    schema='model_draft'
)
class EgoStorageH2AreasDe(Base):
    """ORM model for ``model_draft.ego_storage_h2_areas_de``.

    Potential hydrogen-storage areas in Germany (salt structures,
    per the ``salzstrukt`` attribute), with WGS84 multipolygons.
    """
    __tablename__ = 'ego_storage_h2_areas_de'
    __table_args__ = {'schema': 'model_draft'}
    gid = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_storage_h2_areas_de_gid_seq'::regclass)"))
    objectid = Column(Numeric(10, 0))
    # German source attributes: designation and salt-structure name.
    bezeichnun = Column(String(50))
    salzstrukt = Column(String(50))
    id = Column(Integer)
    # Shapefile-derived area/length fields (ArcGIS naming).
    shape_star = Column(Numeric)
    shape_stle = Column(Numeric)
    geom = Column(Geometry('MULTIPOLYGON', 4326), index=True)
# ``model_draft.ego_supply_aggr_weather``: keyless mapping of aggregate ids
# to weather cell ids and buses per scenario (``scn_name``); reflected as a
# plain Table because it has no primary key.
t_ego_supply_aggr_weather = Table(
    'ego_supply_aggr_weather', metadata,
    Column('aggr_id', BigInteger),
    Column('w_id', BigInteger),
    Column('scn_name', String),
    Column('bus', BigInteger),
    Column('row_number', BigInteger),
    schema='model_draft'
)
# ``model_draft.ego_supply_conv_nep2035_temp``: temporary (``_temp``) staging
# table for conventional power plants in the NEP 2035 scenario.  Same column
# layout as ``ego_supply_conv_powerplant`` plus grid-assignment and scenario
# bookkeeping columns; no primary key, hence a plain Table.
t_ego_supply_conv_nep2035_temp = Table(
    'ego_supply_conv_nep2035_temp', metadata,
    Column('preversion', Text),
    Column('id', Integer),
    Column('bnetza_id', Text),
    Column('company', Text),
    Column('name', Text),
    Column('postcode', Text),
    Column('city', Text),
    Column('street', Text),
    Column('state', Text),
    Column('block', Text),
    # Raw commissioning date as text plus parsed numeric years.
    Column('commissioned_original', Text),
    Column('commissioned', Float(53)),
    Column('retrofit', Float(53)),
    Column('shutdown', Float(53)),
    Column('status', Text),
    Column('fuel', Text),
    Column('technology', Text),
    Column('type', Text),
    Column('eeg', Text),
    Column('chp', Text),
    Column('capacity', Float(53)),
    # UBA (Umweltbundesamt) capacity figures alongside the primary ones.
    Column('capacity_uba', Float(53)),
    Column('chp_capacity_uba', Float(53)),
    Column('efficiency_data', Float(53)),
    Column('efficiency_estimate', Float(53)),
    Column('network_node', Text),
    Column('voltage', Text),
    Column('network_operator', Text),
    Column('name_uba', Text),
    Column('lat', Float(53)),
    Column('lon', Float(53)),
    Column('comment', Text),
    Column('geom', Geometry('POINT', 4326), index=True),
    # Grid-assignment ids (substation, OTG, unified generator, load area).
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('la_id', Integer),
    Column('scenario', Text),
    Column('flag', Text),
    Column('nuts', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_conv_powerplant``: base register of conventional
# power plants (BNetzA/UBA data) with location, fuel, technology and
# capacity attributes.  No primary key, so reflected as a plain Table.
t_ego_supply_conv_powerplant = Table(
    'ego_supply_conv_powerplant', metadata,
    Column('gid', Integer),
    Column('bnetza_id', Text),
    Column('company', Text),
    Column('name', Text),
    Column('postcode', Text),
    Column('city', Text),
    Column('street', Text),
    Column('state', Text),
    Column('block', Text),
    # Raw commissioning date as text plus parsed numeric years.
    Column('commissioned_original', Text),
    Column('commissioned', Float(53)),
    Column('retrofit', Float(53)),
    Column('shutdown', Float(53)),
    Column('status', Text),
    Column('fuel', Text),
    Column('technology', Text),
    Column('type', Text),
    Column('eeg', Text),
    Column('chp', Text),
    Column('capacity', Float(53)),
    Column('capacity_uba', Float(53)),
    Column('chp_capacity_uba', Float(53)),
    Column('efficiency_data', Float(53)),
    Column('efficiency_estimate', Float(53)),
    Column('network_node', Text),
    Column('voltage', Text),
    Column('network_operator', Text),
    Column('name_uba', Text),
    Column('lat', Float(53)),
    Column('lon', Float(53)),
    Column('comment', Text),
    Column('geom', Geometry('POINT', 4326), index=True),
    schema='model_draft'
)
# ``model_draft.ego_supply_conv_powerplant_2035``: conventional power plants
# for the 2035 scenario -- ``ego_supply_conv_powerplant`` columns extended by
# load-area/scenario fields and grid-assignment ids.  No primary key.
t_ego_supply_conv_powerplant_2035 = Table(
    'ego_supply_conv_powerplant_2035', metadata,
    Column('preversion', Text),
    Column('id', Integer),
    Column('bnetza_id', Text),
    Column('company', Text),
    Column('name', Text),
    Column('postcode', Text),
    Column('city', Text),
    Column('street', Text),
    Column('state', Text),
    Column('block', Text),
    Column('commissioned_original', Text),
    Column('commissioned', Float(53)),
    Column('retrofit', Float(53)),
    Column('shutdown', Float(53)),
    Column('status', Text),
    Column('fuel', Text),
    Column('technology', Text),
    Column('type', Text),
    Column('eeg', Text),
    Column('chp', Text),
    Column('capacity', Float(53)),
    Column('capacity_uba', Float(53)),
    Column('chp_capacity_uba', Float(53)),
    Column('efficiency_data', Float(53)),
    Column('efficiency_estimate', Float(53)),
    Column('network_node', Text),
    Column('voltage', Text),
    Column('network_operator', Text),
    Column('name_uba', Text),
    Column('lat', Float(53)),
    Column('lon', Float(53)),
    Column('comment', Text),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('la_id', Integer),
    Column('scenario', Text),
    Column('flag', Text),
    Column('nuts', String),
    # Grid-assignment ids (substation, OTG, unified generator).
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    schema='model_draft'
)
# ``model_draft.ego_supply_conv_powerplant_ego100_mview``: presumably a
# materialized view (``_mview``) of conventional plants for the eGo 100%
# renewables scenario; same layout as the other conv_powerplant views.
# Note: unlike the base table, ``geom`` here is NOT declared with an index.
t_ego_supply_conv_powerplant_ego100_mview = Table(
    'ego_supply_conv_powerplant_ego100_mview', metadata,
    Column('preversion', Text),
    Column('id', Integer),
    Column('bnetza_id', Text),
    Column('company', Text),
    Column('name', Text),
    Column('postcode', Text),
    Column('city', Text),
    Column('street', Text),
    Column('state', Text),
    Column('block', Text),
    Column('commissioned_original', Text),
    Column('commissioned', Float(53)),
    Column('retrofit', Float(53)),
    Column('shutdown', Float(53)),
    Column('status', Text),
    Column('fuel', Text),
    Column('technology', Text),
    Column('type', Text),
    Column('eeg', Text),
    Column('chp', Text),
    Column('capacity', Float(53)),
    Column('capacity_uba', Float(53)),
    Column('chp_capacity_uba', Float(53)),
    Column('efficiency_data', Float(53)),
    Column('efficiency_estimate', Float(53)),
    Column('network_node', Text),
    Column('voltage', Text),
    Column('network_operator', Text),
    Column('name_uba', Text),
    Column('lat', Float(53)),
    Column('lon', Float(53)),
    Column('comment', Text),
    Column('geom', Geometry('POINT', 4326)),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('la_id', Integer),
    Column('scenario', Text),
    Column('flag', Text),
    Column('nuts', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_conv_powerplant_nep2035_mview``: presumably a
# materialized view of conventional plants for the NEP 2035 scenario;
# identical column layout to the ego100 variant above.
t_ego_supply_conv_powerplant_nep2035_mview = Table(
    'ego_supply_conv_powerplant_nep2035_mview', metadata,
    Column('preversion', Text),
    Column('id', Integer),
    Column('bnetza_id', Text),
    Column('company', Text),
    Column('name', Text),
    Column('postcode', Text),
    Column('city', Text),
    Column('street', Text),
    Column('state', Text),
    Column('block', Text),
    Column('commissioned_original', Text),
    Column('commissioned', Float(53)),
    Column('retrofit', Float(53)),
    Column('shutdown', Float(53)),
    Column('status', Text),
    Column('fuel', Text),
    Column('technology', Text),
    Column('type', Text),
    Column('eeg', Text),
    Column('chp', Text),
    Column('capacity', Float(53)),
    Column('capacity_uba', Float(53)),
    Column('chp_capacity_uba', Float(53)),
    Column('efficiency_data', Float(53)),
    Column('efficiency_estimate', Float(53)),
    Column('network_node', Text),
    Column('voltage', Text),
    Column('network_operator', Text),
    Column('name_uba', Text),
    Column('lat', Float(53)),
    Column('lon', Float(53)),
    Column('comment', Text),
    Column('geom', Geometry('POINT', 4326)),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('la_id', Integer),
    Column('scenario', Text),
    Column('flag', Text),
    Column('nuts', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_conv_powerplant_sq_mview``: presumably a
# materialized view of conventional plants for the status-quo (``sq``)
# scenario; identical column layout to the other scenario mviews.
t_ego_supply_conv_powerplant_sq_mview = Table(
    'ego_supply_conv_powerplant_sq_mview', metadata,
    Column('preversion', Text),
    Column('id', Integer),
    Column('bnetza_id', Text),
    Column('company', Text),
    Column('name', Text),
    Column('postcode', Text),
    Column('city', Text),
    Column('street', Text),
    Column('state', Text),
    Column('block', Text),
    Column('commissioned_original', Text),
    Column('commissioned', Float(53)),
    Column('retrofit', Float(53)),
    Column('shutdown', Float(53)),
    Column('status', Text),
    Column('fuel', Text),
    Column('technology', Text),
    Column('type', Text),
    Column('eeg', Text),
    Column('chp', Text),
    Column('capacity', Float(53)),
    Column('capacity_uba', Float(53)),
    Column('chp_capacity_uba', Float(53)),
    Column('efficiency_data', Float(53)),
    Column('efficiency_estimate', Float(53)),
    Column('network_node', Text),
    Column('voltage', Text),
    Column('network_operator', Text),
    Column('name_uba', Text),
    Column('lat', Float(53)),
    Column('lon', Float(53)),
    Column('comment', Text),
    Column('geom', Geometry('POINT', 4326)),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('la_id', Integer),
    Column('scenario', Text),
    Column('flag', Text),
    Column('nuts', String),
    schema='model_draft'
)
class EgoSupplyGenerator(Base):
    """ORM model for ``model_draft.ego_supply_generator``.

    Unified generator registry: ``un_id`` links a renewable (``re_id``)
    or conventional (``conv_id``) plant to aggregate ids for power flow
    (``aggr_id_pf``) and medium-scale (``aggr_id_ms``) modelling --
    presumed meaning of the suffixes, confirm against the pipeline.
    """
    __tablename__ = 'ego_supply_generator'
    __table_args__ = {'schema': 'model_draft'}
    un_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_supply_generator_un_id_seq'::regclass)"))
    re_id = Column(Integer)
    conv_id = Column(Integer)
    aggr_id_pf = Column(Integer)
    aggr_id_ms = Column(Integer)
    geom = Column(Geometry('POINT', 4326), index=True)
class EgoSupplyGeneratorNep2035(Base):
    """ORM model for ``model_draft.ego_supply_generator_nep2035``.

    NEP 2035 variant of :class:`EgoSupplyGenerator`; note the id columns
    are ``BigInteger`` here instead of ``Integer``.
    """
    __tablename__ = 'ego_supply_generator_nep2035'
    __table_args__ = {'schema': 'model_draft'}
    un_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_supply_generator_nep2035_un_id_seq'::regclass)"))
    re_id = Column(BigInteger)
    conv_id = Column(BigInteger)
    aggr_id_pf = Column(BigInteger)
    aggr_id_ms = Column(BigInteger)
    geom = Column(Geometry('POINT', 4326), index=True)
class EgoSupplyGeneratorTest(Base):
    """ORM model for ``model_draft.ego_supply_generator_test``.

    Test copy of :class:`EgoSupplyGenerator` with an identical layout
    but its own primary-key sequence.
    """
    __tablename__ = 'ego_supply_generator_test'
    __table_args__ = {'schema': 'model_draft'}
    un_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_supply_generator_test_un_id_seq'::regclass)"))
    re_id = Column(Integer)
    conv_id = Column(Integer)
    aggr_id_pf = Column(Integer)
    aggr_id_ms = Column(Integer)
    geom = Column(Geometry('POINT', 4326), index=True)
class EgoSupplyRea(Base):
    """ORM model for ``model_draft.ego_supply_rea``.

    Renewable-energy allocation (REA) working table: each renewable unit
    with its allocation metadata (``sort``, ``flag``) and the original,
    connection-line and relocated geometries (all EPSG:3035).
    """
    __tablename__ = 'ego_supply_rea'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True)
    electrical_capacity = Column(Numeric)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    voltage_level = Column(SmallInteger)
    postcode = Column(String)
    source = Column(String)
    subst_id = Column(BigInteger)
    la_id = Column(Integer)
    sort = Column(Integer)
    flag = Column(String)
    geom = Column(Geometry('POINT', 3035), index=True)
    # Connection line and relocated point produced by the allocation step.
    geom_line = Column(Geometry('LINESTRING', 3035), index=True)
    geom_new = Column(Geometry('POINT', 3035), index=True)
# ``model_draft.ego_supply_rea_m1_1_mview``: presumably a materialized view
# of renewable plants handled by allocation method M1-1; full plant record
# plus grid assignment and REA result columns.  No primary key.
t_ego_supply_rea_m1_1_mview = Table(
    'ego_supply_rea_m1_1_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    # String voltage level from the source register; the numeric
    # ``voltage_level`` below is the processed SmallInteger.
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    # REA outputs: allocation order/flag and derived geometries (EPSG:3035).
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m1_1_rest_mview``: presumably the remainder
# ("rest") of plants NOT allocated by method M1-1 -- a reduced column set
# with only the allocation flag.  No primary key.
t_ego_supply_rea_m1_1_rest_mview = Table(
    'ego_supply_rea_m1_1_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('rea_flag', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m1_2_mview``: allocation method M1-2 result
# view; identical layout to ``ego_supply_rea_m1_1_mview``.
t_ego_supply_rea_m1_2_mview = Table(
    'ego_supply_rea_m1_2_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m1_2_rest_mview``: remainder of plants not
# allocated by method M1-2; same reduced layout as the M1-1 rest view.
t_ego_supply_rea_m1_2_rest_mview = Table(
    'ego_supply_rea_m1_2_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('rea_flag', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m2_mview``: allocation method M2 result view;
# identical layout to the other REA method mviews.
t_ego_supply_rea_m2_mview = Table(
    'ego_supply_rea_m2_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m2_rest_mview``: remainder of plants not
# allocated by method M2; same reduced layout as the other rest views.
t_ego_supply_rea_m2_rest_mview = Table(
    'ego_supply_rea_m2_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('rea_flag', String),
    schema='model_draft'
)
class EgoSupplyReaM2Windfarm(Base):
    """ORM model for ``model_draft.ego_supply_rea_m2_windfarm``.

    Wind farms aggregated during REA method M2: turbine count
    (``dea_cnt``), summed capacity, area and the farm geometries
    (EPSG:3035).
    """
    __tablename__ = 'ego_supply_rea_m2_windfarm'
    __table_args__ = {'schema': 'model_draft'}
    farm_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_supply_rea_m2_windfarm_farm_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    dea_cnt = Column(Integer)
    electrical_capacity_sum = Column(Numeric)
    rea_geom_new = Column(Geometry('POLYGON', 3035))
    rea_geom_line = Column(Geometry('LINESTRING', 3035))
    geom = Column(Geometry('POLYGON', 3035), index=True)
# ``model_draft.ego_supply_rea_m3_mview``: allocation method M3 result view;
# identical layout to the other REA method mviews.
t_ego_supply_rea_m3_mview = Table(
    'ego_supply_rea_m3_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m3_rest_mview``: remainder of plants not
# allocated by method M3; same reduced layout as the other rest views.
t_ego_supply_rea_m3_rest_mview = Table(
    'ego_supply_rea_m3_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('rea_flag', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m4_mview``: allocation method M4 result view;
# identical layout to the other REA method mviews.
t_ego_supply_rea_m4_mview = Table(
    'ego_supply_rea_m4_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m4_rest_mview``: remainder of plants not
# allocated by method M4; same reduced layout as the other rest views.
t_ego_supply_rea_m4_rest_mview = Table(
    'ego_supply_rea_m4_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('rea_flag', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m5_mview``: allocation method M5 result view;
# identical layout to the other REA method mviews.
t_ego_supply_rea_m5_mview = Table(
    'ego_supply_rea_m5_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m5_rest_2_mview``: second remainder view for
# method M5; same reduced layout as the other rest views.
t_ego_supply_rea_m5_rest_2_mview = Table(
    'ego_supply_rea_m5_rest_2_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('rea_flag', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_m5_rest_mview``: remainder view for method M5.
# Note: ``geom`` is a bare Geometry (no type/SRID) here, unlike the typed
# POINT/4326 used by the sibling rest views -- matches the reflected schema.
t_ego_supply_rea_m5_rest_mview = Table(
    'ego_supply_rea_m5_rest_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('subst_id', BigInteger),
    Column('geom', Geometry, index=True),
    Column('rea_flag', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_rea_out_mview``: presumably the final REA output
# view, mirroring the columns of the ``EgoSupplyRea`` table (EPSG:3035
# geometries, allocation ``sort``/``flag``).  No primary key.
t_ego_supply_rea_out_mview = Table(
    'ego_supply_rea_out_mview', metadata,
    Column('id', BigInteger),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('voltage_level', SmallInteger),
    Column('postcode', String),
    Column('source', String),
    Column('subst_id', BigInteger),
    Column('la_id', Integer),
    Column('sort', Integer),
    Column('flag', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
class EgoSupplyReaPerGentypeAndVoltlevel(Base):
    """ORM model for ``model_draft.ego_supply_rea_per_gentype_and_voltlevel``.

    REA statistics: capacity and plant count aggregated per generation
    (sub)type and voltage level.
    """
    __tablename__ = 'ego_supply_rea_per_gentype_and_voltlevel'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    voltage_level = Column(SmallInteger)
    capacity = Column(Numeric)
    count = Column(BigInteger)
class EgoSupplyReaPerLoadarea(Base):
    """ORM model for ``model_draft.ego_supply_rea_per_loadarea``.

    REA statistics per load area: low-voltage decentralised-generator
    (``lv_dea``) count and capacity.
    """
    __tablename__ = 'ego_supply_rea_per_loadarea'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    subst_id = Column(Integer)
    lv_dea_cnt = Column(Integer)
    lv_dea_capacity = Column(Numeric)
class EgoSupplyReaPerMethod(Base):
    """ORM model for ``model_draft.ego_supply_rea_per_method``.

    REA statistics keyed by allocation method name: total capacity and
    plant count per method.
    """
    __tablename__ = 'ego_supply_rea_per_method'
    __table_args__ = {'schema': 'model_draft'}
    name = Column(Text, primary_key=True)
    capacity = Column(Numeric)
    count = Column(BigInteger)
class EgoSupplyReaPerMvgd(Base):
    """ORM model for ``model_draft.ego_supply_rea_per_mvgd``.

    REA statistics per medium-voltage grid district (keyed by the
    district's substation): decentralised-generator counts/capacities,
    split into low-voltage (``lv_``) and medium-voltage (``mv_``) parts.
    """
    __tablename__ = 'ego_supply_rea_per_mvgd'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(Integer, primary_key=True)
    dea_cnt = Column(Integer)
    dea_capacity = Column(Numeric)
    lv_dea_cnt = Column(Integer)
    lv_dea_capacity = Column(Numeric)
    mv_dea_cnt = Column(Integer)
    mv_dea_capacity = Column(Numeric)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
class EgoSupplyRenewableBnetzaFullAttribute(Base):
    """ORM model for ``model_draft.ego_supply_renewable_bnetza_full_attribute``.

    Full-attribute import of the BNetzA (German Federal Network Agency)
    renewable plant register.  Column names are the German source headers;
    several attributes are mapped to differently spelled DB column names
    (passed explicitly as the first ``Column`` argument) because the raw
    headers contain hyphens, digits-first names or non-ASCII characters.
    Most fields are kept as ``String(254)`` raw text from the import.
    """
    __tablename__ = 'ego_supply_renewable_bnetza_full_attribute'
    __table_args__ = {'schema': 'model_draft'}
    gid = Column(Integer, primary_key=True)
    meldedatum = Column(String(254))
    meldegrund = Column(String(254))
    anlagennummer = Column(String(254))
    eeg_anlagenschluessel = Column(String(254))
    genehmigungs_datum = Column(String(254))
    genehmigungs_behoerde = Column(String(254))
    genehmigungs_aktenzeichen = Column(String(254))
    geplantes_inbetriebnahme_datum = Column(String(254))
    errichtungs_frist = Column(String(254))
    energietraeger = Column(String(254))
    installierte_leistung = Column(String(254))
    inst_leistung_vor_leistungs_aenderung = Column(Numeric)
    inst_leistung_nach_leistungs_aenderung = Column(Numeric)
    # Non-ASCII Python identifier (valid in Python 3); kept as imported.
    tatsächliche_inbetriebnahme = Column(String(254))
    datum_leistungs_aenderung = Column(String(254))
    stilllegungs_datum = Column(String(254))
    name_der_anlage = Column(String(254))
    strasse = Column(String(254))
    hausnummer = Column(String(254))
    plz = Column(String(254))
    ort = Column(String(254))
    # DB column uses a hyphen, so the name is given explicitly.
    gemeinde_schluessel = Column('gemeinde-schluessel', String(254))
    bundesland = Column(String(254))
    utm_zonenwert = Column('utm-zonenwert', String(254))
    utm_east = Column('utm-east', Numeric)
    utm_east_neu = Column('utm-east_neu', Numeric)
    utm_north = Column('utm-north', Numeric)
    zugehoehrigkeit_anlagenpark = Column(String(254))
    name_des_anlagenparks = Column(String(254))
    spannungsebene = Column(String(254))
    netzanschlusspunkt = Column(String(254))
    zaehlpunktbezeichnung = Column(String(254))
    name_des_netzbetreibers = Column(String(254))
    fernsteuerbarkeit_durch = Column(String(254))
    gemeinsame_techn_einrichtung = Column(String(254))
    inanspruchnahme_finanzieller_foerderung = Column(String(254))
    eigenverbrauch_geplant = Column(String(254))
    eingesetzte_biomasse = Column(String(254))
    # NOTE(review): mixed-case attribute name from the source header;
    # kept as generated to match existing callers.
    ausschliesslich_Biomasse = Column(String(254))
    flexpraemie = Column(String(254))
    erstmalige_inanspruchnahme_flexpraemie = Column(String(254))
    leistungserhoehung_flexpraemie = Column(String(254))
    datum_leistungserhoehung_flexpraemie = Column(String(254))
    umfang_der_leistungserhoehung = Column(String(254))
    erstmalig_ausschliesslich_biomethan = Column(String(254))
    zustimmung_gesonderte_veröeffentlichung_biomethanstilllegung = Column(String(254))
    kwk_anlage = Column(String(254))
    thermische_leistung = Column(String(254))
    andere_energietraeger = Column(String(254))
    eingesetzte_andere_energietraeger = Column(String(254))
    erstmalige_stromerzeugung = Column(String(254))
    windanlagenhersteller = Column(String(254))
    anlagentyp = Column(String(254))
    nabenhoehe = Column(String(254))
    rotordurchmesser = Column(String(254))
    repowering = Column(String(254))
    stillgelegt = Column(String(254))
    # Digit-first source headers (e.g. form section numbers 7.x/8.x/9.x)
    # get a leading underscore in Python and the raw name in the DB.
    _7_7_1_mitt = Column('7_7_1_mitt', String(254))
    _7_7_2_form = Column('7_7_2_form', String(254))
    _7_7_3_skal = Column('7_7_3_skal', Numeric)
    _7_7_4_ertr = Column('7_7_4_ertr', Numeric)
    _7_7_5_ertr = Column('7_7_5_ertr', Numeric)
    _7_8_1_seel = Column('7_8_1_seel', String(254))
    _7_8_2_wass = Column('7_8_2_wass', String(254))
    # DB names below embed escaped umlauts (\xfc = ü, \xf6 = ö).
    _7_8_3_küs = Column('7_8_3_k\xfcs', String(254))
    _8_1_ertüc = Column('8_1_ert\xfcc', String(254))
    _8_2_art_de = Column('8_2_art_de', String(254))
    _8_3_zulass = Column('8_3_zulass', String(254))
    _8_4_höhe = Column('8_4_h\xf6he', String(254))
    _8_5_datum = Column('8_5_datum', String(254))
    _9_1_zuschl = Column('9_1_zuschl', String(254))
    _9_2_wie_vi = Column('9_2_wie_vi', String(254))
    _9_3_wie_vi = Column('9_3_wie_vi', String(254))
    # Unnamed trailing import columns.
    field_74 = Column(Numeric)
    field_75 = Column(Numeric)
    field_76 = Column(String(254))
    field_77 = Column(String(254))
    field_78 = Column(String(254))
    field_79 = Column(String(254))
    field_80 = Column(String(254))
    field_81 = Column(String(254))
    field_82 = Column(String(254))
    field_83 = Column(String(254))
    field_84 = Column(String(254))
    field_85 = Column(Numeric)
    field_86 = Column(Numeric(10, 0))
    field_87 = Column(String(254))
    field_88 = Column(String(254))
    # ETRS89 / UTM zone 32N (EPSG:25832) location.
    geom = Column(Geometry('POINT', 25832), index=True)
# ``model_draft.ego_supply_res_biomass_2035_temp``: temporary staging table
# for biomass plants in the 2035 scenario.  Unlike the 2050 variant below,
# ``geom`` is untyped and the REA geometries are stored as Text -- matches
# the reflected schema.  No primary key.
t_ego_supply_res_biomass_2035_temp = Table(
    'ego_supply_res_biomass_2035_temp', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry, index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Text),
    Column('rea_geom_new', Text),
    Column('scenario', Text),
    Column('flag', Text),
    Column('nuts', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_res_biomass_2050_temp``: temporary staging table
# for biomass plants in the 2050 scenario; here the geometries are properly
# typed (POINT/LINESTRING, EPSG:3035).  No primary key.
t_ego_supply_res_biomass_2050_temp = Table(
    'ego_supply_res_biomass_2050_temp', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_res_chp_2050_temp``: temporary staging table for
# renewable CHP plants in the 2050 scenario; same layout as the biomass
# 2050 staging table.  No primary key.
t_ego_supply_res_chp_2050_temp = Table(
    'ego_supply_res_chp_2050_temp', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_res_hydro_2035_temp``: temporary staging table
# for hydro plants in the 2035 scenario.  As in the biomass 2035 table,
# the REA geometries are stored as Text; ``geom`` is a typed POINT/3035.
# No primary key.
t_ego_supply_res_hydro_2035_temp = Table(
    'ego_supply_res_hydro_2035_temp', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Text),
    Column('rea_geom_new', Text),
    Column('scenario', Text),
    Column('flag', Text),
    Column('nuts', String),
    schema='model_draft'
)
# ``model_draft.ego_supply_res_hydro_2050_temp``: temporary staging table
# for hydro plants in the 2050 scenario; typed geometries like the other
# 2050 staging tables.  No primary key.
t_ego_supply_res_hydro_2050_temp = Table(
    'ego_supply_res_hydro_2050_temp', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    schema='model_draft'
)
class EgoSupplyResPowerplant(Base):
    """ORM mapping for model_draft.ego_supply_res_powerplant.

    Renewable power-plant records keyed by ``id``; the main ``geom`` point is
    stored in SRID 4326, while the ``rea_*`` geometries use SRID 3035.
    """
    __tablename__ = 'ego_supply_res_powerplant'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True)
    start_up_date = Column(DateTime)
    electrical_capacity = Column(Numeric)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    thermal_capacity = Column(Numeric)
    city = Column(String)
    postcode = Column(String)
    address = Column(String)
    lon = Column(Numeric)
    lat = Column(Numeric)
    gps_accuracy = Column(String)
    validation = Column(String)
    notification_reason = Column(String)
    eeg_id = Column(String)
    tso = Column(Float(53))
    tso_eic = Column(String)
    dso_id = Column(String)
    dso = Column(String)
    voltage_level_var = Column(String)
    network_node = Column(String)
    power_plant_id = Column(String)
    source = Column(String)
    comment = Column(String)
    geom = Column(Geometry('POINT', 4326), index=True)
    subst_id = Column(BigInteger)
    otg_id = Column(BigInteger)
    un_id = Column(BigInteger)
    voltage_level = Column(SmallInteger)
    la_id = Column(Integer)
    mvlv_subst_id = Column(Integer)
    rea_sort = Column(Integer)
    rea_flag = Column(String)
    rea_geom_line = Column(Geometry('LINESTRING', 3035))
    rea_geom_new = Column(Geometry('POINT', 3035))
# The four Tables below map materialized views over the res_powerplant data
# (no primary key visible, hence Core Table objects rather than ORM classes).
# They share one column layout; only 'ego_supply_res_powerplant_out_mview'
# lacks 'preversion'/'scenario'/'flag'/'nuts'/'w_id' and adds indexes.
t_ego_supply_res_powerplant_ego100_mview = Table(
    'ego_supply_res_powerplant_ego100_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326)),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    schema='model_draft'
)
# Core Table for model_draft.ego_supply_res_powerplant_nep2035_mview.
t_ego_supply_res_powerplant_nep2035_mview = Table(
    'ego_supply_res_powerplant_nep2035_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326)),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    schema='model_draft'
)
# Core Table for model_draft.ego_supply_res_powerplant_out_mview
# (indexed geometry columns; no preversion/scenario columns).
t_ego_supply_res_powerplant_out_mview = Table(
    'ego_supply_res_powerplant_out_mview', metadata,
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('rea_geom_new', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# Core Table for model_draft.ego_supply_res_powerplant_sq_mview.
t_ego_supply_res_powerplant_sq_mview = Table(
    'ego_supply_res_powerplant_sq_mview', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326)),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('w_id', BigInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    schema='model_draft'
)
class EgoSupplyResPv2035GermanyMunTemp(Base):
    """ORM mapping for model_draft.ego_supply_res_pv_2035_germany_mun_temp.

    Per-municipality (``rs`` key, 12-char regional code) PV unit/capacity
    aggregates; ``id`` is sequence-backed on the server side.
    """
    __tablename__ = 'ego_supply_res_pv_2035_germany_mun_temp'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.ego_supply_pv_dev_2035_germany_mun_id_seq'::regclass)"))
    pv_units = Column(Integer)
    pv_cap_2014 = Column(Integer)
    pv_add_cap_2035 = Column(Integer)
    voltage_level = Column(SmallInteger)
    rs = Column(String(12))
    pv_avg_cap = Column(Integer)
    pv_new_units = Column(Integer)
class EgoSupplyResPv2050GermanyMunTemp(Base):
    """ORM mapping for model_draft.ego_supply_res_pv_2050_germany_mun_temp.

    Same shape as the 2035 variant above but with 2035 base / 2050 added
    capacity columns; ``id`` is sequence-backed on the server side.
    """
    __tablename__ = 'ego_supply_res_pv_2050_germany_mun_temp'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.ego_supply_res_pv_2050_germany_mun_id_seq'::regclass)"))
    pv_units = Column(Integer)
    pv_cap_2035 = Column(Integer)
    pv_add_cap_2050 = Column(Integer)
    voltage_level = Column(SmallInteger)
    rs = Column(String(12))
    pv_avg_cap = Column(Integer)
    pv_new_units = Column(Integer)
class EgoSupplyResPvToRegionTemp(Base):
    """ORM mapping for model_draft.ego_supply_res_pv_to_region_temp.

    Link rows from a renewable-energy unit (``re_id``) to region identifiers
    (substation, NUTS code, regional code, vg250 id).
    """
    __tablename__ = 'ego_supply_res_pv_to_region_temp'
    __table_args__ = {'schema': 'model_draft'}
    re_id = Column(BigInteger, primary_key=True)
    subst_id = Column(BigInteger)
    otg_id = Column(BigInteger)
    un_id = Column(BigInteger)
    nuts = Column(String(5))
    rs = Column(String(12))
    id_vg250 = Column(BigInteger)
class EgoSupplyResWo2035GermanyMunTemp(Base):
    """ORM mapping for model_draft.ego_supply_res_wo_2035_germany_mun_temp.

    Wind-onshore counterpart of the pv_2035 table above (wo_* columns);
    ``id`` is sequence-backed on the server side.
    """
    __tablename__ = 'ego_supply_res_wo_2035_germany_mun_temp'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.ego_supply_res_wo_2035_germany_mun_id_seq'::regclass)"))
    wo_units = Column(Integer)
    wo_cap_2014 = Column(Integer)
    wo_add_cap_2035 = Column(Integer)
    voltage_level = Column(SmallInteger)
    rs = Column(String(12))
    wo_avg_cap = Column(Integer)
    wo_new_units = Column(Integer)
class EgoSupplyResWo2050GermanyMunTemp(Base):
    """ORM mapping for model_draft.ego_supply_res_wo_2050_germany_mun_temp.

    Wind-onshore counterpart of the pv_2050 table above (wo_* columns);
    ``id`` is sequence-backed on the server side.
    """
    __tablename__ = 'ego_supply_res_wo_2050_germany_mun_temp'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.ego_supply_res_wo_2050_germany_mun_id_seq'::regclass)"))
    wo_units = Column(Integer)
    wo_cap_2035 = Column(Integer)
    wo_add_cap_2050 = Column(Integer)
    voltage_level = Column(SmallInteger)
    rs = Column(String(12))
    wo_avg_cap = Column(Integer)
    wo_new_units = Column(Integer)
# Core Tables for the wind-offshore temp tables. Note the two variants differ:
# the 2035 table stores 'geom' in SRID 4326 and places scenario/flag/nuts
# before la_id, while the 2050 table uses SRID 3035 and trails with them —
# both layouts are kept exactly as reflected from the database.
t_ego_supply_res_woff_2035_temp = Table(
    'ego_supply_res_woff_2035_temp', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    schema='model_draft'
)
# Core Table for model_draft.ego_supply_res_woff_2050_temp (geom in SRID 3035).
t_ego_supply_res_woff_2050_temp = Table(
    'ego_supply_res_woff_2050_temp', metadata,
    Column('preversion', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    Column('scenario', String),
    Column('flag', String),
    Column('nuts', String),
    schema='model_draft'
)
class EgoSupplyScenarioCapacity(Base):
    """ORM mapping for model_draft.ego_supply_scenario_capacities.

    Composite primary key: (generation_type, nuts, scenario_name); ``state``
    is NOT NULL but not part of the key. Fixed-width CHAR columns as
    reflected from the database.
    """
    __tablename__ = 'ego_supply_scenario_capacities'
    __table_args__ = {'schema': 'model_draft'}
    state = Column(CHAR(50), nullable=False)
    generation_type = Column(CHAR(25), primary_key=True, nullable=False)
    capacity = Column(Numeric(15, 0))
    nuts = Column(CHAR(12), primary_key=True, nullable=False)
    scenario_name = Column(CHAR(50), primary_key=True, nullable=False)
class EgoSupplyWpaPerMvgd(Base):
    """ORM mapping for model_draft.ego_supply_wpa_per_mvgd.

    Polygon areas (hectares) per medium-voltage grid district (``subst_id``);
    ``id`` is sequence-backed, geometry indexed in SRID 3035.
    """
    __tablename__ = 'ego_supply_wpa_per_mvgd'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ego_supply_wpa_per_mvgd_id_seq'::regclass)"))
    subst_id = Column(Integer)
    area_ha = Column(Float(53))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class EgoWeatherMeasurementPoint(Base):
    """ORM mapping for model_draft.ego_weather_measurement_point.

    Composite primary key (coastdat_id, type_of_generation); WGS84 point.
    """
    __tablename__ = 'ego_weather_measurement_point'
    __table_args__ = {'schema': 'model_draft'}
    coastdat_id = Column(BigInteger, primary_key=True, nullable=False)
    type_of_generation = Column(Text, primary_key=True, nullable=False)
    geom = Column(Geometry('POINT', 4326))
# Core Tables (no primary key reflected) for EV-charging inputs from OSM:
# Berlin parking/shops carry raw OSM ids plus an hstore 'tags' column;
# the Bonn POI tables store typed point geometries (SRID 3035).
t_ev_charging_berlin_parking_poly = Table(
    'ev_charging_berlin_parking_poly', metadata,
    Column('osm_id', BigInteger),
    Column('tags', HSTORE(Text())),
    Column('geom', Geometry, index=True),  # untyped geometry — SRID unknown from here
    schema='model_draft'
)
t_ev_charging_berlin_parking_pts = Table(
    'ev_charging_berlin_parking_pts', metadata,
    Column('osm_id', BigInteger),
    Column('tags', HSTORE(Text())),
    Column('geom', Geometry),
    schema='model_draft'
)
t_ev_charging_berlin_shops_pts = Table(
    'ev_charging_berlin_shops_pts', metadata,
    Column('osm_id', BigInteger),
    Column('tags', HSTORE(Text())),
    Column('geom', Geometry),
    schema='model_draft'
)
t_ev_charging_bonn_poi_points = Table(
    'ev_charging_bonn_poi_points', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('amenity', String(50)),
    Column('weight', Integer),
    schema='model_draft'
)
t_ev_charging_bonn_shops = Table(
    'ev_charging_bonn_shops', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_bonn_shops_clusters = Table(
    'ev_charging_bonn_shops_clusters', metadata,
    Column('npoints', Integer),
    Column('geom', Geometry),
    schema='model_draft'
)
class EvChargingBrandenburgStreetseg(Base):
    """ORM mapping for model_draft.ev_charging_brandenburg_streetsegs.

    Street segments stored both as endpoint coordinate pairs (x1,y1)-(x2,y2)
    and as a LINESTRING geometry (SRID 3035); ``id`` is sequence-backed.
    """
    __tablename__ = 'ev_charging_brandenburg_streetsegs'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_brandenburg_streetsegs_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Numeric)
# Core Table for model_draft.ev_charging_buildings_berlin; 'gid' is unique
# but not reflected as a primary key, hence a Table rather than an ORM class.
t_ev_charging_buildings_berlin = Table(
    'ev_charging_buildings_berlin', metadata,
    Column('gid', Integer, unique=True),
    Column('geom', Geometry, index=True),
    schema='model_draft'
)
class EvChargingCandidatepoint(Base):
    """ORM mapping for model_draft.ev_charging_candidatepoints.

    Indexed candidate point locations (SRID 3035); ``id`` sequence-backed.
    """
    __tablename__ = 'ev_charging_candidatepoints'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_candidatepoints_id_seq'::regclass)"))
    geom = Column(Geometry('POINT', 3035), index=True)
class EvChargingCensusblocksSpiekeroog(Base):
    """ORM mapping for model_draft.ev_charging_censusblocks_spiekeroog.

    Census-block polygons (SRID 3035) with a population count.
    """
    __tablename__ = 'ev_charging_censusblocks_spiekeroog'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_censusblocks_spiekeroog_id_seq'::regclass)"))
    population = Column(Integer)
    geom = Column(Geometry('POLYGON', 3035))
# Core Table for model_draft.ev_charging_chosenpoints; 'current_cp' flags
# whether the point is an existing charging point (inferred from name —
# TODO confirm).
t_ev_charging_chosenpoints = Table(
    'ev_charging_chosenpoints', metadata,
    Column('id', Integer),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('current_cp', Boolean),
    schema='model_draft'
)
# Core Table for model_draft.ev_charging_coverage_bundesland (untyped geometry).
t_ev_charging_coverage_bundesland = Table(
    'ev_charging_coverage_bundesland', metadata,
    Column('osm_id', BigInteger),
    Column('geom', Geometry),
    schema='model_draft'
)
class EvChargingDistrict(Base):
    """ORM mapping for model_draft.ev_charging_districts.

    Named district multipolygons in WGS84 (SRID 4326) — note this differs
    from the SRID-3035 geometries used by most ev_charging tables.
    """
    __tablename__ = 'ev_charging_districts'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_districts_id_seq'::regclass)"))
    district_name = Column(String(50))
    geom = Column(Geometry('MULTIPOLYGON', 4326))
# Core Tables for per-city EV-charging inputs (Essen, Potsdam, Spiekeroog,
# Giessen). Column names in ev_charging_essen_districtdata are verbatim German
# database identifiers (including spaces and '\xdf' = 'ß') and must not be
# renamed here.
t_ev_charging_essen_charging_points = Table(
    'ev_charging_essen_charging_points', metadata,
    Column('ge_id', Text),
    Column('lat', Float(53)),
    Column('lng', Float(53)),
    Column('charger_count', Integer),
    Column('power_kwh', Float(53)),
    Column('charger_type', Text),
    Column('geom', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# District-level statistics for Essen; names taken as-is from the source data.
t_ev_charging_essen_districtdata = Table(
    'ev_charging_essen_districtdata', metadata,
    Column('index', BigInteger, index=True),
    Column('Name', Text),
    Column('geom', Geometry('MULTIPOLYGON', 3035), index=True),
    Column('Wohnungsbezogene Charakterisierung', Text),
    Column('Sozialraeumliche Charakterisierung', Text),
    Column('Mietspiegel', Float(53)),
    Column('Flaeche [m2]', BigInteger),
    Column('bebaute Flaeche', BigInteger),
    Column('Bevoelkerung', BigInteger),
    Column('Anteil 18-25', Float(53)),
    Column('Anteil 25-45', Float(53)),
    Column('Anteil 45-65', Float(53)),
    Column('Durchschnittsalter [a]', Float(53)),
    Column('Privathaushalte', BigInteger),
    Column('Einpersonenhaushalte', BigInteger),
    Column('Haushalte mit minderj. Kindern', BigInteger),
    Column('durchschn. Haushaltsgroe\xdfe', Float(53)),
    Column('Geb/Sterb', BigInteger),
    Column('Wanderung', BigInteger),
    Column('Wohnungen je Gebaeude', Float(53)),
    Column('Wohnflaeche [m2]', BigInteger),
    Column('Pkw', BigInteger),
    Column('Pkw natuerlicher Personen', BigInteger),
    Column('sozpflichtig beschaeftigte', BigInteger),
    Column('Arbeitslose', BigInteger),
    Column('Gruenwaehler', BigInteger),
    schema='model_draft'
)
t_ev_charging_geom_essen = Table(
    'ev_charging_geom_essen', metadata,
    Column('geom', Geometry),
    schema='model_draft'
)
# SRID 31467 = Gauss-Krüger zone 3 (DHDN) per the EPSG registry.
t_ev_charging_geom_essen2 = Table(
    'ev_charging_geom_essen2', metadata,
    Column('geom', Geometry('MULTIPOLYGON', 31467)),
    schema='model_draft'
)
t_ev_charging_geom_potsdam = Table(
    'ev_charging_geom_potsdam', metadata,
    Column('geom', Geometry('MULTIPOLYGON', 31467)),
    schema='model_draft'
)
t_ev_charging_geom_spiekeroog = Table(
    'ev_charging_geom_spiekeroog', metadata,
    Column('geom', Geometry),
    schema='model_draft'
)
# Table name carries a trailing underscore exactly as it exists in the DB.
t_ev_charging_giessen_poi_points_ = Table(
    'ev_charging_giessen_poi_points_', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('amenity', String(50)),
    Column('weight', Integer),
    schema='model_draft'
)
t_ev_charging_giessen_poi_points__clusters = Table(
    'ev_charging_giessen_poi_points__clusters', metadata,
    Column('npoints', Integer),
    Column('geom', Geometry, index=True),
    schema='model_draft'
)
class EvChargingGiessenStreet(Base):
    """ORM mapping for model_draft.ev_charging_giessen_streets.

    Street linestrings (SRID 3035) with OSM id and hstore 'highway' tags.
    The server-side sequence name retains the original 'ß' spelling and is
    therefore double-quoted inside the nextval() expression.
    """
    __tablename__ = 'ev_charging_giessen_streets'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.\"ev_charging_gießen_streets_id_seq\"'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
class EvChargingGiessenStreetsSegmented(Base):
    """ORM mapping for model_draft.ev_charging_giessen_streets_segmented.

    Segmented street pieces: endpoint coordinates plus LINESTRING geometry
    (SRID 3035); sequence name keeps the original 'ß' spelling.
    """
    __tablename__ = 'ev_charging_giessen_streets_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.\"ev_charging_gießen_streets_segmented_id_seq\"'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Float(53))
# Core Tables for network-analysis outputs. The nxcoverage_* group exists in
# several near-duplicate spellings (capitalised, lowercase, with 'ß' kept as
# '\xdf' escapes) — all reflect distinct tables in the database and are kept
# verbatim. NOTE(review): 'ev_charging_nxcoverage_Gie\xdfen' has no geom
# column, unlike its siblings — confirm whether that is intended.
t_ev_charging_nodebetweenness = Table(
    'ev_charging_nodebetweenness', metadata,
    Column('node_id', Integer),
    Column('x', Float(53)),
    Column('node_betweenness', Float(53)),
    Column('y', Float(53)),
    Column('geom', Geometry('POINT', 3035)),
    schema='model_draft'
)
t_ev_charging_nxcoverage_ = Table(
    'ev_charging_nxcoverage_', metadata,
    Column('index', BigInteger, index=True),
    Column('status', Text),
    Column('weight', Float(53)),
    Column('x1', Float(53)),
    Column('x2', Float(53)),
    Column('y1', Float(53)),
    Column('y2', Float(53)),
    Column('geom', Geometry('LINESTRING', 3035)),
    schema='model_draft'
)
t_ev_charging_nxcoverage_Friedland = Table(
    'ev_charging_nxcoverage_Friedland', metadata,
    Column('index', BigInteger, index=True),
    Column('status', Text),
    Column('weight', Float(53)),
    Column('x1', Float(53)),
    Column('x2', Float(53)),
    Column('y1', Float(53)),
    Column('y2', Float(53)),
    Column('geom', Geometry('LINESTRING', 3035)),
    schema='model_draft'
)
t_ev_charging_nxcoverage_Gießen = Table(
    'ev_charging_nxcoverage_Gie\xdfen', metadata,
    Column('index', BigInteger, index=True),
    Column('status', Text),
    Column('weight', Float(53)),
    Column('x1', Float(53)),
    Column('x2', Float(53)),
    Column('y1', Float(53)),
    Column('y2', Float(53)),
    schema='model_draft'
)
t_ev_charging_nxcoverage_friedland = Table(
    'ev_charging_nxcoverage_friedland', metadata,
    Column('index', BigInteger, index=True),
    Column('status', Text),
    Column('weight', Float(53)),
    Column('x1', Float(53)),
    Column('x2', Float(53)),
    Column('y1', Float(53)),
    Column('y2', Float(53)),
    Column('geom', Geometry('LINESTRING', 3035)),
    schema='model_draft'
)
t_ev_charging_nxcoverage_gießen = Table(
    'ev_charging_nxcoverage_gie\xdfen', metadata,
    Column('index', BigInteger, index=True),
    Column('status', Text),
    Column('weight', Float(53)),
    Column('x1', Float(53)),
    Column('x2', Float(53)),
    Column('y1', Float(53)),
    Column('y2', Float(53)),
    Column('geom', Geometry('LINESTRING', 3035)),
    schema='model_draft'
)
t_ev_charging_nxcoverage_winterberg = Table(
    'ev_charging_nxcoverage_winterberg', metadata,
    Column('index', BigInteger, index=True),
    Column('status', Text),
    Column('weight', Float(53)),
    Column('x1', Float(53)),
    Column('x2', Float(53)),
    Column('y1', Float(53)),
    Column('y2', Float(53)),
    Column('geom', Geometry('LINESTRING', 3035)),
    schema='model_draft'
)
t_ev_charging_parking_points_berlin = Table(
    'ev_charging_parking_points_berlin', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_parking_polygons_berlin = Table(
    'ev_charging_parking_polygons_berlin', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POLYGON', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_poi = Table(
    'ev_charging_poi', metadata,
    Column('osm_id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', Text),
    Column('weight', Integer),
    schema='model_draft'
)
class EvChargingPoiCluster(Base):
    """ORM mapping for model_draft.ev_charging_poi_clusters.

    POI cluster rows: point count plus an untyped cluster geometry; ``id``
    is sequence-backed and declared after the data columns as reflected.
    """
    __tablename__ = 'ev_charging_poi_clusters'
    __table_args__ = {'schema': 'model_draft'}
    npoints = Column(Integer)
    geom = Column(Geometry)
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_poi_clusters_id_seq'::regclass)"))
# Core Tables for POI point data (SRID 3035); the Berlin variant adds a
# 'weight' column.
t_ev_charging_poi_point = Table(
    'ev_charging_poi_point', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_poi_points_berlin = Table(
    'ev_charging_poi_points_berlin', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', String(50)),
    Column('weight', Integer),
    schema='model_draft'
)
class EvChargingPoiPointsBerlinCluster(Base):
    """ORM mapping for model_draft.ev_charging_poi_points_berlin_clusters.

    Same cluster layout as EvChargingPoiCluster (npoints + geometry,
    sequence-backed id declared last).
    """
    __tablename__ = 'ev_charging_poi_points_berlin_clusters'
    __table_args__ = {'schema': 'model_draft'}
    npoints = Column(Integer)
    geom = Column(Geometry)
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_poi_points_berlin_clusters_id_seq'::regclass)"))
# Core Tables for per-city POI points (SRID 3035); Potsdam adds 'weight'.
t_ev_charging_poi_points_essen = Table(
    'ev_charging_poi_points_essen', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_poi_points_giessen = Table(
    'ev_charging_poi_points_giessen', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_poi_points_potsdam = Table(
    'ev_charging_poi_points_potsdam', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', String(50)),
    Column('weight', Integer),
    schema='model_draft'
)
class EvChargingPoiPointsPotsdamCluster(Base):
    """ORM mapping for model_draft.ev_charging_poi_points_potsdam_clusters.

    Same cluster layout as the other *_clusters classes in this module.
    """
    __tablename__ = 'ev_charging_poi_points_potsdam_clusters'
    __table_args__ = {'schema': 'model_draft'}
    npoints = Column(Integer)
    geom = Column(Geometry)
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_poi_points_potsdam_clusters_id_seq'::regclass)"))
# Core Tables for remaining POI/population/street inputs. Note that
# ev_charging_preprocess_segments stores its geometry as Text rather than a
# Geometry type — presumably WKT/EWKT; TODO confirm before using spatially.
t_ev_charging_poi_polygon = Table(
    'ev_charging_poi_polygon', metadata,
    Column('id', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_poi_polygon_essen = Table(
    'ev_charging_poi_polygon_essen', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry('POLYGON', 3035)),
    Column('amenity', String(50)),
    schema='model_draft'
)
t_ev_charging_poi_test = Table(
    'ev_charging_poi_test', metadata,
    Column('osm_id', BigInteger),
    Column('geom', Geometry('POINT', 3035)),
    Column('amenity', Text),
    schema='model_draft'
)
# Per-hectare population grid for Berlin with built-up share statistics.
t_ev_charging_population_per_ha_berlin = Table(
    'ev_charging_population_per_ha_berlin', metadata,
    Column('gid', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    Column('population', Numeric(10, 0)),
    Column('perc_builtup_ha', Float(53)),
    Column('builtup_m2_per_person', Float(53)),
    schema='model_draft'
)
t_ev_charging_preprocess_segments = Table(
    'ev_charging_preprocess_segments', metadata,
    Column('id', BigInteger),
    Column('geom', Text),
    schema='model_draft'
)
# Table name carries a trailing underscore exactly as it exists in the DB.
t_ev_charging_saarland_streets_ = Table(
    'ev_charging_saarland_streets_', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry, index=True),
    schema='model_draft'
)
class EvChargingSaarlandStreetsSegmented(Base):
    """ORM mapping for model_draft.ev_charging_saarland_streets__segmented.

    Segmented street pieces (double underscore in the table name is from the
    DB): endpoint coordinates plus indexed LINESTRING geometry, SRID 3035.
    """
    __tablename__ = 'ev_charging_saarland_streets__segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_saarland_streets__segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Float(53))
class EvChargingStreetSegment(Base):
    """ORM mapping for model_draft.ev_charging_street_segments.

    Segment rows keyed by ``line_id``; ``startpt_id`` and ``endpt_id`` are
    NOT NULL sequence-backed identifiers for the segment's endpoints, with
    separate untyped geometries for each endpoint and the segment itself.
    """
    __tablename__ = 'ev_charging_street_segments'
    __table_args__ = {'schema': 'model_draft'}
    geom_startpt = Column(Geometry)
    geom_endpt = Column(Geometry)
    geom = Column(Geometry)
    length = Column(Float(53))
    startpt_id = Column(Integer, nullable=False, server_default=text("nextval('model_draft.ev_charging_street_segments_startpt_id_seq'::regclass)"))
    endpt_id = Column(Integer, nullable=False, server_default=text("nextval('model_draft.ev_charging_street_segments_endpt_id_seq'::regclass)"))
    line_id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_street_segments_line_id_seq'::regclass)"))
class EvChargingStreet(Base):
    """ORM mapping for model_draft.ev_charging_streets.

    Base street linestrings (SRID 3035) with OSM id — the un-suffixed
    variant of the per-region ev_charging_streets_* classes below.
    """
    __tablename__ = 'ev_charging_streets'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_id_seq'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
class EvChargingStreetsBerlin(Base):
    """ORM mapping for model_draft.ev_charging_streets_berlin.

    Street linestrings (SRID 3035) for Berlin with OSM id and hstore
    'highway' tags; same layout as the other ev_charging_streets_* classes.
    """
    __tablename__ = 'ev_charging_streets_berlin'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_berlin_id_seq'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
class EvChargingStreetsBerlinSegmented(Base):
    """ORM mapping for model_draft.ev_charging_streets_berlin_segmented.

    Segmented streets for Berlin: endpoint coordinates plus LINESTRING
    geometry (SRID 3035); same layout as the other *_segmented classes.
    """
    __tablename__ = 'ev_charging_streets_berlin_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_berlin_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
# Core Table (no primary key reflected) for the raw Brandenburg street dump.
t_ev_charging_streets_brandenburg = Table(
    'ev_charging_streets_brandenburg', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry),
    schema='model_draft'
)
class EvChargingStreetsBrandenburgSegmented(Base):
    """ORM mapping for model_draft.ev_charging_streets_brandenburg_segmented.

    Segmented streets for Brandenburg; same layout as the other
    *_segmented classes (endpoints + SRID-3035 LINESTRING + length).
    """
    __tablename__ = 'ev_charging_streets_brandenburg_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_brandenburg_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
class EvChargingStreetsDittwar(Base):
    """ORM mapping for model_draft.ev_charging_streets_dittwar.

    Street linestrings (SRID 3035) for Dittwar; same layout as the other
    ev_charging_streets_* classes.
    """
    __tablename__ = 'ev_charging_streets_dittwar'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_dittwar_id_seq'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
class EvChargingStreetsDittwarSegmented(Base):
    """ORM mapping for model_draft.ev_charging_streets_dittwar_segmented.

    Segmented streets for Dittwar; same layout as the other *_segmented
    classes (endpoints + SRID-3035 LINESTRING + length).
    """
    __tablename__ = 'ev_charging_streets_dittwar_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_dittwar_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
class EvChargingStreetsEssen(Base):
    """ORM mapping for model_draft.ev_charging_streets_essen.

    Street linestrings (SRID 3035) for Essen; same layout as the other
    ev_charging_streets_* classes.
    """
    __tablename__ = 'ev_charging_streets_essen'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_essen_id_seq'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
class EvChargingStreetsEssenSegmented(Base):
    """ORM mapping for model_draft.ev_charging_streets_essen_segmented.

    Segmented streets for Essen; same layout as the other *_segmented
    classes (endpoints + SRID-3035 LINESTRING + length).
    """
    __tablename__ = 'ev_charging_streets_essen_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_essen_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
class EvChargingStreetsFriedland(Base):
    """ORM mapping for model_draft.ev_charging_streets_friedland.

    Street linestrings (SRID 3035) for Friedland; same layout as the other
    ev_charging_streets_* classes.
    """
    __tablename__ = 'ev_charging_streets_friedland'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_friedland_id_seq'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
class EvChargingStreetsFriedlandSegmented(Base):
    """ORM mapping for model_draft.ev_charging_streets_friedland_segmented.

    Segmented streets for Friedland; same layout as the other *_segmented
    classes (endpoints + SRID-3035 LINESTRING + length).
    """
    __tablename__ = 'ev_charging_streets_friedland_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_friedland_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
class EvChargingStreetsGiessen(Base):
    """ORM mapping for model_draft.ev_charging_streets_giessen.

    Street linestrings (SRID 3035) for Giessen; the server-side sequence
    name retains the original 'ß' spelling and is double-quoted.
    """
    __tablename__ = 'ev_charging_streets_giessen'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.\"ev_charging_streets_gießen_id_seq\"'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
class EvChargingStreetsGiessenSegmented(Base):
    """ORM mapping for model_draft.ev_charging_streets_giessen_segmented.

    Segmented streets for Giessen; sequence name keeps the original 'ß'
    spelling. Same layout as the other *_segmented classes.
    """
    __tablename__ = 'ev_charging_streets_giessen_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.\"ev_charging_streets_gießen_segmented_id_seq\"'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
# Core Table (no primary key reflected) for the raw Hessen street dump.
t_ev_charging_streets_hessen = Table(
    'ev_charging_streets_hessen', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry),
    schema='model_draft'
)
class EvChargingStreetsHessenSegmented(Base):
    """Segmented street pieces for Hessen; maps
    model_draft.ev_charging_streets_hessen_segmented."""
    __tablename__ = 'ev_charging_streets_hessen_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_hessen_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
# Core Table (not ORM-mapped): model_draft.ev_charging_streets_saarland.
t_ev_charging_streets_saarland = Table(
    'ev_charging_streets_saarland', metadata,
    Column('id', BigInteger),
    Column('geom', Geometry),
    schema='model_draft'
)
class EvChargingStreetsSaarlandSegmented(Base):
    """Segmented street pieces for Saarland; maps
    model_draft.ev_charging_streets_saarland_segmented."""
    __tablename__ = 'ev_charging_streets_saarland_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_saarland_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
class EvChargingStreetsSpiekeroog(Base):
    """OSM street segments for Spiekeroog; maps
    model_draft.ev_charging_streets_spiekeroog."""
    __tablename__ = 'ev_charging_streets_spiekeroog'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_spiekeroog_id_seq'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
class EvChargingStreetsSpiekeroogSegmented(Base):
    """Segmented street pieces for Spiekeroog; maps
    model_draft.ev_charging_streets_spiekeroog_segmented."""
    __tablename__ = 'ev_charging_streets_spiekeroog_segmented'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streets_spiekeroog_segmented_id_seq'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Float(53))
# Core Table (not ORM-mapped): model_draft.ev_charging_streetsbrandenburg.
t_ev_charging_streetsbrandenburg = Table(
    'ev_charging_streetsbrandenburg', metadata,
    Column('osm_id', BigInteger),
    Column('geom', Geometry),
    schema='model_draft'
)
class EvChargingStreetseg(Base):
    """Generic street-segment table (endpoint coords + geometry);
    maps model_draft.ev_charging_streetsegs."""
    __tablename__ = 'ev_charging_streetsegs'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streetsegs_id_seq1'::regclass)"))
    x1 = Column(Float(53))
    y1 = Column(Float(53))
    x2 = Column(Float(53))
    y2 = Column(Float(53))
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Numeric)
class EvChargingStreetsegsBrandenburg(Base):
    """Street segments for Brandenburg; maps
    model_draft.ev_charging_streetsegs_brandenburg.

    NOTE(review): coordinates are Numeric here, while the sibling
    *_segmented tables use Float(53) — schema inconsistency in the DB.
    """
    __tablename__ = 'ev_charging_streetsegs_brandenburg'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_streetsegs_id_seq'::regclass)"))
    x1 = Column(Numeric)
    y1 = Column(Numeric)
    x2 = Column(Numeric)
    y2 = Column(Numeric)
    geom = Column(Geometry('LINESTRING', 3035))
    length = Column(Numeric)
class EvChargingTestInsertWitha(Base):
    """Test table (id + polygon geometry); maps
    model_draft.ev_charging_test_insert_withas."""
    __tablename__ = 'ev_charging_test_insert_withas'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_test_insert_withas_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON', 3035))
# Core Table (not ORM-mapped): model_draft.ev_charging_testpandasupload.
# The 'index' column and mixed-case names suggest a pandas DataFrame.to_sql
# upload — TODO confirm with data owner.
t_ev_charging_testpandasupload = Table(
    'ev_charging_testpandasupload', metadata,
    Column('index', BigInteger, index=True),
    Column('Name', Text),
    Column('Koordinaten', Text),
    schema='model_draft'
)
# Core Table (not ORM-mapped): model_draft.ev_charging_top_censusblocks.
# Census blocks with population and a lng/lat point alongside the polygon.
t_ev_charging_top_censusblocks = Table(
    'ev_charging_top_censusblocks', metadata,
    Column('cb_id', Integer),
    Column('district_name', String(50)),
    Column('population', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    Column('lng', Float(53)),
    Column('lat', Float(53)),
    schema='model_draft'
)
# Core Table (not ORM-mapped): Giessen variant of the census-block table,
# same column layout as ev_charging_top_censusblocks.
t_ev_charging_top_censusblocks_giessen = Table(
    'ev_charging_top_censusblocks_giessen', metadata,
    Column('cb_id', Integer),
    Column('district_name', String(50)),
    Column('population', Integer),
    Column('geom', Geometry('POLYGON', 3035)),
    Column('lng', Float(53)),
    Column('lat', Float(53)),
    schema='model_draft'
)
class EvChargingWinterbergStreet(Base):
    """OSM street segments for Winterberg; maps
    model_draft.ev_charging_winterberg_streets."""
    __tablename__ = 'ev_charging_winterberg_streets'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ev_charging_winterberg_streets_id_seq'::regclass)"))
    osm_id = Column(Integer)
    geom = Column(Geometry('LINESTRING', 3035), index=True)
    length = Column(Numeric)
    highway = Column(HSTORE(Text()))
# Core Table (not ORM-mapped): graph nodes (id + x/y coordinates).
t_ev_charging_x2test_graphtotable = Table(
    'ev_charging_x2test_graphtotable', metadata,
    Column('node_id', Integer),
    Column('x', Float(53)),
    Column('y', Float(53)),
    schema='model_draft'
)
# Core Table (not ORM-mapped): weighted graph edges (node pairs).
t_ev_charging_xtest_graphtotable = Table(
    'ev_charging_xtest_graphtotable', metadata,
    Column('node_id1', Integer),
    Column('node_id2', Integer),
    Column('weight', Float(53)),
    schema='model_draft'
)
# Core Table (not ORM-mapped): graph edges with betweenness, endpoint
# coordinates and a line geometry.
t_ev_charging_xxtestedges = Table(
    'ev_charging_xxtestedges', metadata,
    Column('node_id1', Integer),
    Column('node_id2', Integer),
    Column('edge_betweenness', Float(53)),
    Column('y1', Float(53)),
    Column('x2', Float(53)),
    Column('weight', Float(53)),
    Column('y2', Float(53)),
    Column('x1', Float(53)),
    Column('geom', Geometry('LINESTRING', 3035)),
    schema='model_draft'
)
# Core Table (not ORM-mapped): graph nodes with betweenness and point geometry.
t_ev_charging_xxtestnodes = Table(
    'ev_charging_xxtestnodes', metadata,
    Column('node_id', Integer),
    Column('x', Float(53)),
    Column('y', Float(53)),
    Column('node_betweenness', Float(53)),
    Column('geom', Geometry('POINT', 3035)),
    schema='model_draft'
)
# Core Table (not ORM-mapped): scratch/test table with generic var columns.
t_ev_charging_xxxx = Table(
    'ev_charging_xxxx', metadata,
    Column('node_id', Integer),
    Column('var2', Integer),
    Column('betweenness', Float(53)),
    Column('var3', Float(53)),
    Column('var4', Integer),
    schema='model_draft'
)
# Core Table (not ORM-mapped): scratch/test edge list (node id pairs).
t_ev_charging_xxxx_edges = Table(
    'ev_charging_xxxx_edges', metadata,
    Column('node_id1', Integer),
    Column('nodeid2', Integer),
    schema='model_draft'
)
class ExampleApiTable(Base):
    """Example table for API demonstrations; maps
    model_draft.example_api_table."""
    __tablename__ = 'example_api_table'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.example_api_table_id_seq'::regclass)"))
    name = Column(String(50))
    type = Column(String(20))
    capacity = Column(Numeric)
    lat = Column(Numeric)
    lon = Column(Numeric)
# Core Table (not ORM-mapped): single text column of results.
# NOTE: "feasability" is the spelling used in the database — do not "fix" it
# here or the mapping breaks.
t_feasability_check = Table(
    'feasability_check', metadata,
    Column('results', String),
    schema='model_draft'
)
# Core Table (not ORM-mapped): hydropower plants; the '_mview' suffix
# suggests a materialized view — confirm before writing to it.
t_fred_dp_hydropower_mview = Table(
    'fred_dp_hydropower_mview', metadata,
    Column('hydropower_id', BigInteger, unique=True),
    Column('postcode', Text),
    Column('city', Text),
    Column('capacity', Float(53)),
    Column('voltage_level', SmallInteger),
    Column('source', Text),
    Column('geom', Geometry('POINT', 3035), index=True),
    schema='model_draft'
)
# Core Table (not ORM-mapped): hydropower plants joined onto their nearest
# river (plant point, matched river line, and the distance between them).
t_fred_dp_hydropower_on_river_mview = Table(
    'fred_dp_hydropower_on_river_mview', metadata,
    Column('hydropower_id', BigInteger, unique=True),
    Column('postcode', Text),
    Column('city', Text),
    Column('capacity', Float(53)),
    Column('voltage_level', SmallInteger),
    Column('source', Text),
    Column('river_id', Integer),
    Column('riversystem_id', Integer),
    Column('gwk', String),
    Column('nam', String(100)),
    Column('geom', Geometry('POINT', 3035), index=True),
    Column('geom_line', Geometry('LINESTRING', 3035), index=True),
    Column('distance', Float(53)),
    schema='model_draft'
)
# Core Table (not ORM-mapped): river geometries keyed by river_id.
t_fred_dp_river_mview = Table(
    'fred_dp_river_mview', metadata,
    Column('river_id', Integer, unique=True),
    Column('riversystem_id', Integer),
    Column('gwk', String),
    Column('nam', String(100)),
    Column('geom', Geometry('MULTILINESTRING', 3035), index=True),
    schema='model_draft'
)
# Core Table (not ORM-mapped): aggregated river systems (count and total
# length of member rivers plus merged geometry).
t_fred_dp_river_systems_mview = Table(
    'fred_dp_river_systems_mview', metadata,
    Column('riversystem_id', Integer, unique=True),
    Column('riversystem_name', Text),
    Column('river_cnt', BigInteger),
    # 'river_lenght' is the column name as spelled in the database.
    Column('river_lenght', Float(53)),
    Column('geom', Geometry('MULTILINESTRING', 3035), index=True),
    schema='model_draft'
)
# Core Table (not ORM-mapped): rivers with hydropower summary
# (total capacity and plant count per river).
t_fred_dp_river_with_hydropower_mview = Table(
    'fred_dp_river_with_hydropower_mview', metadata,
    Column('river_id', Integer, unique=True),
    Column('capacity_sum', Float(53)),
    Column('count', BigInteger),
    Column('gwk', String),
    Column('riversystem_id', Integer),
    Column('riversystem_name', Text),
    Column('nam', String(100)),
    Column('geom', Geometry('MULTILINESTRING', 3035), index=True),
    schema='model_draft'
)
class IoerUrbanShareIndustrial(Base):
    """IOER raster of urban industrial share; maps
    model_draft.ioer_urban_share_industrial.

    The table-level check constraints mirror the PostGIS raster-column
    constraints (pixel size, alignment, SRID, tile extent, band layout).
    Three of the constraint strings in the generated source were corrupted
    (leading 'p' of "public." and the trailing ')' of the SQL call were
    truncated, and the coverage-extent WKB literal was split across two
    lines); they are restored here to the canonical form PostGIS emits.
    """
    __tablename__ = 'ioer_urban_share_industrial'
    __table_args__ = (
        # 100 m x 100 m pixels (negative Y scale: north-up raster).
        CheckConstraint('(public.st_scalex(rast))::numeric(16,10) = (100)::numeric(16,10)'),
        CheckConstraint("(public.st_scaley(rast))::numeric(16,10) = ('-100'::integer)::numeric(16,10)"),
        # Band is stored in-db (not out-db) as 32-bit float, one band.
        CheckConstraint("public._raster_constraint_out_db(rast) = '{f}'::boolean[]"),
        CheckConstraint("public._raster_constraint_pixel_types(rast) = '{32BF}'::text[]"),
        CheckConstraint('public.st_height(rast) = 500'),
        CheckConstraint('public.st_numbands(rast) = 1'),
        CheckConstraint('public.st_srid(rast) = 3035'),
        CheckConstraint('public.st_width(rast) = 500'),
        # Every tile must lie inside the overall coverage polygon (EWKB hex).
        CheckConstraint("public.st_coveredby(public.st_convexhull(rast), '0103000020DB0B000001000000430000000000000028E64E4100000000C83744410000000080844E4100000000C83744410000000080844E4100000000709944410000000080844E410000000018FB44410000000080844E4100000000C05C45410000000080844E410000000068BE45410000000080844E4100000000102046410000000080844E4100000000B88146410000000080844E410000000060E346410000000080844E4100000000084547410000000080844E4100000000B0A647410000000080844E4100000000580848410000000080844E4100000000006A48410000000080844E4100000000A8CB48410000000080844E4100000000502D49410000000080844E4100000000F88E49410000000080844E4100000000A0F049410000000080844E410000000048524A410000000080844E4100000000F0B34A410000000080844E410000000098154B410000000080844E410000000040774B410000000028E64E410000000040774B4100000000D0474F410000000040774B410000000078A94F410000000040774B4100000000900550410000000040774B4100000000643650410000000040774B4100000000386750410000000040774B41000000000C9850410000000040774B4100000000E0C850410000000040774B4100000000B4F950410000000040774B4100000000882A51410000000040774B41000000005C5B51410000000040774B4100000000308C51410000000040774B410000000004BD51410000000040774B4100000000D8ED51410000000040774B4100000000D8ED51410000000098154B4100000000D8ED514100000000F0B34A4100000000D8ED51410000000048524A4100000000D8ED514100000000A0F0494100000000D8ED514100000000F88E494100000000D8ED514100000000502D494100000000D8ED514100000000A8CB484100000000D8ED514100000000006A484100000000D8ED5141000000005808484100000000D8ED514100000000B0A6474100000000D8ED5141000000000845474100000000D8ED51410000000060E3464100000000D8ED514100000000B881464100000000D8ED5141000000001020464100000000D8ED51410000000068BE454100000000D8ED514100000000C05C454100000000D8ED51410000000018FB444100000000D8ED5141000000007099444100000000D8ED514100000000C83744410000000004BD514100000000C837444100000000308C514100000000C8374441000000005C5B514100000000C837444100000000882A514100000000C837444100000000B4F9504100000000C837444100000000E0C8504100000000C8374441000000000C98504100000000C8374441000000003867504100000000C8374441000000006436504100000000C8374441000000009005504100000000C83744410000000078A94F4100000000C837444100000000D0474F4100000000C83744410000000028E64E4100000000C8374441'::public.geometry)"),
        # Tiles are 500x500 cells of the declared reference raster coverage.
        CheckConstraint("public.st_iscoveragetile(rast, '0100000000000000000000594000000000000059C00000000080844E410000000040774B4100000000000000000000000000000000DB0B0000581B1C25'::public.raster, 500, 500)"),
        CheckConstraint("public.st_samealignment(rast, '0100000000000000000000594000000000000059C00000000080844E410000000040774B4100000000000000000000000000000000DB0B000001000100'::public.raster)"),
        {'schema': 'model_draft'}
    )
    rid = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ioer_urban_share_industrial_rid_seq'::regclass)"))
    rast = Column(Raster)
class IoerUrbanShareIndustrialCentroid(Base):
    """Per-raster-tile centroid points with the IOER share value; maps
    model_draft.ioer_urban_share_industrial_centroid."""
    __tablename__ = 'ioer_urban_share_industrial_centroid'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.ioer_urban_share_industrial_centroid_id_seq'::regclass)"))
    # rid references the raster tile id in ioer_urban_share_industrial
    # (no FK declared in the database).
    rid = Column(Integer)
    ioer_share = Column(Numeric)
    geom = Column(Geometry('POINT', 3035), index=True)
class LanduseCalc(Base):
    """Land-use aggregation results (per source/attribute count and area);
    maps model_draft.landuse_calc."""
    __tablename__ = 'landuse_calc'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True)
    source = Column(Text)
    attribute = Column(Text)
    count_int = Column(Integer)
    area_ha = Column(Numeric(15, 1))
class LisCharging(Base):
    """Charging locations (point + description); maps
    model_draft.lis_charging."""
    __tablename__ = 'lis_charging'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.lis_charging_id_seq'::regclass)"))
    geom = Column(Geometry('POINT', 3035), index=True)
    # Column name 'desc' is an SQL keyword in some contexts; keep as-is to
    # match the database.
    desc = Column(String)
class LisChargingGe(Base):
    """Charging stations with address and technical attributes; maps
    model_draft.lis_charging_ge."""
    __tablename__ = 'lis_charging_ge'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.lis_charging_ge_id_seq'::regclass)"))
    ge_id = Column(BigInteger)
    geom = Column(Geometry('POINT', 3035), index=True)
    name = Column(String)
    street = Column(String)
    postcode = Column(String)
    city = Column(String)
    count = Column(Integer)
    power = Column(Float(53))
    type = Column(String)
    desc = Column(String)
class LisChargingPoi(Base):
    """Points of interest relevant for charging-demand estimation
    (OSM amenities with potential/coverage fields); maps
    model_draft.lis_charging_poi."""
    __tablename__ = 'lis_charging_poi'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.lis_charging_poi_id_seq'::regclass)"))
    geom = Column(Geometry('POINT', 3035), index=True)
    osm_id = Column(BigInteger)
    amenity = Column(String)
    name = Column(String)
    category = Column(SmallInteger)
    grid_id = Column(BigInteger)
    potential = Column(Float(53))
    covered_by = Column(BigInteger)
    region = Column(BigInteger)
class LisChargingStreet(Base):
    """Street geometries (plus a string form of the geometry); maps
    model_draft.lis_charging_streets."""
    __tablename__ = 'lis_charging_streets'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.lis_charging_streets_id_seq'::regclass)"))
    geom = Column(Geometry('MULTILINESTRING', 3035), index=True)
    geom_string = Column(String)
    desc = Column(String)
class NepSupplyConvPowerplantNep2015(Base):
    """Conventional power plants from the NEP 2015 scenario data; maps
    model_draft.nep_supply_conv_powerplant_nep2015.

    rated_power_* columns hold capacities for the individual NEP scenario
    variants (a2025/b2025/b2035/c2025). Note the primary key is `gid`,
    declared last.
    """
    __tablename__ = 'nep_supply_conv_powerplant_nep2015'
    __table_args__ = {'schema': 'model_draft'}
    bnetza_id = Column(String)
    tso = Column(String)
    power_plant_name = Column(String)
    unit_name = Column(String)
    postcode = Column(String)
    state = Column(String)
    commissioning = Column(Integer)
    chp = Column(String)
    fuel = Column(String)
    rated_power = Column(Numeric)
    rated_power_a2025 = Column(Numeric)
    rated_power_b2025 = Column(Numeric)
    rated_power_b2035 = Column(Numeric)
    rated_power_c2025 = Column(Numeric)
    lat = Column(Float(53))
    lon = Column(Float(53))
    location_checked = Column(Text)
    # Geographic WGS84 point here (4326), unlike most tables in this module
    # which use 3035.
    geom = Column(Geometry('POINT', 4326))
    gid = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.nep_supply_conv_powerplant_nep2015_seq'::regclass)"))
class OepMetadataTableExampleV13(Base):
    """Example table for OEP metadata format v1.3; maps
    model_draft.oep_metadata_table_example_v13."""
    __tablename__ = 'oep_metadata_table_example_v13'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.oep_metadata_table_example_v13_id_seq'::regclass)"))
    year = Column(Integer)
    value = Column(Float(53))
    geom = Column(Geometry('POINT', 4326), index=True)
class OepMetadataTableExampleV14(Base):
    """Example table for OEP metadata format v1.4 (same layout as the v1.3
    example); maps model_draft.oep_metadata_table_example_v14."""
    __tablename__ = 'oep_metadata_table_example_v14'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.oep_metadata_table_example_v14_id_seq'::regclass)"))
    year = Column(Integer)
    value = Column(Float(53))
    geom = Column(Geometry('POINT', 4326), index=True)
# Core Table (not ORM-mapped): offshore feed-in time series per generator,
# stored as a float array per row.
t_offshore_feedin_foreign = Table(
    'offshore_feedin_foreign', metadata,
    Column('generator_id', BigInteger),
    Column('scn_name', String),
    Column('feedin', ARRAY(Float(precision=53))),
    schema='model_draft'
)
class OpenfredLocation(Base):
    """Unique weather-data locations (WGS84 points); maps
    model_draft.openfred_locations."""
    __tablename__ = 'openfred_locations'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.openfred_locations_id_seq'::regclass)"))
    point = Column(Geometry('POINT', 4326), unique=True)
class OpenfredTimestamp(Base):
    """Time intervals for OpenFRED series; maps model_draft.openfred_timestamps.

    The check constraint allows exactly one sentinel row (id = 1) with NULL
    start/stop; every other row must have both bounds set, and (start, stop)
    pairs are unique.
    """
    __tablename__ = 'openfred_timestamps'
    __table_args__ = (
        CheckConstraint('(id = 1) OR ((start IS NOT NULL) AND (stop IS NOT NULL))'),
        UniqueConstraint('start', 'stop'),
        {'schema': 'model_draft'}
    )
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.openfred_timestamps_id_seq'::regclass)"))
    start = Column(DateTime)
    stop = Column(DateTime)
class OpenfredVariable(Base):
    """Catalogue of OpenFRED variables (unique name plus metadata); maps
    model_draft.openfred_variables. Base class of OpenfredFlag
    (joined-table inheritance)."""
    __tablename__ = 'openfred_variables'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.openfred_variables_id_seq'::regclass)"))
    name = Column(String(255), nullable=False, unique=True)
    type = Column(String(37))
    aggregation = Column(String(255))
    description = Column(Text)
    standard_name = Column(String(255))
class OpenfredFlag(OpenfredVariable):
    """Flag-type OpenFRED variable; joined-table inheritance from
    OpenfredVariable — its primary key is a foreign key into
    openfred_variables. flag_ks/flag_vs are parallel arrays of flag keys
    and their string values."""
    __tablename__ = 'openfred_flags'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(ForeignKey('model_draft.openfred_variables.id'), primary_key=True)
    flag_ks = Column(ARRAY(Integer()), nullable=False)
    flag_vs = Column(ARRAY(String(length=37)), nullable=False)
class Openfredgrid(Base):
    """Grid cells (WGS84 multipolygons) for OpenFRED; maps
    model_draft.openfredgrid."""
    __tablename__ = 'openfredgrid'
    __table_args__ = {'schema': 'model_draft'}
    gid = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.openfredgrid_gid_seq'::regclass)"))
    geom = Column(Geometry('MULTIPOLYGON', 4326), index=True)
# Core Table (not ORM-mapped): OPSD hourly time series. One row per hour;
# load_<cc> columns are country loads (ISO-like country codes), and the
# solar_*/wind_* columns are German capacity/forecast/generation/profile
# figures, nationally and per TSO (50Hertz, Amprion, TenneT, TransnetBW).
t_opsd_hourly_timeseries = Table(
    'opsd_hourly_timeseries', metadata,
    Column('timestamp', DateTime),
    Column('load_at', Numeric(12, 2)),
    Column('load_ba', Numeric(12, 2)),
    Column('load_be', Numeric(12, 2)),
    Column('load_bg', Numeric(12, 2)),
    Column('load_ch', Numeric(12, 2)),
    Column('load_cs', Numeric(12, 2)),
    Column('load_cy', Numeric(12, 2)),
    Column('load_cz', Numeric(12, 2)),
    Column('load_de', Numeric(12, 2)),
    Column('load_dk', Numeric(12, 2)),
    Column('load_dkw', Numeric(12, 2)),
    Column('load_ee', Numeric(12, 2)),
    Column('load_es', Numeric(12, 2)),
    Column('load_fi', Numeric(12, 2)),
    Column('load_fr', Numeric(12, 2)),
    Column('load_gb', Numeric(12, 2)),
    Column('load_gr', Numeric(12, 2)),
    Column('load_hr', Numeric(12, 2)),
    Column('load_hu', Numeric(12, 2)),
    Column('load_ie', Numeric(12, 2)),
    Column('load_is', Numeric(12, 2)),
    Column('load_it', Numeric(12, 2)),
    Column('load_lt', Numeric(12, 2)),
    Column('load_lu', Numeric(12, 2)),
    Column('load_lv', Numeric(12, 2)),
    Column('load_me', Numeric(12, 2)),
    Column('load_mk', Numeric(12, 2)),
    Column('load_ni', Numeric(12, 2)),
    Column('load_nl', Numeric(12, 2)),
    Column('load_no', Numeric(12, 2)),
    Column('load_pl', Numeric(12, 2)),
    Column('load_pt', Numeric(12, 2)),
    Column('load_ro', Numeric(12, 2)),
    Column('load_rs', Numeric(12, 2)),
    Column('load_se', Numeric(12, 2)),
    Column('load_si', Numeric(12, 2)),
    Column('load_sk', Numeric(12, 2)),
    Column('load_uaw', Numeric(12, 2)),
    Column('solar_de_capacity', Numeric(12, 2)),
    Column('solar_de_forecast', Numeric(12, 2)),
    Column('solar_de_generation', Numeric(12, 2)),
    Column('solar_de_profile', Numeric(12, 2)),
    Column('solar_de50hertz_forecast', Numeric(12, 2)),
    Column('solar_de50hertz_generation', Numeric(12, 2)),
    Column('solar_deamprion_forecast', Numeric(12, 2)),
    Column('solar_deamprion_generation', Numeric(12, 2)),
    Column('solar_detennet_forecast', Numeric(12, 2)),
    Column('solar_detennet_generation', Numeric(12, 2)),
    Column('solar_detransnetbw_forecast', Numeric(12, 2)),
    Column('solar_detransnetbw_generation', Numeric(12, 2)),
    Column('wind_de_capacity', Numeric(12, 2)),
    Column('wind_de_forecast', Numeric(12, 2)),
    Column('wind_de_generation', Numeric(12, 2)),
    Column('wind_de_profile', Numeric(12, 2)),
    Column('wind_de50hertz_forecast', Numeric(12, 2)),
    Column('wind_de50hertz_generation', Numeric(12, 2)),
    Column('wind_deamprion_forecast', Numeric(12, 2)),
    Column('wind_deamprion_generation', Numeric(12, 2)),
    Column('wind_detennet_forecast', Numeric(12, 2)),
    Column('wind_detennet_generation', Numeric(12, 2)),
    Column('wind_detransnetbw_forecast', Numeric(12, 2)),
    Column('wind_detransnetbw_generation', Numeric(12, 2)),
    schema='model_draft'
)
# Core Table (not ORM-mapped): urban polygons buffered by 100 m
# (per the view name) — likely a materialized view.
t_osm_deu_polygon_urban_buffer100_mview = Table(
    'osm_deu_polygon_urban_buffer100_mview', metadata,
    Column('id', Integer, unique=True),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='model_draft'
)
# Core Table (not ORM-mapped): renewable feed-in array per generator id.
t_ren_feedin_by_gen_id = Table(
    'ren_feedin_by_gen_id', metadata,
    Column('generator_id', BigInteger),
    Column('feedin', ARRAY(Float(precision=53))),
    schema='model_draft'
)
# Core Table (not ORM-mapped): renewable feed-in for foreign generators;
# same layout as ren_feedin_by_gen_id.
t_ren_feedin_foreign = Table(
    'ren_feedin_foreign', metadata,
    Column('generator_id', BigInteger),
    Column('feedin', ARRAY(Float(precision=53))),
    schema='model_draft'
)
class RenpassGisEconomicScenario(Base):
    """Named renpass-GIS economic scenarios; maps
    model_draft.renpass_gis_economic_scenario."""
    __tablename__ = 'renpass_gis_economic_scenario'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.renpass_gis_economic_scenario_id_seq'::regclass)"))
    name = Column(String(250), nullable=False, unique=True)
class RenpassGisParameterRegion(Base):
    """Regions used as renpass-GIS parameters (polygon plus representative
    point); maps model_draft.renpass_gis_parameter_region."""
    __tablename__ = 'renpass_gis_parameter_region'
    __table_args__ = {'schema': 'model_draft'}
    gid = Column(Integer, primary_key=True)
    u_region_id = Column(String(14), nullable=False)
    stat_level = Column(Integer)
    geom = Column(Geometry('MULTIPOLYGON', 4326))
    geom_point = Column(Geometry('POINT', 4326))
class RenpassgisEconomicWeatherpointVoronoi(Base):
    """Voronoi polygons around weather points; maps
    model_draft.renpassgis_economic_weatherpoint_voronoi."""
    __tablename__ = 'renpassgis_economic_weatherpoint_voronoi'
    __table_args__ = {'schema': 'model_draft'}
    geom = Column(Geometry('POLYGON', 4326), index=True)
    id = Column(Integer, primary_key=True)
class RenpassgisEconomyClimatepointVoronoi(Base):
    """Voronoi polygons around climate points; maps
    model_draft.renpassgis_economy_climatepoint_voronoi."""
    __tablename__ = 'renpassgis_economy_climatepoint_voronoi'
    __table_args__ = {'schema': 'model_draft'}
    geom = Column(Geometry('POLYGON', 4326), index=True)
    id = Column(Integer, primary_key=True)
class RliResearchInstitute(Base):
    """Research institutes (address/contact metadata plus location point);
    maps model_draft.rli_research_institute."""
    __tablename__ = 'rli_research_institute'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.rli_research_institute_id_seq'::regclass)"))
    name = Column(Text)
    short_name = Column(Text)
    country_name = Column(Text)
    country_code = Column(Text)
    city = Column(Text)
    project_id = Column(Text)
    website = Column(Text)
    osm_id = Column(BigInteger)
    # Timezone-aware timestamp of the last update.
    updated = Column(DateTime(True))
    source = Column(Text)
    lon = Column(Float(53))
    lat = Column(Float(53))
    geom = Column(Geometry('POINT', 3035), index=True)
class ScenarioLog(Base):
    """Audit log for scenario data processing (which script touched which
    table, when, by whom, and how many rows); maps model_draft.scenario_log."""
    __tablename__ = 'scenario_log'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.scenario_log_id_seq'::regclass)"))
    project = Column(Text)
    version = Column(Text)
    # 'io' records the direction of the operation (input/output) — confirm
    # the exact vocabulary with the writing scripts.
    io = Column(Text)
    schema_name = Column(Text)
    table_name = Column(Text)
    script_name = Column(Text)
    entries = Column(Integer)
    comment = Column(Text)
    user_name = Column(Text)
    timestamp = Column(DateTime)
    meta_data = Column(Text)
# Core Table (not ORM-mapped): grid-expansion line projects for the
# NEP 2035 B2 scenario (German column names from the source data:
# startpunkt/endpunkt = start/end point, spannung = voltage).
t_scn_nep2035_b2_line = Table(
    'scn_nep2035_b2_line', metadata,
    Column('scn_name', String, nullable=False, server_default=text("'Status Quo'::character varying")),
    Column('project', String),
    Column('project_id', BigInteger),
    Column('startpunkt', String),
    Column('endpunkt', String),
    Column('spannung', BigInteger),
    Column('s_nom', Numeric, server_default=text("0")),
    Column('cables', BigInteger),
    Column('nova', String),
    Column('geom', Geometry('MULTILINESTRING', 4326)),
    schema='model_draft'
)
class SqlalchemyLinestring(Base):
    """Minimal example table: id + LINESTRING geometry (no SRID declared);
    maps model_draft.sqlalchemy_linestring."""
    __tablename__ = 'sqlalchemy_linestring'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.sqlalchemy_linestring_id_seq'::regclass)"))
    geom = Column(Geometry('LINESTRING'))
class SqlalchemyPoint(Base):
    """Minimal example table: id + POINT geometry (no SRID declared);
    maps model_draft.sqlalchemy_point."""
    __tablename__ = 'sqlalchemy_point'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.sqlalchemy_point_id_seq'::regclass)"))
    geom = Column(Geometry('POINT'))
class SqlalchemyPolygon(Base):
    """Minimal example table: id + POLYGON geometry (no SRID declared);
    maps model_draft.sqlalchemy_polygon."""
    __tablename__ = 'sqlalchemy_polygon'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.sqlalchemy_polygon_id_seq'::regclass)"))
    geom = Column(Geometry('POLYGON'))
class SupplyWriWorldpowerwatch(Base):
    """Power plants from the WRI Global Power Plant / PowerWatch dataset;
    maps model_draft.supply_wri_worldpowerwatch. Primary key is the
    dataset's own plant identifier (pw_idnr), not a sequence."""
    __tablename__ = 'supply_wri_worldpowerwatch'
    __table_args__ = {'schema': 'model_draft'}
    pw_idnr = Column(String, primary_key=True)
    geom = Column(Geometry('POINT', 4326))
    name = Column(String)
    capacity_mw = Column(Float(53))
    year_of_capacity_data = Column(String)
    annual_generation_gwh = Column(String)
    year_of_generation_data = Column(String)
    country = Column(String)
    owner = Column(String)
    source = Column(String)
    url = Column(String)
    latitude = Column(Float(53))
    longitude = Column(Float(53))
    fuel1 = Column(String)
    fuel2 = Column(String)
    fuel3 = Column(String)
    fuel4 = Column(String)
    # Unnamed trailing CSV columns from the import.
    field_17 = Column(String)
    field_18 = Column(String)
# Core Table (not ORM-mapped): temporary mapping of aggregated generators
# to weather cells, buses and power classes.
t_temp_supply_aggr_weather = Table(
    'temp_supply_aggr_weather', metadata,
    Column('aggr_id', BigInteger),
    Column('w_id', BigInteger),
    Column('scn_name', String),
    Column('bus', BigInteger),
    Column('power_class', BigInteger),
    Column('row_number', BigInteger),
    schema='model_draft'
)
class TemplateTable(Base):
    """Template table carrying both a polygon and a point geometry; maps
    model_draft.template_table."""
    __tablename__ = 'template_table'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.template_table_id_seq'::regclass)"))
    base_id = Column(Integer)
    area_type = Column(Text)
    geom_poly = Column(Geometry('POLYGON', 3035), index=True)
    geom = Column(Geometry('POINT', 3035), index=True)
# Core Table (not ORM-mapped): view over template_table exposing a single
# untyped geometry column.
t_template_table_mview = Table(
    'template_table_mview', metadata,
    Column('id', Integer),
    Column('base_id', Integer),
    Column('area_type', Text),
    Column('geom', Geometry, index=True),
    schema='model_draft'
)
# Core Table (not ORM-mapped): renewable power-plant register for the
# NEP 2035 test scenario. Mixes register attributes (EEG id, TSO/DSO,
# address), grid allocation (subst_id, voltage_level, mvlv_subst_id) and
# REA relocation results (rea_* columns with re-assigned geometries).
t_test_ego_supply_res_powerplant_nep2035_mview = Table(
    'test_ego_supply_res_powerplant_nep2035_mview', metadata,
    Column('version', Text),
    Column('id', BigInteger),
    Column('start_up_date', DateTime),
    Column('electrical_capacity', Numeric),
    Column('generation_type', Text),
    Column('generation_subtype', String),
    Column('thermal_capacity', Numeric),
    Column('city', String),
    Column('postcode', String),
    Column('address', String),
    Column('lon', Numeric),
    Column('lat', Numeric),
    Column('gps_accuracy', String),
    Column('validation', String),
    Column('notification_reason', String),
    Column('eeg_id', String),
    Column('tso', Float(53)),
    Column('tso_eic', String),
    Column('dso_id', String),
    Column('dso', String),
    Column('voltage_level_var', String),
    Column('network_node', String),
    Column('power_plant_id', String),
    Column('source', String),
    Column('comment', String),
    Column('geom', Geometry('POINT', 4326)),
    Column('subst_id', BigInteger),
    Column('otg_id', BigInteger),
    Column('un_id', BigInteger),
    Column('voltage_level', SmallInteger),
    Column('la_id', Integer),
    Column('mvlv_subst_id', Integer),
    Column('rea_sort', Integer),
    Column('rea_flag', String),
    Column('rea_geom_line', Geometry('LINESTRING', 3035)),
    Column('rea_geom_new', Geometry('POINT', 3035)),
    Column('preversion', Text),
    Column('flag', String),
    Column('scenario', String),
    Column('nuts', String),
    schema='model_draft'
)
class TestTable(Base):
    """Simple test table (year/value/point); maps model_draft.test_table."""
    __tablename__ = 'test_table'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.test_table_id_seq'::regclass)"))
    year = Column(Integer)
    value = Column(Float(53))
    geom = Column(Geometry('POINT', 4326))
# Core Table (not ORM-mapped): OSM ways for substations, with the raw tag
# list kept as a text array.
t_way_substations_test = Table(
    'way_substations_test', metadata,
    Column('id', BigInteger),
    Column('tags', ARRAY(Text())),
    Column('geom', Geometry),
    schema='model_draft'
)
class WnAbwBkgVg2504Kr(Base):
    """BKG VG250 administrative level 4 (Kreise/districts) snapshot; maps
    model_draft.wn_abw_bkg_vg250_4_krs.

    Composite primary key (reference_date, id) versions the geometry by
    snapshot date. Column names follow the official VG250 attribute schema
    (rs/ags keys, nuts code, etc.). Geometry SRID is 31467 (Gauss-Krueger),
    unlike most tables here.
    """
    __tablename__ = 'wn_abw_bkg_vg250_4_krs'
    __table_args__ = {'schema': 'model_draft'}
    reference_date = Column(Date, primary_key=True, nullable=False)
    id = Column(Integer, primary_key=True, nullable=False, server_default=text("nextval('model_draft.wn_abw_bkg_vg250_4_krs_id_seq'::regclass)"))
    ade = Column(Float(53))
    gf = Column(Float(53))
    bsg = Column(Float(53))
    rs = Column(String(12))
    ags = Column(String(12))
    sdv_rs = Column(String(12))
    gen = Column(String(50))
    bez = Column(String(50))
    ibz = Column(Float(53))
    bem = Column(String(75))
    nbd = Column(String(4))
    sn_l = Column(String(2))
    sn_r = Column(String(1))
    sn_k = Column(String(2))
    sn_v1 = Column(String(2))
    sn_v2 = Column(String(2))
    sn_g = Column(String(3))
    fk_s3 = Column(String(2))
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    wsk = Column(Date)
    debkg_id = Column(String(16))
    geom = Column(Geometry('MULTIPOLYGON', 31467), index=True)
class WnAbwBkgVg2506Gem(Base):
    """BKG VG250 administrative level 6 (Gemeinden/municipalities) snapshot;
    maps model_draft.wn_abw_bkg_vg250_6_gem. Same layout and composite
    (reference_date, id) key as the Kreis-level table."""
    __tablename__ = 'wn_abw_bkg_vg250_6_gem'
    __table_args__ = {'schema': 'model_draft'}
    reference_date = Column(Date, primary_key=True, nullable=False)
    id = Column(Integer, primary_key=True, nullable=False, server_default=text("nextval('model_draft.wn_abw_bkg_vg250_6_gem_id_seq'::regclass)"))
    ade = Column(Float(53))
    gf = Column(Float(53))
    bsg = Column(Float(53))
    rs = Column(String(12))
    ags = Column(String(12))
    sdv_rs = Column(String(12))
    gen = Column(String(50))
    bez = Column(String(50))
    ibz = Column(Float(53))
    bem = Column(String(75))
    nbd = Column(String(4))
    sn_l = Column(String(2))
    sn_r = Column(String(1))
    sn_k = Column(String(2))
    sn_v1 = Column(String(2))
    sn_v2 = Column(String(2))
    sn_g = Column(String(3))
    fk_s3 = Column(String(2))
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    wsk = Column(Date)
    debkg_id = Column(String(16))
    geom = Column(Geometry('MULTIPOLYGON', 31467), index=True)
class WnAbwDemandElT(Base):
    """Electrical demand time series per load (active/reactive set points as
    float arrays); maps model_draft.wn_abw_demand_el_ts."""
    __tablename__ = 'wn_abw_demand_el_ts'
    __table_args__ = {'schema': 'model_draft'}
    load_id = Column(BigInteger, primary_key=True)
    bus = Column(BigInteger)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    subst_id = Column(Integer)
class WnAbwEgoDemandHvLargescaleconsumer(Base):
    """High-voltage large-scale consumers (site polygon, centre point,
    consumption and peak load); maps
    model_draft.wn_abw_ego_demand_hv_largescaleconsumer."""
    __tablename__ = 'wn_abw_ego_demand_hv_largescaleconsumer'
    __table_args__ = {'schema': 'model_draft'}
    polygon_id = Column(Integer, primary_key=True)
    area_ha = Column(Float(53))
    powerplant_id = Column(Integer)
    voltage_level = Column(SmallInteger)
    subst_id = Column(Integer)
    otg_id = Column(Integer)
    un_id = Column(Integer)
    consumption = Column(Numeric)
    peak_load = Column(Numeric)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
    geom_centre = Column(Geometry('POINT', 3035), index=True)
    hvmv_subst_id = Column(Integer)
class WnAbwEgoDpConvPowerplant(Base):
    """Conventional power-plant register (eGo data-processing snapshot);
    maps model_draft.wn_abw_ego_dp_conv_powerplant.

    Composite primary key (version, gid, scenario); `scenario` defaults to
    'none' on the database side.
    """
    __tablename__ = 'wn_abw_ego_dp_conv_powerplant'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, primary_key=True, nullable=False)
    gid = Column(Integer, primary_key=True, nullable=False)
    bnetza_id = Column(Text)
    company = Column(Text)
    name = Column(Text)
    postcode = Column(Text)
    city = Column(Text)
    street = Column(Text)
    state = Column(Text)
    block = Column(Text)
    # Raw commissioning string as delivered; `commissioned` is the parsed
    # numeric year value.
    commissioned_original = Column(Text)
    commissioned = Column(Float(53))
    retrofit = Column(Float(53))
    shutdown = Column(Float(53))
    status = Column(Text)
    fuel = Column(Text)
    technology = Column(Text)
    type = Column(Text)
    eeg = Column(Text)
    chp = Column(Text)
    capacity = Column(Float(53))
    capacity_uba = Column(Float(53))
    chp_capacity_uba = Column(Float(53))
    efficiency_data = Column(Float(53))
    efficiency_estimate = Column(Float(53))
    network_node = Column(Text)
    voltage = Column(Text)
    network_operator = Column(Text)
    name_uba = Column(Text)
    lat = Column(Float(53))
    lon = Column(Float(53))
    comment = Column(Text)
    geom = Column(Geometry('POINT', 4326), index=True)
    voltage_level = Column(SmallInteger)
    subst_id = Column(BigInteger)
    otg_id = Column(BigInteger)
    un_id = Column(BigInteger)
    preversion = Column(Text)
    la_id = Column(Integer)
    scenario = Column(Text, primary_key=True, nullable=False, server_default=text("'none'::text"))
    flag = Column(Text)
    nuts = Column(String)
class WnAbwEgoDpHvmvSubstation(Base):
    """Declarative mapping of table 'model_draft.wn_abw_ego_dp_hvmv_substation'."""
    __tablename__ = 'wn_abw_ego_dp_hvmv_substation'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, nullable=False)
    subst_id = Column(Integer, primary_key=True)
    lon = Column(Float(53))
    lat = Column(Float(53))
    # Two geometry representations: WGS84 point plus an untyped polygon,
    # and an indexed SRID-3035 point ('geom') below.
    point = Column(Geometry('POINT', 4326))
    polygon = Column(Geometry)
    voltage = Column(Text)
    power_type = Column(Text)
    substation = Column(Text)
    osm_id = Column(Text)
    osm_www = Column(Text)
    frequency = Column(Text)
    subst_name = Column(Text)
    ref = Column(Text)
    operator = Column(Text)
    dbahn = Column(Text)
    status = Column(SmallInteger)
    otg_id = Column(BigInteger)
    ags_0 = Column(Text)
    geom = Column(Geometry('POINT', 3035), index=True)
class WnAbwEgoDpLoadarea(Base):
    """Declarative mapping of table 'model_draft.wn_abw_ego_dp_loadarea'.

    Composite primary key: (version, id).
    """
    __tablename__ = 'wn_abw_ego_dp_loadarea'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, primary_key=True, nullable=False)
    id = Column(Integer, primary_key=True, nullable=False)
    subst_id = Column(Integer)
    area_ha = Column(Numeric)
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    otg_id = Column(Integer)
    un_id = Column(Integer)
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Numeric)
    ioer_sum = Column(Numeric)
    ioer_count = Column(Integer)
    ioer_density = Column(Numeric)
    # Per-sector area / share / count / consumption / peak-load breakdowns.
    sector_area_residential = Column(Numeric)
    sector_area_retail = Column(Numeric)
    sector_area_industrial = Column(Numeric)
    sector_area_agricultural = Column(Numeric)
    sector_area_sum = Column(Numeric)
    sector_share_residential = Column(Numeric)
    sector_share_retail = Column(Numeric)
    sector_share_industrial = Column(Numeric)
    sector_share_agricultural = Column(Numeric)
    sector_share_sum = Column(Numeric)
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    sector_consumption_residential = Column(Float(53))
    sector_consumption_retail = Column(Float(53))
    sector_consumption_industrial = Column(Float(53))
    sector_consumption_agricultural = Column(Float(53))
    sector_consumption_sum = Column(Float(53))
    sector_peakload_retail = Column(Float(53))
    sector_peakload_residential = Column(Float(53))
    sector_peakload_industrial = Column(Float(53))
    sector_peakload_agricultural = Column(Float(53))
    geom_centroid = Column(Geometry('POINT', 3035))
    geom_surfacepoint = Column(Geometry('POINT', 3035))
    geom_centre = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035), index=True)
class WnAbwEgoDpMvGriddistrict(Base):
    """Declarative mapping of table 'model_draft.wn_abw_ego_dp_mv_griddistrict'."""
    __tablename__ = 'wn_abw_ego_dp_mv_griddistrict'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, nullable=False)
    subst_id = Column(Integer, primary_key=True)
    subst_sum = Column(Integer)
    type1 = Column(Integer)
    type1_cnt = Column(Integer)
    type2 = Column(Integer)
    type2_cnt = Column(Integer)
    type3 = Column(Integer)
    type3_cnt = Column(Integer)
    # Single-character grouping code (DB column is CHAR(1)).
    group = Column(CHAR(1))
    gem = Column(Integer)
    gem_clean = Column(Integer)
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Numeric)
    population_density = Column(Numeric)
    la_count = Column(Integer)
    area_ha = Column(Numeric)
    la_area = Column(Numeric(10, 1))
    free_area = Column(Numeric(10, 1))
    area_share = Column(Numeric(4, 1))
    consumption = Column(Numeric)
    consumption_per_area = Column(Numeric)
    dea_cnt = Column(Integer)
    dea_capacity = Column(Numeric)
    lv_dea_cnt = Column(Integer)
    lv_dea_capacity = Column(Numeric)
    mv_dea_cnt = Column(Integer)
    mv_dea_capacity = Column(Numeric)
    geom_type = Column(Text)
    geom = Column(Geometry('MULTIPOLYGON', 3035), index=True)
    consumption_total = Column(Integer)
class WnAbwEgoDpResPowerplant(Base):
    """Declarative mapping of table 'model_draft.wn_abw_ego_dp_res_powerplant'.

    Composite primary key: (version, id, scenario).
    """
    __tablename__ = 'wn_abw_ego_dp_res_powerplant'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, primary_key=True, nullable=False)
    id = Column(BigInteger, primary_key=True, nullable=False)
    start_up_date = Column(DateTime)
    electrical_capacity = Column(Numeric)
    generation_type = Column(Text)
    generation_subtype = Column(String)
    thermal_capacity = Column(Numeric)
    city = Column(String)
    postcode = Column(String)
    address = Column(String)
    lon = Column(Numeric)
    lat = Column(Numeric)
    gps_accuracy = Column(String)
    validation = Column(String)
    notification_reason = Column(String)
    eeg_id = Column(String)
    tso = Column(Float(53))
    tso_eic = Column(String)
    dso_id = Column(String)
    dso = Column(String)
    voltage_level_var = Column(String)
    network_node = Column(String)
    power_plant_id = Column(String)
    source = Column(String)
    comment = Column(String)
    geom = Column(Geometry('POINT', 4326), index=True)
    subst_id = Column(BigInteger)
    otg_id = Column(BigInteger)
    un_id = Column(BigInteger)
    voltage_level = Column(SmallInteger)
    la_id = Column(Integer)
    mvlv_subst_id = Column(Integer)
    rea_sort = Column(Integer)
    rea_flag = Column(String)
    rea_geom_line = Column(Geometry('LINESTRING', 3035))
    rea_geom_new = Column(Geometry('POINT', 3035), index=True)
    preversion = Column(Text)
    flag = Column(String)
    # Server-side default is the literal string 'none'.
    scenario = Column(String, primary_key=True, nullable=False, server_default=text("'none'::character varying"))
    nuts = Column(String)
class WnAbwEgoPfHvBus(Base):
    """Declarative mapping of table 'model_draft.wn_abw_ego_pf_hv_bus'.

    Composite primary key: (version, scn_name, bus_id).
    """
    __tablename__ = 'wn_abw_ego_pf_hv_bus'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, primary_key=True, nullable=False)
    # Scenario name defaults to 'Status Quo' on the server side.
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    bus_id = Column(BigInteger, primary_key=True, nullable=False)
    v_nom = Column(Float(53))
    current_type = Column(Text, server_default=text("'AC'::text"))
    v_mag_pu_min = Column(Float(53), server_default=text("0"))
    v_mag_pu_max = Column(Float(53))
    geom = Column(Geometry('POINT', 4326), index=True)
    hvmv_subst_id = Column(Integer)
    region_bus = Column(Boolean, server_default=text("false"))
class WnAbwEgoPfHvLine(Base):
    """Declarative mapping of table 'model_draft.wn_abw_ego_pf_hv_line'.

    Composite primary key: (version, scn_name, line_id).
    """
    __tablename__ = 'wn_abw_ego_pf_hv_line'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, primary_key=True, nullable=False)
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    line_id = Column(BigInteger, primary_key=True, nullable=False)
    # bus0/bus1 are the endpoint bus ids (no FK constraint declared here).
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Numeric, server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    capital_cost = Column(Float(53))
    length = Column(Float(53))
    cables = Column(Integer)
    frequency = Column(Numeric)
    terrain_factor = Column(Float(53), server_default=text("1"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class WnAbwEgoPfHvTransformer(Base):
    """Declarative mapping of table 'model_draft.wn_abw_ego_pf_hv_transformer'.

    Composite primary key: (version, scn_name, trafo_id).
    """
    __tablename__ = 'wn_abw_ego_pf_hv_transformer'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, primary_key=True, nullable=False)
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    trafo_id = Column(BigInteger, primary_key=True, nullable=False)
    bus0 = Column(BigInteger)
    bus1 = Column(BigInteger)
    x = Column(Numeric, server_default=text("0"))
    r = Column(Numeric, server_default=text("0"))
    g = Column(Numeric, server_default=text("0"))
    b = Column(Numeric, server_default=text("0"))
    s_nom = Column(Float(53), server_default=text("0"))
    s_nom_extendable = Column(Boolean, server_default=text("false"))
    s_nom_min = Column(Float(53), server_default=text("0"))
    s_nom_max = Column(Float(53))
    tap_ratio = Column(Float(53))
    phase_shift = Column(Float(53))
    capital_cost = Column(Float(53), server_default=text("0"))
    geom = Column(Geometry('MULTILINESTRING', 4326))
    topo = Column(Geometry('LINESTRING', 4326))
class WnAbwLauProtectedLandscapeElement(Base):
    """Declarative mapping of table 'model_draft.wn_abw_lau_protected_landscape_elements'."""
    __tablename__ = 'wn_abw_lau_protected_landscape_elements'
    __table_args__ = {'schema': 'model_draft'}
    # Primary key is filled from a server-side sequence.
    id = Column(Integer, primary_key=True, server_default=text("nextval('model_draft.wn_abw_lau_protected_landscape_elements_id_seq'::regclass)"))
    geom = Column(Geometry('MULTIPOLYGON', 3035))
class WnAbwPowerplantT(Base):
    """Declarative mapping of table 'model_draft.wn_abw_powerplant_ts'."""
    __tablename__ = 'wn_abw_powerplant_ts'
    __table_args__ = {'schema': 'model_draft'}
    generator_id = Column(BigInteger, primary_key=True)
    bus = Column(BigInteger)
    dispatch = Column(Text)
    control = Column(Text)
    p_nom = Column(Float(53), server_default=text("0"))
    generation_type = Column(Text)
    # Time series stored as a double-precision array column.
    p_set = Column(ARRAY(Float(precision=53)))
    subst_id = Column(Integer)
# Core Table object (no primary-key column declared) for
# 'model_draft.wn_abw_results_line'.
t_wn_abw_results_line = Table(
    'wn_abw_results_line', metadata,
    Column('line_id', BigInteger),
    Column('loading_mean', Float(53)),
    Column('loading_max', Float(53)),
    schema='model_draft'
)
class WnAbwResultsLineEgo(Base):
    """Declarative mapping of table 'model_draft.wn_abw_results_line_ego'."""
    __tablename__ = 'wn_abw_results_line_ego'
    __table_args__ = {'schema': 'model_draft'}
    line_id = Column(BigInteger, primary_key=True)
    loading_max = Column(Float(53), server_default=text("0"))
    loading_mean = Column(Float(53), server_default=text("0"))
class WnAbwStatsResPowerplantsPerMvgd(Base):
    """Declarative mapping of table 'model_draft.wn_abw_stats_res_powerplants_per_mvgd'.

    Composite primary key: (subst_id, generation_type).
    """
    __tablename__ = 'wn_abw_stats_res_powerplants_per_mvgd'
    __table_args__ = {'schema': 'model_draft'}
    subst_id = Column(BigInteger, primary_key=True, nullable=False)
    generation_type = Column(Text, primary_key=True, nullable=False)
    count = Column(Integer)
    capacity_mw = Column(Numeric)
    annual_energy_gwh = Column(Numeric)
class EgoDemandPfLoadSingle(Base):
    """Declarative mapping of table 'model_draft.ego_demand_pf_load_single'."""
    __tablename__ = 'ego_demand_pf_load_single'
    __table_args__ = (
        # Composite FK: (bus, scn_name) references ego_grid_pf_hv_bus.
        ForeignKeyConstraint(['bus', 'scn_name'], ['model_draft.ego_grid_pf_hv_bus.bus_id', 'model_draft.ego_grid_pf_hv_bus.scn_name']),
        {'schema': 'model_draft'}
    )
    scn_name = Column(String, nullable=False, server_default=text("'Status Quo'::character varying"))
    load_id = Column(BigInteger, primary_key=True)
    bus = Column(BigInteger)
    # Server-side default -1 (loads drawn as negative injections).
    sign = Column(Float(53), server_default=text("'-1'::integer"))
    e_annual = Column(Float(53))
    ego_grid_pf_hv_bus = relationship('EgoGridPfHvBus')
class EgoGridPfHvBusVMagSet(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_bus_v_mag_set'.

    Composite primary key: (scn_name, bus_id, temp_id).
    """
    __tablename__ = 'ego_grid_pf_hv_bus_v_mag_set'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    bus_id = Column(BigInteger, primary_key=True, nullable=False)
    temp_id = Column(ForeignKey('model_draft.ego_grid_pf_hv_temp_resolution.temp_id'), primary_key=True, nullable=False)
    v_mag_pu_set = Column(ARRAY(Float(precision=53)))
    temp = relationship('EgoGridPfHvTempResolution')
class EgoGridPfHvExtensionGenerator(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_extension_generator'.

    Composite primary key: (scn_name, generator_id).
    """
    __tablename__ = 'ego_grid_pf_hv_extension_generator'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False)
    generator_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text, server_default=text("'flexible'::text"))
    control = Column(Text, server_default=text("'PQ'::text"))
    p_nom = Column(Float(53), server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    p_min_pu_fixed = Column(Float(53), server_default=text("0"))
    p_max_pu_fixed = Column(Float(53), server_default=text("1"))
    sign = Column(Float(53), server_default=text("1"))
    # Plain BigInteger here — unlike EgoGridPfHvGenerator.source, no FK declared.
    source = Column(BigInteger)
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
class EgoGridPfHvExtensionGeneratorPqSet(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_extension_generator_pq_set'.

    Composite primary key: (version, scn_name, generator_id, temp_id).
    """
    __tablename__ = 'ego_grid_pf_hv_extension_generator_pq_set'
    __table_args__ = {'schema': 'model_draft'}
    version = Column(Text, primary_key=True, nullable=False)
    scn_name = Column(String, primary_key=True, nullable=False)
    generator_id = Column(BigInteger, primary_key=True, nullable=False)
    temp_id = Column(Integer, primary_key=True, nullable=False)
    # Time-series columns stored as double-precision arrays.
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p_min_pu = Column(ARRAY(Float(precision=53)))
    p_max_pu = Column(ARRAY(Float(precision=53)))
class EgoGridPfHvExtensionLoadPqSet(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_extension_load_pq_set'.

    Composite primary key: (scn_name, load_id, temp_id).
    """
    __tablename__ = 'ego_grid_pf_hv_extension_load_pq_set'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    load_id = Column(BigInteger, primary_key=True, nullable=False)
    temp_id = Column(ForeignKey('model_draft.ego_grid_pf_hv_temp_resolution.temp_id'), primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    temp = relationship('EgoGridPfHvTempResolution')
class EgoGridPfHvGenerator(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_generator'.

    Composite primary key: (scn_name, generator_id).
    """
    __tablename__ = 'ego_grid_pf_hv_generator'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    generator_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text, server_default=text("'flexible'::text"))
    control = Column(Text, server_default=text("'PQ'::text"))
    p_nom = Column(Float(53), server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    p_min_pu_fixed = Column(Float(53), server_default=text("0"))
    p_max_pu_fixed = Column(Float(53), server_default=text("1"))
    sign = Column(Float(53), server_default=text("1"))
    source = Column(ForeignKey('model_draft.ego_grid_pf_hv_source.source_id'), index=True)
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
    ego_grid_pf_hv_source = relationship('EgoGridPfHvSource')
class EgoGridPfHvGeneratorPqSet(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_generator_pq_set'.

    Composite primary key: (scn_name, generator_id, temp_id).
    """
    __tablename__ = 'ego_grid_pf_hv_generator_pq_set'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    generator_id = Column(BigInteger, primary_key=True, nullable=False)
    temp_id = Column(ForeignKey('model_draft.ego_grid_pf_hv_temp_resolution.temp_id'), primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p_min_pu = Column(ARRAY(Float(precision=53)))
    p_max_pu = Column(ARRAY(Float(precision=53)))
    temp = relationship('EgoGridPfHvTempResolution')
class EgoGridPfHvLoadPqSet(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_load_pq_set'.

    Composite primary key: (scn_name, load_id, temp_id).
    """
    __tablename__ = 'ego_grid_pf_hv_load_pq_set'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    load_id = Column(BigInteger, primary_key=True, nullable=False)
    temp_id = Column(ForeignKey('model_draft.ego_grid_pf_hv_temp_resolution.temp_id'), primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    temp = relationship('EgoGridPfHvTempResolution')
class EgoGridPfHvStorage(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_storage'.

    Composite primary key: (scn_name, storage_id).
    """
    __tablename__ = 'ego_grid_pf_hv_storage'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    storage_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text, server_default=text("'flexible'::text"))
    control = Column(Text, server_default=text("'PQ'::text"))
    p_nom = Column(Float(53), server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    p_min_pu_fixed = Column(Float(53), server_default=text("0"))
    p_max_pu_fixed = Column(Float(53), server_default=text("1"))
    sign = Column(Float(53), server_default=text("1"))
    source = Column(ForeignKey('model_draft.ego_grid_pf_hv_source.source_id'), index=True)
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
    # Storage-specific state-of-charge and efficiency parameters.
    soc_initial = Column(Float(53))
    soc_cyclic = Column(Boolean, server_default=text("false"))
    max_hours = Column(Float(53))
    efficiency_store = Column(Float(53))
    efficiency_dispatch = Column(Float(53))
    standing_loss = Column(Float(53))
    ego_grid_pf_hv_source = relationship('EgoGridPfHvSource')
class EgoGridPfHvStoragePqSet(Base):
    """Declarative mapping of table 'model_draft.ego_grid_pf_hv_storage_pq_set'.

    Composite primary key: (scn_name, storage_id, temp_id).
    """
    __tablename__ = 'ego_grid_pf_hv_storage_pq_set'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    storage_id = Column(BigInteger, primary_key=True, nullable=False)
    temp_id = Column(ForeignKey('model_draft.ego_grid_pf_hv_temp_resolution.temp_id'), primary_key=True, nullable=False)
    p_set = Column(ARRAY(Float(precision=53)))
    q_set = Column(ARRAY(Float(precision=53)))
    p_min_pu = Column(ARRAY(Float(precision=53)))
    p_max_pu = Column(ARRAY(Float(precision=53)))
    soc_set = Column(ARRAY(Float(precision=53)))
    inflow = Column(ARRAY(Float(precision=53)))
    temp = relationship('EgoGridPfHvTempResolution')
class EgoSupplyPfGeneratorSingle(Base):
    """Declarative mapping of table 'model_draft.ego_supply_pf_generator_single'.

    Composite primary key: (scn_name, generator_id).
    """
    __tablename__ = 'ego_supply_pf_generator_single'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    generator_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text, server_default=text("'flexible'::text"))
    control = Column(Text, server_default=text("'PQ'::text"))
    p_nom = Column(Float(53), server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    p_min_pu_fixed = Column(Float(53), server_default=text("0"))
    p_max_pu_fixed = Column(Float(53), server_default=text("1"))
    sign = Column(Float(53), server_default=text("1"))
    source = Column(ForeignKey('model_draft.ego_grid_pf_hv_source.source_id'))
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
    w_id = Column(BigInteger)
    aggr_id = Column(BigInteger)
    power_class = Column(BigInteger)
    voltage_level = Column(SmallInteger)
    ego_grid_pf_hv_source = relationship('EgoGridPfHvSource')
class EgoSupplyPfStorageSingle(Base):
    """Declarative mapping of table 'model_draft.ego_supply_pf_storage_single'.

    Composite primary key: (scn_name, storage_id).
    """
    __tablename__ = 'ego_supply_pf_storage_single'
    __table_args__ = {'schema': 'model_draft'}
    scn_name = Column(String, primary_key=True, nullable=False, server_default=text("'Status Quo'::character varying"))
    storage_id = Column(BigInteger, primary_key=True, nullable=False)
    bus = Column(BigInteger)
    dispatch = Column(Text, server_default=text("'flexible'::text"))
    control = Column(Text, server_default=text("'PQ'::text"))
    p_nom = Column(Float(53), server_default=text("0"))
    p_nom_extendable = Column(Boolean, server_default=text("false"))
    p_nom_min = Column(Float(53), server_default=text("0"))
    p_nom_max = Column(Float(53))
    # Server-side default -1 (storages may draw power, i.e. charge).
    p_min_pu_fixed = Column(Float(53), server_default=text("'-1'::integer"))
    p_max_pu_fixed = Column(Float(53), server_default=text("1"))
    sign = Column(Float(53), server_default=text("1"))
    source = Column(ForeignKey('model_draft.ego_grid_pf_hv_source.source_id'))
    marginal_cost = Column(Float(53))
    capital_cost = Column(Float(53))
    efficiency = Column(Float(53))
    soc_initial = Column(Float(53))
    # Defaults to true here, unlike EgoGridPfHvStorage.soc_cyclic (false).
    soc_cyclic = Column(Boolean, server_default=text("true"))
    max_hours = Column(Float(53))
    efficiency_store = Column(Float(53))
    efficiency_dispatch = Column(Float(53))
    standing_loss = Column(Float(53))
    aggr_id = Column(Integer)
    ego_grid_pf_hv_source = relationship('EgoGridPfHvSource')
class OpenfredValue(Base):
    """Declarative mapping of table 'model_draft.openfred_values'."""
    __tablename__ = 'openfred_values'
    __table_args__ = {'schema': 'model_draft'}
    # Primary key is filled from a server-side sequence.
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.openfred_values_id_seq'::regclass)"))
    v = Column(Float(53), nullable=False)
    altitude = Column(Float(53))
    timestamp_id = Column(ForeignKey('model_draft.openfred_timestamps.id'), nullable=False, server_default=text("1"))
    location_id = Column(ForeignKey('model_draft.openfred_locations.id'), nullable=False)
    variable_id = Column(ForeignKey('model_draft.openfred_variables.id'), nullable=False)
    location = relationship('OpenfredLocation')
    timestamp = relationship('OpenfredTimestamp')
    variable = relationship('OpenfredVariable')
class RenpassGisEconomicLinearTransformer(Base):
    """Declarative mapping of table 'model_draft.renpass_gis_economic_linear_transformer'."""
    __tablename__ = 'renpass_gis_economic_linear_transformer'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.renpass_gis_economic_linear_transformer_id_seq'::regclass)"))
    scenario_id = Column(ForeignKey('model_draft.renpass_gis_economic_scenario.id'))
    label = Column(String(250))
    source = Column(String(250))
    target = Column(String(250))
    # Parameter vectors stored as numeric array columns.
    conversion_factors = Column(ARRAY(Numeric()))
    summed_min = Column(ARRAY(Numeric()))
    nominal_value = Column(ARRAY(Numeric()))
    actual_value = Column(ARRAY(Numeric()))
    fixed = Column(Boolean)
    variable_costs = Column(ARRAY(Numeric()))
    fixed_costs = Column(ARRAY(Numeric()))
    scenario = relationship('RenpassGisEconomicScenario')
class RenpassGisEconomicSink(Base):
    """Declarative mapping of table 'model_draft.renpass_gis_economic_sink'."""
    __tablename__ = 'renpass_gis_economic_sink'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.renpass_gis_economic_sink_id_seq'::regclass)"))
    scenario_id = Column(ForeignKey('model_draft.renpass_gis_economic_scenario.id'))
    label = Column(String(250))
    source = Column(String(250))
    target = Column(String(250))
    nominal_value = Column(ARRAY(Numeric()))
    actual_value = Column(ARRAY(Numeric()))
    fixed = Column(Boolean)
    scenario = relationship('RenpassGisEconomicScenario')
class RenpassGisEconomicSource(Base):
    """Declarative mapping of table 'model_draft.renpass_gis_economic_source'."""
    __tablename__ = 'renpass_gis_economic_source'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.renpass_gis_economic_source_id_seq'::regclass)"))
    scenario_id = Column(ForeignKey('model_draft.renpass_gis_economic_scenario.id'))
    label = Column(String(250))
    source = Column(String(250))
    target = Column(String(250))
    nominal_value = Column(ARRAY(Numeric()))
    actual_value = Column(ARRAY(Numeric()))
    variable_costs = Column(ARRAY(Numeric()))
    fixed = Column(Boolean)
    scenario = relationship('RenpassGisEconomicScenario')
class RenpassGisEconomicStorage(Base):
    """Declarative mapping of table 'model_draft.renpass_gis_economic_storage'."""
    __tablename__ = 'renpass_gis_economic_storage'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.renpass_gis_economic_storage_id_seq'::regclass)"))
    scenario_id = Column(ForeignKey('model_draft.renpass_gis_economic_scenario.id'))
    label = Column(String(250))
    source = Column(String(250))
    target = Column(String(250))
    conversion_factors = Column(ARRAY(Numeric()))
    summed_min = Column(ARRAY(Numeric()))
    nominal_value = Column(ARRAY(Numeric()))
    # NOTE: attribute names 'min'/'max' mirror the DB column names and shadow
    # the builtins only within the class namespace (harmless for ORM use).
    min = Column(ARRAY(Numeric()))
    max = Column(ARRAY(Numeric()))
    actual_value = Column(ARRAY(Numeric()))
    fixed = Column(Boolean)
    variable_costs = Column(ARRAY(Numeric()))
    fixed_costs = Column(ARRAY(Numeric()))
    nominal_capacity = Column(ARRAY(Numeric()))
    capacity_loss = Column(ARRAY(Numeric()))
    inflow_conversion_factor = Column(ARRAY(Numeric()))
    outflow_conversion_factor = Column(ARRAY(Numeric()))
    initial_capacity = Column(ARRAY(Numeric()))
    capacity_min = Column(ARRAY(Numeric()))
    capacity_max = Column(ARRAY(Numeric()))
    scenario = relationship('RenpassGisEconomicScenario')
class RenpassGisScenarioResult(Base):
    """Declarative mapping of table 'model_draft.renpass_gis_scenario_results'."""
    __tablename__ = 'renpass_gis_scenario_results'
    __table_args__ = {'schema': 'model_draft'}
    id = Column(BigInteger, primary_key=True, server_default=text("nextval('model_draft.renpass_gis_scenario_results_id_seq'::regclass)"))
    scenario_id = Column(ForeignKey('model_draft.renpass_gis_economic_scenario.id'))
    bus_label = Column(String(250))
    type = Column(String(250))
    obj_label = Column(String(250))
    datetime = Column(DateTime)
    val = Column(Numeric)
    scenario = relationship('RenpassGisEconomicScenario')
| agpl-3.0 |
phobson/statsmodels | statsmodels/datasets/statecrime/data.py | 3 | 3141 | #! /usr/bin/env python
"""Statewide Crime Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """::
Number of observations: 51
Number of variables: 8
Variable name definitions:
state
All 50 states plus DC.
violent
Rate of violent crimes / 100,000 population. Includes murder, forcible
rape, robbery, and aggravated assault. Numbers for Illinois and
Minnesota do not include forcible rapes. Footnote included with the
American Statistical Abstract table reads:
"The data collection methodology for the offense of forcible
rape used by the Illinois and the Minnesota state Uniform Crime
Reporting (UCR) Programs (with the exception of Rockford, Illinois,
and Minneapolis and St. Paul, Minnesota) does not comply with
national UCR guidelines. Consequently, their state figures for
forcible rape and violent crime (of which forcible rape is a part)
are not published in this table."
murder
Rate of murders / 100,000 population.
hs_grad
Precent of population having graduated from high school or higher.
poverty
% of individuals below the poverty line
white
Percent of population that is one race - white only. From 2009 American
Community Survey
single
Calculated from 2009 1-year American Community Survey obtained obtained
from Census. Variable is Male householder, no wife present, family
household combined with Female household, no husband prsent, family
household, divided by the total number of Family households.
urban
% of population in Urbanized Areas as of 2010 Census. Urbanized
Areas are area of 50,000 or more people."""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the statecrime data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    ##### SET THE INDICES #####
    # endog is column 2 (murder); exog are columns [7, 4, 3, 5].
    # NOTE: None for exog_idx is the complement of endog_idx.
    return du.process_recarray(_get_data(), endog_idx=2,
                               exog_idx=[7, 4, 3, 5], dtype=float)
def load_pandas():
    """Load the statecrime data as a pandas-backed Dataset instance."""
    ##### SET THE INDICES #####
    # Same endog/exog selection as load(); column 0 becomes the index.
    # NOTE: None for exog_idx is the complement of endog_idx.
    raw = _get_data()
    return du.process_recarray_pandas(raw, endog_idx=2,
                                      exog_idx=[7, 4, 3, 5],
                                      dtype=float, index_idx=0)
def _get_data():
    """Read statecrime.csv (shipped next to this module) into a record array.

    Returns
    -------
    numpy.recarray
        One record per state; field names come from the CSV header row.
    """
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    with open(filepath + '/statecrime.csv', 'rb') as f:
        # np.recfromtxt was deprecated and removed in NumPy 2.0; genfromtxt
        # with a recarray view is the documented equivalent (names=True reads
        # the header, dtype=None infers per-column types).
        data = np.genfromtxt(f, delimiter=",", names=True, dtype=None)
    return data.view(np.recarray)
| bsd-3-clause |
minesense/VisTrails | scripts/dist/windows/Input/startup.py | 2 | 7926 | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Vistrails initialization file
##############################################################################
##############################################################################
# Basic configuration
# Comment this to bypass the logging mechanism
# configuration.nologger = True
# Uncomment this to prevent VisTrails's splash screen from appearing
# configuration.showSplash = False
# Uncomment this to enable VisTrails's python shell by default
# configuration.pythonPrompt = True
# Uncomment this to switch to the non-caching execution model
# configuration.useCache = False
# Uncomment this to start VisTrails with maximized windows
# configuration.maximizeWindows = True
# Uncomment this if you run multiple monitors, to start VisTrails
# with different windows in different monitors
# configuration.multiHeads = True
# Set verbosenessLevel to 1 or 2 to enable dumping of non-critical warnings
# and information messages to stderr.
# configuration.verbosenessLevel = 1 # 2#!/usr/bin/env python
# Vistrails initialization file
################################################################################
##############################################################################
# VisTrails packages.
# VisTrails packages are collections of modules that provide user-specified
# functionality to VisTrails. Use addPackage to let VisTrails know which
# packages you want enabled.
# Interpackage dependencies must currently be handled manually by the user.
# For example, the spreadsheet package depends on VTK for some functionality,
# so if you want that functionality, you should add the vtk package before
# the spreadsheet package.
# the vtk package is the main visualization package for VisTrails
# the vtk package is the main visualization package for VisTrails
addPackage('vtk')
# pythonCalc is an example package intended simply to demonstrate how to
# create new packages
addPackage('pythonCalc')
# ImageMagick uses the ImageMagick command-line suite to perform various
# tasks on images (conversion, filtering, etc). Disabled by default.
#addPackage('ImageMagick')
# The spreadsheet package enables the Visualization Spreadsheet
addPackage('spreadsheet')
# The URL package provides an easy way to download files and use them as
# regular files in VisTrails pipelines.
addPackage('URL')
# matplotlib/pylab package for plotting and histograms
addPackage('pylab')
################################################################################
# Hooks
# Currently, there is only one hook in VisTrails: the startup hook. By adding
# arbitrary callables to the startup hook, it is possible to run user-defined
# code after all packages have been initialized, but before VisTrails runs.
# This is intended to show that it is possible to have user-defined code
# in specific places in VisTrails. If you think you need a hook somewhere that
# we haven't allowed yet, please let us know, and we'll include it in a future
# release.
def testHook():
    """Prints the Module class hierarchy to stdout."""
    # NOTE: Python 2 print statements — this startup file targets the
    # Python 2 interpreter bundled with VisTrails.
    def printTree(n, indent = 0):
        # Print one node at the current indent, then recurse into its
        # children with a deeper indent.
        def iprint(str):
            print '%s%s' % (" " * indent, str)
        iprint('Class: %s' % n.descriptor.name)
        for c in n.children:
            printTree(c, indent+4)
    import modules
    import modules.module_registry
    # Walk the global module registry's class tree from its root.
    t = modules.module_registry.registry.classTree
    printTree(t)
# Uncomment this line to install the startup hook
# addStartupHook(testHook)
##############################################################################
# If you have an appropriate Qt license, you can install signal inspectors,
# which might make debugging a whole lot easier. To do that, uncomment the
# following lines.
# import qt
# connections = {}
# def connectHandler(*args):
# """This handler writes all signal connections to /tmp/signalslotnames.txt"""
# emitter = args[0].__class__.__name__
# signal = args[1]
# f = signal.find('(')
# if f == -1:
# signal = signal[1:]
# else:
# signal = signal[1:f]
# try:
# receiver = args[2].im_class.__name__
# slot = args[2].im_func.__name__
# except AttributeError:
# receiver = args[2].__self__.__class__.__name__
# slot = args[2].__class__.__name__
# entry = (emitter, signal, receiver, slot)
# print entry
# global connections
# try:
# connections[emitter].add((signal, receiver, slot))
# except:
# connections[emitter] = set(((signal, receiver, slot),))
# signals = {}
# slots = {}
# sig_count = 1
# slot_count = 1
# f = file('/tmp/connections.txt', 'w')
# f.write('digraph {\n')
# for (k, v) in connections.iteritems():
# print k, v
# recs = {}
# for (sig, rec, sl) in v:
# if not signals.has_key(sig):
# signals[sig] = sig_count
# sig_count += 1
# if not slots.has_key(sl):
# slots[sl] = slot_count
# slot_count += 1
# try:
# recs[rec].append( str(signals[sig]) + ':' + str(slots[sl]))
# except:
# recs[rec] = [str(signals[sig]) + ':' + str(slots[sl])]
# for rec, sigslotlist in recs.iteritems():
# f.write('%s -> %s [label = "%s"];\n' % (k, rec, ";".join(sigslotlist)))
# # if not entry in connections:
# # f = file('/tmp/connections.txt', 'a')
# # f.write("%s %s %s\n" % emi)
# # f.close()
# # connections.add(entry)
# f.write('}\n')
# f.close()
# f = file('/tmp/signalslotnames.txt', 'w')
# sigs = [(v, k) for (k, v) in signals.items()]
# sigs.sort()
# sls = [(v, k) for (k, v) in slots.items()]
# sls.sort()
# f.write('signals: \n')
# for (k,v) in sigs:
# f.write('%s: %s\n' % (k, v))
# f.write('slots: \n')
# for (k,v) in sls:
# f.write('%s: %s\n' % (k, v))
# This line hooks connectHandler to Qt's signals. You can use user-defined
# code here.
# qt.enableSignalDebugging(connectCall = connectHandler)
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
# Error-message fragments appended by raise_build_error below.  These are
# user-facing strings embedded in the raised ImportError text.
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
    """Re-raise *e* as a comprehensible ImportError.

    The error message lists the contents of the package directory to
    help debugging on the mailing list.
    """
    local_dir = os.path.split(__file__)[0]
    # Picking up the local install: this will work only if the
    # install is an 'inplace build'.
    msg = INPLACE_MSG if local_dir == "sklearn/__check_build" else STANDARD_MSG
    # Lay the directory listing out three entries per row: the third
    # entry of each row ends with a newline, the others are padded.
    dir_content = [
        filename + '\n' if (i + 1) % 3 == 0 else filename.ljust(26)
        for i, filename in enumerate(os.listdir(local_dir))
    ]
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
# Importing the compiled extension is the actual build check: if the C
# extension is missing, translate the bare ImportError into the helpful
# message assembled by raise_build_error.
try:
    from ._check_build import check_build
except ImportError as e:
    raise_build_error(e)
| bsd-3-clause |
djgagne/scikit-learn | sklearn/tree/tests/test_tree.py | 11 | 47506 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
# Criteria and estimator registries used to parameterize the tests below.
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
# Classifier trees under test; the "Presort-" variants exercise the
# presort-best splitter code path.
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
                                              splitter="presort-best"),
    "ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
                                             splitter="presort-best"),
    "ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
# Names of the estimators whose default splitter supports sparse input.
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
                if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the digits dataset and randomly permute it
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
# synthetic multilabel problem
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
    random_state=0, n_samples=30, n_features=10)
# sparse fixtures: zero out ~80% of entries to get genuine sparsity
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
# name -> {"X": ..., "y": ...} lookup used by the parameterized checks
DATASETS = {
    "iris": {"X": iris.data, "y": iris.target},
    "boston": {"X": boston.data, "y": boston.target},
    "digits": {"X": digits.data, "y": digits.target},
    "toy": {"X": X, "y": y},
    "clf_small": {"X": X_small, "y": y_small},
    "reg_small": {"X": X_small, "y": y_small_reg},
    "multilabel": {"X": X_multilabel, "y": y_multilabel},
    "sparse-pos": {"X": X_sparse_pos, "y": y_random},
    "sparse-neg": {"X": - X_sparse_pos, "y": y_random},
    "sparse-mix": {"X": X_sparse_mix, "y": y_random},
    "zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
# every dense dataset also gets a CSC sparse copy
for name in DATASETS:
    DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    """Assert that tree structures *d* and *s* are equivalent.

    *message* is prefixed to every failure report.  Node values are only
    compared on external (leaf) nodes; internal-node features/thresholds
    are compared for internal nodes only.
    """
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of node ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": inequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": inequal children_left")
    # Leaves are marked by TREE_LEAF in children_right.
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": inequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": inequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": inequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": inequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": inequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": inequal value")
def test_classification_toy():
    # Check classification on a toy dataset, with and without a
    # max_features restriction.
    for name, Tree in CLF_TREES.items():
        failure_msg = "Failed with {0}".format(name)
        for params in ({"random_state": 0},
                       {"max_features": 1, "random_state": 1}):
            estimator = Tree(**params)
            estimator.fit(X, y)
            assert_array_equal(estimator.predict(T), true_result,
                               failure_msg)
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
    # Check regression on a toy dataset.
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
        # Bug fix: the original asserted on the *first* estimator again
        # (it fitted `clf` but asserted `reg.predict`), so the
        # max_features=1 configuration was never actually checked.
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
    # Check behavior when y is pure (a single class / constant target):
    # the tree must predict that value for every training sample.
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # Bug fix: this used to assert on `clf` (the last classifier
        # left over from the loop above) instead of the regressor
        # under test, so the regressors were never checked.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """Nose-style generative test driving check_min_weight_fraction_leaf."""
    # Check on dense input
    for name in ALL_TREES:
        yield check_min_weight_fraction_leaf, name, "iris"
    # Check on sparse input
    for name in SPARSE_TREES:
        yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable and that the score is
    # unchanged after a pickle round-trip.
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        serialized_object = pickle.dumps(clf)
        clf2 = pickle.loads(serialized_object)
        assert_equal(type(clf2), clf.__class__)
        score2 = clf2.score(iris.data, iris.target)
        assert_equal(score, score2, "Failed to generate same score "
                                    "after pickling (classification) "
                                    "with {0}".format(name))
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(boston.data, boston.target)
        score = reg.score(boston.data, boston.target)
        serialized_object = pickle.dumps(reg)
        reg2 = pickle.loads(serialized_object)
        assert_equal(type(reg2), reg.__class__)
        score2 = reg2.score(boston.data, boston.target)
        assert_equal(score, score2, "Failed to generate same score "
                                    "after pickling (regression) "
                                    "with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
    # Check class rebalancing via sample weights on a class-imbalanced
    # slice of iris: a weighted fit should classify it perfectly.
    X_cut = iris.data[:125]
    y_cut = iris.target[:125]
    weights = _balance_weights(y_cut)
    for name, TreeClassifier in CLF_TREES.items():
        model = TreeClassifier(random_state=0)
        model.fit(X_cut, y_cut, sample_weight=weights)
        assert_almost_equal(model.predict(X_cut), y_cut)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
    # Check that malformed sample_weight arrays raise ValueError.
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0
    clf = DecisionTreeClassifier(random_state=0)
    invalid_weights = (
        np.random.rand(100, 1),  # wrong number of dimensions
        np.array(0),             # zero-dimensional scalar
        np.ones(101),            # one entry too many
        np.ones(99),             # one entry too few
    )
    for sample_weight in invalid_weights:
        assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Nose-style generative test driving check_class_weights."""
    for name in CLF_TREES:
        yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """Nose-style generative test driving check_class_weight_errors."""
    for name in CLF_TREES:
        yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leafs.
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        # Renamed from `tree` to avoid shadowing the module-level
        # `sklearn tree` import.
        tree_ = est.tree_
        assert_equal((tree_.children_left == TREE_LEAF).sum(), k + 1)
    # max_leaf_nodes in (0, 1) should raise ValueError
    est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
    assert_raises(ValueError, est.fit, X, y)
    est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
    assert_raises(ValueError, est.fit, X, y)
    est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
    assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
    # With max_leaf_nodes=4 the (best-first) tree must be allowed to grow
    # deeper than max_depth=1, i.e. the leaf budget wins over the depth cap.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        tree = est.tree_
        assert_greater(tree.max_depth, 1)
def test_arrays_persist():
    """Property arrays must keep their memory alive after the low-level
    tree object is gone (non-regression test for #2726)."""
    attributes = ['n_classes', 'value', 'children_left', 'children_right',
                  'threshold', 'impurity', 'feature', 'n_node_samples']
    for attr in attributes:
        value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
        # If the array pointed at freed memory its contents would be
        # arbitrary; for this trivial tree every attribute lies in [-2, 2).
        assert_true(-2 <= value.flat[0] < 2,
                    'Array points to arbitrary memory')
def test_only_constant_features():
    """With all-constant features no split is possible: depth must be 0."""
    rng = check_random_state(0)
    X = np.zeros((10, 20))
    y = rng.randint(0, 2, (10, ))
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(random_state=0)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
    """A single informative column among 1000 constant ones must still be
    split on, even with ``max_features=1`` feature subsampling."""
    informative = np.array([[1.], [1.], [0.], [0.]])
    X = np.hstack([informative, np.zeros((4, 1000))])
    y = np.array([0., 1., 0., 1.0])

    # The labels do not follow the informative feature, so the single
    # split leaves the class probabilities uniform ...
    for name, TreeEstimator in CLF_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))

    # ... and regressors predict the mean target, 0.5.
    for name, TreeEstimator in REG_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    """An input too large for float32 must be rejected with an error
    message that mentions the dtype.

    Bug fix: the original ``try/except`` had no ``else`` branch, so the
    test passed silently if ``fit`` raised nothing at all.
    """
    X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
    clf = DecisionTreeClassifier()
    try:
        clf.fit(X, [0, 1, 0, 1])
    except ValueError as e:
        assert_in("float32", str(e))
    else:
        raise AssertionError("ValueError not raised for overflowing input")
def test_realloc():
    """The internal realloc helper must surface allocation failures."""
    from sklearn.tree._utils import _realloc_test
    assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
    """Absurd ``max_leaf_nodes`` values must fail loudly, not crash."""
    n_bits = int(platform.architecture()[0].rstrip('bit'))
    X = np.random.randn(10, 2)
    y = np.random.randint(0, 2, 10)

    # Sanity check: more memory than the address space can hold cannot be
    # requested. Currently this raises OverflowError.
    too_big = 2 ** (n_bits + 1)
    clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=too_big)
    assert_raises(Exception, clf.fit, X, y)

    # Non-regression test: MemoryError used to be dropped by Cython
    # because of a missing "except *".
    too_big = 2 ** (n_bits - 1) - 1
    clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=too_big)
    assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
    """Fitting on dense vs. sparse input must build identical trees and
    give identical predictions for estimator *tree* on *dataset*."""
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Gain testing time: subsample the two biggest datasets.
    if dataset in ["digits", "boston"]:
        n_samples = X.shape[0] // 5
        X = X[:n_samples]
        X_sparse = X_sparse[:n_samples]
        y = y[:n_samples]

    for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
        X_sparse = sparse_format(X_sparse)

        # Check the default (depth first search)
        d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
        s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)

        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))

        # Reference predictions from the dense-fitted estimator.
        y_pred = d.predict(X)
        if tree in CLF_TREES:
            y_proba = d.predict_proba(X)
            y_log_proba = d.predict_log_proba(X)

        # The sparse-fitted estimator must reproduce them on every
        # sparse test format.
        for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
            X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)

            assert_array_almost_equal(s.predict(X_sparse_test), y_pred)

            if tree in CLF_TREES:
                assert_array_almost_equal(s.predict_proba(X_sparse_test),
                                          y_proba)
                assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
                                          y_log_proba)
def test_sparse_input():
    """Generate dense-vs-sparse equivalence checks for all sparse trees."""
    classification_datasets = ("clf_small", "toy", "digits", "multilabel",
                               "sparse-pos", "sparse-neg", "sparse-mix",
                               "zeros")
    for tree_name, dataset in product(SPARSE_TREES, classification_datasets):
        # "digits" is slow, so cap the depth there to keep the test fast.
        max_depth = 3 if dataset == "digits" else None
        yield (check_sparse_input, tree_name, dataset, max_depth)

    # Due to numerical instability of MSE and too strict a comparison,
    # regression trees are only checked at a small maximal depth.
    for tree_name, dataset in product(REG_TREES, ["boston", "reg_small"]):
        if tree_name in SPARSE_TREES:
            yield (check_sparse_input, tree_name, dataset, 2)
def check_sparse_parameters(tree, dataset):
    """Tree hyper-parameters must behave identically on dense and sparse
    input: same tree structure and same predictions."""
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Check max_features
    d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
    s = TreeEstimator(random_state=0, max_features=1,
                      max_depth=2).fit(X_sparse, y)
    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))
    assert_array_almost_equal(s.predict(X), d.predict(X))

    # Check min_samples_split
    d = TreeEstimator(random_state=0, max_features=1,
                      min_samples_split=10).fit(X, y)
    s = TreeEstimator(random_state=0, max_features=1,
                      min_samples_split=10).fit(X_sparse, y)
    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))
    assert_array_almost_equal(s.predict(X), d.predict(X))

    # Check min_samples_leaf (half of the samples per leaf)
    d = TreeEstimator(random_state=0,
                      min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
    s = TreeEstimator(random_state=0,
                      min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))
    assert_array_almost_equal(s.predict(X), d.predict(X))

    # Check best-first search (triggered by max_leaf_nodes)
    d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
    s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))
    assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree_name, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_parameters, tree_name, dataset)
def check_sparse_criterion(tree, dataset):
    """Every split criterion must build the same tree on dense and sparse
    versions of *dataset* and predict identically."""
    TreeEstimator = ALL_TREES[tree]
    entry = DATASETS[dataset]
    X, X_sparse, y = entry["X"], entry["X_sparse"], entry["y"]

    # Regression trees use the regression criteria, classifiers the rest.
    criteria = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
    for criterion in criteria:
        d = TreeEstimator(random_state=0, max_depth=3,
                          criterion=criterion).fit(X, y)
        s = TreeEstimator(random_state=0, max_depth=3,
                          criterion=criterion).fit(X_sparse, y)

        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree_name, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_criterion, tree_name, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
                                n_features=10):
    """Explicitly stored zeros in sparse input must not change the fitted
    tree nor any of its predictions."""
    TreeEstimator = ALL_TREES[tree]

    # n_samples is set to n_features to ease the simultaneous
    # construction of a csr and csc matrix from the same arrays.
    n_samples = n_features
    samples = np.arange(n_samples)

    # Generate X, y
    random_state = check_random_state(0)
    indices = []
    data = []
    offset = 0
    indptr = [offset]
    for i in range(n_features):
        n_nonzero_i = random_state.binomial(n_samples, 0.5)
        indices_i = random_state.permutation(samples)[:n_nonzero_i]
        indices.append(indices_i)
        # Shift so that some of the *stored* values are exactly zero.
        data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
        data.append(data_i)
        offset += n_nonzero_i
        indptr.append(offset)

    indices = np.concatenate(indices)
    data = np.array(np.concatenate(data), dtype=np.float32)
    X_sparse = csc_matrix((data, indices, indptr),
                          shape=(n_samples, n_features))
    X = X_sparse.toarray()
    X_sparse_test = csr_matrix((data, indices, indptr),
                               shape=(n_samples, n_features))
    X_test = X_sparse_test.toarray()
    y = random_state.randint(0, 3, size=(n_samples, ))

    # Ensure that X_sparse_test owns its data, indices and indptr arrays
    X_sparse_test = X_sparse_test.copy()

    # Ensure that we have explicit zeros
    assert_greater((X_sparse.data == 0.).sum(), 0)
    assert_greater((X_sparse_test.data == 0.).sum(), 0)

    # Perform the comparison
    d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
    s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)

    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))

    # Compare every dense/sparse pairing of test inputs.
    Xs = (X_test, X_sparse_test)
    for X1, X2 in product(Xs, Xs):
        assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
        assert_array_almost_equal(s.apply(X1), d.apply(X2))
        assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
        assert_array_almost_equal(s.predict(X1), d.predict(X2))
        if tree in CLF_TREES:
            assert_array_almost_equal(s.predict_proba(X1),
                                      d.predict_proba(X2))
def test_explicit_sparse_zeros():
    for tree_name in SPARSE_TREES:
        yield (check_explicit_sparse_zeros, tree_name)
@ignore_warnings
def check_raise_error_on_1d_input(name):
    """1-D feature arrays must be rejected by both fit and predict."""
    TreeEstimator = ALL_TREES[name]
    X_1d = iris.data[:, 0].ravel()
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target

    assert_raises(ValueError, TreeEstimator(random_state=0).fit, X_1d, y)

    est = TreeEstimator(random_state=0)
    est.fit(X_2d, y)
    # Wrapping the 1-D array in a list yields a single sample whose
    # feature count no longer matches the training data.
    assert_raises(ValueError, est.predict, [X_1d])
@ignore_warnings
def test_1d_input():
    for est_name in ALL_TREES:
        yield check_raise_error_on_1d_input, est_name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
    # Private helper kept separate so nose's yielded tests print nicely.
    # Without a weight constraint exactly one split is made ...
    est = TreeEstimator(random_state=0)
    est.fit(X, y, sample_weight=sample_weight)
    assert_equal(est.tree_.max_depth, 1)

    # ... while requiring 40% of the total weight per leaf forbids it.
    est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
    est.fit(X, y, sample_weight=sample_weight)
    assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
    """Exercise min_weight_fraction_leaf split gating for estimator *name*."""
    TreeEstimator = ALL_TREES[name]
    X = np.array([[0], [0], [0], [0], [1]])
    y = [0, 0, 0, 0, 1]
    weights = [0.2] * 5
    _check_min_weight_leaf_split_level(TreeEstimator, X, y, weights)

    # Repeat on sparse input when the estimator's splitter supports it.
    if TreeEstimator().splitter in SPARSE_SPLITTERS:
        _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
                                           weights)
def test_min_weight_leaf_split_level():
    for est_name in ALL_TREES:
        yield check_min_weight_leaf_split_level, est_name
def check_public_apply(name):
    """Public ``apply`` must agree with the low-level ``tree_.apply``."""
    X_cast = X_small.astype(tree._tree.DTYPE)
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)
    assert_array_equal(est.apply(X_small), est.tree_.apply(X_cast))
def check_public_apply_sparse(name):
    """Public ``apply`` on dense input must agree with ``tree_.apply`` on
    the csr-converted, dtype-cast equivalent."""
    X_cast_sparse = csr_matrix(X_small.astype(tree._tree.DTYPE))
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)
    assert_array_equal(est.apply(X_small), est.tree_.apply(X_cast_sparse))
def test_public_apply():
    # Dense apply() for every tree; sparse apply() where supported.
    for est_name in ALL_TREES:
        yield (check_public_apply, est_name)
    for est_name in SPARSE_TREES:
        yield (check_public_apply_sparse, est_name)
| bsd-3-clause |
liyu1990/sklearn | sklearn/cluster/birch.py | 22 | 22730 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """Split *node* in two when it has no room for a new subcluster.

    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of most distant subclusters is found.
    3. Every existing subcluster is attached to whichever of the two new
       subclusters/nodes it is closer to.
    4. The two new nodes are set as children of the two new subclusters.

    Returns the pair ``(new_subcluster1, new_subcluster2)``.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2

    if node.is_leaf:
        # Keep the doubly linked list of leaves consistent: the two new
        # leaves replace the old one in place.
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2

    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]

    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # Bug fix: ``dist[[farthest_idx]]`` wrapped the index *tuple* in a
    # list, relying on a NumPy interpretation that was deprecated and
    # later removed (NumPy >= 1.16 raises). ``dist[(farthest_idx,)]``
    # explicitly selects the two rows i and j of the farthest pair.
    node1_dist, node2_dist = dist[(farthest_idx,)]

    # Boolean mask: True where a subcluster is closer to the first seed.
    node1_closer = node1_dist < node2_dist
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.

    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.

    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.

    branching_factor : int
        Maximum number of CF subclusters in each node.

    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.

    n_features : int
        The number of features.

    Attributes
    ----------
    subclusters_ : array-like
        list of subclusters for a particular CFNode.

    prev_leaf_ : _CFNode
        prev_leaf. Useful only if is_leaf is True.

    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, to retrieve
        the final subclusters.

    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_`` .

    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.

    centroids_ : ndarray
        view of ``init_centroids_``.

    squared_norm_ : ndarray
        view of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features

        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None

    def append_subcluster(self, subcluster):
        """Store *subcluster* in the next free slot and refresh the views."""
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_

        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]

    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)

    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node overflowed and must itself be split
        by the caller, False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False

        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # Distances are compared via -2*x.c + ||c||^2, which orders the
        # same as the squared euclidean distance to each centroid.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]

        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)

            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False

            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)

                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False

        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False

            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False

            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
    """Each subcluster in a CFNode is called a CFSubcluster.

    A CFSubcluster can have a CFNode has its child.

    Parameters
    ----------
    linear_sum : ndarray, shape (n_features,), optional
        Sample. This is kept optional to allow initialization of empty
        subclusters.

    Attributes
    ----------
    n_samples_ : int
        Number of samples that belong to each subcluster.

    linear_sum_ : ndarray
        Linear sum of all the samples in a subcluster. Prevents holding
        all sample data in memory.

    squared_sum_ : float
        Sum of the squared l2 norms of all samples belonging to a subcluster.

    centroid_ : ndarray
        Centroid of the subcluster. Prevent recomputing of centroids when
        ``CFNode.centroids_`` is called.

    child_ : _CFNode
        Child Node of the subcluster. Once a given _CFNode is set as the child
        of the _CFNode, it is set to ``self.child_``.

    sq_norm_ : ndarray
        Squared norm of the subcluster. Used to prevent recomputing when
        pairwise minimum distances are computed.
    """
    def __init__(self, linear_sum=None):
        if linear_sum is None:
            # Empty subcluster: centroid_ and sq_norm_ are first set by
            # update() or merge_subcluster().
            self.n_samples_ = 0
            self.squared_sum_ = 0.0
            self.linear_sum_ = 0
        else:
            # Singleton subcluster: the sample is its own centroid.
            self.n_samples_ = 1
            self.centroid_ = self.linear_sum_ = linear_sum
            self.squared_sum_ = self.sq_norm_ = np.dot(
                self.linear_sum_, self.linear_sum_)
        self.child_ = None

    def update(self, subcluster):
        """Absorb *subcluster* and refresh the cached centroid and norm."""
        self.n_samples_ += subcluster.n_samples_
        self.linear_sum_ += subcluster.linear_sum_
        self.squared_sum_ += subcluster.squared_sum_
        self.centroid_ = self.linear_sum_ / self.n_samples_
        self.sq_norm_ = np.dot(self.centroid_, self.centroid_)

    def merge_subcluster(self, nominee_cluster, threshold):
        """Check if a cluster is worthy enough to be merged. If
        yes then merge.
        """
        new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
        new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
        new_n = self.n_samples_ + nominee_cluster.n_samples_
        new_centroid = (1 / new_n) * new_ls
        new_norm = np.dot(new_centroid, new_centroid)
        dot_product = (-2 * new_n) * new_norm
        # Squared radius of the would-be merged cluster; only merge when
        # it stays within the threshold.
        sq_radius = (new_ss + dot_product) / new_n + new_norm
        if sq_radius <= threshold ** 2:
            (self.n_samples_, self.linear_sum_, self.squared_sum_,
             self.centroid_, self.sq_norm_) = \
                new_n, new_ls, new_ss, new_centroid, new_norm
            return True
        return False

    @property
    def radius(self):
        """Return radius of the subcluster"""
        dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
        return sqrt(
            ((self.squared_sum_ + dot_product) / self.n_samples_) +
            self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
    """Implements the Birch clustering algorithm.

    Every new sample is inserted into the root of the Clustering Feature
    Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively till it
    ends up at the subcluster of the leaf of the tree has the closest centroid.

    Read more in the :ref:`User Guide <birch>`.

    Parameters
    ----------
    threshold : float, default 0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be lesser than the threshold. Otherwise a new
        subcluster is started.

    branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new samples enters
        such that the number of subclusters exceed the branching_factor then
        the node has to be split. The corresponding parent also has to be
        split and if the number of subclusters in the parent is greater than
        the branching factor, then it has to be split recursively.

    n_clusters : int, instance of sklearn.cluster model, default None
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. By default, this final
        clustering step is not performed and the subclusters are returned
        as they are. If a model is provided, the model is fit treating
        the subclusters as new samples and the initial data is mapped to the
        label of the closest subcluster. If an int is provided, the model
        fit is AgglomerativeClustering with n_clusters set to the int.

    compute_labels : bool, default True
        Whether or not to compute labels for each fit.

    copy : bool, default True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.

    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.

    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.

    subcluster_centers_ : ndarray,
        Centroids of all subclusters read directly from the leaves.

    subcluster_labels_ : ndarray,
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.

    labels_ : ndarray, shape (n_samples,)
        Array of labels assigned to the input data.
        if partial_fit is used instead of fit, they are assigned to the
        last batch of data.

    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
    ... compute_labels=True)
    >>> brc.fit(X)
    Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
       threshold=0.5)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])

    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Maron Livny
      BIRCH: An efficient data clustering method for large databases.
      http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf

    * Roberto Perdisci
      JBirch - Java implementation of BIRCH clustering algorithm
      https://code.google.com/p/jbirch/
    """

    def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy

    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        """
        # Flags distinguish a fresh fit from incremental partial_fit calls.
        self.fit_, self.partial_fit_ = True, False
        return self._fit(X)

    def _fit(self, X):
        # Shared implementation of fit and partial_fit: grows the CF tree
        # sample by sample, then runs the optional global clustering step.
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor

        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape

        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)

            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_

        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X

        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)

            if split:
                # The root overflowed: split it and grow the tree one
                # level by installing a fresh non-leaf root above.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)

        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids

        self._global_clustering(X)
        return self

    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.

        Returns
        -------
        leaves: array-like
            List of the leaf nodes.
        """
        # Walk the linked list of leaves starting from the dummy head.
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves

    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features), None
            Input data. If X is not provided, only the global clustering
            step is done.
        """
        self.partial_fit_, self.fit_ = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            self._check_fit(X)
            return self._fit(X)

    def _check_fit(self, X):
        # Validate that the estimator is fitted (or mid-partial_fit) and
        # that X matches the feature count seen during training.
        is_fitted = hasattr(self, 'subcluster_centers_')

        # Called by partial_fit, before fitting.
        has_partial_fit = hasattr(self, 'partial_fit_')

        # Should raise an error if one does not fit before predicting.
        if not (is_fitted or has_partial_fit):
            raise NotFittedError("Fit training data before predicting")

        if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
            raise ValueError(
                "Training data and predicted data do "
                "not have same number of features.")

    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.

        Avoid computation of the row norms of X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels: ndarray, shape(n_samples)
            Labelled data.
        """
        X = check_array(X, accept_sparse='csr')
        self._check_fit(X)
        # argmin of -2*X.C^T + ||C||^2 matches argmin of the squared
        # euclidean distance to each subcluster center.
        reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
        reduced_distance *= -2
        reduced_distance += self._subcluster_norms
        return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]

    def transform(self, X, y=None):
        """
        Transform X into subcluster centroids dimension.

        Each dimension represents the distance from the sample point to each
        cluster centroid.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
            Transformed data.
        """
        check_is_fitted(self, 'subcluster_centers_')
        return euclidean_distances(X, self.subcluster_centers_)

    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels

        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, int):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")

        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)

        if clusterer is None or not_enough_centroids:
            # Each subcluster keeps its own label.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters))
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)

        if compute_labels:
            self.labels_ = self.predict(X)
| bsd-3-clause |
Newman101/scipy | scipy/integrate/quadrature.py | 33 | 28087 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
    """Warning category used by the integration routines in this module."""
def _cached_p_roots(n):
"""
Cache p_roots results to speed up calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
    """
    Compute a definite integral using fixed-order Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature of
    order `n`.

    Parameters
    ----------
    func : callable
        A Python function or method to integrate (must accept vector inputs).
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function, if any.
    n : int, optional
        Order of quadrature integration. Default is 5.

    Returns
    -------
    val : float
        Gaussian quadrature approximation to the integral
    none : None
        Statically returned value of None

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    dblquad : double integrals
    tplquad : triple integrals
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    romb : integrators for sampled data
    simps : integrators for sampled data
    cumtrapz : cumulative integration for sampled data
    ode : ODE integrator
    odeint : ODE integrator

    """
    # Legendre nodes/weights on [-1, 1]; drop any spurious imaginary part.
    nodes, weights = _cached_p_roots(n)
    nodes = np.real(nodes)
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Gaussian quadrature is only available for "
                         "finite limits.")
    # Affine map of the nodes from [-1, 1] onto [a, b].
    samples = (b - a) * (nodes + 1) / 2.0 + a
    value = (b - a) / 2.0 * np.sum(weights * func(samples, *args), axis=0)
    return value, None
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    This is an internal utility function used by `romberg` and
    `quadrature` to create a vectorized version of a function.

    If `vec_func` is True, the function `func` is assumed to take vector
    arguments.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple, optional
        Extra arguments for the function.
    vec_func : bool, optional
        True if the function func takes vector arguments.

    Returns
    -------
    vfunc : callable
        A function that will take a vector argument and return the
        result.

    """
    if vec_func:
        # func already handles vectors: just bind the extra arguments.
        def vfunc(x):
            return func(x, *args)
        return vfunc

    def vfunc(x):
        if np.isscalar(x):
            return func(x, *args)
        x = np.asarray(x)
        # Evaluate the first point to discover the output dtype.
        y0 = func(x[0], *args)
        n = len(x)
        dtype = getattr(y0, 'dtype', type(y0))
        output = np.empty((n,), dtype=dtype)
        output[0] = y0
        for i in xrange(1, n):
            output[i] = func(x[i], *args)
        return output
    return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
               vec_func=True, miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature
    with absolute tolerance `tol`, increasing the quadrature order
    until two successive estimates agree.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is
        a "vector" function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    Warns
    -----
    AccuracyWarning
        If `maxiter` is reached before the tolerances are met; the latest
        estimate is still returned.
    """
    if not isinstance(args, tuple):
        # Tolerate a single non-tuple extra argument.
        args = (args,)
    vfunc = vectorize1(func, args, vec_func=vec_func)
    val = np.inf
    err = np.inf
    # Guarantee at least one convergence comparison is possible.
    maxiter = max(miniter+1, maxiter)
    # Builtin range: the Python 2 `xrange` shim is unnecessary.
    for n in range(miniter, maxiter+1):
        newval = fixed_quad(vfunc, a, b, (), n)[0]
        err = abs(newval-val)
        val = newval
        if err < tol or err < rtol*abs(val):
            break
    else:
        # for/else: only reached when the loop never hit `break`.
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
            AccuracyWarning)
    return val, err
def tupleset(t, i, value):
    """Return a copy of sequence *t* as a tuple with item *i* set to *value*.

    List-style indexing is used so negative indices (e.g. ``i = -1`` for
    the last axis) behave exactly like ordinary item assignment.
    """
    items = list(t)
    items[i] = value
    return tuple(items)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
    """
    Cumulatively integrate y(x) using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Values to integrate.
    x : array_like, optional
        The coordinate to integrate along. If None (default), use spacing `dx`
        between consecutive elements in `y`.
    dx : int, optional
        Spacing between elements of `y`. Only used if `x` is None.
    axis : int, optional
        Specifies the axis to cumulate. Default is -1 (last axis).
    initial : scalar, optional
        If given, uses this value as the first value in the returned result.
        Typically this value should be 0. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.
    """
    y = np.asarray(y)
    if x is None:
        spacing = dx
    else:
        x = np.asarray(x)
        if x.ndim == 1:
            spacing = np.diff(x)
            # Reshape the 1-d spacing so it broadcasts along `axis`.
            shape = [1] * y.ndim
            shape[axis] = -1
            spacing = spacing.reshape(shape)
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        else:
            spacing = np.diff(x, axis=axis)
        if spacing.shape[axis] != y.shape[axis] - 1:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")
    # Index tuples selecting y[1:] and y[:-1] along `axis`
    # (list assignment so a negative axis works too).
    sl_hi = [slice(None)] * y.ndim
    sl_lo = [slice(None)] * y.ndim
    sl_hi[axis] = slice(1, None)
    sl_lo[axis] = slice(None, -1)
    # Trapezoid areas, accumulated along the integration axis.
    res = np.cumsum(spacing * (y[tuple(sl_hi)] + y[tuple(sl_lo)]) / 2.0,
                    axis=axis)
    if initial is not None:
        if not np.isscalar(initial):
            raise ValueError("`initial` parameter should be a scalar.")
        # Prepend a constant `initial` slab so the output matches y's shape.
        shape = list(res.shape)
        shape[axis] = 1
        res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
                             axis=axis)
    return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule.  If x is None, spacing of dx is assumed.
    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals.  The parameter 'even' controls how this is handled.
    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : int, optional
        Spacing of integration points along axis of `y`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
        'avg' : Average two results:1) use the first N-2 intervals with
          a trapezoidal rule on the last interval and 2) use the last
          N-2 intervals with a trapezoidal rule on the first interval.
        'first' : Use Simpson's rule for the first N-2 intervals with
          a trapezoidal rule on the last interval.
        'last' : Use Simpson's rule for the last N-2 intervals with a
          trapezoidal rule on the first interval.
    Returns
    -------
    res : float or ndarray
        The estimated integral along `axis`.
    See Also
    --------
    quad: adaptive quadrature using QUADPACK
    romberg: adaptive Romberg quadrature
    quadrature: adaptive Gaussian quadrature
    fixed_quad: fixed-order Gaussian quadrature
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrators for sampled data
    cumtrapz: cumulative integration for sampled data
    ode: ODE integrators
    odeint: ODE integrators
    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.
    """
    y = np.asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    # Trapezoid widths for the leftover interval on each end; only
    # overwritten when `x` is given.
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Reshape a 1-d x so it broadcasts against y along `axis`;
            # the original shape is restored before returning.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")
    if N % 2 == 0:
        # Even sample count: Simpson needs an even number of intervals,
        # so one end interval is handled with the trapezoidal rule
        # according to `even`.
        val = 0.0
        result = 0.0
        slice1 = (slice(None),)*nd
        slice2 = (slice(None),)*nd
        if even not in ['avg', 'last', 'first']:
            raise ValueError("Parameter 'even' must be "
                             "'avg', 'last', or 'first'.")
        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            # Trapezoid over the final interval (indices -2, -1).
            slice1 = tupleset(slice1, axis, -1)
            slice2 = tupleset(slice2, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simps(y, 0, N-3, x, dx, axis)
        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            # Trapezoid over the first interval (indices 0, 1).
            slice1 = tupleset(slice1, axis, 0)
            slice2 = tupleset(slice2, axis, 1)
            if x is not None:
                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simps(y, 1, N-2, x, dx, axis)
        if even == 'avg':
            # Average the two one-sided estimates.
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        # Odd sample count: plain composite Simpson over all intervals.
        result = _basic_simps(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    Raises
    ------
    ValueError
        If the number of samples along `axis` is not ``2**k + 1``.
    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    n = 1
    k = 0
    # Find k with 2**k == Ninterv; anything else is rejected below.
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    def _index_at(entry):
        # Full-slice tuple with `entry` substituted at `axis`
        # (list assignment so a negative axis works).
        sl = [slice(None)] * nd
        sl[axis] = entry
        return tuple(sl)

    R = {}
    h = Ninterv * np.asarray(dx, dtype=float)
    # Coarsest trapezoid estimate using only the two endpoints.
    R[(0, 0)] = (y[_index_at(0)] + y[_index_at(-1)])/2.0*h
    start = stop = step = Ninterv
    # Builtin range: the Python 2 `xrange` shim is unnecessary.
    for i in range(1, k+1):
        start >>= 1
        slice_R = _index_at(slice(start, stop, step))
        step >>= 1
        # Trapezoid refinement: halve h and fold in the newly exposed samples.
        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
        # Richardson extrapolation across the current row.
        for j in range(1, i+1):
            prev = R[(i, j-1)]
            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
        h /= 2.0
    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # `show` may be a (precision, width) pair; scalars/True fall
            # back to the defaults via the TypeError/IndexError handlers.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)
            title = "Richardson Extrapolation Table for Romberg Integration"
            print("", title.center(68), "=" * 68, sep="\n", end="")
            for i in range(k+1):
                for j in range(i+1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * 68)
            print()
    return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is 1, the triangular array of the intermediate results
    will be printed. If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function. Each element of `args` will
        be passed as a single argument to `func`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e whether it is a
        "vector" function). Default is False.

    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    dblquad : Double integrals.
    tplquad : Triple integrals.
    romb : Integrators for sampled data.
    simps : Integrators for sampled data.
    cumtrapz : Cumulative integration for sampled data.
    ode : ODE integrator.
    odeint : ODE integrator.

    References
    ----------
    .. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method

    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.

    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of <function vfunc at ...> from [0, 1]

    ::

       Steps  StepSize  Results
           1  1.000000  0.385872
           2  0.500000  0.412631  0.421551
           4  0.250000  0.419184  0.421368  0.421356
           8  0.125000  0.420810  0.421352  0.421350  0.421350
          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350

    The final result is 0.421350396475 after 33 function evaluations.

    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701
    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    # Running trapezoid ordinate sum; _difftrap only returns the ordinates
    # new to each refinement level.
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    # Builtin range: the Python 2 `xrange` shim is unnecessary.
    for i in range(1, divmax+1):
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        # First column: refined trapezoid estimate; subsequent columns:
        # Richardson extrapolation against the previous row.
        row = [intrange * ordsum / n]
        for k in range(i):
            row.append(_romberg_diff(last_row[k], row[k], k+1))
        result = row[i]
        lastresult = last_row[i-1]
        if show:
            # The full table is only kept when it will be printed.
            resmat.append(row)
        err = abs(result - lastresult)
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        # for/else: only reached when the loop never hit `break`.
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning)
    if show:
        _printresmat(vfunc, interval, resmat)
    return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
#  to construct the local interpolating polynomial
#  a are the weights for Newton-Cotes integration
#  B is the error coefficient.
#  error in these coefficients grows as N gets larger.
#  or as samples are closer and closer together

# You can use maxima to find these rational coefficients
#  for equally spaced data using the commands
#  a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
#  Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
#  Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
#  B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
#  a = num_a*array(int_a)/den_a
#  B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
#    where k = N // 2
#
_builtincoeffs = {
    1: (1,2,[1,1],-1,12),
    2: (1,3,[1,4,1],-1,90),
    3: (3,8,[1,3,3,1],-3,80),
    4: (2,45,[7,32,12,32,7],-8,945),
    5: (5,288,[19,75,50,50,75,19],-275,12096),
    6: (1,140,[41,216,27,272,27,216,41],-9,1400),
    7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
        -2368,467775),
    9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                 15741,2857], -4671, 394240),
    10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
                   -260550,272400,-48525,106300,16067],
         -673175, 163459296),
    11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                      15493566,15493566,-9595542,25226685,-3237113,
                      13486539,2171465], -2224234463, 237758976000),
    12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                      87516288,-87797136,87516288,-51491295,35725120,
                      -7587864,9903168,1364651], -3012, 875875),
    13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
                           156074417954,-151659573325,206683437987,
                           -43111992612,-43111992612,206683437987,
                           -151659573325,156074417954,-31268252574,
                           56280729661,8181904909], -2639651053,
         344881152000),
    14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
                         -6625093363,12630121616,-16802270373,19534438464,
                         -16802270373,12630121616,-6625093363,3501442784,
                         -770720657,710986864,90241897], -3740727473,
         1275983280000)
    }


def newton_cotes(rn, equal=0):
    """
    Return weights and error coefficient for Newton-Cotes integration.

    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N.  Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:

    :math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
    + B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`

    where :math:`\\xi \\in [x_0,x_N]`
    and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average samples spacing.

    If the samples are equally-spaced and N is even, then the error
    term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.

    Parameters
    ----------
    rn : int
        The integer order for equally-spaced data or the relative positions of
        the samples with the first sample at 0 and the last at N, where N+1 is
        the length of `rn`. N is the order of the Newton-Cotes integration.
    equal : int, optional
        Set to 1 to enforce equally spaced data.

    Returns
    -------
    an : ndarray
        1-D array of weights to apply to the function at the provided sample
        positions.
    B : float
        Error coefficient.

    Notes
    -----
    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.
    """
    try:
        N = len(rn)-1
        if equal:
            rn = np.arange(N+1)
        elif np.all(np.diff(rn) == 1):
            equal = 1
    except TypeError:
        # `rn` is a scalar order (len() failed), so build equally spaced
        # positions 0..N.  The previous bare `except:` also hid genuine
        # errors such as KeyboardInterrupt.
        N = rn
        rn = np.arange(N+1)
        equal = 1

    if equal and N in _builtincoeffs:
        # Exact rational coefficients are tabulated for small orders.
        na, da, vi, nb, db = _builtincoeffs[N]
        an = na * np.array(vi, dtype=float) / da
        return an, float(nb)/db

    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")
    # Accept plain Python sequences for unequally spaced positions; the
    # arithmetic below requires ndarray semantics.
    rn = np.asarray(rn)
    yi = rn / float(N)
    ti = 2 * yi - 1
    nvec = np.arange(N+1)
    # Vandermonde system: find weights matching the monomial moments on
    # the reference interval [-1, 1].
    C = ti ** nvec[:, np.newaxis]
    Cinv = np.linalg.inv(C)
    # improve precision of result
    for i in range(2):
        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
    # Only even powers have nonzero moments (2/(p+1)) on [-1, 1].
    vec = 2.0 / (nvec[::2]+1)
    ai = Cinv[:, ::2].dot(vec) * (N / 2.)

    if (N % 2 == 0) and equal:
        BN = N/(N+3.)
        power = N+2
    else:
        BN = N/(N+2.)
        power = N+1

    BN = BN - np.dot(yi**power, ai)
    p1 = power+1
    # Compute power**N / p1! in log space to avoid overflow.
    fac = power*math.log(N) - gammaln(p1)
    fac = math.exp(fac)
    return ai, BN*fac
| bsd-3-clause |
f3r/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 35 | 13626 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The dataset size is around 14 MB compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
    """Fetch the 20 newsgroups archive and store it as a zipped pickle.

    Downloads the "by date" tarball into `target_dir`, extracts the train
    and test folders, pickles both splits (zlib-compressed) to
    `cache_path`, removes the intermediate files and returns the cache
    dict with 'train' and 'test' entries.
    """
    archive_file = os.path.join(target_dir, ARCHIVE_NAME)
    train_dir = os.path.join(target_dir, TRAIN_FOLDER)
    test_dir = os.path.join(target_dir, TEST_FOLDER)

    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    if os.path.exists(archive_file):
        # A leftover tarball means a previous download was interrupted:
        # the archive is normally deleted right after extraction.
        logger.warning("Download was incomplete, downloading again.")
        os.remove(archive_file)

    logger.warning("Downloading dataset from %s (14 MB)", URL)
    remote = urlopen(URL)
    with open(archive_file, 'wb') as fh:
        fh.write(remote.read())

    logger.info("Decompressing %s", archive_file)
    tarfile.open(archive_file, "r:gz").extractall(path=target_dir)
    os.remove(archive_file)

    # Load both splits and store them as a single zlib-compressed pickle.
    cache = dict(train=load_files(train_dir, encoding='latin1'),
                 test=load_files(test_dir, encoding='latin1'))
    packed = codecs.encode(pickle.dumps(cache), 'zlib_codec')
    with open(cache_path, 'wb') as fh:
        fh.write(packed)

    # The extracted tree is no longer needed once the pickle exists.
    shutil.rmtree(target_dir)
    return cache
def strip_newsgroup_header(text):
    """
    Given text in "news" format, strip the headers, by removing everything
    before the first blank line.

    If the text contains no blank line, an empty string is returned.
    """
    return text.partition('\n\n')[2]
# Matches lines that quote or introduce quoted material in a news post:
# attribution phrases anywhere in the line ("writes:", "said:", ...) or
# quote markers / citation headers at the start of the line
# ("|", ">", "In article", "Quoted from").
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
                       r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
    """
    Given text in "news" format, strip lines beginning with the quote
    characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:'.)
    """
    kept_lines = (line for line in text.split('\n')
                  if not _QUOTE_RE.search(line))
    return '\n'.join(kept_lines)
def strip_newsgroup_footer(text):
    """
    Given text in "news" format, attempt to remove a signature block.

    As a rough heuristic, we assume that signatures are set apart by either
    a blank line or a line made of hyphens, and that it is the last such line
    in the file (disregarding blank lines at the end).  If no such separator
    is found (or it is the very first line), the text is returned unchanged.
    """
    lines = text.strip().split('\n')
    # Scan upward from the bottom for a blank or all-hyphen separator line.
    line_num = len(lines) - 1
    while line_num >= 0:
        if lines[line_num].strip().strip('-') == '':
            break
        line_num -= 1
    if line_num > 0:
        return '\n'.join(lines[:line_num])
    return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
                       shuffle=True, random_state=42,
                       remove=(),
                       download_if_missing=True):
    """Load the filenames and data from the 20 newsgroups dataset.
    Read more in the :ref:`User Guide <20newsgroups>`.
    Parameters
    ----------
    subset: 'train' or 'test', 'all', optional
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.
    data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
    categories: None or collection of string or unicode
        If None (default), load all the categories.
        If not None, list of category names to load (other categories
        ignored).
    shuffle: bool, optional
        Whether or not to shuffle the data: might be important for models that
        make the assumption that the samples are independent and identically
        distributed (i.i.d.), such as stochastic gradient descent.
    random_state: numpy random number generator or seed integer
        Used to shuffle the dataset.
    download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.
    remove: tuple
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.
        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.
        'headers' follows an exact standard; the other filters are not always
        correct.
    Returns
    -------
    data : Bunch
        Bunch with the posts (``data``), integer labels (``target``),
        category names (``target_names``) and source ``filenames``.
    """
    data_home = get_data_home(data_home=data_home)
    cache_path = _pkl_filepath(data_home, CACHE_NAME)
    twenty_home = os.path.join(data_home, "20news_home")
    cache = None
    if os.path.exists(cache_path):
        # Try the zlib-compressed pickle cache first; any failure falls
        # through to a fresh download below.
        try:
            with open(cache_path, 'rb') as f:
                compressed_content = f.read()
            uncompressed_content = codecs.decode(
                compressed_content, 'zlib_codec')
            cache = pickle.loads(uncompressed_content)
        except Exception as e:
            print(80 * '_')
            print('Cache loading failed')
            print(80 * '_')
            print(e)
    if cache is None:
        if download_if_missing:
            cache = download_20newsgroups(target_dir=twenty_home,
                                          cache_path=cache_path)
        else:
            raise IOError('20Newsgroups dataset not found')
    if subset in ('train', 'test'):
        data = cache[subset]
    elif subset == 'all':
        # Concatenate the train and test splits into a single Bunch
        # (the last split's Bunch object is reused as the container).
        data_lst = list()
        target = list()
        filenames = list()
        for subset in ('train', 'test'):
            data = cache[subset]
            data_lst.extend(data.data)
            target.extend(data.target)
            filenames.extend(data.filenames)
        data.data = data_lst
        data.target = np.array(target)
        data.filenames = np.array(filenames)
    else:
        raise ValueError(
            "subset can only be 'train', 'test' or 'all', got '%s'" % subset)
    data.description = 'the 20 newsgroups by date dataset'
    # Apply the requested metadata-stripping filters to every post.
    if 'headers' in remove:
        data.data = [strip_newsgroup_header(text) for text in data.data]
    if 'footers' in remove:
        data.data = [strip_newsgroup_footer(text) for text in data.data]
    if 'quotes' in remove:
        data.data = [strip_newsgroup_quoting(text) for text in data.data]
    if categories is not None:
        # Restrict to the requested categories and relabel targets.
        labels = [(data.target_names.index(cat), cat) for cat in categories]
        # Sort the categories to have the ordering of the labels
        labels.sort()
        labels, categories = zip(*labels)
        mask = np.in1d(data.target, labels)
        data.filenames = data.filenames[mask]
        data.target = data.target[mask]
        # searchsorted to have continuous labels
        data.target = np.searchsorted(labels, data.target)
        data.target_names = list(categories)
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[mask]
        data.data = data_lst.tolist()
    if shuffle:
        # Permute documents, targets and filenames with the same indices.
        random_state = check_random_state(random_state)
        indices = np.arange(data.target.shape[0])
        random_state.shuffle(indices)
        data.filenames = data.filenames[indices]
        data.target = data.target[indices]
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[indices]
        data.data = data_lst.tolist()
    return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
    """Load the 20 newsgroups dataset and transform it into tf-idf vectors.
    This is a convenience function; the tf-idf transformation is done using the
    default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
    advanced usage (stopword filtering, n-gram extraction, etc.), combine
    fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
    Read more in the :ref:`User Guide <20newsgroups>`.
    Parameters
    ----------
    subset: 'train' or 'test', 'all', optional
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.
    data_home: optional, default: None
        Specify an download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
    remove: tuple
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.
        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.
    Returns
    -------
    bunch : Bunch object
        bunch.data: sparse matrix, shape [n_samples, n_features]
        bunch.target: array, shape [n_samples]
        bunch.target_names: list, length [n_classes]
    """
    # NOTE(review): despite the docstring, the code below builds
    # L2-normalized raw term counts (CountVectorizer + normalize), not a
    # full tf-idf transform -- confirm intended behavior.
    data_home = get_data_home(data_home=data_home)
    filebase = '20newsgroup_vectorized'
    if remove:
        # The removal filters change the text, so they get their own
        # cache file.
        filebase += 'remove-' + ('-'.join(remove))
    target_file = _pkl_filepath(data_home, filebase + ".pkl")
    # we shuffle but use a fixed seed for the memoization
    data_train = fetch_20newsgroups(data_home=data_home,
                                    subset='train',
                                    categories=None,
                                    shuffle=True,
                                    random_state=12,
                                    remove=remove)
    data_test = fetch_20newsgroups(data_home=data_home,
                                   subset='test',
                                   categories=None,
                                   shuffle=True,
                                   random_state=12,
                                   remove=remove)
    if os.path.exists(target_file):
        # Reuse previously vectorized count matrices.
        X_train, X_test = joblib.load(target_file)
    else:
        # The vocabulary is learned on the training split only, then
        # applied to the test split.
        vectorizer = CountVectorizer(dtype=np.int16)
        X_train = vectorizer.fit_transform(data_train.data).tocsr()
        X_test = vectorizer.transform(data_test.data).tocsr()
        joblib.dump((X_train, X_test), target_file, compress=9)
    # the data is stored as int16 for compactness
    # but normalize needs floats
    X_train = X_train.astype(np.float64)
    X_test = X_test.astype(np.float64)
    normalize(X_train, copy=False)
    normalize(X_test, copy=False)
    target_names = data_train.target_names
    if subset == "train":
        data = X_train
        target = data_train.target
    elif subset == "test":
        data = X_test
        target = data_test.target
    elif subset == "all":
        # Stack the splits vertically; targets are concatenated in the
        # same order.
        data = sp.vstack((X_train, X_test)).tocsr()
        target = np.concatenate((data_train.target, data_test.target))
    else:
        raise ValueError("%r is not a valid subset: should be one of "
                         "['train', 'test', 'all']" % subset)
    return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
UCSD-CCAL/ccal | ccal/correlate.py | 1 | 1960 | from numpy import full, nan
from numpy.random import seed, shuffle
from sklearn.linear_model import LinearRegression
from .COLOR_CATEGORICAL import COLOR_CATEGORICAL
from .compute_empirical_p_value import compute_empirical_p_value
from .plot_points import plot_points
def correlate(
    x,
    y,
    n_permutation=0,
    random_seed=20121020,
    plot=True,
    marker_size=16,
    title=None,
    xaxis_title=None,
    yaxis_title=None,
    html_file_path=None,
    plotly_html_file_path=None,
):
    """Fit ``y ~ x`` with ordinary least squares and return ``(r2, p_value)``.
    When ``n_permutation`` > 0, an empirical p-value for the observed R^2 is
    derived by refitting against ``n_permutation`` shuffles of ``y``;
    otherwise ``p_value`` is None.  If ``plot`` is True, the scatter and the
    fitted line are drawn via ``plot_points`` (title/axis/file arguments are
    passed through to it).
    """
    model = LinearRegression()
    # sklearn expects a 2-D feature matrix: wrap each scalar x in a 1-tuple.
    xs = tuple((x_,) for x_ in x)
    model.fit(xs, y)
    r2 = model.score(xs, y)
    if n_permutation:
        permuted_r2s = full(n_permutation, nan)
        m_ = LinearRegression()
        y_ = y.copy()
        seed(random_seed)  # reproducible permutation sequence
        for i in range(n_permutation):
            shuffle(y_)  # in-place permutation of the copy
            m_.fit(xs, y_)
            permuted_r2s[i] = m_.score(xs, y_)
        # Two-sided: keep the smaller of the two one-tailed p-values.
        # NOTE(review): the tail labels "less"/"great" are assumed to be
        # what `compute_empirical_p_value` accepts -- confirm ("great" vs
        # "greater") against that helper's signature.
        p_value = min(
            compute_empirical_p_value(r2, permuted_r2s, "less"),
            compute_empirical_p_value(r2, permuted_r2s, "great"),
        )
    else:
        p_value = None
    if plot:
        # Annotate the plot title with the fit statistics.
        r2_p_value_str = "R^2={:.3f}".format(r2)
        if p_value is not None:
            r2_p_value_str += " & P-Value={:.3e}".format(p_value)
        if title:
            title += "\n{}".format(r2_p_value_str)
        else:
            title = r2_p_value_str
        # Overlay the raw points and the fitted regression line.
        plot_points(
            (x, x),
            (y, model.coef_ * x + model.intercept_),
            names=("Data", "Fit"),
            modes=("markers", "lines"),
            markers=(
                dict(size=marker_size, color=COLOR_CATEGORICAL[0]),
                dict(color=COLOR_CATEGORICAL[1]),
            ),
            title=title,
            xaxis_title=xaxis_title,
            yaxis_title=yaxis_title,
            html_file_path=html_file_path,
            plotly_html_file_path=plotly_html_file_path,
        )
    return r2, p_value
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.