repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
MartinDelzant/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
# Official LFW download location and the names of the two image archives
# (plain and "funneled" = aligned variant).
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
# Metadata files describing the officially defined pair splits:
# development train/test sets and the 10-fold evaluation set.
TARGET_FILENAMES = [
    'pairsDevTrain.txt',
    'pairsDevTest.txt',
    'pairs.txt',
]
def scale_face(face):
    """Rescale a face image so its values span the [0, 1] range.

    Useful for plotting images whose pixel values were normalized
    (e.g. zero-centered) beforehand.
    """
    shifted = face - face.min()
    return shifted / shifted.max()
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Ensures the LFW home folder exists, that the three pairs metadata
    files are present, and that the image archive has been downloaded
    and extracted.

    Parameters
    ----------
    data_home : optional, default: None
        Alternate download/cache folder; defaults to the scikit-learn
        data home.
    funneled : boolean, optional, default: True
        Use the funneled (aligned) variant of the image archive.
    download_if_missing : optional, True by default
        If False, raise IOError instead of downloading missing files.

    Returns
    -------
    (lfw_home, data_folder_path) : tuple of str
        The LFW cache folder and the folder holding the extracted images.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    # fetch the small pairs metadata files first
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warning("Downloading LFW data (~200MB): %s", archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # BUG FIX: this previously reported ``target_filepath``, a
                # stale variable left over from the metadata loop above;
                # the file that is actually missing here is the archive.
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        # the archive is no longer needed once extracted
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Decodes each jpeg in ``file_paths``, crops it with ``slice_``,
    optionally resizes by the ``resize`` ratio and converts to gray
    levels unless ``color`` is True. Returns a float32 array of shape
    (n_faces, h, w) or (n_faces, h, w, 3).
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")

    # compute the portion of the images to load to respect the slice_
    # parameter given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        # fall back to the default for any axis the caller left as None
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file paths to load the jpeg files as
    # numpy arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)

        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # BUG FIX: was ``img.ndim is 0`` — an identity comparison on an
        # int that only worked thanks to CPython's small-integer caching
        # (and raises SyntaxWarning on Python 3.8+); use equality.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)

        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)

        faces[i, ...] = face

    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset.

    This operation is meant to be cached by a joblib wrapper.

    Returns (faces, target, target_names) where ``target`` holds integer
    person ids indexing into the sorted ``target_names`` array.
    """
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # folder names use underscores; display names use spaces
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)

    # np.unique returns sorted names, so searchsorted maps each picture's
    # person name to its integer class id
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset.

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 47.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize each face picture.

    min_faces_per_person : int, optional, default 0
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid spurious statistical
        correlation from the background.

    download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size
        62 x 47 pixels. Changing the ``slice_`` or resize parameters will
        change the shape of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will
        change the shape of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)

    # load and memoize the faces as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset.

    This operation is meant to be cached by a joblib wrapper.

    Parameters
    ----------
    index_file_path : str
        Path to one of the official pairs metadata files
        (pairsDevTrain.txt, pairsDevTest.txt or pairs.txt).
    data_folder_path : str
        Folder containing one sub-folder of jpeg files per person.

    Returns
    -------
    (pairs, target, target_names) where ``pairs`` has shape
    (n_pairs, 2, h, w[, 3]) and ``target[i]`` is 1 for "same person".
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    # the first line is a header; real pair lines have 3 or 4 fields
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # NOTE: ``np.int`` was only a deprecated alias for the builtin ``int``
    # (removed in NumPy 1.24); using ``int`` is behaviorally identical.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # "name idx1 idx2": two pictures of the same person
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # "name1 idx1 name2 idx2": pictures of two different persons
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # Python 3: the metadata was read as bytes, decode to str
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)

    # regroup the flat list of 2 * n_pairs faces into (n_pairs, 2, ...)
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
# BUG FIX: the implicitly concatenated message previously rendered as
# "...removed in 0.19.Use fetch_lfw_people..." — a space was missing
# between the two string fragments.
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
            "be removed in 0.19. "
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people(download_if_missing=False).

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset.

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.

    In the official `README.txt`_ this task is described as the
    "Restricted" task.  The "Unrestricted" variant is not currently
    supported.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid spurious statistical
        correlation from the background.

    download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828)
        Each row corresponds to 2 ravel'd face images of original size
        62 x 47 pixels. Changing the ``slice_`` or resize parameters will
        change the shape of the output.

    pairs : numpy array of shape (2200, 2, 62, 47)
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_`` or
        resize parameters will change the shape of the output.

    target : numpy array of shape (2200,)
        Labels associated to each pair of images. The two label values being
        different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)

    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])

    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
# BUG FIX: the implicitly concatenated message previously rendered as
# "...removed in 0.19.Use fetch_lfw_pairs..." — a space was missing
# between the two string fragments.
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
            "be removed in 0.19. "
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs(download_if_missing=False).

    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_noRot_cont/Geneva_noRot_cont_age4/peaks_reader.py | 33 | 2761 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#input files
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
# ---------------------------------------------------
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] == 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
mprelee/data-incubator-capstone | src/tinker.py | 1 | 1701 | # Look at words
# Matt Prelee
import pandas as pd
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import nltk
import re
from sklearn import base
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.kernel_ridge import KernelRidge
from sklearn.grid_search import GridSearchCV
import time
from scipy import stats
from sklearn.cluster import KMeans
from config import MAIN_DATA
from preprocessing import default_preprocess, remove_outliers_iqr
# Load data
# Unpickle the scraped campaign dataset and run the shared preprocessing
# step (see config.MAIN_DATA for the pickle path).
df = default_preprocess(pickle.load(open(MAIN_DATA,'rb')))
# Focus on campaigns for children and look at the cost distribution.
kids = df[df.age_group == 'child']
kids.Cost.hist(bins=100)
# Drill into the cluster of campaigns costing exactly 1500.
kids1500 = kids[kids.Cost==1500]
print kids1500.summary.values
print kids1500['Profile Url'].values
#plt.show()
#sns.violinplot(df.fund_time,df.Country,alpha=0.5,inner='box')
# Exploratory clustering experiments, currently disabled:
#vect = TfidfVectorizer(lowercase=True,strip_accents='unicode',stop_words='english',decode_error='replace')
#kmeans = KMeans(n_clusters=8)
#tfidf = vect.fit_transform(df['story'])
#centers = kmeans.fit_transform(tfidf)
#cluster_model = Pipeline([\
#    ('tfidf',vect),\
#    ('kmeans',kmeans),\
#    ])
#cluster_model = kmeans
#cluster_model.fit_transform(df['story'].values)
#cluster_model.fit_transform(df[['age','Cost','gender_binary']].values)
#df['clusters'] = cluster_model.predict(df[['age','Cost','gender_binary']].values)
#print df.groupby('clusters')['fund_time'].count()
#print df.groupby(df.clusters)['fund_time'].mean()
#sns.violinplot(df.fund_time,df.clusters,alpha=0.5,inner='box')
| gpl-2.0 |
vivekmishra1991/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes passed to the Cython SGD solver for each learning rate
# schedule ("pa1"/"pa2" are the passive-aggressive variants).
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}

# Integer codes for the penalty (regularization) types.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}

DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression."""

    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False, average=False):
        # Store every hyper-parameter under its public name so that
        # BaseEstimator's get_params/set_params keep working.
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        self.average = average
        self._validate_params()

        self.coef_ = None
        if self.average > 0:
            # when averaging, the non-averaged weights are tracked
            # separately from the running averages
            self.standard_coef_ = None
            self.average_coef_ = None
        # iteration count for learning rate schedule
        # must not be int (e.g. if ``learning_rate=='optimal'``)
        self.t_ = None

    def set_params(self, *args, **kwargs):
        # re-validate after any parameter change so that inconsistent
        # settings fail immediately rather than at fit time
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params()
        return self

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _validate_params(self):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if self.n_iter <= 0:
            raise ValueError("n_iter must be > zero")
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.learning_rate in ("constant", "invscaling"):
            # only these schedules actually use eta0, so it must be positive
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")

        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)

        # ``loss_functions`` is defined by the concrete subclasses
        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                # these losses are parameterized by the user-set epsilon
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)

    def _get_learning_rate_type(self, learning_rate):
        # map the schedule name to the integer code used by the Cython solver
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)

    def _get_penalty_type(self, penalty):
        # map the penalty name to the integer code used by the Cython solver
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)

    def _validate_sample_weight(self, sample_weight, n_samples):
        """Set the sample weight array."""
        if sample_weight is None:
            # uniform sample weights
            sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
        else:
            # user-provided array
            sample_weight = np.asarray(sample_weight, dtype=np.float64,
                                       order="C")
        if sample_weight.shape[0] != n_samples:
            raise ValueError("Shapes of X and sample_weight do not match.")
        return sample_weight

    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class: one weight row per class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")

            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem: a single weight vector
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")

            # allocate intercept_ for binary problem (scalar stored as (1,))
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")

        # initialize average parameters: the averaged buffers start at zero
        # while the "standard" names alias the freshly allocated parameters
        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.

    Delegates the optimization to the Cython ``plain_sgd`` /
    ``average_sgd`` routines and returns the fitted (coef, intercept)
    pair for this one-vs-rest sub-problem.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = make_dataset(X, y_i, sample_weight)

    # translate string hyper-parameters into the integer codes the Cython
    # solver expects
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)

    if not est.average:
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)

    else:
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose), int(est.shuffle),
                                            seed, pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)

        # the averaged intercept is written back onto the estimator here;
        # the averaged coefficients are handled by the caller
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept

        return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):
    """Base class for linear classification with (averaged) SGD.

    Concrete subclasses only provide a constructor; the fitting machinery
    (input validation, class-weight expansion, dispatch to the binary or
    one-versus-all multiclass training routines) lives here.
    """

    # Maps loss name -> (loss class, default constructor argument). Losses
    # listed without a second element take their threshold from the
    # ``epsilon`` parameter at fit time (see BaseSGD._get_loss_function).
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                n_iter=n_iter, shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
                                                random_state=random_state,
                                                learning_rate=learning_rate,
                                                eta0=eta0, power_t=power_t,
                                                warm_start=warm_start,
                                                average=average)
        self.class_weight = class_weight
        # classes_ stays None until fit/partial_fit has seen the labels.
        self.classes_ = None
        self.n_jobs = int(n_jobs)

    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, n_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        """Shared core of ``fit`` and ``partial_fit``.

        Validates the input, (re)allocates ``coef_``/``intercept_`` when
        needed, then runs ``n_iter`` epochs of SGD via the binary or
        one-versus-all multiclass routine.
        """
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape
        self._validate_params()
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]
        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)
        if self.coef_ is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))
        self.loss_function = self._get_loss_function(loss)
        if self.t_ is None:
            self.t_ = 1.0
        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight, n_iter=n_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight, n_iter=n_iter)
        else:
            raise ValueError("The number of class labels must be "
                             "greater than one.")
        return self

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full (non-incremental) fit: reset state, then run n_iter epochs."""
        if hasattr(self, "classes_"):
            self.classes_ = None
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape
        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)
        if self.warm_start and self.coef_ is not None:
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            # Averaged SGD keeps the plain (standard) solution alongside
            # the running average of the weights.
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = None
        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
                          classes, sample_weight, coef_init, intercept_init)
        return self

    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, n_iter):
        """Fit a binary classifier on X and y. """
        coef, intercept = fit_binary(self, 1, X, y, alpha, C,
                                     learning_rate, n_iter,
                                     self._expanded_class_weight[1],
                                     self._expanded_class_weight[0],
                                     sample_weight)
        self.t_ += n_iter * X.shape[0]
        # need to be 2d
        if self.average > 0:
            # Expose the averaged weights once enough samples have been
            # seen; until then expose the plain SGD solution.
            if self.average <= self.t_ - 1:
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)

    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, n_iter):
        """Fit a multi-class classifier by combining binary classifiers

        Each binary classifier predicts one class versus all others. This
        strategy is called OVA: One Versus All.
        """
        # Use joblib to fit OvA in parallel.
        result = Parallel(n_jobs=self.n_jobs, backend="threading",
                          verbose=self.verbose)(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                n_iter, self._expanded_class_weight[i], 1.,
                                sample_weight)
            for i in range(len(self.classes_)))
        for i, (_, intercept) in enumerate(result):
            self.intercept_[i] = intercept
        self.t_ += n_iter * X.shape[0]
        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data

        y : numpy array, shape (n_samples,)
            Subset of the target values

        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights, "
                             "use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (n_classes,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            constructor) if class_weight is specified

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.

    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.

    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
            'perceptron', or a regression loss: 'squared_loss', 'huber',\
            'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.
        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    n_jobs : integer, optional
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation. -1 means 'all CPUs'. Defaults
        to 1.

    learning_rate : string, optional
        The learning rate schedule:
        constant: eta = eta0
        optimal: eta = 1.0 / (t + t0) [default]
        invscaling: eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.

    eta0 : double
        The initial learning rate for the 'constant' or 'invscaling'
        schedules. The default value is 0.0 as eta0 is not used by the
        default schedule 'optimal'.

    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].

    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.
        Weights associated with classes. If not given, all classes
        are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.

    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
            n_features)
        Weights assigned to the features.

    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier()
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
           eta0=0.0, fit_intercept=True, l1_ratio=0.15,
           learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
           penalty='l2', power_t=0.5, random_state=None, shuffle=True,
           verbose=0, warm_start=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    LinearSVC, LogisticRegression, Perceptron

    """

    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        # Pure pass-through; all parameter handling is in the base classes.
        super(SGDClassifier, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
            verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, class_weight=class_weight, warm_start=warm_start,
            average=average)

    def _check_proba(self):
        """Raise unless the model is fitted and the loss supports
        probability estimates ('log' or 'modified_huber')."""
        check_is_fitted(self, "t_")

        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)

    @property
    def predict_proba(self):
        """Probability estimates.

        This method is only available for log loss and modified Huber loss.

        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.

        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.

        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf

        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # Implemented as a property returning the bound method so that the
        # attribute is only exposed when the loss supports probabilities.
        self._check_proba()
        return self._predict_proba

    def _predict_proba(self, X):
        """Compute probability estimates (see ``predict_proba``)."""
        if self.loss == "log":
            return self._predict_proba_lr(X)

        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)

            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores

            # Map decision values into [0, 1]: (clip(s, -1, 1) + 1) / 2.
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.

            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)

                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))

            return prob

        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)

    @property
    def predict_log_proba(self):
        """Log of probability estimates.

        This method is only available for log loss and modified Huber loss.

        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.

        See ``predict_proba`` for details.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        self._check_proba()
        return self._predict_log_proba

    def _predict_log_proba(self, X):
        """Element-wise log of ``predict_proba`` (see that method)."""
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Base class for linear regression with (averaged) SGD."""

    # Maps loss name -> (loss class, default constructor argument). Losses
    # listed without a second element take their threshold from the
    # ``epsilon`` parameter at fit time.
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                               alpha=alpha, l1_ratio=l1_ratio,
                                               fit_intercept=fit_intercept,
                                               n_iter=n_iter, shuffle=shuffle,
                                               verbose=verbose,
                                               epsilon=epsilon,
                                               random_state=random_state,
                                               learning_rate=learning_rate,
                                               eta0=eta0, power_t=power_t,
                                               warm_start=warm_start,
                                               average=average)

    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     n_iter, sample_weight,
                     coef_init, intercept_init):
        """Shared core of ``fit`` and ``partial_fit``: validate input,
        (re)allocate parameter arrays and run ``n_iter`` epochs of SGD."""
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
        y = astype(y, np.float64, copy=False)

        n_samples, n_features = X.shape

        self._validate_params()

        # Allocate datastructures from input arguments
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        if self.coef_ is None:
            self._allocate_parameter_mem(1, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))

        if self.average > 0 and self.average_coef_ is None:
            # First call with averaging enabled: start the running average
            # of the weights at zero.
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1,
                                               dtype=np.float64,
                                               order="C")

        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, n_iter)

        return self

    def partial_fit(self, X, y, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data

        y : numpy array of shape (n_samples,)
            Subset of target values

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full (non-incremental) fit: reset state unless warm-starting,
        then run ``self.n_iter`` epochs via ``_partial_fit``."""
        if self.warm_start and self.coef_ is not None:
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            # Averaged SGD keeps the plain (standard) solution alongside
            # the running average of the weights.
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = None
        return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                                 self.n_iter, sample_weight,
                                 coef_init, intercept_init)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)

    @deprecated(" and will be removed in 0.19.")
    def decision_function(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)

    def _decision_function(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)

        X = check_array(X, accept_sparse='csr')

        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()

    def predict(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)

    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        """Run the Cython SGD routine (plain or averaged) and update
        ``coef_``/``intercept_`` and the iteration counter ``t_``."""
        dataset, intercept_decay = make_dataset(X, y, sample_weight)

        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)

        if self.t_ is None:
            self.t_ = 1.0

        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)

        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            n_iter,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)

            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += n_iter * X.shape[0]

            # Expose the averaged solution once enough samples were seen,
            # otherwise the plain SGD solution.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_

        else:
            self.coef_, self.intercept_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          n_iter,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)

            self.t_ += n_iter * X.shape[0]
            # intercept comes back as a float; store it as a length-1 array.
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
    """Linear model fitted by minimizing a regularized empirical loss with SGD

    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
                or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which refers
        to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
        focus less on getting outliers correct by switching from squared to
        linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
        errors less than epsilon and is linear past that; this is the loss
        function used in SVR. 'squared_epsilon_insensitive' is the same but
        becomes squared loss past a tolerance of epsilon.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level.

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0/(alpha * t)
        invscaling: eta = eta0 / pow(t, power_t) [default]

    eta0 : double, optional
        The initial learning rate [default 0.01].

    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.

    intercept_ : array, shape (1,)
        The intercept term.

    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.

    average_intercept_ : array, shape (1,)
        The averaged intercept term.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, verbose=0, warm_start=False)

    See also
    --------
    Ridge, ElasticNet, Lasso, SVR

    """

    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # Pure pass-through; all parameter handling is in the base classes.
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
| bsd-3-clause |
jpautom/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
# Hint appended to the error when importing from an un-built source tree.
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
# Hint appended to the error for a broken installer-based install.
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
    """Re-raise import error *e* as a detailed ImportError.

    The message lists the files found next to this module so that a bug
    report contains enough context to diagnose the broken build.
    """
    build_dir = os.path.split(__file__)[0]
    hint = STANDARD_MSG
    if build_dir == "sklearn/__check_build":
        # Importing straight from a source checkout: only an in-place
        # build can work, so point the user at that instead.
        hint = INPLACE_MSG
    # Lay the directory listing out in three columns: every third entry
    # ends the row with a newline, the others are padded to a fixed width.
    listing = [name + '\n' if (pos + 1) % 3 == 0 else name.ljust(26)
               for pos, name in enumerate(os.listdir(build_dir))]
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, build_dir, ''.join(listing).strip(), hint))
# Verify the compiled extension is importable; if not, fail with a
# detailed, actionable message instead of a bare ImportError.
try:
    from ._check_build import check_build
except ImportError as e:
    raise_build_error(e)
| bsd-3-clause |
kambysese/mne-python | tutorials/misc/plot_report.py | 3 | 13606 | """
.. _tut-report:
Getting started with ``mne.Report``
===================================
This tutorial covers making interactive HTML summaries with
:class:`mne.Report`.
As usual we'll start by importing the modules we need and loading some
:ref:`example data <sample-dataset>`:
"""
import os
import matplotlib.pyplot as plt
import mne
###############################################################################
# Before getting started with :class:`mne.Report`, make sure the files you want
# to render follow the filename conventions defined by MNE:
#
# .. cssclass:: table-bordered
# .. rst-class:: midvalign
#
# ============== ==============================================================
# Data object Filename convention (ends with)
# ============== ==============================================================
# raw -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz),
# _meg.fif(.gz), _eeg.fif(.gz), _ieeg.fif(.gz)
# events -eve.fif(.gz)
# epochs -epo.fif(.gz)
# evoked -ave.fif(.gz)
# covariance -cov.fif(.gz)
# SSP projectors -proj.fif(.gz)
# trans -trans.fif(.gz)
# forward -fwd.fif(.gz)
# inverse -inv.fif(.gz)
# ============== ==============================================================
#
# Alternatively, the dash ``-`` in the filename may be replaced with an
# underscore ``_``.
#
# Basic reports
# ^^^^^^^^^^^^^
#
# The basic process for creating an HTML report is to instantiate the
# :class:`~mne.Report` class, then use the :meth:`~mne.Report.parse_folder`
# method to select particular files to include in the report. Which files are
# included depends on both the ``pattern`` parameter passed to
# :meth:`~mne.Report.parse_folder` and also the ``subject`` and
# ``subjects_dir`` parameters provided to the :class:`~mne.Report` constructor.
#
# .. sidebar: Viewing the report
#
# On successful creation of the report, the :meth:`~mne.Report.save` method
# will open the HTML in a new tab in the browser. To disable this, use the
# ``open_browser=False`` parameter of :meth:`~mne.Report.save`.
#
# For our first example, we'll generate a barebones report for all the
# :file:`.fif` files containing raw data in the sample dataset, by passing the
# pattern ``*raw.fif`` to :meth:`~mne.Report.parse_folder`. We'll omit the
# ``subject`` and ``subjects_dir`` parameters from the :class:`~mne.Report`
# constructor, but we'll also pass ``render_bem=False`` to the
# :meth:`~mne.Report.parse_folder` method — otherwise we would get a warning
# about not being able to render MRI and ``trans`` files without knowing the
# subject.
# Barebones report: collect every raw .fif file found in the sample dataset.
path = mne.datasets.sample.data_path(verbose=False)
report = mne.Report(verbose=True)
# render_bem=False: no subject/subjects_dir given, so BEM/MRI can't be drawn.
report.parse_folder(path, pattern='*raw.fif', render_bem=False)
report.save('report_basic.html', overwrite=True)
###############################################################################
# This report yields a textual summary of the :class:`~mne.io.Raw` files
# selected by the pattern. For a slightly more useful report, we'll ask for the
# power spectral density of the :class:`~mne.io.Raw` files, by passing
# ``raw_psd=True`` to the :class:`~mne.Report` constructor. We'll also
# visualize the SSP projectors stored in the raw data's `~mne.Info` dictionary
# by setting ``projs=True``. Lastly, let's also refine our pattern to select
# only the filtered raw recording (omitting the unfiltered data and the
# empty-room noise recordings):
# Restrict to the filtered recording and add PSD and SSP-projector plots.
pattern = 'sample_audvis_filt-0-40_raw.fif'
report = mne.Report(raw_psd=True, projs=True, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_raw_psd.html', overwrite=True)
###############################################################################
# The sample dataset also contains SSP projectors stored as *individual files*.
# To add them to a report, we also have to provide the path to a file
# containing an `~mne.Info` dictionary, from which the channel locations can be
# read.
# Standalone projector files carry no channel locations, so point the report
# at a file whose Info provides them.
info_fname = os.path.join(path, 'MEG', 'sample',
                          'sample_audvis_filt-0-40_raw.fif')
pattern = 'sample_audvis_*proj.fif'
report = mne.Report(info_fname=info_fname, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_proj.html', overwrite=True)
###############################################################################
# This time we'll pass a specific ``subject`` and ``subjects_dir`` (even though
# there's only one subject in the sample dataset) and remove our
# ``render_bem=False`` parameter so we can see the MRI slices, with BEM
# contours overlaid on top if available. Since this is computationally
# expensive, we'll also pass the ``mri_decim`` parameter for the benefit of our
# documentation servers, and skip processing the :file:`.fif` files:
# Render MRI slices with BEM contours. mri_decim reduces the number of
# slices (rendering is expensive); pattern='' skips all .fif files.
subjects_dir = os.path.join(path, 'subjects')
report = mne.Report(subject='sample', subjects_dir=subjects_dir, verbose=True)
report.parse_folder(path, pattern='', mri_decim=25)
report.save('report_mri_bem.html', overwrite=True)
###############################################################################
# Now let's look at how :class:`~mne.Report` handles :class:`~mne.Evoked` data
# (we will skip the MRIs to save computation time). The following code will
# produce butterfly plots, topomaps, and comparisons of the global field
# power (GFP) for different experimental conditions.
# Evoked data: butterfly plots, topomaps and GFP comparisons per condition.
pattern = 'sample_audvis-no-filter-ave.fif'
report = mne.Report(verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_evoked.html', overwrite=True)
###############################################################################
# You have probably noticed that the EEG recordings look particularly odd. This
# is because by default, `~mne.Report` does not apply baseline correction
# before rendering evoked data. So if the dataset you wish to add to the report
# has not been baseline-corrected already, you can request baseline correction
# here. The MNE sample dataset we're using in this example has **not** been
# baseline-corrected; so let's do this now for the report!
#
# To request baseline correction, pass a ``baseline`` argument to
# `~mne.Report`, which should be a tuple with the starting and ending time of
# the baseline period. For more details, see the documentation on
# `~mne.Evoked.apply_baseline`. Here, we will apply baseline correction for a
# baseline period from the beginning of the time interval to time point zero.
# Same evoked report, but with baseline correction from the start of the
# time interval up to time point zero.
baseline = (None, 0)
pattern = 'sample_audvis-no-filter-ave.fif'
report = mne.Report(baseline=baseline, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_evoked_baseline.html', overwrite=True)
###############################################################################
# To render whitened :class:`~mne.Evoked` files with baseline correction, pass
# the ``baseline`` argument we just used, and add the noise covariance file.
# This will display ERP/ERF plots for both the original and whitened
# :class:`~mne.Evoked` objects, but scalp topomaps only for the original.
# Add the noise covariance so whitened evoked responses are rendered too.
cov_fname = os.path.join(path, 'MEG', 'sample', 'sample_audvis-cov.fif')
baseline = (None, 0)
report = mne.Report(cov_fname=cov_fname, baseline=baseline, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_evoked_whitened.html', overwrite=True)
###############################################################################
# If you want to actually *view* the noise covariance in the report, make sure
# it is captured by the pattern passed to :meth:`~mne.Report.parse_folder`, and
# also include a source for an :class:`~mne.Info` object (any of the
# :class:`~mne.io.Raw`, :class:`~mne.Epochs` or :class:`~mne.Evoked`
# :file:`.fif` files that contain subject data also contain the measurement
# information and should work):
# View the covariance itself: match it with the pattern and supply an Info
# source (here, the averaged-evoked file) for the channel information.
pattern = 'sample_audvis-cov.fif'
info_fname = os.path.join(path, 'MEG', 'sample', 'sample_audvis-ave.fif')
report = mne.Report(info_fname=info_fname, verbose=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_cov.html', overwrite=True)
###############################################################################
# Adding custom plots to a report
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The Python interface has greater flexibility compared to the :ref:`command
# line interface <mne report>`. For example, custom plots can be added via
# the :meth:`~mne.Report.add_figs_to_section` method:
# Custom plots: build figures by hand, then attach them with
# add_figs_to_section.
report = mne.Report(verbose=True)
fname_raw = os.path.join(path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(fname_raw, verbose=False).crop(tmax=60)
events = mne.find_events(raw, stim_channel='STI 014')
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
            'visual/right': 4, 'face': 5, 'buttonpress': 32}
# Create some epochs and ensure we drop a few (via the EEG rejection
# threshold), so we can then plot the drop log.
reject = dict(eeg=150e-6)
epochs = mne.Epochs(raw=raw, events=events, event_id=event_id,
                    tmin=-0.2, tmax=0.7, reject=reject, preload=True)
fig_drop_log = epochs.plot_drop_log(subject='sample', show=False)
# Now also plot an evoked response.
evoked_aud_left = epochs['auditory/left'].average()
fig_evoked = evoked_aud_left.plot(spatial_colors=True, show=False)
# Add both custom figures to one section of the report.
report.add_figs_to_section([fig_drop_log, fig_evoked],
                           captions=['Dropped Epochs',
                                     'Evoked: Left Auditory'],
                           section='drop-and-evoked')
report.save('report_custom.html', overwrite=True)
###############################################################################
# Adding a slider
# ^^^^^^^^^^^^^^^
#
# Sliders provide an intuitive way for users to interactively browse a
# predefined set of images. You can add sliders via
# :meth:`~mne.Report.add_slider_to_section`:
# Slider: one topomap per (subsampled) time point, browsable interactively.
report = mne.Report(verbose=True)
figs = list()
times = evoked_aud_left.times[::30]  # every 30th sample keeps the list small
for t in times:
    figs.append(evoked_aud_left.plot_topomap(t, vmin=-300, vmax=300, res=100,
                                             show=False))
    plt.close(figs[-1])  # free the figure; the report keeps its rendering
report.add_slider_to_section(figs, times, 'Evoked Response',
                             image_format='png')  # can also use 'svg'
report.save('report_slider.html', overwrite=True)
###############################################################################
# Adding ``SourceEstimate`` (STC) plot to a report
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now we see how :class:`~mne.Report` handles :class:`~mne.SourceEstimate`
# data. The following will produce a source time course (STC) plot with vertex
# time courses. In this scenario, we also demonstrate how to use the
# :meth:`mne.viz.Brain.screenshot` method to save the figs in a slider.
# Source estimate (STC): render each hemisphere, screenshot it, and put the
# screenshots in a slider.
report = mne.Report(verbose=True)
fname_stc = os.path.join(path, 'MEG', 'sample', 'sample_audvis-meg')
stc = mne.read_source_estimate(fname_stc, subject='sample')
figs = list()
kwargs = dict(subjects_dir=subjects_dir, initial_time=0.13,
              clim=dict(kind='value', lims=[3, 6, 9]))
for hemi in ('lh', 'rh'):
    brain = stc.plot(hemi=hemi, **kwargs)
    brain.toggle_interface(False)  # hide controls before the screenshot
    figs.append(brain.screenshot(time_viewer=True))
    brain.close()
# Add the STC screenshots to the report.
report.add_slider_to_section(figs)
report.save('report_stc.html', overwrite=True)
###############################################################################
# Managing report sections
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# The MNE report command internally manages the sections so that plots
# belonging to the same section are rendered consecutively. Within a section,
# the plots are ordered in the same order that they were added using the
# :meth:`~mne.Report.add_figs_to_section` command. Each section is identified
# by a toggle button in the top navigation bar of the report which can be used
# to show or hide the contents of the section. To toggle the show/hide state of
# all sections in the HTML report, press :kbd:`t`, or press the toggle-all
# button in the upper right.
#
# .. sidebar:: Structure
#
# Although we've been generating separate reports in each of these examples,
# you could easily create a single report for all :file:`.fif` files (raw,
# evoked, covariance, etc) by passing ``pattern='*.fif'``.
#
#
# Editing a saved report
# ^^^^^^^^^^^^^^^^^^^^^^
#
# Saving to HTML is a write-only operation, meaning that we cannot read an
# ``.html`` file back as a :class:`~mne.Report` object. In order to be able
# to edit a report once it's no longer in-memory in an active Python session,
# save it as an HDF5 file instead of HTML:
# HDF5 (unlike HTML) can be read back, so the report stays editable.
report.save('report.h5', overwrite=True)
report_from_disk = mne.open_report('report.h5')
print(report_from_disk)
###############################################################################
# This allows the possibility of multiple scripts adding figures to the same
# report. To make this even easier, :class:`mne.Report` can be used as a
# context manager:
# As a context manager: edits are saved back to report.h5 on exit.
with mne.open_report('report.h5') as report:
    report.add_figs_to_section(fig_evoked,
                               captions='Left Auditory',
                               section='evoked',
                               replace=True)
    report.save('report_final.html', overwrite=True)
###############################################################################
# With the context manager, the updated report is also automatically saved
# back to :file:`report.h5` upon leaving the block.
| bsd-3-clause |
dikien/Machine-Learning-Newspaper | nytimes/step4_BernoulliNB.py | 1 | 2163 | # -*- coding: UTF-8 -*-
# Python 2 script: train a Bernoulli naive-Bayes author classifier on
# preprocessed NYT articles, then predict the author of one held-out text.
from time import time
from step3_feature_engineering import preprocess_2
from sklearn.naive_bayes import BernoulliNB
from nltk.stem.snowball import SnowballStemmer
import numpy as np

# Load features/labels plus the fitted vectorizer, feature selector and
# label encoder produced in step 3.
features, labels, vectorizer, selector, le = preprocess_2("pkl/article_2_people.pkl", "pkl/lable_2_people.pkl")

t0 = time()
clf = BernoulliNB(alpha=1)
clf.fit(features, labels)
print "training time :", round(time()-t0, 3), "s"

t0 = time()
# s1 is NEIL GENZLINGER's article from http://www.nytimes.com/movies/movie/477894/Little-Hope-Was-Arson/overview
# Let's look at BernoulliNB will predict s1 is his article.
s1 = "There’s nothing fancy about “Little Hope Was Arson,” a documentary on the 2010 church fires in East Texas, and that’s the beauty of it. The filmmaker, Theo Love, presents the people in the story as they are, without passing judgment and without apology, whether they are investigators or pastors or just ordinary folks caught up in the inexplicable. It’s Americana unvarnished and, because of that, as absorbing as it is respectful. — Neil Genzlinger"

# Apply the same stemming used during training before vectorizing.
features_test = []
stemmer = SnowballStemmer("english", ignore_stopwords=True)
text_string = s1.decode('utf-8','ignore').split()
words = [stemmer.stem(text) for text in text_string]
words = " ".join(words)
features_test.append(words)
print "After stemmer s1 : %s " %features_test

# Vectorize and feature-select exactly as in training, then predict and map
# the numeric label back to the author name.
features_test = np.array(features_test)
features_test_transformed = vectorizer.transform(features_test)
features_test_transformed = selector.transform(features_test_transformed).toarray()
y_pred = clf.predict(features_test_transformed)
print le.inverse_transform(y_pred)
'''
training time : 0.102 s
After stemmer s1 : [u'there noth fanci about \u201clittl hope was arson,\u201d a documentari on the 2010 church fire in east texas, and that the beauti of it. the filmmaker, theo love, present the peopl in the stori as they are, without pass judgment and without apology, whether they are investig or pastor or just ordinari folk caught up in the inexplicable. it americana unvarnish and, because of that, as absorb as it is respectful. \u2014 neil genzling']
['NEIL GENZLINGER']
''' | bsd-3-clause |
GaZ3ll3/scikit-image | doc/examples/plot_blob.py | 18 | 2796 | """
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate and slowest approach. It computes the Laplacian
of Gaussian images with successively increasing standard deviation and
stacks them up in a cube. Blobs are local maximas in this cube. Detecting
larger blobs is especially slower because of larger kernel sizes during
convolution. Only bright blobs on dark backgrounds are detected. See
:py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of LoG approach. In this case the image is
blurred with increasing standard deviations and the difference between
two successively blurred images are stacked up in a cube. This method
suffers from the same disadvantage as LoG approach for detecting larger
blobs. Blobs are again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maximas in the
matrix of the Determinant of Hessian of the image. The detection speed is
independent of the size of blobs as internally the implementation uses
box filters instead of convolutions. Bright on dark as well as dark on
bright blobs are detected. The downside is that small blobs (<3px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from math import sqrt
from skimage.color import rgb2gray
# Detect blobs in a crop of the Hubble eXtreme Deep Field with the three
# detectors (LoG, DoG, DoH) and overlay the detections as circles.
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)

blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column: for LoG/DoG the returned sigma relates to
# the blob radius by a factor sqrt(2).
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)

blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)

# DoH already returns the radius directly, so no scaling here.
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)

# One figure per detector, circles colored per method.
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
          'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)

for blobs, color, title in sequence:
    fig, ax = plt.subplots(1, 1)
    ax.set_title(title)
    ax.imshow(image, interpolation='nearest')
    for blob in blobs:
        y, x, r = blob  # each detection is (row, col, radius)
        c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
        ax.add_patch(c)

plt.show()
| bsd-3-clause |
vortex-exoplanet/VIP | vip_hci/var/shapes.py | 2 | 27456 | #! /usr/bin/env python
"""
Module with various functions to create shapes, annuli and segments.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'

# Names exported by `from vip_hci.var.shapes import *`.
__all__ = ['dist',
           'dist_matrix',
           'frame_center',
           'get_square',
           'get_circle',
           'get_ellipse',
           'get_annulus_segments',
           'get_annular_wedge',
           'get_ell_annulus',
           'mask_circle',
           'create_ringed_spider_mask',
           'matrix_scaling',
           'prepare_matrix',
           'reshape_matrix']
import numpy as np
from skimage.draw import polygon
from skimage.draw import circle
from sklearn.preprocessing import scale
from ..conf.utils_conf import frame_or_shape
def mask_circle(array, radius, fillwith=0, mode='in'):
    """
    Mask the pixels inside/outside of a centered circle with ``fillwith``.

    Returns a modified copy of ``array``.

    Parameters
    ----------
    array : 2d/3d/4d numpy ndarray
        Input frame or cube.
    radius : int
        Radius of the circular aperture.
    fillwith : int, float or np.nan, optional
        Value to put instead of the masked out pixels.
    mode : {'in', 'out'}, optional
        When set to 'in' then the pixels inside the radius are set to
        ``fillwith``. When set to 'out' the pixels outside the circular mask are
        set to ``fillwith``.

    Returns
    -------
    array_masked : numpy ndarray
        Masked frame or cube.
    """
    if not isinstance(fillwith, (int, float)):
        raise ValueError('`fillwith` must be integer, float or np.nan')

    cy, cx = frame_center(array)
    # Spatial (y, x) shape: the last two axes, so 3d/4d cubes work too.
    shape = (array.shape[-2],array.shape[-1])
    # skimage.draw.circle yields the (rows, cols) of pixels inside the disk,
    # clipped to `shape`.
    ind = circle(cy, cx, radius, shape=shape)

    if mode == 'in':
        array_masked = array.copy()
        if array.ndim == 2:
            array_masked[ind] = fillwith
        elif array.ndim == 3:
            # NOTE(review): `ind` is (rows, cols) but is applied below as
            # [ind[1], ind[0]] (cols, rows). For a disk centered on a square
            # frame this is symmetric and harmless, but it looks transposed
            # for non-square frames — confirm the intended index order.
            array_masked[:, ind[1], ind[0]] = fillwith
        elif array.ndim == 4:
            array_masked[:, :, ind[1], ind[0]] = fillwith

    elif mode == 'out':
        # Start from an array entirely filled with `fillwith`, then copy
        # back only the pixels inside the disk.
        array_masked = np.full_like(array, fillwith)
        if array.ndim == 2:
            array_masked[ind] = array[ind]
        elif array.ndim == 3:
            array_masked[:, ind[1], ind[0]] = array[:, ind[1], ind[0]]
        elif array.ndim == 4:
            array_masked[:, :, ind[1], ind[0]] = array[:, :, ind[1], ind[0]]

    return array_masked
def create_ringed_spider_mask(im_shape, ann_out, ann_in=0, sp_width=10,
                              sp_angle=0, nlegs=6):
    """
    Mask out information outside the annulus and inside the spiders (zeros).

    Parameters
    ----------
    im_shape : tuple of int
        Tuple of length two with 2d array shape (Y,X).
    ann_out : int
        Outer radius of the annulus.
    ann_in : int, opt
        Inner radius of the annulus.
    sp_width : int, opt
        Width of the spider arms (6 legs by default).
    sp_angle : int, opt
        angle of the first spider arm (on the positive horizontal axis) in
        counter-clockwise sense.
    nlegs: int, opt
        Number of legs of the spider.

    Returns
    -------
    mask : numpy ndarray
        2d array of zeros and ones.
    """
    mask = np.zeros(im_shape)
    # Each "branch" is a pair of opposite legs, masked as a single polygon.
    nbranch = int(nlegs/2)
    s = im_shape
    r = min(s)/2
    # Half-angle subtended by half the spider width at radius r.
    theta = np.arctan2(sp_width/2, r)

    cy, cx = frame_center(mask)
    # Fill the outer disk with ones (outer boundary of the annulus).
    rr0, cc0 = circle(cy, cx, min(ann_out, cy))
    mask[rr0, cc0] = 1

    # Four azimuths delimiting the polygon of one branch (both legs).
    t0 = np.array([theta, np.pi-theta, np.pi+theta, np.pi*2 - theta])

    if isinstance(sp_angle, (list,np.ndarray)):
        # Explicit per-branch angles, taken relative to the first one.
        dtheta = [sp_angle[i]-sp_angle[0] for i in range(nbranch)]
    else:
        # Spread the branches evenly over 180 degrees.
        sp_angle = [sp_angle]
        dtheta = [i*np.pi/nbranch for i in range(nbranch)]

    tn = np.zeros([nbranch,4])
    xn = np.zeros_like(tn)
    yn = np.zeros_like(tn)
    for i in range(nbranch):
        # NOTE(review): t0 is in radians and sp_angle in degrees, yet in the
        # else-branch above dtheta is computed in radians and still passed
        # through np.deg2rad here — confirm the intended units for dtheta.
        tn[i] = t0 + np.deg2rad(sp_angle[0] + dtheta[i])
        xn[i] = r * np.cos(tn[i]) + s[1]/2
        yn[i] = r * np.sin(tn[i]) + s[0]/2
        # Zero out the polygon covering this pair of spider legs.
        rrn, ccn = polygon(yn[i], xn[i])
        mask[rrn, ccn] = 0

    # Clear the central disk up to the annulus inner radius.
    rr4, cc4 = circle(cy, cx, ann_in)
    mask[rr4, cc4] = 0
    return mask
def dist(yc, xc, y1, x1):
    """
    Return the Euclidean distance between two points, or between an array
    of positions and a point.
    """
    delta_y = yc - y1
    delta_x = xc - x1
    return np.sqrt(delta_y ** 2 + delta_x ** 2)
def dist_matrix(n, cx=None, cy=None):
    """
    Build an (n, n) image whose pixel values are the Euclidean distance to
    a reference point (cx, cy).

    Parameters
    ----------
    n : int
        Output image shape is (n, n).
    cx, cy : float, optional
        Reference point. Defaults to the center.

    Returns
    -------
    im : ndarray with shape (n, n)

    Notes
    -----
    This is a replacement for ANDROMEDA's DISTC.
    """
    # Default reference: the image center (fractional for even n).
    center = (n - 1) / 2
    if cx is None:
        cx = center
    if cy is None:
        cy = center
    yy, xx = np.mgrid[:n, :n]
    return np.sqrt((yy - cy) ** 2 + (xx - cx) ** 2)
def frame_center(array, verbose=False):
    """
    Return the coordinates y, x of the frame(s) center.

    Parameters
    ----------
    array : 2d/3d/4d numpy ndarray
        Frame or cube.
    verbose : bool optional
        If True the center coordinates are printed out.

    Returns
    -------
    cy, cx : float
        Coordinates of the center.
    """
    ndim = array.ndim
    if ndim == 2:
        shape = array.shape
    elif ndim == 3:
        shape = array[0].shape
    elif ndim == 4:
        shape = array[0, 0].shape
    else:
        raise ValueError('`array` is not a 2d, 3d or 4d array')

    # For even sizes the center falls between pixels, hence the -0.5 offset
    # (e.g. a 4-px axis has its center at 1.5).
    cy, cx = (dim / 2 - 0.5 for dim in shape)

    if verbose:
        print('Center px coordinates at x,y = ({}, {})'.format(cx, cy))
    return cy, cx
def get_square(array, size, y, x, position=False, force=False, verbose=True):
    """
    Return an square subframe from a 2d array or image.

    Parameters
    ----------
    array : 2d numpy ndarray
        Input frame.
    size : int
        Size of the subframe.
    y : int
        Y coordinate of the center of the subframe (obtained with the function
        ``frame_center``).
    x : int
        X coordinate of the center of the subframe (obtained with the function
        ``frame_center``).
    position : bool, optional
        If set to True return also the coordinates of the bottom-left vertex.
    force : bool, optional
        Size and the size of the 2d array must be both even or odd. With
        ``force`` set to True this condition can be avoided.
    verbose : bool optional
        If True, warning messages might be shown.

    Returns
    -------
    array_out : numpy ndarray
        Sub array.
    y0, x0 : int
        [position=True] Coordinates of the bottom-left vertex.
    """
    size_init_y = array.shape[0]
    size_init_x = array.shape[1]
    size_init = array.shape[0]  # "force" cases assume square input frame
    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array.')
    if not isinstance(size, int):
        raise TypeError('`Size` must be integer')
    if size >= size_init_y and size >= size_init_x:  # assuming square frames
        msg = "`Size` is equal to or bigger than the initial frame size"
        raise ValueError(msg)

    if not force:
        # Match the parity of `size` to the parity of the frame size so the
        # subframe stays symmetric around (y, x); bump `size` by one if not.
        # Even input size
        if size_init % 2 == 0:
            # Odd size
            if size % 2 != 0:
                size += 1
                if verbose:
                    print("`Size` is odd (while input frame size is even). "
                          "Setting `size` to {} pixels".format(size))
        # Odd input size
        else:
            # Even size
            if size % 2 == 0:
                size += 1
                if verbose:
                    print("`Size` is even (while input frame size is odd). "
                          "Setting `size` to {} pixels".format(size))
    else:
        # `force` keeps the requested size; only warn on a parity mismatch.
        # Even input size
        if size_init % 2 == 0:
            # Odd size
            if size % 2 != 0 and verbose:
                print("WARNING: `size` is odd while input frame size is even. "
                      "Make sure the center coordinates are set properly")
        # Odd input size
        else:
            # Even size
            if size % 2 == 0 and verbose:
                print("WARNING: `size` is even while input frame size is odd. "
                      "Make sure the center coordinates are set properly")

    # wing is added to the sides of the subframe center
    wing = (size - 1) / 2

    y0 = int(y - wing)
    y1 = int(y + wing + 1)  # +1 cause endpoint is excluded when slicing
    x0 = int(x - wing)
    x1 = int(x + wing + 1)

    if y0 < 0 or x0 < 0 or y1 > size_init_y or x1 > size_init_x:
        # assuming square frames
        raise RuntimeError('square cannot be obtained with size={}, y={}, x={}'
                           ''.format(size, y, x))

    array_out = array[y0: y1, x0: x1].copy()

    if position:
        return array_out, y0, x0
    else:
        return array_out
def get_circle(array, radius, cy=None, cx=None, mode="mask"):
    """
    Return a centered circular region from a 2d ndarray.

    Parameters
    ----------
    array : numpy ndarray
        Input 2d array or image.
    radius : int
        The radius of the circular region.
    cy, cx : int, optional
        Coordinates of the circle center. If one of them is ``None``, the
        center of ``array`` is used.
    mode : {'mask', 'val'}, optional
        Controls what is returned: array with circular mask applied, or
        values of the pixels in the circular region.

    Returns
    -------
    masked : numpy ndarray
        [mode="mask"] Input array with the circular mask applied.
    values : numpy ndarray
        [mode="val"] 1d array with the values of the pixels in the circular
        region.
    """
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array.')
    sy, sx = array.shape
    if cy is None or cx is None:
        cy, cx = frame_center(array, verbose=False)

    # Open mesh grids broadcast to the full frame without materializing it.
    yy, xx = np.ogrid[:sy, :sx]
    # Compare squared distances to avoid a square root.
    inside = (yy - cy) ** 2 + (xx - cx) ** 2 < radius ** 2

    if mode == "mask":
        return array * inside
    if mode == "val":
        return array[inside]
    raise ValueError("mode '{}' unknown!".format(mode))
def get_ellipse(data, a, b, pa, cy=None, cx=None, mode="ind"):
    """
    Return a centered elliptical region from a 2d ndarray.

    Parameters
    ----------
    data : numpy ndarray or tuple
        Input 2d array (image) or tuple with a shape.
    a : float
        Semi-major axis.
    b : float
        Semi-minor axis.
    pa : deg, float
        The PA of the semi-major axis in degrees.
    cy, cx : int or None, optional
        Coordinates of the circle center. If ``None``, the center is
        determined by the ``frame_center`` function.
    mode : {'ind', 'val', 'mask', 'bool'}, optional
        Controls what is returned: indices of selected pixels, values of
        selected pixels, a masked copy, or a boolean mask.

    Returns
    -------
    indices : tuple(y, x)
        [mode='ind'] Coordinates of the inner elliptical region.
    values : 1d ndarray
        [mode='val'] Values of the pixels in the inner elliptical region.
    masked : 2d ndarray
        [mode='mask'] Input image where the outer region is masked with ``0``.
    bool_mask : 2d boolean ndarray
        [mode='bool'] A boolean mask where ``True`` is the inner region.
    """
    array = frame_or_shape(data)

    if cy is None or cx is None:
        cy, cx = frame_center(array, verbose=False)

    # Distance between the ellipse center and each focus.
    focal = np.sqrt(a ** 2 - b ** 2)
    pa_rad = np.deg2rad(pa)
    off_y = focal * np.cos(pa_rad)
    off_x = focal * np.sin(pa_rad)

    # A point is inside the ellipse iff the sum of its distances to the two
    # foci is below 2a.
    yy, xx = np.ogrid[:array.shape[0], :array.shape[1]]
    d_f1 = np.sqrt((yy - (cy + off_y)) ** 2 + (xx - (cx + off_x)) ** 2)
    d_f2 = np.sqrt((yy - (cy - off_y)) ** 2 + (xx - (cx - off_x)) ** 2)
    inside = d_f1 + d_f2 < 2 * a

    if mode == "ind":
        return np.where(inside)
    elif mode == "val":
        return array[inside]
    elif mode == "mask":
        return array * inside
    elif mode == "bool":
        return inside
    else:
        raise ValueError("mode '{}' unknown!".format(mode))
def get_annulus_segments(data, inner_radius, width, nsegm=1, theta_init=0,
                         optim_scale_fact=1, mode="ind"):
    """
    Return indices, values or masks for azimuthal segments of a centered
    annulus.

    The annulus is defined by ``inner_radius <= annulus < inner_radius+width``.

    Parameters
    ----------
    data : 2d numpy ndarray or tuple
        Input 2d array (image) or tuple with its shape.
    inner_radius : float
        The inner radius of the donut region.
    width : float
        The size of the annulus.
    nsegm : int
        Number of segments of annulus to be extracted.
    theta_init : int
        Initial azimuth [degrees] of the first segment, counting from the
        positive x-axis counterclockwise.
    optim_scale_fact : float
        To enlarge the width of the segments, which can then be used as
        optimization segments (e.g. in LOCI).
    mode : {'ind', 'val', 'mask'}, optional
        Controls what is returned: indices of selected pixels, values of
        selected pixels, or masked copies of the input.

    Returns
    -------
    indices : list of ndarrays
        [mode='ind'] Coordinates of pixels for each annulus segment.
    values : list of ndarrays
        [mode='val'] Pixel values.
    masked : list of ndarrays
        [mode='mask'] Copy of ``data`` with masked out regions.
    """
    array = frame_or_shape(data)

    if not isinstance(nsegm, int):
        raise TypeError('`nsegm` must be an integer')

    cy, cx = frame_center(array)
    # Angular span of one segment, in radians.
    azimuth_coverage = np.deg2rad(int(np.ceil(360 / nsegm)))
    twopi = 2 * np.pi

    # Polar coordinates of every pixel relative to the frame center; the
    # azimuth is wrapped into [0, 2*pi).
    yy, xx = np.mgrid[:array.shape[0], :array.shape[1]]
    rad = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
    phirot = np.arctan2(yy - cy, xx - cx) % twopi

    outer_radius = inner_radius + (width * optim_scale_fact)
    annulus = (rad >= inner_radius) & (rad < outer_radius)

    masks = []
    for seg in range(nsegm):
        phi_start = np.deg2rad(theta_init) + seg * azimuth_coverage
        phi_end = phi_start + azimuth_coverage

        if phi_start < twopi < phi_end:
            # Segment wraps through azimuth 0: union of the two arcs.
            in_azimuth = (((phirot >= phi_start) & (phirot <= twopi)) |
                          ((phirot >= 0) & (phirot < phi_end - twopi)))
        elif phi_start >= twopi:
            # Both bounds beyond 2*pi: shift the whole interval back.
            in_azimuth = ((phirot >= phi_start - twopi) &
                          (phirot < phi_end - twopi))
        else:
            in_azimuth = (phirot >= phi_start) & (phirot < phi_end)

        masks.append(annulus & in_azimuth)

    if mode == "ind":
        return [np.where(m) for m in masks]
    elif mode == "val":
        return [array[m] for m in masks]
    elif mode == "mask":
        return [array * m for m in masks]
    else:
        raise ValueError("mode '{}' unknown!".format(mode))
def get_annular_wedge(data, inner_radius, width, wedge=(0,360), mode="ind"):
    """
    Return indices, values or a mask for an annular wedge of a 2d frame.

    The annulus is defined by ``inner_radius <= r < inner_radius + width``
    and the wedge by the azimuth range given in ``wedge``.

    Parameters
    ----------
    data : 2d numpy ndarray or tuple
        Input 2d array (image) or tuple with its shape.
    inner_radius : float
        The inner radius of the donut region.
    width : float
        The size of the annulus.
    wedge : tuple of 2 floats
        Initial and final azimuths [degrees] of the annular segment, counting
        from the positive x-axis counter-clockwise.
    mode : {'ind', 'val', 'mask'}, optional
        Controls what is returned: indices of selected pixels, values of
        selected pixels, or a masked copy of the input.

    Returns
    -------
    indices : tuple of ndarrays
        [mode='ind'] Coordinates of the pixels inside the wedge.
    values : 1d ndarray
        [mode='val'] Pixel values.
    masked : 2d ndarray
        [mode='mask'] Copy of ``data`` with the region outside the wedge
        zeroed out.

    Raises
    ------
    ValueError
        If ``mode`` is not one of the supported strings.
    """
    array = frame_or_shape(data)
    cy, cx = frame_center(array)
    # Angular extent of the wedge, in radians.
    azimuth_coverage = np.deg2rad(wedge[1]-wedge[0])
    twopi = 2 * np.pi
    yy, xx = np.mgrid[:array.shape[0], :array.shape[1]]
    rad = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
    phi = np.arctan2(yy - cy, xx - cx)
    # Wrap azimuths into [0, 2*pi) so the comparisons below are consistent.
    phirot = phi % twopi
    outer_radius = inner_radius + width
    phi_start = np.deg2rad(wedge[0])
    phi_end = phi_start + azimuth_coverage
    if phi_start < twopi and phi_end > twopi:
        # Wedge straddles the 0 / 2*pi boundary: union of the two sub-ranges.
        mask = ((rad >= inner_radius) & (rad < outer_radius) &
                (phirot >= phi_start) & (phirot <= twopi) |
                (rad >= inner_radius) & (rad < outer_radius) &
                (phirot >= 0) & (phirot < phi_end - twopi))
    elif phi_start >= twopi and phi_end > twopi:
        # Whole wedge lies beyond 2*pi: shift both bounds back by one turn.
        mask = ((rad >= inner_radius) & (rad < outer_radius) &
                (phirot >= phi_start - twopi) &
                (phirot < phi_end - twopi))
    else:
        mask = ((rad >= inner_radius) & (rad < outer_radius) &
                (phirot >= phi_start) & (phirot < phi_end))
    if mode == "ind":
        return np.where(mask)
    elif mode == "val":
        return array[mask]
    elif mode == "mask":
        return array*mask
    else:
        raise ValueError("mode '{}' unknown!".format(mode))
def get_ell_annulus(data, a, b, PA, width, cy=None, cx=None, mode="ind"):
    """
    Return a centered elliptical annulus from a 2d ndarray.

    The annulus is the region between two concentric ellipses; in 'mask'
    mode all other pixels are set to zero.

    Parameters
    ----------
    data : numpy ndarray or tuple
        Input 2d array (image) or tuple with a shape.
    a : float
        Semi-major axis.
    b : float
        Semi-minor axis.
    PA : deg, float
        The PA of the semi-major axis in degrees.
    width : float
        The size of the annulus along the semi-major axis; it is proportionnally
        thinner along the semi-minor axis.
    cy, cx : int or None, optional
        Coordinates of the circle center. If ``None``, the center is determined
        by the ``frame_center`` function.
    mode : {'ind', 'val', 'mask', 'bool'}, optional
        Controls what is returned: indices of selected pixels, values of
        selected pixels, a masked copy of the input, or the boolean mask
        itself.

    Returns
    -------
    indices : tuple(y, x)
        [mode='ind'] Coordinates of the annulus region.
    values : 1d ndarray
        [mode='val'] Values of the pixels in the annulus region.
    masked : 2d ndarray
        [mode='mask'] Input image where the outer region is masked with ``0``.
    bool_mask : 2d boolean ndarray
        [mode='bool'] True inside the elliptical annulus.

    Raises
    ------
    ValueError
        If ``mode`` is not one of the supported strings.
    """
    array = frame_or_shape(data)
    hwa = width / 2  # half width for a
    hwb = (width * b / a) / 2  # half width for b
    # Annulus = XOR of the big and small co-aligned ellipses.
    big_ellipse = get_ellipse(array, a + hwa, b + hwb, PA, cy=cy, cx=cx,
                              mode="bool")
    small_ellipse = get_ellipse(array, a - hwa, b - hwb, PA, cy=cy, cx=cx,
                                mode="bool")
    ell_ann_mask = big_ellipse ^ small_ellipse
    if mode == "ind":
        return np.where(ell_ann_mask)
    elif mode == "val":
        return array[ell_ann_mask]
    elif mode == "mask":
        return array * ell_ann_mask
    elif mode == "bool":
        return ell_ann_mask
    else:
        raise ValueError("mode '{}' unknown!".format(mode))
def matrix_scaling(matrix, scaling):
    """
    Scale a matrix using the ``sklearn.preprocessing.scale`` function.

    Parameters
    ----------
    matrix : 2d numpy ndarray
        Input 2d array.
    scaling : None or str
        Scaling method:
        ``None``
            no scaling is performed on the input data before SVD
        ``"temp-mean"``
            temporal px-wise mean subtraction
        ``"spat-mean"``
            the spatial mean is subtracted
        ``"temp-standard"``
            temporal mean centering plus scaling to unit variance
        ``"spat-standard"``
            spatial mean centering plus scaling to unit variance

    Returns
    -------
    matrix : 2d numpy ndarray
        2d array with scaled values.

    Raises
    ------
    ValueError
        If ``scaling`` is not one of the recognized modes.
    """
    if scaling is None:
        return matrix
    # Each mode maps onto a keyword combination of sklearn's ``scale``:
    # "temp-*" works along axis 0 (default), "spat-*" along axis 1.
    scale_kwargs = {
        'temp-mean': dict(with_mean=True, with_std=False),
        'spat-mean': dict(with_mean=True, with_std=False, axis=1),
        'temp-standard': dict(with_mean=True, with_std=True),
        'spat-standard': dict(with_mean=True, with_std=True, axis=1),
    }
    try:
        kwargs = scale_kwargs[scaling]
    except KeyError:
        raise ValueError('Scaling mode not recognized')
    return scale(matrix, **kwargs)
def prepare_matrix(array, scaling=None, mask_center_px=None, mode='fullfr',
                   inner_radius=None, outer_radius=None, verbose=True):
    """
    Build the matrix for the SVD/PCA and other matrix decompositions.

    Center the data and mask the frame's central area if needed.

    Parameters
    ----------
    array : 3d numpy ndarray
        Input cube.
    scaling : {None, "temp-mean", "spat-mean", "temp-standard",
        "spat-standard"}, None or str optional
        Pixel-wise scaling mode using ``sklearn.preprocessing.scale``
        function. If set to None, the input matrix is left untouched.
        Otherwise:
        ``temp-mean``: temporal px-wise mean is subtracted.
        ``spat-mean``: spatial mean is subtracted.
        ``temp-standard``: temporal mean centering plus scaling pixel values
        to unit variance.
        ``spat-standard``: spatial mean centering plus scaling pixel values
        to unit variance.
    mask_center_px : None or int, optional
        [mode='fullfr'] Whether to mask the center of the frames or not.
    mode : {'fullfr', 'annular'}, optional
        Whether to use the whole frames or a single annulus.
    inner_radius : int or float, optional
        [mode='annular'] Distance in pixels from the center of the frame to
        the inner radius of the annulus.
    outer_radius : int or float, optional
        [mode='annular'] Distance in pixels from the center of the frame to
        the outer radius of the annulus.
    verbose : bool, optional
        If True prints intermediate info.

    Returns
    -------
    matrix : 2d numpy ndarray
        Out matrix whose rows are vectorized frames from the input cube.
    ind : tuple
        [mode='annular'] Indices of the annulus as ``(yy, xx)``.

    Raises
    ------
    ValueError
        If ``mode`` is unknown, or if the annulus radii are missing in
        annular mode.
    """
    if mode == 'annular':
        if inner_radius is None or outer_radius is None:
            raise ValueError('`inner_radius` and `outer_radius` must be defined'
                             ' in annular mode')
        fr_size = array.shape[1]
        annulus_width = int(np.round(outer_radius - inner_radius))
        ind = get_annulus_segments((fr_size, fr_size), inner_radius,
                                   annulus_width, nsegm=1)[0]
        yy, xx = ind
        # Keep only the annulus pixels of every frame.
        matrix = array[:, yy, xx]
        matrix = matrix_scaling(matrix, scaling)
        if verbose:
            msg = 'Done vectorizing the cube annulus. Matrix shape: ({}, {})'
            print(msg.format(matrix.shape[0], matrix.shape[1]))
        return matrix, ind
    elif mode == 'fullfr':
        if mask_center_px:
            array = mask_circle(array, mask_center_px)
        nfr = array.shape[0]
        matrix = np.reshape(array, (nfr, -1))  # == for i: array[i].flatten()
        matrix = matrix_scaling(matrix, scaling)
        if verbose:
            msg = 'Done vectorizing the frames. Matrix shape: ({}, {})'
            print(msg.format(matrix.shape[0], matrix.shape[1]))
        return matrix
    else:
        # Bug fix: an unknown mode previously fell through and silently
        # returned None; fail loudly, matching the sibling helpers.
        raise ValueError("mode '{}' unknown!".format(mode))
def reshape_matrix(array, y, x):
    """
    Convert a matrix whose rows are vectorized frames into a cube.

    Parameters
    ----------
    array : 2d ndarray
        Input data of shape ``(nframes, npixels)``. Every row
        (``array[n]``) is one vectorized ("flattened") 2d frame.
    y, x : int
        Desired height and width of the frames, with ``y * x == npixels``.

    Returns
    -------
    cube : 3d ndarray
        Cube of shape ``(nframes, y, x)``.

    Examples
    --------
    .. code:: python

        In [1]: vect_frames = np.array([[1, 1, 1, 2, 2, 2], [1, 2, 3, 4, 5, 6]])
        In [2]: cube = vip.var.reshape_matrix(vect_frames, 2, 3)
        In [3]: cube
        Out[3]:
        array([[[1, 1, 1],
                [2, 2, 2]],
               [[1, 2, 3],
                [4, 5, 6]]])
        In [4]: cube.shape
        Out[4]: (2, 2, 3)
    """
    n_frames = array.shape[0]
    return np.reshape(array, (n_frames, y, x))
| mit |
mugizico/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
# L1-normalize the rows so the samples look like histograms, which the
# chi-squared-style kernels below expect (non-negative, normalized).
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
    # test that AdditiveChi2Sampler approximates kernel on random data
    # compute exact kernel
    # abbreviations for an easier formula
    X_ = X[:, np.newaxis, :]
    Y_ = Y[np.newaxis, :, :]
    large_kernel = 2 * X_ * Y_ / (X_ + Y_)
    # reduce to n_samples_x x n_samples_y by summing over features
    kernel = (large_kernel.sum(axis=2))
    # approximate kernel mapping
    transform = AdditiveChi2Sampler(sample_steps=3)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)
    # only 1 decimal of agreement: the feature map is an approximation
    assert_array_almost_equal(kernel, kernel_approx, 1)
    # sparse input must give the same result as dense input
    X_sp_trans = transform.fit_transform(csr_matrix(X))
    Y_sp_trans = transform.transform(csr_matrix(Y))
    assert_array_equal(X_trans, X_sp_trans.A)
    assert_array_equal(Y_trans, Y_sp_trans.A)
    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)
    # test error on invalid sample_steps
    transform = AdditiveChi2Sampler(sample_steps=4)
    assert_raises(ValueError, transform.fit, X)
    # test that the sample interval is set correctly
    sample_steps_available = [1, 2, 3]
    for sample_steps in sample_steps_available:
        # test that the sample_interval is initialized correctly
        transform = AdditiveChi2Sampler(sample_steps=sample_steps)
        assert_equal(transform.sample_interval, None)
        # test that the sample_interval is changed in the fit method
        transform.fit(X)
        assert_not_equal(transform.sample_interval_, None)
    # test that the sample_interval is set correctly
    sample_interval = 0.3
    transform = AdditiveChi2Sampler(sample_steps=4,
                                    sample_interval=sample_interval)
    assert_equal(transform.sample_interval, sample_interval)
    transform.fit(X)
    assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates the skewed chi^2 kernel
    # on random data
    # compute exact kernel
    c = 0.03
    # abbreviations for an easier formula
    X_c = (X + c)[:, np.newaxis, :]
    Y_c = (Y + c)[np.newaxis, :, :]
    # we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y big x n_features
    log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
                  np.log(X_c + Y_c))
    # reduce to n_samples_x x n_samples_y by summing over features in log-space
    kernel = np.exp(log_kernel.sum(axis=2))
    # approximate kernel mapping
    transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
                                  random_state=42)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)
    # only 1 decimal of agreement: the feature map is an approximation
    assert_array_almost_equal(kernel, kernel_approx, 1)
    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
    # RBFSampler's random Fourier features should approximate the RBF
    # kernel on random data.
    gamma = 10.
    exact_kernel = rbf_kernel(X, Y, gamma=gamma)
    # approximate kernel mapping
    sampler = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
    X_feat = sampler.fit_transform(X)
    Y_feat = sampler.transform(Y)
    approx_kernel = np.dot(X_feat, Y_feat.T)
    residual = exact_kernel - approx_kernel
    # close to unbiased
    assert_less_equal(np.abs(np.mean(residual)), 0.01)
    abs_residual = np.abs(residual)
    # nothing too far off
    assert_less_equal(np.max(abs_residual), 0.1)
    # mean is fairly close
    assert_less_equal(np.mean(abs_residual), 0.05)
def test_input_validation():
    # Regression test: kernel approximation transformers should accept
    # plain Python lists. No assertions; the old versions simply crashed.
    X = [[1, 2], [3, 4], [5, 6]]
    for transformer in (AdditiveChi2Sampler(), SkewedChi2Sampler(),
                        RBFSampler()):
        transformer.fit(X).transform(X)
    X_sparse = csr_matrix(X)
    RBFSampler().fit(X_sparse).transform(X_sparse)
def test_nystroem_approximation():
    # some basic tests
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 4))
    # With n_components = n_samples this is exact
    X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
    K = rbf_kernel(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
    # reduced number of components gives the requested output shape
    trans = Nystroem(n_components=2, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))
    # test callable kernel
    linear_kernel = lambda X, Y: np.dot(X, Y.T)
    trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))
    # test that available kernels fit and transform
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
        X_transformed = trans.fit(X).transform(X)
        assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
    # test that nystroem works with singular kernel matrix
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    X = np.vstack([X] * 2)  # duplicate samples -> rank-deficient kernel
    gamma = 100
    N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
    X_transformed = N.transform(X)
    K = rbf_kernel(X, gamma=gamma)
    assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    # Bug fix: this previously asserted finiteness of the module-level
    # ``Y``, which is unrelated to this test; the intent is to check that
    # the singular kernel did not produce NaN/inf in the transformed data.
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
    # Non-regression: Nystroem should pass other parameters beside gamma.
    rnd = np.random.RandomState(37)
    X = rnd.uniform(size=(10, 4))
    K = polynomial_kernel(X, degree=3.1, coef0=.1)
    # with n_components == n_samples the approximation is exact, so the
    # reconstructed kernel must equal the polynomial kernel computed with
    # the same degree/coef0 -- proving they were forwarded.
    nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
                        degree=3.1, coef0=.1)
    X_transformed = nystroem.fit_transform(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
    # Test Nystroem on a callable.
    rnd = np.random.RandomState(42)
    n_samples = 10
    X = rnd.uniform(size=(n_samples, 4))
    def logging_histogram_kernel(x, y, log):
        """Histogram kernel that writes to a log."""
        log.append(1)
        return np.minimum(x, y).sum()
    kernel_log = []
    X = list(X)  # test input validation
    Nystroem(kernel=logging_histogram_kernel,
             n_components=(n_samples - 1),
             kernel_params={'log': kernel_log}).fit(X)
    # fitting evaluates the kernel once per unordered sample pair
    assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
GedRap/voyager | backtesting/Portfolio.py | 2 | 5132 | import pandas as pd
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
from pandas import *
#Holds portfolio-related data such as available cash and assets held.
#Performs calculations related to orders and assets (e.g. the value of
#all assets at a given time).
class Portfolio:
    """Portfolio state for backtesting.

    Holds cash and asset positions and performs the calculations related
    to orders and holdings (e.g. the value of all assets at a given date).
    """
    def __init__(self, market, cash):
        """
        Initialize the portfolio
        Creates empty data frames, time series and other data structures
        which will be populated during the execution
        """
        self.market = market
        #List of timestamps with dates when market is open
        self.trading_days = self.market.get_trading_days()
        #Initial cash
        self.cash = cash
        #List of orders to be executed
        self.orders = []
        #Set of traded symbols
        self.traded_symbols = set()
        #Timeseries, which stores cash balance
        self.cash_ts = pd.Series(cash, index=self.trading_days)
        #Products of number of shares and the stock price at any given date
        self.holdings_value = DataFrame(self.market.get_trading_days_ts())
        #Total value of all assets held at given date
        self.holdings_value_sum = pd.Series(0,index=self.trading_days)
        #Number of shares held at a given date
        self.holdings_shares = DataFrame(self.market.get_trading_days_ts())
        #Overall portfolio value (holdings+cash)
        self.portfolio_value = pd.Series(0, index=self.trading_days)
    def add_order(self, order):
        """Add order to the list of orders to be executed"""
        self.orders.append(order)
    def sort_orders(self):
        """Sort orders by timestamp in the ascending order"""
        self.orders.sort(key=lambda x: x.timestamp, reverse=False)
    def calculate_number_of_shares_held(self):
        """
        'Execute' all orders held in the portfolio.
        Populates holdings_shares data frame, which stores number
        of shares held for given symbol and given time
        """
        # Orders must be applied chronologically: update_cash_ts_with_order
        # overwrites the cash balance from each order's timestamp onwards.
        self.sort_orders()
        for order in self.orders:
            if not order.symbol in self.holdings_shares:
                #symb_time_series = Series(0, index=self.market.get_trading_days())
                # first order for this symbol: start a zeroed column
                self.holdings_shares[order.symbol] = 0
                self.traded_symbols.add(order.symbol)
            symb_time_series = self.holdings_shares[order.symbol]
            self.holdings_shares[order.symbol] = order.update_number_of_shares_held(symb_time_series)
            self.update_cash_ts_with_order(order)
    def update_cash_ts_with_order(self, order, price='close'):
        """
        Execute order on cash time series
        Calculates the value of the order and updates the time
        series of cash balance respectively
        """
        quantity = order.quantity
        sharePrice = self.market.get_stock_price(order.symbol,order.timestamp,price)
        orderValue = quantity * sharePrice
        if order.type == order.TYPE_BUY:
            # buying decreases cash; selling increases it
            orderValue = orderValue * -1
        # NOTE(review): the slice assignment sets the balance from
        # order.timestamp onwards to a single value; this is only correct
        # because orders are applied in ascending timestamp order (see
        # calculate_number_of_shares_held) -- confirm before reusing.
        self.cash_ts[order.timestamp:] = self.cash_ts[order.timestamp] + orderValue
    def get_holding_value(self,symbol,timestamp):
        """
        Get a holding value for any given date
        """
        return self.holdings_value[symbol][timestamp]
    def calculate_holdings_value_for_each_symbol(self):
        """
        Get all holdings (shares in the portfolio) value for every day
        """
        self.market.check_if_data_loaded()
        for symbol in self.traded_symbols:
            #Time series of number of shares held
            shares_held = self.holdings_shares[symbol]
            #Time series of close prices
            stock_prices = self.market.get_symbol_ts(symbol,"close")
            #Compute value by multiplying the price and number
            #of shares for every day
            self.holdings_value[symbol] = (shares_held * stock_prices)
    def calculate_holdings_value_sum(self):
        """
        Populate a time series, which holds the value of all
        holdings (shares) at the given date
        """
        for index, series in self.holdings_value.iterrows():
            self.holdings_value_sum[index] = series.sum()
    def calculate_portfolio_value(self):
        """
        Calculate total portfolio value (holdings+cash) and save it in time
        series
        """
        self.portfolio_value = self.holdings_value_sum + self.cash_ts
    def execute(self):
        """
        Completely execute all orders. Only this execution function should be
        called from outside the class.
        It does:
        1) Populates time series with number of shares held for every share
        2) Calculates cash balance for every date
        3) Calculates holdings value, for every share
        4) Sums all holdings values for any given date and saves as a
        time series
        """
        self.calculate_number_of_shares_held()
        self.calculate_holdings_value_for_each_symbol()
        self.calculate_holdings_value_sum()
| mit |
yousrabk/mne-python | examples/forward/plot_make_forward.py | 20 | 2669 | """
======================================================
Create a forward operator and display sensitivity maps
======================================================
Sensitivity maps can be produced from forward operators that
indicate how well different sensor types will be able to detect
neural currents from different regions of the brain.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
# Paths to the MNE "sample" dataset files used to build the forward model.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
src = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
bem = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
subjects_dir = data_path + '/subjects'
# Note that forward solutions can also be read with read_forward_solution
fwd = mne.make_forward_solution(raw_fname, trans, src, bem,
                                fname=None, meg=True, eeg=True, mindist=5.0,
                                n_jobs=2, overwrite=True)
# convert to surface orientation for better visualization
fwd = mne.convert_forward_solution(fwd, surf_ori=True)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
# Sensitivity maps per sensor type (gradiometers, magnetometers, EEG).
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
    im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
                   cmap='RdBu_r')
    ax.set_title(ch_type.upper())
    ax.set_xlabel('sources')
    ax.set_ylabel('sensors')
    plt.colorbar(im, ax=ax, cmap='RdBu_r')
plt.show()
# Histogram of sensitivity values for each sensor type.
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
         bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
         color=['c', 'b', 'k'])
plt.legend()
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.show()
# Plot the gradiometer sensitivity on the cortical surface.
grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
              clim=dict(lims=[0, 50, 100]))
| bsd-3-clause |
jreback/pandas | pandas/tests/io/parser/test_c_parser_only.py | 1 | 23042 | """
Tests that apply specifically to the CParser. Unless specifically stated
as a CParser-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the Python parser can accept
further arguments when parsing.
"""
from io import BytesIO, StringIO, TextIOWrapper
import mmap
import os
import tarfile
import numpy as np
import pytest
from pandas.compat import IS64
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, concat
import pandas._testing as tm
@pytest.mark.parametrize(
    "malformed",
    ["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"],
    # each id names the tokenizer.c pointer that the input overruns
    ids=["words pointer", "stream pointer", "lines pointer"],
)
def test_buffer_overflow(c_parser_only, malformed):
    # see gh-9205: test certain malformed input files that cause
    # buffer overflows in tokenizer.c
    msg = "Buffer overflow caught - possible malformed input file."
    parser = c_parser_only
    with pytest.raises(ParserError, match=msg):
        parser.read_csv(StringIO(malformed))
def test_buffer_rd_bytes(c_parser_only):
    # see gh-12098: src->buffer in the C parser can be freed twice leading
    # to a segfault if a corrupt gzip file is read with 'read_csv', and the
    # buffer is filled more than once before gzip raises an Exception.
    # The payload is a deliberately truncated/corrupt gzip stream.
    data = (
        "\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09"
        "\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0"
        "\xA6\x4D" + "\x55" * 267 + "\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00"
        "\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO"
    )
    parser = c_parser_only
    with tm.assert_produces_warning(RuntimeWarning):
        # compression has no effect when passing a non-binary object as input
        # repeat many times so the double-free, if present, triggers a crash
        for _ in range(100):
            try:
                parser.read_csv(
                    StringIO(data), compression="gzip", delim_whitespace=True
                )
            except Exception:
                pass
def test_delim_whitespace_custom_terminator(c_parser_only):
    # See gh-12912: a custom lineterminator combined with
    # whitespace-delimited fields should parse correctly.
    parser = c_parser_only
    raw = "a b c~1 2 3~4 5 6~7 8 9"
    result = parser.read_csv(
        StringIO(raw), lineterminator="~", delim_whitespace=True
    )
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]
    )
    tm.assert_frame_equal(result, expected)
def test_dtype_and_names_error(c_parser_only):
    # see gh-8833: passing both dtype and names
    # resulting in an error reporting issue
    parser = c_parser_only
    data = """
1.0 1
2.0 2
3.0 3
"""
    # base cases
    result = parser.read_csv(StringIO(data), sep=r"\s+", header=None)
    expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
    tm.assert_frame_equal(result, expected)
    result = parser.read_csv(StringIO(data), sep=r"\s+", header=None, names=["a", "b"])
    expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["a", "b"])
    tm.assert_frame_equal(result, expected)
    # fallback casting: float-looking text safely truncated to int32
    result = parser.read_csv(
        StringIO(data), sep=r"\s+", header=None, names=["a", "b"], dtype={"a": np.int32}
    )
    expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=["a", "b"])
    expected["a"] = expected["a"].astype(np.int32)
    tm.assert_frame_equal(result, expected)
    data = """
1.0 1
nan 2
3.0 3
"""
    # fallback casting, but not castable: nan cannot be represented as int32
    with pytest.raises(ValueError, match="cannot safely convert"):
        parser.read_csv(
            StringIO(data),
            sep=r"\s+",
            header=None,
            names=["a", "b"],
            dtype={"a": np.int32},
        )
@pytest.mark.parametrize(
    "match,kwargs",
    [
        # For each of these cases, all of the dtypes are valid, just unsupported.
        (
            (
                "the dtype datetime64 is not supported for parsing, "
                "pass this column using parse_dates instead"
            ),
            {"dtype": {"A": "datetime64", "B": "float64"}},
        ),
        (
            (
                "the dtype datetime64 is not supported for parsing, "
                "pass this column using parse_dates instead"
            ),
            {"dtype": {"A": "datetime64", "B": "float64"}, "parse_dates": ["B"]},
        ),
        (
            "the dtype timedelta64 is not supported for parsing",
            {"dtype": {"A": "timedelta64", "B": "float64"}},
        ),
        ("the dtype <U8 is not supported for parsing", {"dtype": {"A": "U8"}}),
    ],
    ids=["dt64-0", "dt64-1", "td64", "<U8"],
)
def test_unsupported_dtype(c_parser_only, match, kwargs):
    # the C parser must reject these dtypes with a helpful TypeError
    parser = c_parser_only
    df = DataFrame(
        np.random.rand(5, 2), columns=list("AB"), index=["1A", "1B", "1C", "1D", "1E"]
    )
    # round-trip through a real file on disk
    with tm.ensure_clean("__unsupported_dtype__.csv") as path:
        df.to_csv(path)
        with pytest.raises(TypeError, match=match):
            parser.read_csv(path, index_col=0, **kwargs)
@td.skip_if_32bit
def test_precise_conversion(c_parser_only):
    # compare the float-parsing accuracy of the three float_precision modes
    from decimal import Decimal
    parser = c_parser_only
    normal_errors = []
    precise_errors = []
    # test numbers between 1 and 2
    for num in np.linspace(1.0, 2.0, num=500):
        # 25 decimal digits of precision
        text = f"a\n{num:.25}"
        normal_val = float(
            parser.read_csv(StringIO(text), float_precision="legacy")["a"][0]
        )
        precise_val = float(
            parser.read_csv(StringIO(text), float_precision="high")["a"][0]
        )
        roundtrip_val = float(
            parser.read_csv(StringIO(text), float_precision="round_trip")["a"][0]
        )
        actual_val = Decimal(text[2:])
        def error(val):
            # absolute error against the exact decimal value
            return abs(Decimal(f"{val:.100}") - actual_val)
        normal_errors.append(error(normal_val))
        precise_errors.append(error(precise_val))
        # round-trip should match float()
        assert roundtrip_val == float(text[2:])
    # "high" precision must be at least as accurate as "legacy" overall
    assert sum(precise_errors) <= sum(normal_errors)
    assert max(precise_errors) <= max(normal_errors)
def test_usecols_dtypes(c_parser_only):
    # converters/dtype must be applied to the right columns even when
    # usecols selects a subset
    parser = c_parser_only
    data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
    result = parser.read_csv(
        StringIO(data),
        usecols=(0, 1, 2),
        names=("a", "b", "c"),
        header=None,
        converters={"a": str},
        dtype={"b": int, "c": float},
    )
    result2 = parser.read_csv(
        StringIO(data),
        usecols=(0, 2),
        names=("a", "b", "c"),
        header=None,
        converters={"a": str},
        dtype={"b": int, "c": float},
    )
    assert (result.dtypes == [object, int, float]).all()
    # column "b" dropped by usecols; remaining dtypes keep their mapping
    assert (result2.dtypes == [object, float]).all()
def test_disable_bool_parsing(c_parser_only):
    # see gh-2090: dtype=object must disable boolean inference
    parser = c_parser_only
    data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
    result = parser.read_csv(StringIO(data), dtype=object)
    assert (result.dtypes == object).all()
    # with na_filter=False the empty field stays an empty string
    result = parser.read_csv(StringIO(data), dtype=object, na_filter=False)
    assert result["B"][2] == ""
def test_custom_lineterminator(c_parser_only):
    # Parsing with a custom lineterminator must match parsing the same
    # payload with standard newlines.
    parser = c_parser_only
    raw = "a,b,c~1,2,3~4,5,6"
    result = parser.read_csv(StringIO(raw), lineterminator="~")
    expected = parser.read_csv(StringIO(raw.replace("~", "\n")))
    tm.assert_frame_equal(result, expected)
def test_parse_ragged_csv(c_parser_only):
    # rows with varying field counts must be padded like the explicit
    # empty-field equivalent
    parser = c_parser_only
    data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
    nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
    result = parser.read_csv(
        StringIO(data), header=None, names=["a", "b", "c", "d", "e"]
    )
    expected = parser.read_csv(
        StringIO(nice_data), header=None, names=["a", "b", "c", "d", "e"]
    )
    tm.assert_frame_equal(result, expected)
    # too many columns, cause segfault if not careful
    data = "1,2\n3,4,5"
    result = parser.read_csv(StringIO(data), header=None, names=range(50))
    expected = parser.read_csv(StringIO(data), header=None, names=range(3)).reindex(
        columns=range(50)
    )
    tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(c_parser_only):
    # see gh-3453: bare carriage returns as line endings must tokenize
    # the same as "\n", including inside quoted fields
    parser = c_parser_only
    data = ' a,b,c\r"a,b","e,d","f,f"'
    result = parser.read_csv(StringIO(data), header=None)
    expected = parser.read_csv(StringIO(data.replace("\r", "\n")), header=None)
    tm.assert_frame_equal(result, expected)
    # same comparison, but letting the first row act as the header
    result = parser.read_csv(StringIO(data))
    expected = parser.read_csv(StringIO(data.replace("\r", "\n")))
    tm.assert_frame_equal(result, expected)
def test_grow_boundary_at_cap(c_parser_only):
    # See gh-12494
    #
    # Cause of error was that the C parser
    # was not increasing the buffer size when
    # the desired space would fill the buffer
    # to capacity, which would later cause a
    # buffer overflow error when checking the
    # EOF terminator of the CSV stream.
    parser = c_parser_only
    def test_empty_header_read(count):
        # ``count`` commas -> count + 1 unnamed empty columns
        s = StringIO("," * count)
        expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)])
        df = parser.read_csv(s)
        tm.assert_frame_equal(df, expected)
    # sweep widths around the internal buffer-capacity boundary
    for cnt in range(1, 101):
        test_empty_header_read(cnt)
def test_parse_trim_buffers(c_parser_only):
    # This test is part of a bugfix for gh-13703. It attempts
    # to stress the system memory allocator, to cause it to move the
    # stream buffer and either let the OS reclaim the region, or let
    # other memory requests of parser otherwise modify the contents
    # of memory space, where it was formally located.
    # This test is designed to cause a `segfault` with unpatched
    # `tokenizer.c`. Sometimes the test fails on `segfault`, other
    # times it fails due to memory corruption, which causes the
    # loaded DataFrame to differ from the expected one.
    parser = c_parser_only

    # Generate a large mixed-type CSV file on-the-fly (one record is
    # approx 1.5KiB).
    record_ = (
        """9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z"""
        """ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,"""
        """ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9"""
        """99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,"""
        """9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9."""
        """99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999."""
        """99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ"""
        """ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ"""
        """ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z"""
        """ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,"""
        """9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,"""
        """999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,"""
        """,,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999"""
        """,9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9."""
        """999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,"""
        """,9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z"""
        """ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ"""
        """,999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99"""
        """,,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-"""
        """9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9"""
        """.99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,"""
        """,,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9."""
        """99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ"""
        """ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ"""
        """-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ"""
        """ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ"""
        """,9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99"""
        """,99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9"""
        """.99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""
    )

    # Set the number of lines so that a call to `parser_trim_buffers`
    # is triggered: after a couple of full chunks are consumed a
    # relatively small 'residual' chunk would cause reallocation
    # within the parser.
    chunksize, n_lines = 128, 2 * 128 + 15
    csv_data = "\n".join([record_] * n_lines) + "\n"

    # We will use StringIO to load the CSV from this text buffer.
    # pd.read_csv() will iterate over the file in chunks and will
    # finally read a residual chunk of really small size.

    # Generate the expected output: manually create the dataframe
    # by splitting by comma and repeating the `n_lines` times.
    row = tuple(val_ if val_ else np.nan for val_ in record_.split(","))
    expected = DataFrame(
        [row for _ in range(n_lines)], dtype=object, columns=None, index=None
    )

    # Iterate over the CSV file in chunks of `chunksize` lines
    with parser.read_csv(
        StringIO(csv_data), header=None, dtype=object, chunksize=chunksize
    ) as chunks_:
        result = concat(chunks_, axis=0, ignore_index=True)

    # Check for data corruption if there was no segfault
    tm.assert_frame_equal(result, expected)

    # This extra test was added to replicate the fault in gh-5291.
    # Force 'utf-8' encoding, so that `_string_convert` would take
    # a different execution branch.
    with parser.read_csv(
        StringIO(csv_data),
        header=None,
        dtype=object,
        chunksize=chunksize,
        encoding="utf_8",
    ) as chunks_:
        result = concat(chunks_, axis=0, ignore_index=True)
    tm.assert_frame_equal(result, expected)
def test_internal_null_byte(c_parser_only):
    # see gh-14012
    #
    # '\x00' is only a "nothing was specified" placeholder and must never
    # act as a real line terminator, escape, or comment character.
    #
    # Move this to test_common.py once Python's csv module can parse '\x00'.
    parser = c_parser_only

    cols = ["a", "b", "c"]
    raw = "1,2,3\n4,\x00,6\n7,8,9"
    want = DataFrame([[1, 2.0, 3], [4, np.nan, 6], [7, 8, 9]], columns=cols)

    got = parser.read_csv(StringIO(raw), names=cols)
    tm.assert_frame_equal(got, want)
def test_read_nrows_large(c_parser_only):
    # gh-7626 - Read only nrows of data in for large inputs (>262144b)
    parser = c_parser_only
    # The narrow block exceeds the 256KiB read threshold, so nrows must
    # stop the reader before it ever reaches the wider 15-column rows.
    header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n"
    data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n"
    header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n"
    data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n"
    test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2

    df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010)

    # 1010 rows x 10 columns, all from the narrow section
    assert df.size == 1010 * 10
def test_float_precision_round_trip_with_text(c_parser_only):
    # see gh-15140: the round_trip converter must pass non-numeric
    # text through unchanged instead of erroring
    parser = c_parser_only
    got = parser.read_csv(StringIO("a"), header=None, float_precision="round_trip")
    tm.assert_frame_equal(got, DataFrame({0: ["a"]}))
def test_large_difference_in_columns(c_parser_only):
    # see gh-14125
    parser = c_parser_only

    count = 10000
    # One very wide row (10000 fields) followed by normal 2-field rows;
    # usecols=[0] must still pick the first field of every row.
    large_row = ("X," * count)[:-1] + "\n"
    normal_row = "XXXXXX XXXXXX,111111111111111\n"
    test_input = (large_row + normal_row * 6)[:-1]

    result = parser.read_csv(StringIO(test_input), header=None, usecols=[0])
    rows = test_input.split("\n")
    expected = DataFrame([row.split(",")[0] for row in rows])
    tm.assert_frame_equal(result, expected)
def test_data_after_quote(c_parser_only):
    # see gh-15910: characters following a closing quote are appended
    # to the field instead of raising
    parser = c_parser_only

    got = parser.read_csv(StringIO('a\n1\n"b"a'))
    want = DataFrame({"a": ["1", "ba"]})
    tm.assert_frame_equal(got, want)
def test_comment_whitespace_delimited(c_parser_only, capsys):
    # Comments combined with a whitespace delimiter: rows left with a
    # "bad" number of fields after comment stripping are skipped and
    # reported on stderr (error_bad_lines=False).
    parser = c_parser_only
    test_input = """\
1 2
2 2 3
3 2 3 # 3 fields
4 2 3# 3 fields
5 2 # 2 fields
6 2# 2 fields
7 # 1 field, NaN
8# 1 field, NaN
9 2 3 # skipped line
# comment"""
    df = parser.read_csv(
        StringIO(test_input),
        comment="#",
        header=None,
        delimiter="\\s+",
        skiprows=0,
        error_bad_lines=False,
    )
    captured = capsys.readouterr()
    # skipped lines 2, 3, 4, 9 must each be reported on stderr
    for line_num in (2, 3, 4, 9):
        assert f"Skipping line {line_num}" in captured.err
    expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])
    tm.assert_frame_equal(df, expected)
def test_file_like_no_next(c_parser_only):
    # gh-16530: the C engine only requires "__iter__" on a file-like
    # object; a "next"/"__next__" attribute may be absent or broken.
    #
    # NOTE: This is only true for the C engine, not the Python engine.
    class IterOnlyBuffer(StringIO):
        def __next__(self):
            raise AttributeError("No next method")

        next = __next__

    parser = c_parser_only
    got = parser.read_csv(IterOnlyBuffer("a\n1"))
    tm.assert_frame_equal(got, DataFrame({"a": [1]}))
def test_buffer_rd_bytes_bad_unicode(c_parser_only):
    # see gh-22748: a surrogate produced by errors="surrogateescape"
    # must surface as a UnicodeError rather than crash the reader
    stream = TextIOWrapper(
        BytesIO(b"\xB0"), encoding="ascii", errors="surrogateescape"
    )
    with pytest.raises(UnicodeError, match="'utf-8' codec can't encode character"):
        c_parser_only.read_csv(stream, encoding="UTF-8")
@pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"])
def test_read_tarfile(c_parser_only, csv_dir_path, tar_suffix):
    # see gh-16530
    #
    # Python's csv library can't handle tarfile members (it expects str,
    # not bytes, while iterating); the C engine must accept them.
    parser = c_parser_only
    archive = os.path.join(csv_dir_path, "tar_csv" + tar_suffix)

    with tarfile.open(archive, "r") as tar:
        member = tar.extractfile("tar_data.csv")
        got = parser.read_csv(member)
        tm.assert_frame_equal(got, DataFrame({"a": [1]}))
@pytest.mark.high_memory
def test_bytes_exceed_2gb(c_parser_only):
    # see gh-16798
    #
    # Read from a "CSV" that has a column larger than 2GB.
    parser = c_parser_only

    # low_memory mode chunks the read, so it never holds >2GB at once
    if parser.low_memory:
        pytest.skip("not a high_memory test")

    # 2100 rows of 1MiB strings -> a single column above the 2GB mark
    csv = StringIO("strings\n" + "\n".join(["x" * (1 << 20) for _ in range(2100)]))
    df = parser.read_csv(csv)
    assert not df.empty
def test_chunk_whitespace_on_boundary(c_parser_only):
    # see gh-9735: this issue is C parser-specific (bug when
    # parsing whitespace and characters at chunk boundary)
    #
    # This test case has a field too large for the Python parser / CSV library.
    parser = c_parser_only

    # field sized so the line break falls right at the parser's internal
    # 256KiB chunk boundary
    chunk1 = "a" * (1024 * 256 - 2) + "\na"
    chunk2 = "\n a"
    result = parser.read_csv(StringIO(chunk1 + chunk2), header=None)

    expected = DataFrame(["a" * (1024 * 256 - 2), "a", " a"])
    tm.assert_frame_equal(result, expected)
def test_file_handles_mmap(c_parser_only, csv1):
    # gh-14418
    #
    # A user-provided file handle (here an mmap) must remain open
    # after read_csv finishes.
    parser = c_parser_only
    with open(csv1) as handle:
        mapped = mmap.mmap(handle.fileno(), 0, access=mmap.ACCESS_READ)
        parser.read_csv(mapped)

        assert not mapped.closed
        mapped.close()
def test_file_binary_mode(c_parser_only):
    # see gh-23779: file handles opened in binary mode are accepted
    parser = c_parser_only
    want = DataFrame([[1, 2, 3], [4, 5, 6]])

    with tm.ensure_clean() as path:
        with open(path, "w") as writer:
            writer.write("1,2,3\n4,5,6")

        with open(path, "rb") as reader:
            got = parser.read_csv(reader, header=None)
            tm.assert_frame_equal(got, want)
def test_unix_style_breaks(c_parser_only):
    # GH 11020: skiprows must work across bare Unix '\n' line breaks
    parser = c_parser_only
    want = DataFrame(columns=["col_1", "col_2", "col_3"])

    with tm.ensure_clean() as path:
        with open(path, "w", newline="\n") as writer:
            writer.write("blah\n\ncol_1,col_2,col_3\n\n")
        got = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c")

    tm.assert_frame_equal(got, want)
@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
@pytest.mark.parametrize(
    "data,thousands,decimal",
    [
        (
            """A|B|C
1|2,334.01|5
10|13|10.
""",
            ",",
            ".",
        ),
        (
            """A|B|C
1|2.334,01|5
10|13|10,
""",
            ".",
            ",",
        ),
    ],
)
def test_1000_sep_with_decimal(
    c_parser_only, data, thousands, decimal, float_precision
):
    # Thousands separator and decimal point must be honored together,
    # for every float_precision engine (both "1,234.5" and "1.234,5" styles).
    parser = c_parser_only
    expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})

    result = parser.read_csv(
        StringIO(data),
        sep="|",
        thousands=thousands,
        decimal=decimal,
        float_precision=float_precision,
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
@pytest.mark.parametrize(
    "value,expected",
    [
        ("-1,0", -1.0),
        ("-1,2e0", -1.2),
        ("-1e0", -1.0),
        ("+1e0", 1.0),
        ("+1e+0", 1.0),
        ("+1e-1", 0.1),
        ("+,1e1", 1.0),
        ("+1,e0", 1.0),
        ("-,1e1", -1.0),
        ("-1,e0", -1.0),
        ("0,1", 0.1),
        ("1,", 1.0),
        (",1", 0.1),
        ("-,1", -0.1),
        ("1_,", 1.0),
        ("1_234,56", 1234.56),
        ("1_234,56e0", 1234.56),
        # negative cases; must not parse as float
        ("_", "_"),
        ("-_", "-_"),
        ("-_1", "-_1"),
        ("-_1e0", "-_1e0"),
        ("_1", "_1"),
        ("_1,", "_1,"),
        ("_1,_", "_1,_"),
        ("_1e0", "_1e0"),
        ("1,2e_1", "1,2e_1"),
        ("1,2e1_0", "1,2e1_0"),
        ("1,_2", "1,_2"),
        (",1__2", ",1__2"),
        (",1e", ",1e"),
        ("-,1e", "-,1e"),
        ("1_000,000_000", "1_000,000_000"),
        ("1,e1_2", "1,e1_2"),
    ],
)
def test_1000_sep_decimal_float_precision(
    c_parser_only, value, expected, float_precision
):
    # test decimal and thousand sep handling across 'float_precision'
    # parsers; a str `expected` means the value must be left unparsed
    parser = c_parser_only
    df = parser.read_csv(
        StringIO(value),
        sep="|",
        thousands="_",
        decimal=",",
        header=None,
        float_precision=float_precision,
    )
    val = df.iloc[0, 0]
    assert val == expected
def test_float_precision_options(c_parser_only):
    # GH 17154, 36228
    parser = c_parser_only

    s = "foo\n243.164\n"
    df = parser.read_csv(StringIO(s))
    # default and "high" must produce identical values
    df2 = parser.read_csv(StringIO(s), float_precision="high")
    tm.assert_frame_equal(df, df2)

    df3 = parser.read_csv(StringIO(s), float_precision="legacy")

    # the legacy converter yields a different value on 64-bit builds
    if IS64:
        assert not df.iloc[0, 0] == df3.iloc[0, 0]
    else:
        assert df.iloc[0, 0] == df3.iloc[0, 0]

    # unknown option names must raise
    msg = "Unrecognized float_precision option: junk"
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(s), float_precision="junk")
| bsd-3-clause |
nickgentoo/scikit-learn-graph | scripts/Keras_calculate_cv_allkernels.py | 1 | 7678 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from skgraph.feature_extraction.graph.ODDSTVectorizer import ODDSTVectorizer
from skgraph.feature_extraction.graph.NSPDK.NSPDKVectorizer import NSPDKVectorizer
from skgraph.feature_extraction.graph.WLVectorizer import WLVectorizer
from skgraph.datasets import load_graph_datasets
import numpy as np
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.layers import Input
from keras.models import Model
from keras import regularizers
#tutorial from https://blog.keras.io/building-autoencoders-in-keras.html
class DNN:
    """Single-hidden-layer autoencoder built on the (old) Keras 1.x
    functional API.

    NOTE: this is Python 2 code (bare `print` statement below) and uses
    Keras 1.x keyword names (`input=`/`output=`); it will not run on
    Python 3 / Keras 2 without porting.

    Exposes three models sharing weights:
      - self.model:   input -> reconstruction (the one that is trained)
      - self.encoder: input -> hidden code
      - self.decoder: hidden code -> reconstruction
    """

    def __init__(self,inputDim,layerSize=None):
        # layerSize[0] is the width of the single hidden (encoding) layer
        print layerSize
        # this is our input placeholder
        input_img = Input(shape=(inputDim,))
        # "encoded" is the encoded representation of the input
        encoded = Dense(layerSize[0], activation='relu')(input_img)
        # add a Dense layer with a L1 activity regularizer
        #encoded = Dense(layerSize[0], activation='relu',
        #                activity_regularizer=regularizers.activity_l1(10e-1))(input_img)
        # "decoded" is the lossy reconstruction of the input
        decoded = Dense(inputDim, activation='sigmoid')(encoded)
        # this model maps an input to its reconstruction
        self.model = Model(input=input_img, output=decoded)
        # this model maps an input to its encoded representation
        self.encoder = Model(input=input_img, output=encoded)
        # create a placeholder for an encoded (layerSize[0]-dimensional) input
        encoded_input = Input(shape=(layerSize[0],))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.model.layers[-1]
        # create the decoder model
        self.decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
        #self.model.compile(optimizer='adadelta', loss='binary_crossentropy')
        self.model.compile(optimizer='adagrad', loss='binary_crossentropy')
        #self.model = Sequential()
        #self.model.add(Dense(layerSize[0], input_dim=inputDim, init='uniform', activation='tanh'))
        #for size in layerSize[1:]:
        #    self.model.add(Dense(size, init='uniform', activation='tanh'))
        #self.model.add(Dense(outputDim, init='uniform', activation='relu'))
if __name__=='__main__':
    # Command-line driver: vectorize a graph dataset with a graph kernel,
    # train a one-hidden-layer autoencoder on the explicit features, then
    # 10-fold cross-validate a linear SVM on the encoded representation.
    # Usage: dataset r l name kernel n_hidden
    # NOTE(review): this guard can never fire (len(sys.argv) is always >= 1);
    # it presumably should check for the 6 required arguments.
    if len(sys.argv)<1:
        sys.exit("python ODDKernel_example.py dataset r l kernel n_hidden")
    dataset=sys.argv[1]      # dataset identifier (see branches below)
    max_radius=int(sys.argv[2])   # kernel radius r
    la=float(sys.argv[3])    # lambda (or d for NSPDK)
    #hashs=int(sys.argv[3])
    njobs=1
    name=str(sys.argv[4])    # output name prefix (used by commented-out dump)
    kernel=sys.argv[5]       # one of: WL, ODDST, NSPDK
    n_hidden=int(sys.argv[6])   # autoencoder hidden-layer width
    #FIXED PARAMETERS
    normalization=True

    # --- dataset selection ---
    if dataset=="CAS":
        print "Loading bursi(CAS) dataset"
        g_it=load_graph_datasets.load_graphs_bursi()
    elif dataset=="GDD":
        print "Loading GDD dataset"
        g_it=load_graph_datasets.load_graphs_GDD()
    elif dataset=="CPDB":
        print "Loading CPDB dataset"
        g_it=load_graph_datasets.load_graphs_CPDB()
    elif dataset=="AIDS":
        print "Loading AIDS dataset"
        g_it=load_graph_datasets.load_graphs_AIDS()
    elif dataset=="NCI1":
        print "Loading NCI1 dataset"
        g_it=load_graph_datasets.load_graphs_NCI1()
    elif dataset=="NCI109":
        print "Loading NCI109 dataset"
        g_it=load_graph_datasets.load_graphs_NCI109()
    elif dataset=="NCI123":
        print "Loading NCI123 dataset"
        g_it=load_graph_datasets.load_graphs_NCI123()
    elif dataset=="NCI_AIDS":
        print "Loading NCI_AIDS dataset"
        g_it=load_graph_datasets.load_graphs_NCI_AIDS()
    else:
        # NOTE(review): execution continues with g_it undefined here
        print "Unknown dataset name"

    # --- kernel / vectorizer selection ---
    if kernel=="WL":
        print "Lambda ignored"
        print "Using WL fast subtree kernel"
        Vectorizer=WLVectorizer(r=max_radius,normalization=normalization)
    elif kernel=="ODDST":
        print "Using ST kernel"
        Vectorizer=ODDSTVectorizer(r=max_radius,l=la,normalization=normalization)
    elif kernel=="NSPDK":
        print "Using NSPDK kernel, lambda parameter interpreted as d"
        Vectorizer=NSPDKVectorizer(r=max_radius,d=int(la),normalization=normalization)
    else:
        print "Unrecognized kernel"

    # explicit sparse feature matrix, one row per graph
    features=Vectorizer.transform(g_it.graphs) #Parallel ,njobs
    #print GM
    # GMsvm=[]
    # for i in xrange(len(GM)):
    #     GMsvm.append([])
    #     GMsvm[i]=[i+1]
    #     GMsvm[i].extend(GM[i])
    # #print GMsvm
    # from sklearn import datasets
    # print "Saving Gram matrix"
    # #datasets.dump_svmlight_file(GMsvm,g_it.target, name+".svmlight")
    # datasets.dump_svmlight_file(np.array(GMsvm),g_it.target, name+".svmlight")
    # #Test manual dump

    #LEARN AUTOENCODER
    print "Extracted", features.shape[1], "features from",features.shape[0],"examples."
    n=features.shape[0]
    densefeat=features.todense()
    # 80/20 train/test split by row order (no shuffling)
    x_train=densefeat[:int(n*0.8),:]
    # TODO: wrong, use slicing instead! (translated from the original
    # Italian note "sbagliato, fare slicing!"; np.prod of a scalar is a no-op)
    x_train = x_train.reshape((len(x_train), np.prod(features.shape[1])))
    x_test=densefeat[int(n*0.8):,:]
    print x_train.shape
    print x_test.shape
    AutoEncoder=DNN(x_train.shape[1],layerSize=[n_hidden])
    AutoEncoder.model.fit(x_train, x_train,
                          nb_epoch=10,
                          batch_size=256,
                          shuffle=True,
                          validation_data=(x_test, x_test))
    # encode the WHOLE dataset (train + test) for the downstream SVM
    encoded_features = AutoEncoder.encoder.predict(densefeat)

    from sklearn import cross_validation
    from sklearn.svm import SVC, LinearSVC
    clf = LinearSVC(C=1,dual=True) #, class_weight='auto'
    #clf = SVC(C=1,kernel='rbf',gamma=0.001) #, class_weight='auto'
    #
    y_train=g_it.target
    kf = cross_validation.StratifiedKFold(y_train, n_folds=10, shuffle=True,random_state=42)
    scores=cross_validation.cross_val_score(
        clf, encoded_features, y_train, cv=kf, scoring='accuracy')
    print scores
    # NOTE(review): label says AUROC but the scorer above is 'accuracy'
    print "Inner AUROC: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std())
    #print GM
    ##############################################################################
    # Train classifiers
    #
    # For an initial search, a logarithmic grid with basis
    # 10 is often helpful. Using a basis of 2, a finer
    # tuning can be achieved but at a much higher cost.
    # from sklearn.cross_validation import StratifiedShuffleSplit
    # from sklearn.grid_search import GridSearchCV
    # C_range = np.logspace(-2, 4, 7)
    # gamma_range = np.logspace(-9, 3, 13)
    # param_grid = dict( C=C_range)
    # cv = StratifiedShuffleSplit(y_train, n_iter=10, test_size=0.2, random_state=42)
    # grid = GridSearchCV(LinearSVC(), param_grid=param_grid, cv=cv,verbose=10)
    # print "starting grid search"
    # grid.fit(encoded_features, y_train)
    #
    # print("The best parameters are %s with a score of %0.2f"
    #       % (grid.best_params_, grid.best_score_))
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/feature_selection/tests/test_feature_select.py | 10 | 26399 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
    # Our f_oneway must agree with scipy.stats.f_oneway on random data.
    rng = np.random.RandomState(0)
    sample_a = rng.randn(10, 3)
    sample_b = 1 + rng.randn(10, 3)

    f_ref, p_ref = stats.f_oneway(sample_a, sample_b)
    f_ours, p_ours = f_oneway(sample_a, sample_b)
    assert_true(np.allclose(f_ref, f_ours))
    assert_true(np.allclose(p_ref, p_ours))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: it must not raise casting errors
    # with recent numpys.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    fint, pint = f_oneway(X, y)

    # test that it gives the same result as with float
    # (np.float was deprecated in NumPy 1.20 and later removed;
    # use the concrete np.float64 dtype instead)
    f, p = f_oneway(X.astype(np.float64), y)
    assert_array_almost_equal(f, fint, decimal=4)
    assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
    # The F test should give meaningful scores on a toy classification
    # problem: only the first five (informative + redundant) features
    # carry signal; sparse input must match dense input.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    F, pv = f_classif(X, y)
    F_sp, pv_sp = f_classif(sparse.csr_matrix(X), y)

    assert_true(np.all(F > 0))
    assert_true(np.all((pv > 0) & (pv < 1)))
    assert_true(np.all(pv[:5] < 0.05))
    assert_true(np.all(pv[5:] > 1.e-4))
    assert_array_almost_equal(F_sp, F)
    assert_array_almost_equal(pv_sp, pv)
def test_f_regression():
    # The F test should give meaningful scores on a toy regression
    # problem: only the first five informative features carry signal.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0)

    F, pv = f_regression(X, y)
    assert_true(np.all(F > 0))
    assert_true(np.all((pv > 0) & (pv < 1)))
    assert_true(np.all(pv[:5] < 0.05))
    assert_true(np.all(pv[5:] > 1.e-4))

    # again without centering, comparing dense against sparse input
    F, pv = f_regression(X, y, center=False)
    F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
    assert_array_almost_equal(F_sparse, F)
    assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
    # f_regression must return the same values for any numeric dtype.
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    # np.int / np.float were deprecated in NumPy 1.20 and later removed;
    # use the builtin int and the concrete np.float64 dtype instead
    y = np.arange(10).astype(int)

    F1, pv1 = f_regression(X, y)
    F2, pv2 = f_regression(X, y.astype(np.float64))
    assert_array_almost_equal(F1, F2, 5)
    assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
    # Test whether f_regression preserves dof according to 'center' argument
    # We use two centered variates so we have a simple relationship between
    # F-score with variates centering and F-score without variates centering.

    # Create toy example
    X = np.arange(-5, 6).reshape(-1, 1)  # X has zero mean
    n_samples = X.size
    Y = np.ones(n_samples)
    Y[::2] *= -1.
    Y[0] = 0.  # have Y mean being null

    F1, _ = f_regression(X, Y, center=True)
    F2, _ = f_regression(X, Y, center=False)
    # centering costs one degree of freedom: the two F ratios differ
    # exactly by the factor (n - 1) / (n - 2)
    assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
    assert_almost_equal(F2[0], 0.232558139)  # value from statsmodels OLS
def test_f_classif_multi_class():
    # f_classif should give meaningful scores on a toy multi-class
    # problem: only the first five features carry signal.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    F, pv = f_classif(X, y)
    assert_true(np.all(F > 0))
    assert_true(np.all((pv > 0) & (pv < 1)))
    assert_true(np.all(pv[:5] < 0.05))
    assert_true(np.all(pv[5:] > 1.e-4))
def test_select_percentile_classif():
    # SelectPercentile(25%) on the toy classification problem must keep
    # exactly the first five features and agree with GenericUnivariateSelect.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    selector = SelectPercentile(f_classif, percentile=25)
    X_r = selector.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                   param=25).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)

    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_percentile_classif_sparse():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the percentile heuristic
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    X = sparse.csr_matrix(X)
    univariate_filter = SelectPercentile(f_classif, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                   param=25).fit(X, y).transform(X)
    assert_array_equal(X_r.toarray(), X_r2.toarray())
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)

    # inverse_transform must keep the input sparse, restore the original
    # shape, and zero-fill only the dropped columns
    X_r2inv = univariate_filter.inverse_transform(X_r2)
    assert_true(sparse.issparse(X_r2inv))
    support_mask = safe_mask(X_r2inv, support)
    assert_equal(X_r2inv.shape, X.shape)
    assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
    # Check other columns are empty
    assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
    # SelectKBest(k=5) on the toy classification problem must keep
    # exactly the first five features and agree with GenericUnivariateSelect.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    selector = SelectKBest(f_classif, k=5)
    X_r = selector.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(f_classif, mode='k_best',
                                   param=5).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)

    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_kbest_all():
    # k="all" must be a no-op that keeps every feature.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)

    selector = SelectKBest(f_classif, k='all')
    assert_array_equal(X, selector.fit(X, y).transform(X))
def test_select_kbest_zero():
    # k=0 must select no features and warn when transforming.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)

    selector = SelectKBest(f_classif, k=0)
    selector.fit(X, y)
    assert_array_equal(selector.get_support(), np.zeros(10, dtype=bool))

    X_selected = assert_warns_message(UserWarning, 'No features were selected',
                                      selector.transform, X)
    assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
    # The fdr, fpr and fwe heuristics (alpha=0.01) must all keep exactly
    # the first five features on the toy classification problem.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    univariate_filter = SelectFwe(f_classif, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)

    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_almost_equal(univariate_filter.get_support(), gtruth)

    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(f_classif, mode=mode,
                                       param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    # The selected features must be exactly the top-scoring ones.
    scores = score_filter.scores_
    mask = score_filter.get_support()
    kept = np.sort(scores[mask])
    top = np.sort(scores)[-mask.sum():]
    assert_array_equal(kept, top)
def test_select_percentile_regression():
    # SelectPercentile(25%) on the toy regression problem keeps exactly
    # the five informative features; also checks inverse_transform.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    selector = SelectPercentile(f_regression, percentile=25)
    X_r = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)

    X_r2 = GenericUnivariateSelect(f_regression, mode='percentile',
                                   param=25).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)

    support = selector.get_support()
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(support, expected_support)

    # inverse_transform zero-fills the dropped columns ...
    X_2 = X.copy()
    X_2[:, np.logical_not(support)] = 0
    assert_array_equal(X_2, selector.inverse_transform(X_r))
    # ... and respects the input dtype
    assert_array_equal(X_2.astype(bool),
                       selector.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
    # percentile=100 must keep every feature.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    selector = SelectPercentile(f_regression, percentile=100)
    X_r = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)

    X_r2 = GenericUnivariateSelect(f_regression, mode='percentile',
                                   param=100).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    assert_array_equal(selector.get_support(), np.ones(20))
def test_invalid_percentile():
    # Percentiles outside [0, 100] must raise ValueError at fit time,
    # for both SelectPercentile and GenericUnivariateSelect.
    X, y = make_regression(n_samples=10, n_features=20,
                           n_informative=2, shuffle=False, random_state=0)

    for bad_percentile in (-1, 101):
        assert_raises(ValueError,
                      SelectPercentile(percentile=bad_percentile).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='percentile',
                                              param=bad_percentile).fit, X, y)
def test_select_kbest_regression():
    # SelectKBest(k=5) keeps the five informative features on the noisy
    # toy regression problem and agrees with GenericUnivariateSelect.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)

    selector = SelectKBest(f_regression, k=5)
    X_r = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)

    X_r2 = GenericUnivariateSelect(f_regression, mode='k_best',
                                   param=5).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)

    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_heuristics_regression():
    # The fdr, fpr and fwe heuristics (alpha=0.01) must keep the five
    # informative features and at most two false positives on the noisy
    # toy regression problem.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)

    univariate_filter = SelectFpr(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    # (removed an unused `gtruth` local that was never referenced)
    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(
            f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        # np.bool was deprecated in NumPy 1.20 and later removed;
        # the builtin bool is the equivalent dtype
        assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
        assert_less(np.sum(support[5:] == 1), 3)
def test_boundary_case_ch2():
    """On a tiny two-feature chi2 problem, every selection heuristic must
    keep exactly the first feature (the one with the larger score)."""
    X = np.array([[10, 20], [20, 20], [20, 30]])
    y = np.array([[1], [0], [0]])
    scores, pvalues = chi2(X, y)
    assert_array_almost_equal(scores, np.array([4., 0.71428571]))
    assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
    # Each heuristic, parameterized so that exactly one feature survives.
    selectors = [SelectFdr(chi2, alpha=0.1),
                 SelectKBest(chi2, k=1),
                 SelectPercentile(chi2, percentile=50),
                 SelectFpr(chi2, alpha=0.1),
                 SelectFwe(chi2, alpha=0.1)]
    expected_support = np.array([True, False])
    for selector in selectors:
        selector.fit(X, y)
        assert_array_equal(selector.get_support(), expected_support)
def test_select_fdr_regression():
    # Test that fdr heuristic actually has low FDR.
    def single_fdr(alpha, n_informative, random_state):
        # Fit SelectFdr on one randomly generated regression problem and
        # return the empirical false discovery rate for that single run.
        X, y = make_regression(n_samples=150, n_features=20,
                               n_informative=n_informative, shuffle=False,
                               random_state=random_state, noise=10)
        with warnings.catch_warnings(record=True):
            # Warnings can be raised when no features are selected
            # (low alpha or very noisy data)
            univariate_filter = SelectFdr(f_regression, alpha=alpha)
            X_r = univariate_filter.fit(X, y).transform(X)
            # SelectFdr and GenericUnivariateSelect(mode='fdr') must agree.
            X_r2 = GenericUnivariateSelect(
                f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
            assert_array_equal(X_r, X_r2)
        # shuffle=False above, so the informative features are the first
        # n_informative columns; everything selected past them is a FP.
        support = univariate_filter.get_support()
        num_false_positives = np.sum(support[n_informative:] == 1)
        num_true_positives = np.sum(support[:n_informative] == 1)
        if num_false_positives == 0:
            return 0.
        false_discovery_rate = (num_false_positives /
                                (num_true_positives + num_false_positives))
        return false_discovery_rate
    for alpha in [0.001, 0.01, 0.1]:
        for n_informative in [1, 5, 10]:
            # As per Benjamini-Hochberg, the expected false discovery rate
            # should be lower than alpha:
            # FDR = E(FP / (TP + FP)) <= alpha
            # Average over 100 random problems to estimate the expectation.
            false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
                                                       random_state) for
                                            random_state in range(100)])
            assert_greater_equal(alpha, false_discovery_rate)
            # Make sure that the empirical false discovery rate increases
            # with alpha:
            if false_discovery_rate != 0:
                assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the fwe heuristic
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    univariate_filter = SelectFwe(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # All 5 informative features (generated first, shuffle=False) must be
    # selected.  ``bool`` replaces the deprecated ``np.bool`` alias, which
    # was removed in NumPy 1.24.  The unused ``gtruth`` local was dropped.
    assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
    # At most one false positive among the 15 noise features.
    assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
    # Test whether SelectKBest actually selects k features in case of ties.
    # Prior to 0.11, SelectKBest would return more features than requested.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    # A plain ``def`` instead of a lambda assigned to a name (PEP 8 E731).
    def dummy_score(X, y):
        # Score every feature by its value in the single sample, which
        # produces tied scores on purpose.
        return X[0], X[0]

    for X in Xs:
        sel = SelectKBest(dummy_score, k=1)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)
        sel = SelectKBest(dummy_score, k=2)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
    # Test if SelectPercentile selects the right n_features in case of ties.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    # A plain ``def`` instead of a lambda assigned to a name (PEP 8 E731).
    def dummy_score(X, y):
        # Score every feature by its value in the single sample, which
        # produces tied scores on purpose.
        return X[0], X[0]

    for X in Xs:
        sel = SelectPercentile(dummy_score, percentile=34)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)
        sel = SelectPercentile(dummy_score, percentile=67)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_tied_pvalues():
    # chi2 yields identical p-values but distinct scores for these three
    # features, so selection must fall back on the scores: feature 9998
    # (lowest score) is always dropped, whatever the column order.
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]
    for perm in itertools.permutations((0, 1, 2)):
        X_perm = X0[:, perm]
        for selector in (SelectKBest(chi2, k=2),
                         SelectPercentile(chi2, percentile=67)):
            X_sel = selector.fit_transform(X_perm, y)
            assert_equal(X_sel.shape, (2, 2))
            assert_not_in(9998, X_sel)
def test_scorefunc_multilabel():
    # chi2 must cope with multilabel targets in both k-best and percentile
    # modes; the all-zero third feature must never be selected.
    X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
    y = [[1, 1], [0, 1], [1, 0]]
    for selector in (SelectKBest(chi2, k=2),
                     SelectPercentile(chi2, percentile=67)):
        X_sel = selector.fit_transform(X, y)
        assert_equal(X_sel.shape, (3, 2))
        assert_not_in(0, X_sel)
def test_tied_scores():
    # k-best must use a stable sort: with tied chi2 scores the right-most
    # (highest-index) features win, so transform keeps the last n columns.
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]
    for n_features in [1, 2, 3]:
        selector = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        X_test = selector.transform([[0, 1, 2]])
        assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
    # SelectKBest and SelectPercentile must tolerate NaN scores: the
    # zero-variance first feature makes f_classif (ANOVA) return NaN for
    # it, and only the two informative columns should survive.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for selector in (SelectKBest(f_classif, 2),
                     SelectPercentile(f_classif, percentile=67)):
        ignore_warnings(selector.fit)(X, y)
        assert_array_equal(selector.get_support(indices=True),
                           np.array([1, 2]))
def test_score_func_error():
    # A non-callable score_func must raise TypeError at fit time for
    # every univariate selector class.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    selector_classes = [SelectKBest, SelectPercentile, SelectFwe,
                        SelectFdr, SelectFpr, GenericUnivariateSelect]
    for selector_cls in selector_classes:
        assert_raises(TypeError, selector_cls(score_func=10).fit, X, y)
def test_invalid_k():
    # k outside [0, n_features] (here n_features == 3) must raise
    # ValueError for SelectKBest and the equivalent generic mode.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for bad_k in (-1, 4):
        assert_raises(ValueError, SelectKBest(k=bad_k).fit, X, y)
    for bad_k in (-1, 4):
        generic = GenericUnivariateSelect(mode='k_best', param=bad_k)
        assert_raises(ValueError, generic.fit, X, y)
def test_f_classif_constant_feature():
    """f_classif must emit a UserWarning when a feature is constant
    across all samples."""
    X, y = make_classification(n_samples=10, n_features=5)
    constant_value = 2.0
    X[:, 0] = constant_value
    assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
    """With uncorrelated random data and strict thresholds, every selector
    must keep zero features and warn when transforming."""
    rng = np.random.RandomState(0)
    # Random uncorrelated data: a strict univariate test should reject
    # all the features.
    X = rng.rand(40, 10)
    y = rng.randint(0, 4, size=40)
    strict_selectors = [
        SelectFwe(alpha=0.01).fit(X, y),
        SelectFdr(alpha=0.01).fit(X, y),
        SelectFpr(alpha=0.01).fit(X, y),
        SelectPercentile(percentile=0).fit(X, y),
        SelectKBest(k=0).fit(X, y),
    ]
    empty_support = np.zeros(10)
    for selector in strict_selectors:
        assert_array_equal(selector.get_support(), empty_support)
        X_selected = assert_warns_message(
            UserWarning, 'No features were selected', selector.transform, X)
        assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
    """mutual_info_classif must pick the informative + redundant feature
    pair in both k-best and percentile modes, matching the generic
    selector."""
    X, y = make_classification(n_samples=100, n_features=5,
                               n_informative=1, n_redundant=1,
                               n_repeated=0, n_classes=2,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    # shuffle=False puts the two useful features first.
    expected_support = np.zeros(5)
    expected_support[:2] = 1
    # KBest mode.
    kbest = SelectKBest(mutual_info_classif, k=2)
    X_kbest = kbest.fit(X, y).transform(X)
    X_generic = GenericUnivariateSelect(
        mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
    assert_array_equal(X_kbest, X_generic)
    assert_array_equal(kbest.get_support(), expected_support)
    # Percentile mode: 40% of 5 features is the same pair.
    percentile = SelectPercentile(mutual_info_classif, percentile=40)
    X_percentile = percentile.fit(X, y).transform(X)
    X_generic = GenericUnivariateSelect(
        mutual_info_classif, mode='percentile',
        param=40).fit(X, y).transform(X)
    assert_array_equal(X_percentile, X_generic)
    assert_array_equal(percentile.get_support(), expected_support)
def test_mutual_info_regression():
    """mutual_info_regression must pick the two informative features in
    both k-best and percentile modes, matching the generic selector."""
    X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
                           shuffle=False, random_state=0, noise=10)
    # shuffle=False puts the two informative features first.
    expected_support = np.zeros(10)
    expected_support[:2] = 1
    # KBest mode.
    kbest = SelectKBest(mutual_info_regression, k=2)
    X_kbest = kbest.fit(X, y).transform(X)
    assert_best_scores_kept(kbest)
    X_generic = GenericUnivariateSelect(
        mutual_info_regression, mode='k_best',
        param=2).fit(X, y).transform(X)
    assert_array_equal(X_kbest, X_generic)
    assert_array_equal(kbest.get_support(), expected_support)
    # Percentile mode: 20% of 10 features is the same pair.
    percentile = SelectPercentile(mutual_info_regression, percentile=20)
    X_percentile = percentile.fit(X, y).transform(X)
    X_generic = GenericUnivariateSelect(
        mutual_info_regression, mode='percentile',
        param=20).fit(X, y).transform(X)
    assert_array_equal(X_percentile, X_generic)
    assert_array_equal(percentile.get_support(), expected_support)
if __name__ == '__main__':
    # Allow running this test module directly through the numpy/nose
    # test runner.
    run_module_suite()
| mit |
plotly/plotly.py | packages/python/plotly/plotly/tests/test_optional/test_matplotlylib/test_axis_scales.py | 2 | 1336 | from __future__ import absolute_import
import pytest
from plotly import optional_imports
from plotly.tests.utils import compare_dict, strip_dict_params
from plotly.tests.test_optional.optional_utils import run_fig
from plotly.tests.test_optional.test_matplotlylib.data.axis_scales import *
matplotlylib = optional_imports.get_module("plotly.matplotlylib")
if matplotlylib:
import matplotlib.pyplot as plt
@pytest.mark.matplotlib
def test_even_linear_scale():
    # Build a matplotlib figure with evenly spaced explicit ticks and
    # check that the converted Plotly figure matches the expected
    # EVEN_LINEAR_SCALE fixture (imported from the data module).
    fig, ax = plt.subplots()
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    y = [10, 3, 100, 6, 45, 4, 80, 45, 3, 59]
    ax.plot(x, y)
    # NOTE(review): the second positional argument is presumably the
    # ``minor`` flag of set_xticks/set_yticks — confirm against the pinned
    # matplotlib version, as newer releases take ``labels`` there.
    _ = ax.set_xticks(list(range(0, 20, 3)), True)
    _ = ax.set_yticks(list(range(0, 200, 13)), True)
    renderer = run_fig(fig)
    for data_no, data_dict in enumerate(renderer.plotly_fig["data"]):
        # equivalent, msg = compare_dict(data_dict.to_plotly_json(),
        # EVEN_LINEAR_SCALE['data'][data_no].to_plotly_json())
        # assert equivalent, msg
        # Compare each trace, ignoring the auto-generated 'uid' entry.
        d1, d2 = strip_dict_params(
            data_dict, EVEN_LINEAR_SCALE["data"][data_no], ignore=["uid"]
        )
        equivalent, msg = compare_dict(d1, d2)
        assert equivalent, msg
    # The layout (axes, ticks, ranges) must match the fixture as well.
    equivalent, msg = compare_dict(
        renderer.plotly_fig["layout"], EVEN_LINEAR_SCALE["layout"]
    )
    assert equivalent, msg
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/blocking_input.py | 10 | 11766 | """
This provides several classes used for blocking interaction with figure
windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for
interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking
way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for
interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that
will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import verbose
from matplotlib.cbook import is_sequence_of_strings
import matplotlib.lines as mlines
import warnings
class BlockingInput(object):
    """
    Class that creates a callable object to retrieve events in a
    blocking way.

    Instances are configured with a figure and a list of event names;
    calling the instance runs the canvas event loop until enough events
    have been collected (or the timeout expires) and returns them.
    """
    def __init__(self, fig, eventslist=()):
        # fig: the figure whose canvas events will be captured.
        # eventslist: sequence of matplotlib event-name strings to connect.
        self.fig = fig
        if not is_sequence_of_strings(eventslist):
            raise ValueError("Requires a sequence of event name strings")
        self.eventslist = eventslist
    def on_event(self, event):
        """
        Event handler that will be passed to the current figure to
        retrieve events.
        """
        # Add a new event to list - using a separate function is
        # overkill for the base class, but this is consistent with
        # subclasses
        self.add_event(event)
        verbose.report("Event %i" % len(self.events))
        # This will extract info from events
        self.post_event()
        # Check if we have enough events already.
        # n <= 0 means "collect until explicitly stopped".
        if len(self.events) >= self.n and self.n > 0:
            self.fig.canvas.stop_event_loop()
    def post_event(self):
        """For baseclass, do nothing but collect events"""
        pass
    def cleanup(self):
        """Disconnect all callbacks"""
        for cb in self.callbacks:
            self.fig.canvas.mpl_disconnect(cb)
        self.callbacks = []
    def add_event(self, event):
        """For base class, this just appends an event to events."""
        self.events.append(event)
    def pop_event(self, index=-1):
        """
        This removes an event from the event list. Defaults to
        removing last event, but an index can be supplied. Note that
        this does not check that there are events, much like the
        normal pop method. If not events exist, this will throw an
        exception.
        """
        self.events.pop(index)
    def pop(self, index=-1):
        # Alias for pop_event; subclasses override pop with extra
        # bookkeeping.
        self.pop_event(index)
    pop.__doc__ = pop_event.__doc__
    def __call__(self, n=1, timeout=30):
        """
        Blocking call to retrieve n events
        """
        if not isinstance(n, int):
            raise ValueError("Requires an integer argument")
        self.n = n
        self.events = []
        self.callbacks = []
        # Ensure that the figure is shown
        self.fig.show()
        # connect the events to the on_event function call
        # NOTE(review): this loop rebinds ``n`` to each event-name string;
        # harmless because self.n was stored above, but confusing to read.
        for n in self.eventslist:
            self.callbacks.append(
                self.fig.canvas.mpl_connect(n, self.on_event))
        try:
            # Start event loop
            self.fig.canvas.start_event_loop(timeout=timeout)
        finally: # Run even on exception like ctrl-c
            # Disconnect the callbacks
            self.cleanup()
        # Return the events in this case
        return self.events
class BlockingMouseInput(BlockingInput):
    """
    Class that creates a callable object to retrieve mouse clicks in a
    blocking way.
    This class will also retrieve keyboard clicks and treat them like
    appropriate mouse clicks (delete and backspace are like mouse button 3,
    enter is like mouse button 2 and all others are like mouse button 1).
    """
    # Class-level defaults; __init__ overrides them per instance.
    button_add = 1
    button_pop = 3
    button_stop = 2
    def __init__(self, fig, mouse_add=1, mouse_pop=3, mouse_stop=2):
        BlockingInput.__init__(self, fig=fig,
                               eventslist=('button_press_event',
                                           'key_press_event'))
        self.button_add = mouse_add
        self.button_pop = mouse_pop
        self.button_stop = mouse_stop
    def post_event(self):
        """
        This will be called to process events
        """
        # Dispatch the most recent event to the key or mouse handler.
        if len(self.events) == 0:
            warnings.warn("No events yet")
        elif self.events[-1].name == 'key_press_event':
            self.key_event()
        else:
            self.mouse_event()
    def mouse_event(self):
        '''Process a mouse click event'''
        event = self.events[-1]
        button = event.button
        if button == self.button_pop:
            self.mouse_event_pop(event)
        elif button == self.button_stop:
            self.mouse_event_stop(event)
        else:
            self.mouse_event_add(event)
    def key_event(self):
        '''
        Process a key click event. This maps certain keys to appropriate
        mouse click events.
        '''
        event = self.events[-1]
        if event.key is None:
            # at least in mac os X gtk backend some key returns None.
            return
        key = event.key.lower()
        if key in ['backspace', 'delete']:
            self.mouse_event_pop(event)
        elif key in ['escape', 'enter']:
            # on windows XP and wxAgg, the enter key doesn't seem to register
            self.mouse_event_stop(event)
        else:
            self.mouse_event_add(event)
    def mouse_event_add(self, event):
        """
        Will be called for any event involving a button other than
        button 2 or 3. This will add a click if it is inside axes.
        """
        if event.inaxes:
            self.add_click(event)
        else: # If not a valid click, remove from event list
            BlockingInput.pop(self, -1)
    def mouse_event_stop(self, event):
        """
        Will be called for any event involving button 2.
        Button 2 ends blocking input.
        """
        # Remove last event just for cleanliness
        BlockingInput.pop(self, -1)
        # This will exit even if not in infinite mode. This is
        # consistent with MATLAB and sometimes quite useful, but will
        # require the user to test how many points were actually
        # returned before using data.
        self.fig.canvas.stop_event_loop()
    def mouse_event_pop(self, event):
        """
        Will be called for any event involving button 3.
        Button 3 removes the last click.
        """
        # Remove this last event
        BlockingInput.pop(self, -1)
        # Now remove any existing clicks if possible.
        # self.pop here resolves to the two-argument override below,
        # which drops both the click and its stored event.
        if len(self.events) > 0:
            self.pop(event, -1)
    def add_click(self, event):
        """
        This add the coordinates of an event to the list of clicks
        """
        self.clicks.append((event.xdata, event.ydata))
        verbose.report("input %i: %f,%f" %
                       (len(self.clicks), event.xdata, event.ydata))
        # If desired plot up click
        if self.show_clicks:
            line = mlines.Line2D([event.xdata], [event.ydata],
                                 marker='+', color='r')
            event.inaxes.add_line(line)
            self.marks.append(line)
            self.fig.canvas.draw()
    def pop_click(self, event, index=-1):
        """
        This removes a click from the list of clicks. Defaults to
        removing the last click.
        """
        self.clicks.pop(index)
        if self.show_clicks:
            mark = self.marks.pop(index)
            mark.remove()
            self.fig.canvas.draw()
            # NOTE: I do NOT understand why the above 3 lines does not work
            # for the keyboard backspace event on windows XP wxAgg.
            # maybe event.inaxes here is a COPY of the actual axes?
    def pop(self, event, index=-1):
        """
        This removes a click and the associated event from the object.
        Defaults to removing the last click, but any index can be
        supplied.
        """
        self.pop_click(event, index)
        BlockingInput.pop(self, index)
    def cleanup(self, event=None):
        # clean the figure: remove any click markers that were drawn
        if self.show_clicks:
            for mark in self.marks:
                mark.remove()
            self.marks = []
            self.fig.canvas.draw()
        # Call base class to remove callbacks
        BlockingInput.cleanup(self)
    def __call__(self, n=1, timeout=30, show_clicks=True):
        """
        Blocking call to retrieve n coordinate pairs through mouse
        clicks.
        """
        self.show_clicks = show_clicks
        self.clicks = []
        self.marks = []
        BlockingInput.__call__(self, n=n, timeout=timeout)
        return self.clicks
class BlockingContourLabeler(BlockingMouseInput):
    """
    Class that creates a callable object that uses mouse clicks or key
    clicks on a figure window to place contour labels.
    """
    def __init__(self, cs):
        # cs: the ContourSet whose labels are placed interactively.
        self.cs = cs
        BlockingMouseInput.__init__(self, fig=cs.ax.figure)
    def add_click(self, event):
        # Override: an "add" click places a contour label.
        self.button1(event)
    def pop_click(self, event, index=-1):
        # Override: a "pop" click removes the last contour label.
        self.button3(event)
    def button1(self, event):
        """
        This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
        """
        # Shorthand
        if event.inaxes == self.cs.ax:
            self.cs.add_label_near(event.x, event.y, self.inline,
                                   inline_spacing=self.inline_spacing,
                                   transform=False)
            self.fig.canvas.draw()
        else: # Remove event if not valid
            BlockingInput.pop(self)
    def button3(self, event):
        """
        This will be called if button 3 is clicked. This will remove
        a label if not in inline mode. Unfortunately, if one is doing
        inline labels, then there is currently no way to fix the
        broken contour - once humpty-dumpty is broken, he can't be put
        back together. In inline mode, this does nothing.
        """
        if self.inline:
            pass
        else:
            self.cs.pop_label()
            self.cs.ax.figure.canvas.draw()
    def __call__(self, inline, inline_spacing=5, n=-1, timeout=-1):
        # n=-1 / timeout=-1: collect clicks indefinitely until the user
        # explicitly stops (button 2 or an equivalent key).
        self.inline = inline
        self.inline_spacing = inline_spacing
        BlockingMouseInput.__call__(self, n=n, timeout=timeout,
                                    show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
    """
    Class that creates a callable object to retrieve a single mouse or
    keyboard click
    """
    def __init__(self, fig):
        BlockingInput.__init__(self, fig=fig, eventslist=(
            'button_press_event', 'key_press_event'))
    def post_event(self):
        """
        Determines if it is a key event
        """
        if len(self.events) == 0:
            warnings.warn("No events yet")
        else:
            # True when the last received event was a key press.
            self.keyormouse = self.events[-1].name == 'key_press_event'
    def __call__(self, timeout=30):
        """
        Blocking call to retrieve a single mouse or key click
        Returns True if key click, False if mouse, or None if timeout
        """
        # None is the sentinel for "no event before the timeout".
        self.keyormouse = None
        BlockingInput.__call__(self, n=1, timeout=timeout)
        return self.keyormouse
| gpl-3.0 |
linebp/pandas | pandas/tests/sparse/test_combine_concat.py | 15 | 13923 | # pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseSeriesConcat(object):
    # Tests for pd.concat on SparseSeries inputs: both sparse kinds,
    # explicit fill values, axis=1, and sparse/dense mixtures.
    def test_concat(self):
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        for kind in ['integer', 'block']:
            # Default fill_value (NaN): result keeps the input kind.
            sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
            sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
            res = pd.concat([sparse1, sparse2])
            exp = pd.concat([pd.Series(val1), pd.Series(val2)])
            exp = pd.SparseSeries(exp, kind=kind)
            tm.assert_sp_series_equal(res, exp)
            # Shared fill_value=0 is preserved by concat.
            sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
            sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
            res = pd.concat([sparse1, sparse2])
            exp = pd.concat([pd.Series(val1), pd.Series(val2)])
            exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
            tm.assert_sp_series_equal(res, exp)
    def test_concat_axis1(self):
        # Column-wise concat of two SparseSeries yields a SparseDataFrame.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name='x')
        sparse2 = pd.SparseSeries(val2, name='y')
        res = pd.concat([sparse1, sparse2], axis=1)
        exp = pd.concat([pd.Series(val1, name='x'),
                         pd.Series(val2, name='y')], axis=1)
        exp = pd.SparseDataFrame(exp)
        tm.assert_sp_frame_equal(res, exp)
    def test_concat_different_fill(self):
        # With mixed fill values, the first input's fill_value wins.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        for kind in ['integer', 'block']:
            sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
            sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
            res = pd.concat([sparse1, sparse2])
            exp = pd.concat([pd.Series(val1), pd.Series(val2)])
            exp = pd.SparseSeries(exp, kind=kind)
            tm.assert_sp_series_equal(res, exp)
            res = pd.concat([sparse2, sparse1])
            exp = pd.concat([pd.Series(val2), pd.Series(val1)])
            exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
            tm.assert_sp_series_equal(res, exp)
    def test_concat_axis1_different_fill(self):
        # axis=1 with different fill values: each column keeps its own,
        # so the comparison is done on the densified frame.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name='x')
        sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
        res = pd.concat([sparse1, sparse2], axis=1)
        exp = pd.concat([pd.Series(val1, name='x'),
                         pd.Series(val2, name='y')], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    def test_concat_different_kind(self):
        # With mixed sparse kinds, the first input's kind (and fill_value)
        # determines the result.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
        sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)
        res = pd.concat([sparse1, sparse2])
        exp = pd.concat([pd.Series(val1), pd.Series(val2)])
        exp = pd.SparseSeries(exp, kind='integer')
        tm.assert_sp_series_equal(res, exp)
        res = pd.concat([sparse2, sparse1])
        exp = pd.concat([pd.Series(val2), pd.Series(val1)])
        exp = pd.SparseSeries(exp, kind='block', fill_value=0)
        tm.assert_sp_series_equal(res, exp)
    def test_concat_sparse_dense(self):
        # use first input's fill_value
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        for kind in ['integer', 'block']:
            sparse = pd.SparseSeries(val1, name='x', kind=kind)
            dense = pd.Series(val2, name='y')
            res = pd.concat([sparse, dense])
            exp = pd.concat([pd.Series(val1), dense])
            exp = pd.SparseSeries(exp, kind=kind)
            tm.assert_sp_series_equal(res, exp)
            res = pd.concat([dense, sparse, dense])
            exp = pd.concat([dense, pd.Series(val1), dense])
            exp = pd.SparseSeries(exp, kind=kind)
            tm.assert_sp_series_equal(res, exp)
            # Same mixtures again, with an explicit fill_value of 0.
            sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
            dense = pd.Series(val2, name='y')
            res = pd.concat([sparse, dense])
            exp = pd.concat([pd.Series(val1), dense])
            exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
            tm.assert_sp_series_equal(res, exp)
            res = pd.concat([dense, sparse, dense])
            exp = pd.concat([dense, pd.Series(val1), dense])
            exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
            tm.assert_sp_series_equal(res, exp)
class TestSparseDataFrameConcat(object):
    # Tests for pd.concat on SparseDataFrame inputs: fill values, mixed
    # column sets, axis=1, series mixtures, and sparse/dense mixtures.
    def setup_method(self, method):
        # Three dense fixtures: dense1/dense2 share columns A-D,
        # dense3 has a disjoint column set E-H.
        self.dense1 = pd.DataFrame({'A': [0., 1., 2., np.nan],
                                    'B': [0., 0., 0., 0.],
                                    'C': [np.nan, np.nan, np.nan, np.nan],
                                    'D': [1., 2., 3., 4.]})
        self.dense2 = pd.DataFrame({'A': [5., 6., 7., 8.],
                                    'B': [np.nan, 0., 7., 8.],
                                    'C': [5., 6., np.nan, np.nan],
                                    'D': [np.nan, np.nan, np.nan, np.nan]})
        self.dense3 = pd.DataFrame({'E': [5., 6., 7., 8.],
                                    'F': [np.nan, 0., 7., 8.],
                                    'G': [5., 6., np.nan, np.nan],
                                    'H': [np.nan, np.nan, np.nan, np.nan]})
    def test_concat(self):
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse()
        res = pd.concat([sparse, sparse])
        exp = pd.concat([self.dense1, self.dense1]).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse2, sparse2])
        exp = pd.concat([self.dense2, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse2 = self.dense2.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse])
        exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0)
        # Align the frame-level default with the concat result so only
        # the per-column fill values are compared.
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse2, sparse2])
        exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
    def test_concat_different_fill_value(self):
        # 1st fill_value will be used
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
    def test_concat_different_columns(self):
        # Row-wise concat of frames with disjoint column sets.
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3])
        exp = pd.concat([self.dense1, self.dense3]).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse])
        exp = pd.concat([self.dense3, self.dense1]).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3])
        exp = pd.concat([self.dense1, self.dense3]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse])
        exp = pd.concat([self.dense3, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each columns keeps its fill_value, thus compare in dense
        res = pd.concat([sparse, sparse3])
        exp = pd.concat([self.dense1, self.dense3])
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse])
        exp = pd.concat([self.dense3, self.dense1])
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    def test_concat_series(self):
        # Concatenating a SparseDataFrame with a single SparseSeries column.
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse()
        for col in ['A', 'D']:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse()
            tm.assert_sp_frame_equal(res, exp)
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col], self.dense1]).to_sparse()
            tm.assert_sp_frame_equal(res, exp)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse2 = self.dense2.to_sparse(fill_value=0)
        for col in ['C', 'D']:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1,
                             self.dense2[col]]).to_sparse(fill_value=0)
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(res, exp)
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col],
                             self.dense1]).to_sparse(fill_value=0)
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(res, exp)
    def test_concat_axis1(self):
        # Column-wise concat of frames with disjoint column sets.
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3],
                        axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1],
                        axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each columns keeps its fill_value, thus compare in dense
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    def test_concat_sparse_dense(self):
        # Mixing a SparseDataFrame with a plain DataFrame: the result is
        # sparse, so equality is checked on the densified frame.
        sparse = self.dense1.to_sparse()
        res = pd.concat([sparse, self.dense2])
        exp = pd.concat([self.dense1, self.dense2])
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([self.dense2, sparse])
        exp = pd.concat([self.dense2, self.dense1])
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        sparse = self.dense1.to_sparse(fill_value=0)
        res = pd.concat([sparse, self.dense2])
        exp = pd.concat([self.dense1, self.dense2])
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([self.dense2, sparse])
        exp = pd.concat([self.dense2, self.dense1])
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([self.dense3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res, exp)
        res = pd.concat([sparse, self.dense3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res, exp)
| bsd-3-clause |
sgranitz/nw | predict400/week9.py | 2 | 1496 | ## Week 9: Probability Density Functions
# Consider the probability density function f(x) = (3/26)x2 on [1, 3].
# On the same interval, consider the functions g(x) = (3/26)x3 and
# h(x) = (x – 30/13)(3/26)x3, which when integrated over the interval [1, 3]
# represent the mean and variance, respectively. Using Python, verify that f(x)
# is a probability density function, that the mean is 30/13, the variance is
# approximately 0.2592 and determine the standard deviation.
# Also, use Python to graph these three functions together (use different colors for each)
# and indicate the mean and variance on the x-axis.
import numpy as np
import scipy as sp
import scipy.integrate  # submodules are not auto-imported by `import scipy`
from matplotlib import pyplot as plt
def f(x):
    """Probability density f(x) = (3/26) * x**2, defined on [1, 3]."""
    return x ** 2 * (3 / 26)
def g(x):
    """Mean integrand x * f(x) = (3/26) * x**3 on [1, 3]."""
    return x ** 3 * (3 / 26)
def h(x):
    """Variance integrand (x - mu) * x * f(x) with mu = 30/13.

    Integrated over [1, 3] this gives E[X^2] - mu*E[X] = Var(X).
    """
    mu = 30 / 13
    return (x - mu) * (3 / 26) * x ** 3
# Plotting grid covering the support [1, 3].
x = np.arange(1, 3.05, 0.05)

# quad returns (integral, abs_error_estimate).  Compare rounded values:
# exact float equality on quadrature output is fragile even when the
# analytic integral is exactly 1.
res = sp.integrate.quad(f, 1, 3)
print('Is f(x) prob density?', 1 == round(res[0], 10))
# True
mean = sp.integrate.quad(g, 1, 3)
print('Is mean 30/13?', round(30 / 13, 4) == round(mean[0], 4))
# True
var = sp.integrate.quad(h, 1, 3)
print('Is variance 0.2592?', 0.2592 == round(var[0], 4))
# True
# The assignment also asks for the standard deviation = sqrt(variance).
sd = np.sqrt(var[0])
print('Standard deviation:', round(sd, 4))
plt.plot(x, f(x), label='f(x)')
plt.plot(x, g(x), label='g(x)')
plt.plot(x, h(x), label='h(x)')
# Mark the mean and variance on the x-axis, as requested.
plt.plot(mean[0], 0, 'rs', label='mean')
plt.plot(var[0], 0, 'bs', label='var')
# Use formatted strings: passing a list to plt.text renders the list repr.
plt.text(1, 2.5, 'Mean = {0}'.format(round(mean[0], 4)))
plt.text(1, 2, 'Var = {0}'.format(round(var[0], 4)))
plt.legend()
plt.show()
| mit |
krez13/scikit-learn | sklearn/neighbors/approximate.py | 30 | 22370 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]

# Hashes are stored as big-endian unsigned 32-bit integers; MAX_HASH_SIZE
# is therefore 32 bits, the longest prefix length ever searched.
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
                               left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X

    Most significant bits are considered as the prefix.  A vectorized
    binary search over prefix length is run for all queries at once:
    ``lo``/``hi`` bracket the answer per query, ``res`` holds the longest
    length at which a match was found so far.
    """
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)
    # First probe at full hash length: queries with a full-length match
    # are finished immediately (lo == hi terminates them below).
    left_idx, right_idx = _find_matching_indices(tree, bin_X,
                                                 left_masks[hi],
                                                 right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size
    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    while kept.shape[0]:
        mid = (lo.take(kept) + hi.take(kept)) // 2
        left_idx, right_idx = _find_matching_indices(tree,
                                                     bin_X.take(kept),
                                                     left_masks[mid],
                                                     right_masks[mid])
        found = right_idx > left_idx
        mid_found = mid[found]
        # A match at length mid: the answer is >= mid, search above it.
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        # No match at mid: the answer is < mid.
        hi[kept[~found]] = mid[~found]
        kept = r[lo < hi]
    return res
class ProjectionToHashMixin(object):
    """Turn a transformed real-valued array into a hash"""

    @staticmethod
    def _to_hash(projected):
        """Pack the sign pattern of ``projected`` into 32-bit hash words."""
        if projected.shape[1] % 8 != 0:
            raise ValueError('Require reduced dimensionality to be a multiple '
                             'of 8 for hashing')
        # Sign bits are packed 8-per-byte, then reinterpreted as
        # big-endian 32-bit words.  XXX: perhaps non-copying operation better
        sign_bits = (projected > 0).astype(int)
        hashed = np.packbits(sign_bits).view(dtype=HASH_DTYPE)
        return hashed.reshape(projected.shape[0], -1)

    def fit_transform(self, X, y=None):
        """Fit the underlying projector on X, then hash X."""
        self.fit(X)
        return self.transform(X)

    def transform(self, X, y=None):
        """Project X with the parent transformer and hash the result."""
        projected = super(ProjectionToHashMixin, self).transform(X)
        return self._to_hash(projected)
class GaussianRandomProjectionHash(ProjectionToHashMixin,
                                   GaussianRandomProjection):
    """Use GaussianRandomProjection to produce a cosine LSH fingerprint"""

    # n_components defaults to 8 so the projected width is a multiple of 8,
    # as required by ProjectionToHashMixin._to_hash for bit-packing.
    def __init__(self,
                 n_components=8,
                 random_state=None):
        super(GaussianRandomProjectionHash, self).__init__(
            n_components=n_components,
            random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
    """Performs approximate nearest neighbor search using LSH forest.

    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    method for vanilla approximate nearest neighbor search methods.
    LSH forest data structure has been implemented using sorted
    arrays and binary search and 32 bit fixed-length hashes.
    Random projection is used as the hash family which approximates
    cosine distance.

    The cosine distance is defined as ``1 - cosine_similarity``: the lowest
    value is 0 (identical point) but it is bounded above by 2 for the farthest
    points. Its value does not depend on the norm of the vector points but
    only on their relative angles.

    Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.

    Parameters
    ----------
    n_estimators : int (default = 10)
        Number of trees in the LSH Forest.

    min_hash_match : int (default = 4)
        lowest hash length to be searched when candidate selection is
        performed for nearest neighbors.

    n_candidates : int (default = 10)
        Minimum number of candidates evaluated per estimator, assuming enough
        items meet the `min_hash_match` constraint.

    n_neighbors : int (default = 5)
        Number of neighbors to be returned from query function when
        it is not provided to the :meth:`kneighbors` method.

    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth`radius_neighbors` queries.

    radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranges from 0 to 1. Radius neighbors will be searched until
        the ratio between total neighbors within the radius and the total
        candidates becomes less than this value unless it is terminated by
        hash length reaching `min_hash_match`.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    hash_functions_ : list of GaussianRandomProjectionHash objects
        Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
        stored in GaussianRandomProjectionHash object and can be obtained
        from ``components_`` attribute.

    trees_ : array, shape (n_estimators, n_samples)
        Each tree (corresponding to a hash function) contains an array of
        sorted hashed values. The array representation may change in future
        versions.

    original_indices_ : array, shape (n_estimators, n_samples)
        Original indices of sorted hashed values in the fitted index.

    References
    ----------
    .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
           Indexes for Similarity Search", WWW '05 Proceedings of the
           14th international conference on World Wide Web,  651-660,
           2005.

    Examples
    --------
    >>> from sklearn.neighbors import LSHForest

    >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
    >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
    >>> lshf = LSHForest(random_state=42)
    >>> lshf.fit(X_train)  # doctest: +NORMALIZE_WHITESPACE
    LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
              n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
              random_state=42)
    >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
    >>> distances                                        # doctest: +ELLIPSIS
    array([[ 0.069...,  0.149...],
           [ 0.229...,  0.481...],
           [ 0.004...,  0.014...]])
    >>> indices
    array([[1, 2],
           [2, 0],
           [4, 0]])

    """

    def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
                 n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
                 random_state=None):
        self.n_estimators = n_estimators
        self.radius = radius
        self.random_state = random_state
        self.n_candidates = n_candidates
        self.n_neighbors = n_neighbors
        self.min_hash_match = min_hash_match
        self.radius_cutoff_ratio = radius_cutoff_ratio

    def _compute_distances(self, query, candidates):
        """Computes the cosine distance.

        Distance is from the query to points in the candidates array.
        Returns argsort of distances in the candidates
        array and sorted distances.
        """
        if candidates.shape == (0,):
            # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
            return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
        if sparse.issparse(self._fit_X):
            candidate_X = self._fit_X[candidates]
        else:
            candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
        distances = pairwise_distances(query, candidate_X,
                                       metric='cosine')[0]
        distance_positions = np.argsort(distances)
        distances = distances.take(distance_positions, mode='clip', axis=0)
        return distance_positions, distances

    def _generate_masks(self):
        """Creates left and right masks for all hash lengths.

        For a prefix length h, ``_left_mask[h]`` keeps the h most
        significant bits and ``_right_mask[h]`` sets the remaining bits.
        """
        tri_size = MAX_HASH_SIZE + 1
        # Called once on fitting, output is independent of hashes
        left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
        right_mask = left_mask[::-1, ::-1]
        self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
        self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)

    def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
        """Performs the Synchronous ascending phase.

        Starting from the longest matched prefix, the prefix length is
        shortened until enough candidates have been collected.  Returns an
        array of candidates, their distance ranks and distances.
        """
        index_size = self._fit_X.shape[0]
        # Number of candidates considered including duplicates
        # XXX: not sure whether this is being calculated correctly wrt
        #      duplicates from different iterations through a single tree
        n_candidates = 0
        candidate_set = set()
        min_candidates = self.n_candidates * self.n_estimators
        while (max_depth > self.min_hash_match and
               (n_candidates < min_candidates or
                len(candidate_set) < n_neighbors)):

            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                n_candidates += stop - start
                candidate_set.update(
                    self.original_indices_[i][start:stop].tolist())
            max_depth -= 1

        candidates = np.fromiter(candidate_set, count=len(candidate_set),
                                 dtype=np.intp)
        # For insufficient candidates, candidates are filled.
        # Candidates are filled from unselected indices uniformly.
        if candidates.shape[0] < n_neighbors:
            warnings.warn(
                "Number of candidates is not sufficient to retrieve"
                " %i neighbors with"
                " min_hash_match = %i. Candidates are filled up"
                " uniformly from unselected"
                " indices." % (n_neighbors, self.min_hash_match))
            remaining = np.setdiff1d(np.arange(0, index_size), candidates)
            to_fill = n_neighbors - candidates.shape[0]
            candidates = np.concatenate((candidates, remaining[:to_fill]))

        ranks, distances = self._compute_distances(query,
                                                   candidates.astype(int))
        return (candidates[ranks[:n_neighbors]],
                distances[:n_neighbors])

    def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
        """Finds radius neighbors from the candidates obtained.

        Their distances from query are smaller than radius.
        Returns radius neighbors and distances.  The prefix length is
        shortened until the fraction of candidates inside the radius
        drops below ``radius_cutoff_ratio``'s complement.
        """
        ratio_within_radius = 1
        threshold = 1 - self.radius_cutoff_ratio
        total_candidates = np.array([], dtype=int)
        total_neighbors = np.array([], dtype=int)
        total_distances = np.array([], dtype=float)

        while (max_depth > self.min_hash_match and
               ratio_within_radius > threshold):
            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            candidates = []
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                candidates.extend(
                    self.original_indices_[i][start:stop].tolist())
            # only evaluate candidates not seen at a longer prefix already
            candidates = np.setdiff1d(candidates, total_candidates)
            total_candidates = np.append(total_candidates, candidates)
            ranks, distances = self._compute_distances(query, candidates)
            m = np.searchsorted(distances, radius, side='right')
            positions = np.searchsorted(total_distances, distances[:m])
            # keep the accumulated neighbor/distance arrays sorted
            total_neighbors = np.insert(total_neighbors, positions,
                                        candidates[ranks[:m]])
            total_distances = np.insert(total_distances, positions,
                                        distances[:m])
            ratio_within_radius = (total_neighbors.shape[0] /
                                   float(total_candidates.shape[0]))
            max_depth = max_depth - 1
        return total_neighbors, total_distances

    def fit(self, X, y=None):
        """Fit the LSH forest on the data.

        This creates binary hashes of input data points by getting the
        dot product of input points and hash_function then
        transforming the projection into a binary string array based
        on the sign (positive/negative) of the projection.
        A sorted array of binary hashes is created.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        self : object
            Returns self.
        """
        self._fit_X = check_array(X, accept_sparse='csr')

        # Creates a g(p,x) for each tree
        self.hash_functions_ = []
        self.trees_ = []
        self.original_indices_ = []

        rng = check_random_state(self.random_state)
        int_max = np.iinfo(np.int32).max

        for i in range(self.n_estimators):
            # This is g(p,x) for a particular tree.
            # Builds a single tree. Hashing is done on an array of data points.
            # `GaussianRandomProjection` is used for hashing.
            # `n_components=hash size and n_features=n_dim.
            hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
                                                  rng.randint(0, int_max))
            hashes = hasher.fit_transform(self._fit_X)[:, 0]
            original_index = np.argsort(hashes)
            bin_hashes = hashes[original_index]
            self.original_indices_.append(original_index)
            self.trees_.append(bin_hashes)
            self.hash_functions_.append(hasher)

        self._generate_masks()

        return self

    def _query(self, X):
        """Performs descending phase to find maximum depth.

        Returns the per-tree hashes of each query and, per query, the
        maximum prefix-match length over all trees.
        """
        # Calculate hashes of shape (n_samples, n_estimators, [hash_size])
        bin_queries = np.asarray([hasher.transform(X)[:, 0]
                                  for hasher in self.hash_functions_])
        bin_queries = np.rollaxis(bin_queries, 1)

        # descend phase
        depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
                                             self._left_mask, self._right_mask)
                  for tree, tree_queries in zip(self.trees_,
                                                np.rollaxis(bin_queries, 1))]

        return bin_queries, np.max(depths, axis=0)

    def kneighbors(self, X, n_neighbors=None, return_distance=True):
        """Returns n_neighbors of approximate nearest neighbors.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single query.

        n_neighbors : int, optional (default = None)
            Number of neighbors required. If not provided, this will
            return the number specified at the initialization.

        return_distance : boolean, optional (default = True)
            Returns the distances of neighbors if set to True.

        Returns
        -------
        dist : array, shape (n_samples, n_neighbors)
            Array representing the cosine distances to each point,
            only present if return_distance=True.

        ind : array, shape (n_samples, n_neighbors)
            Indices of the approximate nearest points in the population
            matrix.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")

        if n_neighbors is None:
            n_neighbors = self.n_neighbors

        X = check_array(X, accept_sparse='csr')

        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            neighs, dists = self._get_candidates(X[[i]], max_depth[i],
                                                 bin_queries[i],
                                                 n_neighbors)
            neighbors.append(neighs)
            distances.append(dists)

        if return_distance:
            return np.array(distances), np.array(neighbors)
        else:
            return np.array(neighbors)

    def radius_neighbors(self, X, radius=None, return_distance=True):
        """Finds the neighbors within a given radius of a point or points.

        Return the indices and distances of some points from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.

        The result points are *not* necessarily sorted by distance to their
        query point.

        LSH Forest being an approximate method, some true neighbors from the
        indexed dataset might be missing from the results.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.

        radius : float
            Limiting distance of neighbors to return.
            (default is the value passed to the constructor).

        return_distance : boolean, optional (default = True)
            Returns the distances of neighbors if set to True.

        Returns
        -------
        dist : array, shape (n_samples,) of arrays
            Each element is an array representing the cosine distances
            to some points found within ``radius`` of the respective query.
            Only present if ``return_distance=True``.

        ind : array, shape (n_samples,) of arrays
            Each element is an array of indices for neighbors within ``radius``
            of the respective query.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")

        if radius is None:
            radius = self.radius

        X = check_array(X, accept_sparse='csr')

        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
                                                       bin_queries[i], radius)
            neighbors.append(neighs)
            distances.append(dists)

        if return_distance:
            # object arrays: per-query neighbor counts differ
            return _array_of_arrays(distances), _array_of_arrays(neighbors)
        else:
            return _array_of_arrays(neighbors)

    def partial_fit(self, X, y=None):
        """
        Inserts new data into the already fitted LSH Forest.

        Cost is proportional to new total size, so additions
        should be batched.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            New data point to be inserted into the LSH Forest.
        """
        X = check_array(X, accept_sparse='csr')
        if not hasattr(self, 'hash_functions_'):
            return self.fit(X)

        if X.shape[1] != self._fit_X.shape[1]:
            raise ValueError("Number of features in X and"
                             " fitted array does not match.")
        n_samples = X.shape[0]
        n_indexed = self._fit_X.shape[0]

        for i in range(self.n_estimators):
            bin_X = self.hash_functions_[i].transform(X)[:, 0]
            # gets the position to be added in the tree.
            positions = self.trees_[i].searchsorted(bin_X)
            # adds the hashed value into the tree.
            self.trees_[i] = np.insert(self.trees_[i],
                                       positions, bin_X)
            # add the entry into the original_indices_.
            self.original_indices_[i] = np.insert(self.original_indices_[i],
                                                  positions,
                                                  np.arange(n_indexed,
                                                            n_indexed +
                                                            n_samples))

        # adds the entry into the input_array.
        if sparse.issparse(X) or sparse.issparse(self._fit_X):
            self._fit_X = sparse.vstack((self._fit_X, X))
        else:
            self._fit_X = np.row_stack((self._fit_X, X))

        return self
| bsd-3-clause |
davidwhogg/Avast | code/triangle_basis.py | 1 | 10392 | """
This file is part of the Avast project.
Copyright 2016 Megan Bedell (Chicago) and David W. Hogg (NYU).
"""
import glob
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from scipy.optimize import minimize
from scipy.linalg import svd
from scipy.io.idl import readsav
c = 2.99792458e8   # m/s (speed of light)
# Scalings applied to parameters inside the packed vector so that the
# optimizer sees values of comparable magnitude.
xi_scale = 1.e7
scale_scale = 1.e-4
am_penalty = 1.0  # MAGIC NUMBER
xi_penalty = 3.e-6  # MAGIC NUMBER (1 km/s-ish)
def unpack_pars(pars, n_ms, n_epoch):
    """Split the packed parameter vector into its three pieces.

    Returns ``(ams, scales, xis)``: the n_ms triangle coefficients, the
    per-epoch flux scales (divided by ``scale_scale``) and the per-epoch
    ln-wavelength shifts (divided by ``xi_scale``).
    """
    ams = pars[:n_ms]
    scales = pars[n_ms:n_ms + n_epoch] / scale_scale
    xis = pars[n_ms + n_epoch:] / xi_scale
    return ams, scales, xis
def xi_to_v(xi):
    """Map an ln-wavelength Doppler shift ``xi`` to a velocity in m/s."""
    velocity = np.tanh(xi) * c
    return velocity
def v_to_xi(v):
    """Inverse of ``xi_to_v``: velocity in m/s to ln-wavelength shift."""
    beta = v / c
    return np.arctanh(beta)
def g(xs, xms, del_x):
    """Evaluate unit triangle (hat) functions centred at ``xms`` at ``xs``.

    ``xs`` and ``xms`` must be broadcastable; each triangle has height 1
    at its centre and falls to 0 at distance ``del_x``.
    """
    scaled_dist = np.abs(xs - xms) / del_x
    return np.clip(1. - scaled_dist, 0., None)
def dg_dx(xs, xms, del_x):
    """Derivative of ``g()`` w.r.t. x: +-1/del_x on each triangle's
    support, zero outside it."""
    slope_sign = np.sign(xms - xs)
    # zero out points beyond the triangle's footprint
    slope_sign[np.abs(xms - xs) > del_x] = 0.
    return slope_sign / del_x
def f(xs, xms, del_x, ams):
    """Evaluate the triangle-basis model at ``xs``.

    xs : ln(wavelength) at point(s) of interest
    xms : ln(wavelength) grid, shape (M)
    del_x : ln(wavelength) spacing of xms
    ams : function coefficients, shape (M)
    """
    # hat-function design matrix, shape (len(xs), M)
    basis = np.maximum(1. - np.abs(xs[:, None] - xms[None, :]) / del_x, 0.)
    return np.sum(ams[None, :] * basis, axis=1)
def resid_function(pars, xs, ys, yerrs, xms, del_x):
    """
    function to minimize

    Returns the flattened vector of error-normalized data residuals for
    every epoch, followed by L2 penalty terms that pull the triangle
    coefficients toward 1 and the Doppler shifts toward 0.

    ## bugs:
    - needs proper comment header.
    - array indexing is very brittle.
    - penalty terms are arbitrary.
    """
    n_epoch = len(xs)
    n_ms = len(xms)
    n_x = len(xs[0])  # assumes all xs are the same length
    ams, scales, xis = unpack_pars(pars, n_ms, n_epoch)
    resid = np.zeros((n_epoch*n_x + n_ms + n_epoch))
    for j in range(n_epoch):
        # shift epoch j's wavelengths, evaluate and scale the model
        xprimes = xs[j] + xis[j]
        calc = scales[j] * f(xprimes, xms, del_x, ams)
        resid[j*n_x:(j+1)*n_x] = (ys[j] - calc) / yerrs[j]
    # regularization pseudo-residuals occupy the tail of the vector
    resid[-n_epoch-n_ms:-n_epoch] = (ams - 1.) / am_penalty
    resid[-n_epoch:] = (xis - 0.) / xi_penalty
    return resid.flatten()
def resid_deriv(pars, xs, ys, yerrs, xms, del_x):
    """
    derivatives of resid_function() wrt pars

    Returns the Jacobian matrix, shape (n_resid, n_pars), laid out to
    match the residual vector of ``resid_function`` exactly.  The
    1/scale_scale and 1/xi_scale factors undo the packing applied in
    ``unpack_pars``.
    """
    n_epoch = len(xs)
    n_ms = len(xms)
    n_x = len(xs[0])  # assumes all xs are the same length
    ams, scales, xis = unpack_pars(pars, n_ms, n_epoch)
    resid_deriv = np.zeros((n_epoch*n_x + n_ms + n_epoch, len(pars)))
    for j in range(n_epoch):
        xprimes = xs[j] + xis[j]
        # d(resid)/d(ams): minus the scaled basis matrix over the errors
        dy_dams = scales[j] * g(xprimes[:,None], xms[None,:], del_x)
        resid_deriv[j*n_x:(j+1)*n_x,:n_ms] = - dy_dams / (yerrs[j])[:,None]
        # d(resid)/d(scale_j)
        dy_dsj = f(xprimes, xms, del_x, ams)
        resid_deriv[j*n_x:(j+1)*n_x,n_ms+j] = - dy_dsj / yerrs[j] / scale_scale
        # d(resid)/d(xi_j): chain rule through the shifted basis
        dy_dxij = scales[j] * np.sum(ams[None,:] * dg_dx(xprimes[:,None],
                                     xms[None,:], del_x), axis=1)
        resid_deriv[j*n_x:(j+1)*n_x,-n_epoch+j] = - dy_dxij / yerrs[j] / xi_scale
    # penalty-term derivatives (identity blocks)
    resid_deriv[-n_epoch-n_ms:-n_epoch,:n_ms] = np.eye(n_ms) / am_penalty
    resid_deriv[-n_epoch:,-n_epoch:] = np.eye(n_epoch) / xi_penalty / xi_scale
    return resid_deriv
def obj_function(pars, xs, ys, yerrs, xms, del_x):
    """Scalar objective: sum of squared (penalized) residuals."""
    residuals = resid_function(pars, xs, ys, yerrs, xms, del_x)
    return np.dot(residuals, residuals)
def obj_deriv(pars, xs, ys, yerrs, xms, del_x):
    """Gradient of ``obj_function`` w.r.t. ``pars``: 2 * J^T r."""
    residuals = resid_function(pars, xs, ys, yerrs, xms, del_x)
    jacobian = resid_deriv(pars, xs, ys, yerrs, xms, del_x)
    return 2.0 * np.dot(residuals, jacobian)
def min_v(pars, i, xs, ys, yerrs, xms, del_x):
    """Brute-force scan of the Doppler-shift parameter for one epoch.

    Evaluates the objective on a 100-point grid of xi values around the
    current value for epoch ``i``, saves a diagnostic plot of objective
    vs. velocity, and returns a copy of ``pars`` with epoch i's xi set
    to the grid minimum.

    Fixes two defects of the original: it read module-level globals
    ``xis``/``n_epoch`` instead of deriving them from its arguments, and
    it wrote *unscaled* xi values into the packed vector even though
    ``unpack_pars`` divides that slot by ``xi_scale``.
    """
    n_epoch = len(xs)
    n_ms = len(xms)
    ams, scales, xis = unpack_pars(pars, n_ms, n_epoch)
    tmp_pars = np.copy(pars)
    obj = []  # objective function
    xi0 = []
    for xi in np.linspace(xis[i] - 1., xis[i] + 1., 100):
        # pars stores xis pre-multiplied by xi_scale (see unpack_pars)
        tmp_pars[-n_epoch + i] = xi * xi_scale
        resids = resid_function(tmp_pars, xs, ys, yerrs, xms, del_x)
        obj = np.append(obj, np.dot(resids, resids))
        xi0 = np.append(xi0, xi)
    plt.clf()
    plt.plot(xi_to_v(xi0), obj)
    plt.axvline(xi_to_v(xis[i]))
    plt.xlabel('v (m/s)')
    plt.ylabel('objective function')
    plt.savefig('objectivefn_v{0}.png'.format(i))
    xi_min = xi0[np.argmin(obj)]
    tmp_pars[-n_epoch + i] = xi_min * xi_scale
    return tmp_pars
def save_plot(xs, obs, calc, resid, x_plot, calc_plot, save_name, i):
    """Save a two-panel figure for epoch ``i``: data vs. model on top,
    (O-C) residuals below.

    xs, obs : ln(wavelength) and observed flux at the data points
    calc : model flux at the data points
    resid : scaled residuals at the data points
    x_plot, calc_plot : fine grid for the smooth model curve
    save_name : output filename
    """
    # convert ln(wavelength) back to wavelength for the axis labels
    xs = np.e**xs
    x_plot = np.e**x_plot
    fig = plt.figure()
    ax1 = fig.add_subplot(2,1,1)
    ax1.step(xs,obs, color='black', label='Observed')
    ax1.plot(x_plot,calc_plot, color='red', label='Calculated')
    ax1.set_ylabel('Flux')
    #ax1.legend()
    ax1.set_title('Epoch {0}'.format(i))
    ax1.set_xticklabels( () )
    ax2 = fig.add_subplot(2,1,2)
    ax2.step(xs,obs - calc, color='black')
    ax2.step(xs,resid, color='red')
    ax2.set_ylabel('(O-C)')
    ax2.ticklabel_format(useOffset=False)
    ax2.set_xlabel(r'Wavelength ($\AA$)')
    # shared x tick spacing for both panels
    majorLocator = MultipleLocator(1)
    minorLocator = MultipleLocator(0.1)
    ax1.xaxis.set_minor_locator(minorLocator)
    ax1.xaxis.set_major_locator(majorLocator)
    ax2.xaxis.set_minor_locator(minorLocator)
    ax2.xaxis.set_major_locator(majorLocator)
    majorLocator = MultipleLocator(5000)
    ax1.yaxis.set_major_locator(majorLocator)
    majorLocator = MultipleLocator(200)
    ax2.yaxis.set_major_locator(majorLocator)
    ax2.set_ylim([-500,500])
    fig.subplots_adjust(hspace=0.05)
    ax1.set_xlim([x_plot.min(),x_plot.max()])
    ax2.set_xlim([x_plot.min(),x_plot.max()])
    plt.savefig(save_name)
# NOTE(review): this script uses Python 2 print statements throughout;
# it predates Python 3 and will not run unmodified under py3.
if __name__ == "__main__":
    # --- load all epochs' spectra into (n_epoch, n_wave) arrays ---
    data_dir = '../data/halpha/'
    print "Reading files..."
    filelist = glob.glob(data_dir+'*.txt')
    nfile = len(filelist)
    xs = None
    for e,fn in enumerate(filelist):
        w, s = np.loadtxt(fn, unpack=True)
        if xs is None:
            # allocate on first file; all files must share this length
            nwave = len(w)
            xs, ys = np.zeros([nfile,nwave]), np.zeros([nfile,nwave])
        assert len(w) == nwave
        xs[e] = np.log(w)
        ys[e] = s
    print "Got data!"
    yerrs = np.sqrt(ys)  # assumes Poisson noise and a gain of 1.0
    # triangle-basis grid spanning the full wavelength range
    del_x = 1.3e-5/2.0
    xms = np.arange(np.min(xs) - 0.5 * del_x, np.max(xs) + 0.99 * del_x, del_x)
    # initial fit to ams & scales:
    fa = (xs, ys, yerrs, xms, del_x)
    n_epoch = len(xs)
    n_ms = len(xms)
    ams0 = np.ones(n_ms)
    scales0 = [np.median(s) * scale_scale for s in ys]
    xis0 = np.zeros(n_epoch) * xi_scale
    #xis0 = np.random.normal(size=n_epoch)/1.e7 # ~10 m/s level
    pars0 = np.append(ams0, np.append(scales0, xis0))
    print "Optimizing...."
    gtol = 1.e-9
    res = minimize(obj_function, pars0, args=fa, method='BFGS', jac=obj_deriv)
    print "Solution achieved!"
    # look at the fit:
    pars = res['x']
    ams, scales, xis = unpack_pars(pars, n_ms, n_epoch)
    vs = xi_to_v(xis)
    print "Velocities:", vs
    # NOTE(review): `e` here is the last loop index from the read loop above
    calcs = np.zeros((n_epoch, len(xs[e])))
    for e in range(n_epoch):
        xprimes = xs[e] + xis[e]
        calc = f(xprimes, xms, del_x, ams * scales[e])
        x_plot = np.linspace(xprimes[0],xprimes[-1],num=5000)
        calc_plot = f(x_plot+xis[e], xms, del_x, ams * scales[e])
        resid = resid_function(pars, xs, ys, yerrs, xms, del_x)
        # extract (and un-normalize) this epoch's data residuals
        n_x = len(xs[e])
        resid = resid[e*n_x:(e+1)*n_x] * yerrs[e]
        if e == 0:
            save_plot(xs[e], ys[e], calc, resid, x_plot, calc_plot, 'fig/epoch'+str(e)+'.pdf', e)
        calcs[e] = calc
    # PCA of the scaled residuals across epochs
    scaled_resids = (ys - calcs) / scales[:,None]
    u, s, v = svd(scaled_resids, full_matrices=False)
    u.shape, s.shape, v.shape
    # compare against the HARPS pipeline RVs (hard-coded local path)
    data_dir = "/Users/mbedell/Documents/Research/HARPSTwins/Results/"
    pipeline = readsav(data_dir+'HIP22263_result.dat')
    plt.scatter((pipeline.rv-np.mean(pipeline.rv))*1.e3, u[:,0])
    plt.xlabel('(Relative) Pipeline RV (m/s)')
    plt.ylabel('First PCA Component')
    plt.savefig('fig/halpha_rvpca.png')
    plt.clf()
    plt.scatter((pipeline.shk), u[:,0])
    plt.xlabel('SHK Index')
    plt.ylabel('First PCA Component')
    plt.savefig('fig/halpha_shkpca.png')
    # disabled diagnostic plots of objective-function slices
    if False:
        # plotting objective function with various parameters:
        tmp_pars = np.copy(pars)
        obj = [] # objective function
        a20 = []
        for a in np.linspace(ams[20]-100.,ams[20]+100.,100):
            tmp_pars[20] = a
            resids = resid_function(tmp_pars, xs, ys, yerrs, xms, del_x)
            obj = np.append(obj, np.dot(resids,resids))
            a20 = np.append(a20,a)
        plt.clf()
        plt.plot(a20,obj)
        plt.axvline(ams[20], linestyle='solid')
        plt.xlabel(r'a$_{20}$')
        plt.ylabel('objective function')
        plt.savefig('objectivefn_a20.png')
        plt.clf()
        tmp_pars = np.copy(pars)
        obj = [] # objective function
        scale0 = []
        for s in np.linspace(scales[0]*0.95,scales[0]*1.05,100):
            tmp_pars[n_ms] = s
            resids = resid_function(tmp_pars, xs, ys, yerrs, xms, del_x)
            obj = np.append(obj, np.dot(resids,resids))
            scale0 = np.append(scale0,s)
        plt.clf()
        plt.plot(scale0,obj)
        plt.axvline(scales[0], linestyle='solid')
        plt.xlabel(r'scale$_{0}$')
        plt.ylabel('objective function')
        plt.savefig('objectivefn_scale0.png')
        plt.clf()
        tmp_pars = np.copy(pars)
        obj = [] # objective function
        xi0 = []
        for xi in np.linspace(xis[0]*0.95,xis[0]*1.05,100):
            tmp_pars[n_ms+n_epoch] = xi
            resids = resid_function(tmp_pars, xs, ys, yerrs, xms, del_x)
            obj = np.append(obj, np.dot(resids,resids))
            xi0 = np.append(xi0,xi)
        plt.clf()
        plt.plot(xi0,obj)
        plt.axvline(xis[0], linestyle='solid')
        plt.xlabel(r'$\xi_{0}$')
        plt.ylabel('objective function')
        plt.savefig('objectivefn_xi0.png')
        plt.clf()
    # NOTE(review): the block below is a string literal, not code; the
    # min_v/resid_function calls inside it are missing the yerrs argument.
    '''''
    # optimize one epoch at a time:
    for i in range(n_epoch):
        pars = min_v(pars, i, xs, ys, xms, del_x)
        resids = resid_function(pars, xs, ys, xms, del_x)
        ams, scales, xis = unpack_pars(pars, n_ms, n_epoch)
        print "Optimization of velocity at epoch {0}:".format(i)
        print "Objective function value: {0}".format(np.dot(resids,resids))
        #vs = xi_to_v(xis)
        #print "Velocities:", vs
    '''
| mit |
larsoner/mne-python | mne/tests/test_label.py | 8 | 41281 | from itertools import product
import glob
import os
import os.path as op
import pickle
import shutil
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal)
import pytest
from mne.datasets import testing
from mne import (read_label, stc_to_label, read_source_estimate,
read_source_spaces, grow_labels, read_labels_from_annot,
write_labels_to_annot, split_label, spatial_tris_adjacency,
read_surface, random_parcellation, morph_labels,
labels_to_stc)
from mne.label import (Label, _blend_colors, label_sign_flip, _load_vert_pos,
select_sources)
from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
run_tests_if_main, check_version)
from mne.label import _n_colors, _read_annot, _read_annot_cands
from mne.source_space import SourceSpaces
from mne.source_estimate import mesh_edges
# Paths into the mne testing dataset (download=False: may be absent, in
# which case @testing.requires_testing_data skips the tests that need them).
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
stc_fname = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
                           'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
                              'Aud-rh.label')
v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
fwd_fname = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                        'fsaverage-ico-5-src.fif')
label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')

# Small "fake" labels shipped with the package itself (always available).
test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')

# This code was used to generate the "fake" test labels:
# for hemi in ['lh', 'rh']:
#     label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
#                   hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
#     label.save(op.join(test_path, 'test-%s.label' % hemi))

# XXX : this was added for backward compat and keep the old test_label_in_src
def _stc_to_label(stc, src, smooth, subjects_dir=None):
    """Compute a label from the non-zero sources in an stc object.

    Parameters
    ----------
    stc : SourceEstimate
        The source estimates.
    src : SourceSpaces | str | None
        The source space over which the source estimates are defined.
        If it's a string it should the subject name (e.g. fsaverage).
        Can be None if stc.subject is not None.
    smooth : int
        Number of smoothing iterations.
    subjects_dir : str | None
        Path to SUBJECTS_DIR if it is not set in the environment.

    Returns
    -------
    labels : list of Labels | list of list of Labels
        The generated labels. If connected is False, it returns
        a list of Labels (one per hemisphere). If no Label is available
        in a hemisphere, None is returned. If connected is True,
        it returns for each hemisphere a list of connected labels
        ordered in decreasing order depending of the maximum value in the stc.
        If no Label is available in an hemisphere, an empty list is returned.
    """
    src = stc.subject if src is None else src
    if isinstance(src, str):
        subject = src
    else:
        subject = stc.subject

    # Resolve surface geometry: either read FreeSurfer white surfaces for
    # the named subject, or pull vertices/triangles from the SourceSpaces.
    if isinstance(src, str):
        subjects_dir = get_subjects_dir(subjects_dir)
        surf_path_from = op.join(subjects_dir, src, 'surf')
        rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
        rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
        rr = [rr_lh, rr_rh]
        tris = [tris_lh, tris_rh]
    else:
        if not isinstance(src, SourceSpaces):
            raise TypeError('src must be a string or a set of source spaces')
        if len(src) != 2:
            raise ValueError('source space should contain the 2 hemispheres')
        # source-space coordinates are in meters; labels use millimeters
        rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
        tris = [src[0]['tris'], src[1]['tris']]

    labels = []
    cnt = 0  # running offset into stc.data across hemispheres
    for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
            zip(['lh', 'rh'], stc.vertices, tris, rr)):
        this_data = stc.data[cnt:cnt + len(this_vertno)]
        # adjacency matrix of the mesh, binarized and with self-loops added
        e = mesh_edges(this_tris)
        e.data[e.data == 2] = 1
        n_vertices = e.shape[0]
        e = e + sparse.eye(n_vertices, n_vertices)
        # a single "cluster": all vertices with any non-zero time point
        clusters = [this_vertno[np.any(this_data, axis=1)]]
        cnt += len(this_vertno)
        clusters = [c for c in clusters if len(c) > 0]
        if len(clusters) == 0:
            this_labels = None
        else:
            this_labels = []
            colors = _n_colors(len(clusters))
            for c, color in zip(clusters, colors):
                idx_use = c
                # each smoothing pass dilates the set by one mesh neighborhood
                for k in range(smooth):
                    e_use = e[:, idx_use]
                    data1 = e_use * np.ones(len(idx_use))
                    idx_use = np.where(data1)[0]
                label = Label(idx_use, this_rr[idx_use], None, hemi,
                              'Label from stc', subject=subject,
                              color=color)
                this_labels.append(label)
            this_labels = this_labels[0]
        labels.append(this_labels)
    return labels
def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
    """Check that two labels match attribute by attribute.

    Parameters
    ----------
    l0, l1 : Label
        The labels to compare.
    decimal : int
        Precision (decimal places) for the array comparisons.
    comment : bool
        If True, also require identical ``comment`` attributes.
    color : bool
        If True, also require identical ``color`` attributes.
    """
    # Optional scalar attributes first.
    if comment:
        assert_equal(l0.comment, l1.comment)
    if color:
        assert_equal(l0.color, l1.color)
    # Mandatory scalar attributes, with an informative failure message.
    for name in ('hemi', 'subject'):
        left, right = getattr(l0, name), getattr(l1, name)
        assert_equal(left, right, "label.%s: %r != %r" % (name, left, right))
    # Array-valued attributes are compared up to `decimal` places.
    for name in ('vertices', 'pos', 'values'):
        assert_array_almost_equal(getattr(l0, name), getattr(l1, name),
                                  decimal)
def test_copy():
    """Test that Label.copy yields a copy independent of the original."""
    original = read_label(label_fname)
    duplicate = original.copy()
    # Mutating the copy's positions must not affect the original.
    duplicate.pos += 1
    assert_array_equal(duplicate.pos - 1, original.pos)
def test_label_subject():
    """Test extraction and display of the label's subject name."""
    # Without an explicit subject the attribute stays unset.
    no_subject = read_label(label_fname)
    assert no_subject.subject is None
    assert 'unknown' in repr(no_subject)
    # Passing subject= stores it and makes it part of the repr.
    with_subject = read_label(label_fname, subject='fsaverage')
    assert with_subject.subject == 'fsaverage'
    assert 'fsaverage' in repr(with_subject)
def test_label_addition():
    """Test label addition.

    Exercises Label.__add__/__sub__ for non-overlapping and overlapping
    vertex sets (values on shared vertices are summed, colors blended)
    and the BiHemiLabel behavior when lh and rh labels are combined.
    """
    pos = np.random.RandomState(0).rand(10, 3)
    values = np.arange(10.) / 10
    idx0 = list(range(7))
    idx1 = list(range(7, 10))  # non-overlapping
    idx2 = list(range(5, 10))  # overlapping
    l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
    l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
    l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
    assert_equal(len(l0), len(idx0))
    # operations with a mismatching subject (or a non-Label) must fail
    l_good = l0.copy()
    l_good.subject = 'sample'
    l_bad = l1.copy()
    l_bad.subject = 'foo'
    pytest.raises(ValueError, l_good.__add__, l_bad)
    pytest.raises(TypeError, l_good.__add__, 'foo')
    pytest.raises(ValueError, l_good.__sub__, l_bad)
    pytest.raises(TypeError, l_good.__sub__, 'foo')
    # adding non-overlapping labels
    l01 = l0 + l1
    assert_equal(len(l01), len(l0) + len(l1))
    assert_array_equal(l01.values[:len(l0)], l0.values)
    assert_equal(l01.color, l0.color)
    # subtraction
    assert_labels_equal(l01 - l0, l1, comment=False, color=False)
    assert_labels_equal(l01 - l1, l0, comment=False, color=False)
    # adding overlapping labels
    l02 = l0 + l2
    i0 = np.where(l0.vertices == 6)[0][0]
    i2 = np.where(l2.vertices == 6)[0][0]
    i = np.where(l02.vertices == 6)[0][0]
    # values on the shared vertex are summed; colors are blended
    assert_equal(l02.values[i], l0.values[i0] + l2.values[i2])
    assert_equal(l02.values[0], l0.values[0])
    assert_array_equal(np.unique(l02.vertices), np.unique(idx0 + idx2))
    assert_equal(l02.color, _blend_colors(l0.color, l2.color))
    # adding lh and rh
    l2.hemi = 'rh'
    bhl = l0 + l2
    assert_equal(bhl.hemi, 'both')
    assert_equal(len(bhl), len(l0) + len(l2))
    assert_equal(bhl.color, l02.color)
    assert ('BiHemiLabel' in repr(bhl))
    # subtraction
    assert_labels_equal(bhl - l0, l2)
    assert_labels_equal(bhl - l2, l0)
    bhl2 = l1 + bhl
    assert_labels_equal(bhl2.lh, l01)
    assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
    assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices)  # rh label
    assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
    pytest.raises(TypeError, bhl.__add__, 5)
    # subtraction
    bhl_ = bhl2 - l1
    assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
    assert_labels_equal(bhl_.rh, bhl.rh)
    assert_labels_equal(bhl2 - l2, l0 + l1)
    assert_labels_equal(bhl2 - l1 - l0, l2)
    # subtracting a BiHemiLabel from itself leaves no vertices
    bhl_ = bhl2 - bhl2
    assert_array_equal(bhl_.vertices, [])
@testing.requires_testing_data
@pytest.mark.parametrize('fname', (real_label_fname, v1_label_fname))
def test_label_fill_restrict(fname):
    """Test label in fill and restrict.

    ``restrict`` keeps only the vertices present in the source space;
    ``fill`` expands them back using the source-space patch info
    (``src[hemi]['nearest']``).
    """
    src = read_source_spaces(src_fname)
    label = read_label(fname)
    # construct label from source space vertices
    label_src = label.restrict(src)
    vert_in_src = label_src.vertices
    values_in_src = label_src.values
    if check_version('scipy', '1.3') and fname == real_label_fname:
        # Check that we can auto-fill patch info quickly for one condition
        for s in src:
            s['nearest'] = None
        with pytest.warns(None):
            label_src = label_src.fill(src)
    else:
        label_src = label_src.fill(src)
    assert src[0]['nearest'] is not None
    # check label vertices: exactly those whose nearest patch seed is in
    # the original label must be present after filling
    vertices_status = np.in1d(src[0]['nearest'], label.vertices)
    vertices_in = np.nonzero(vertices_status)[0]
    vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
    assert_array_equal(label_src.vertices, vertices_in)
    assert_array_equal(np.in1d(vertices_out, label_src.vertices), False)
    # check values: each filled vertex inherits the value of its patch seed
    value_idx = np.digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
    assert_array_equal(label_src.values, values_in_src[value_idx])
    # test exception: vertex -1 can never be part of the source space
    vertices = np.append([-1], vert_in_src)
    with pytest.raises(ValueError, match='does not contain all of the label'):
        Label(vertices, hemi='lh').fill(src)
    # test filling empty label
    label = Label([], hemi='lh')
    label.fill(src)
    assert_array_equal(label.vertices, np.array([], int))
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
    """Test restricting a source estimate to a label read from disk."""
    stc = read_source_estimate(stc_fname)
    label = read_label(real_label_fname)
    stc_label = stc.in_label(label)
    # The restricted estimate keeps all time points but only label vertices.
    data_shape = stc_label.data.shape
    assert len(stc_label.times) == data_shape[1]
    assert len(stc_label.vertices[0]) == data_shape[0]
@testing.requires_testing_data
def test_label_io():
    """Test round-tripping label files through save/read and pickle."""
    tempdir = _TempDir()
    label = read_label(label_fname)
    # Basic attributes of the freshly read label.
    assert_equal(label.name, 'test-lh')
    assert label.subject is None
    assert label.color is None
    # Round-trip through Label.save / read_label.
    label.save(op.join(tempdir, 'foo'))
    reloaded = read_label(op.join(tempdir, 'foo-lh.label'))
    assert_labels_equal(label, reloaded)
    # Round-trip through pickle.
    dest = op.join(tempdir, 'foo.pickled')
    with open(dest, 'wb') as fid:
        pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
    with open(dest, 'rb') as fid:
        unpickled = pickle.load(fid)
    assert_labels_equal(label, unpickled)
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Ensure two sets of labels are equal."""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert (label_a.name == label_b.name)
assert (label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@testing.requires_testing_data
def test_annot_io():
    """Test I/O from and to *.annot files.

    Round-trips parcellations through write_labels_to_annot /
    read_labels_from_annot, including single-label and single-hemisphere
    parcellations, and checks the annotation covers the whole surface.
    """
    # copy necessary files from fsaverage to tempdir
    tempdir = _TempDir()
    subject = 'fsaverage'
    label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
    surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
    label_dir = os.path.join(tempdir, subject, 'label')
    surf_dir = os.path.join(tempdir, subject, 'surf')
    os.makedirs(label_dir)
    os.mkdir(surf_dir)
    shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
    shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
    # read original labels (a misspelled parc name must raise IOError)
    with pytest.raises(IOError, match='\nPALS_B12_Lobes$'):
        read_labels_from_annot(subject, 'PALS_B12_Lobesey',
                               subjects_dir=tempdir)
    labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
                                    subjects_dir=tempdir)
    # test saving parcellation only covering one hemisphere
    parc = [label for label in labels if label.name == 'LOBE.TEMPORAL-lh']
    write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
    parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
    parc1 = [label for label in parc1 if not label.name.startswith('unknown')]
    assert_equal(len(parc1), len(parc))
    for lt, rt in zip(parc1, parc):
        assert_labels_equal(lt, rt)
    # test saving only one hemisphere
    parc = [label for label in labels if label.name.startswith('LOBE')]
    write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
                          subjects_dir=tempdir)
    annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
    assert os.path.isfile(annot_fname % 'l')
    assert not os.path.isfile(annot_fname % 'r')
    parc1 = read_labels_from_annot(subject, 'myparc2',
                                   annot_fname=annot_fname % 'l',
                                   subjects_dir=tempdir)
    parc_lh = [label for label in parc if label.name.endswith('lh')]
    for lt, rt in zip(parc1, parc_lh):
        assert_labels_equal(lt, rt)
    # test that the annotation is complete (test Label() support)
    rr = read_surface(op.join(surf_dir, 'lh.white'))[0]
    # summing all labels must cover every vertex of the lh surface
    label = sum(labels, Label(hemi='lh', subject='fsaverage')).lh
    assert_array_equal(label.vertices, np.arange(len(rr)))
@testing.requires_testing_data
def test_morph_labels():
    """Test morph_labels.

    Morphs fsaverage labels to 'sample' and checks a high vertex overlap
    with the native 'sample' parcellation, plus error handling.
    """
    # Just process the first 5 labels for speed
    parc_fsaverage = read_labels_from_annot(
        'fsaverage', 'aparc', subjects_dir=subjects_dir)[:5]
    parc_sample = read_labels_from_annot(
        'sample', 'aparc', subjects_dir=subjects_dir)[:5]
    parc_fssamp = morph_labels(
        parc_fsaverage, 'sample', subjects_dir=subjects_dir)
    for lf, ls, lfs in zip(parc_fsaverage, parc_sample, parc_fssamp):
        assert lf.hemi == ls.hemi == lfs.hemi
        assert lf.name == ls.name == lfs.name
        # mutual overlap (%) between morphed and native parcellations
        perc_1 = np.in1d(lfs.vertices, ls.vertices).mean() * 100
        perc_2 = np.in1d(ls.vertices, lfs.vertices).mean() * 100
        # Ideally this would be 100%, but we do not use the same algorithm
        # as FreeSurfer ...
        assert perc_1 > 92
        assert perc_2 > 88
    with pytest.raises(ValueError, match='wrong and fsaverage'):
        morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir,
                     subject_from='wrong')
    with pytest.raises(RuntimeError, match='Number of surface vertices'):
        _load_vert_pos('sample', subjects_dir, 'white', 'lh', 1)
    # without a subject on the labels, subject_from is required
    for label in parc_fsaverage:
        label.subject = None
    with pytest.raises(ValueError, match='subject_from must be provided'):
        morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_labels_to_stc():
    """Test labels_to_stc.

    Checks input validation and that each label's vertices receive the
    corresponding scalar value in the resulting source estimate.
    """
    labels = read_labels_from_annot(
        'sample', 'aparc', subjects_dir=subjects_dir)
    values = np.random.RandomState(0).randn(len(labels))
    with pytest.raises(ValueError, match='1 or 2 dim'):
        labels_to_stc(labels, values[:, np.newaxis, np.newaxis])
    with pytest.raises(ValueError, match=r'values\.shape'):
        labels_to_stc(labels, values[np.newaxis])
    with pytest.raises(ValueError, match='multiple values of subject'):
        labels_to_stc(labels, values, subject='foo')
    stc = labels_to_stc(labels, values)
    assert stc.subject == 'sample'
    for value, label in zip(values, labels):
        stc_label = stc.in_label(label)
        assert (stc_label.data == value).all()
    # NOTE(review): this final read is never used; it looks like a leftover
    # or truncated step -- confirm whether further assertions were intended.
    stc = read_source_estimate(stc_fname, 'sample')
@testing.requires_testing_data
def test_read_labels_from_annot(tmpdir):
    """Test reading labels from FreeSurfer parcellation.

    Covers invalid inputs, per-hemisphere reads, reads via an explicit
    annot_fname, and regexp-based label filtering.
    """
    # test some invalid inputs
    pytest.raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
                  subjects_dir=subjects_dir)
    pytest.raises(ValueError, read_labels_from_annot, 'sample',
                  annot_fname='bla.annot', subjects_dir=subjects_dir)
    with pytest.raises(IOError, match='does not exist'):
        _read_annot_cands('foo')
    with pytest.raises(IOError, match='no candidate'):
        _read_annot(str(tmpdir))
    # read labels using hemi specification
    labels_lh = read_labels_from_annot('sample', hemi='lh',
                                       subjects_dir=subjects_dir)
    for label in labels_lh:
        assert label.name.endswith('-lh')
        assert label.hemi == 'lh'
        assert label.color is not None
    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
    labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
                                       subjects_dir=subjects_dir)
    for label in labels_rh:
        assert label.name.endswith('-rh')
        assert label.hemi == 'rh'
        assert label.color is not None
    # combine the lh, rh, labels and sort them by name
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)
    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
    # read all labels at once
    labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)
    # aparc has 68 cortical labels
    assert (len(labels_both) == 68)
    # test regexp
    label = read_labels_from_annot('sample', parc='aparc.a2009s',
                                   regexp='Angu', subjects_dir=subjects_dir)[0]
    assert (label.name == 'G_pariet_inf-Angular-lh')
    # silly, but real regexp:
    label = read_labels_from_annot('sample', 'aparc.a2009s',
                                   regexp='.*-.{4,}_.{3,3}-L',
                                   subjects_dir=subjects_dir)[0]
    assert (label.name == 'G_oc-temp_med-Lingual-lh')
    # a regexp that matches nothing must raise
    pytest.raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
                  annot_fname=annot_fname, regexp='JackTheRipper',
                  subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_labels_from_annot_annot2labels():
    """Compare annot-derived labels against mne_annot2labels output."""
    fnames = sorted(glob.glob(label_dir + '/*.label'))
    labels_mne = [read_label(fname) for fname in fnames]
    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
    # mne_annot2labels does not fill in positions, so skip comparing them
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
@testing.requires_testing_data
def test_write_labels_to_annot():
    """Test writing FreeSurfer parcellation from labels.

    Exercises automatic file naming, single-hemisphere writes, explicit
    annot filenames, color handling (missing/duplicate/invalid),
    overlapping labels, unlabeled vertices, and unnamed labels.
    """
    tempdir = _TempDir()
    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
    # create temporary subjects-dir skeleton
    surf_dir = op.join(subjects_dir, 'sample', 'surf')
    temp_surf_dir = op.join(tempdir, 'sample', 'surf')
    os.makedirs(temp_surf_dir)
    shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
    shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
    os.makedirs(op.join(tempdir, 'sample', 'label'))
    # test automatic filenames
    dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
    write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
    assert (op.exists(dst % ('lh', 'test1')))
    assert (op.exists(dst % ('rh', 'test1')))
    # lh only
    for label in labels:
        if label.hemi == 'lh':
            break
    # writing a single-hemi label still creates both annot files
    write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
    assert (op.exists(dst % ('lh', 'test2')))
    assert (op.exists(dst % ('rh', 'test2')))
    # rh only
    for label in labels:
        if label.hemi == 'rh':
            break
    write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
    assert (op.exists(dst % ('lh', 'test3')))
    assert (op.exists(dst % ('rh', 'test3')))
    # label alone (a bare Label instead of a list must raise TypeError)
    pytest.raises(TypeError, write_labels_to_annot, labels[0], 'sample',
                  'test4', subjects_dir=tempdir)
    # write left and right hemi labels with filenames:
    fnames = [op.join(tempdir, hemi + '-myparc') for hemi in ['lh', 'rh']]
    for fname in fnames:
        with pytest.warns(RuntimeWarning, match='subjects_dir'):
            write_labels_to_annot(labels, annot_fname=fname)
    # read it back
    labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                     annot_fname=fnames[0])
    labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                      annot_fname=fnames[1])
    labels2.extend(labels22)
    names = [label.name for label in labels2]
    for label in labels:
        idx = names.index(label.name)
        assert_labels_equal(label, labels2[idx])
    # same with label-internal colors
    for fname in fnames:
        write_labels_to_annot(labels, 'sample', annot_fname=fname,
                              overwrite=True, subjects_dir=subjects_dir)
    labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                     annot_fname=fnames[0])
    labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                      annot_fname=fnames[1])
    labels3.extend(labels33)
    names3 = [label.name for label in labels3]
    for label in labels:
        idx = names3.index(label.name)
        assert_labels_equal(label, labels3[idx])
    # make sure we can't overwrite things
    pytest.raises(ValueError, write_labels_to_annot, labels, 'sample',
                  annot_fname=fnames[0], subjects_dir=subjects_dir)
    # however, this works
    write_labels_to_annot(labels, 'sample', annot_fname=fnames[0],
                          overwrite=True, subjects_dir=subjects_dir)
    # label without color
    labels_ = labels[:]
    labels_[0] = labels_[0].copy()
    labels_[0].color = None
    write_labels_to_annot(labels_, 'sample', annot_fname=fnames[0],
                          overwrite=True, subjects_dir=subjects_dir)
    # duplicate color
    labels_[0].color = labels_[2].color
    pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
                  annot_fname=fnames[0], overwrite=True,
                  subjects_dir=subjects_dir)
    # invalid color inputs
    labels_[0].color = (1.1, 1., 1., 1.)
    pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
                  annot_fname=fnames[0], overwrite=True,
                  subjects_dir=subjects_dir)
    # overlapping labels
    labels_ = labels[:]
    cuneus_lh = labels[6]
    precuneus_lh = labels[50]
    labels_.append(precuneus_lh + cuneus_lh)
    pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
                  annot_fname=fnames[0], overwrite=True,
                  subjects_dir=subjects_dir)
    # unlabeled vertices: dropped vertices come back as 'unknown-lh'
    labels_lh = [label for label in labels if label.name.endswith('lh')]
    write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
                          overwrite=True, subjects_dir=subjects_dir)
    labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
                                             subjects_dir=subjects_dir)
    assert_equal(len(labels_lh), len(labels_reloaded))
    label0 = labels_lh[0]
    label1 = labels_reloaded[-1]
    assert_equal(label1.name, "unknown-lh")
    assert (np.all(np.in1d(label0.vertices, label1.vertices)))
    # unnamed labels
    # NOTE(review): labels4 is a shallow copy, so setting the name to None
    # also mutates labels[0]; harmless here as it is the last use -- confirm.
    labels4 = labels[:]
    labels4[0].name = None
    pytest.raises(ValueError, write_labels_to_annot, labels4,
                  annot_fname=fnames[0])
@requires_sklearn
@testing.requires_testing_data
def test_split_label():
    """Test splitting labels.

    Checks named two-way splits, the Label.split method, FreeSurfer-like
    splitting, and contiguous-component splitting.
    """
    aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
                                   regexp='lingual', subjects_dir=subjects_dir)
    lingual = aparc[0]
    # Test input error
    pytest.raises(ValueError, lingual.split, 'bad_input_string')
    # split with names
    parts = ('lingual_post', 'lingual_ant')
    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
    # check output names
    assert_equal(post.name, parts[0])
    assert_equal(ant.name, parts[1])
    # check vertices add up: the two parts must reconstruct the original
    lingual_reconst = post + ant
    lingual_reconst.name = lingual.name
    lingual_reconst.comment = lingual.comment
    lingual_reconst.color = lingual.color
    assert_labels_equal(lingual_reconst, lingual)
    # compare output of Label.split() method
    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
    assert_labels_equal(post1, post)
    assert_labels_equal(ant1, ant)
    # compare fs_like split with freesurfer split
    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
    # reference vertices produced by FreeSurfer for the same split
    fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
               32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
               71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
               107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
               139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
    assert_array_equal(antmost.vertices, fs_vert)
    # check default label name
    assert_equal(antmost.name, "lingual_div40-lh")
    # Apply contiguous splitting to DMN label from parcellation in Yeo, 2011
    label_default_mode = read_label(op.join(subjects_dir, 'fsaverage', 'label',
                                            'lh.7Networks_7.label'))
    DMN_sublabels = label_default_mode.split(parts='contiguous',
                                             subject='fsaverage',
                                             subjects_dir=subjects_dir)
    assert_equal([len(label.vertices) for label in DMN_sublabels],
                 [16181, 7022, 5965, 5300, 823] + [1] * 23)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_sklearn
def test_stc_to_label():
    """Test stc_to_label.

    Compares the private and public implementations, checks connected
    (smoothed) output, error handling, and the smooth='patch' path.
    """
    src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    # subject-name and SourceSpaces variants must agree
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                            connected=True)
    pytest.raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                  connected=True)
    pytest.raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)
    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
    pytest.raises(ValueError, spatial_tris_adjacency, tris,
                  remap_vertices=False)
    adjacency = spatial_tris_adjacency(tris, remap_vertices=True)
    assert (adjacency.shape[0] == len(stc.vertices[0]))
    # "src" as a subject name
    pytest.raises(TypeError, stc_to_label, stc, src=1, smooth=False,
                  connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
                  smooth=False, connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
                  connected=True, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
                  connected=False, subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert (len(labels_lh) > 1)
    assert (len(labels_rh) > 1)
    # with smooth='patch'
    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert len(labels_patch) == len(labels1)
    # NOTE(review): this loop re-compares labels1 with labels2 (already
    # checked above); presumably labels_patch was intended -- confirm.
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morph():
    """Test inter-subject label morphing.

    Morphs a label sample -> fsaverage -> sample for several grade
    specifications and checks the round trip covers the original vertices.
    """
    label_orig = read_label(real_label_fname)
    label_orig.subject = 'sample'
    # should work for specifying vertices for both hemis, or just the
    # hemi of the given label
    vals = list()
    for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
        label = label_orig.copy()
        # this should throw an error because the label has all zero values
        pytest.raises(ValueError, label.morph, 'sample', 'fsaverage')
        label.values.fill(1)
        label = label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1)
        label = label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2)
        assert (np.in1d(label_orig.vertices, label.vertices).all())
        # the round trip must not blow the label up too much
        assert (len(label.vertices) < 3 * len(label_orig.vertices))
        vals.append(label.vertices)
    # the different grade specifications must give identical vertices
    assert_array_equal(vals[0], vals[1])
    # make sure label smoothing can run
    assert_equal(label.subject, 'sample')
    verts = [np.arange(10242), np.arange(10242)]
    for hemi in ['lh', 'rh']:
        label.hemi = hemi
        with pytest.warns(None):  # morph map maybe missing
            label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
    # invalid subject_to and non-integer smoothing must raise TypeError
    pytest.raises(TypeError, label.morph, None, 1, 5, verts,
                  subjects_dir, 2)
    pytest.raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
                  subjects_dir, 2)
    with pytest.warns(None):  # morph map maybe missing
        label.smooth(subjects_dir=subjects_dir)  # make sure this runs
@testing.requires_testing_data
def test_grow_labels():
    """Test generation of circular source labels.

    Grows labels from seed vertices, then checks naming, color
    assignment, and the overlap=True/False behavior.
    """
    seeds = [0, 50000]
    # these were chosen manually in mne_analyze
    should_be_in = [[49, 227], [51207, 48794]]
    hemis = [0, 1]
    names = ['aneurism', 'tumor']
    labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
    tgt_names = ['aneurism-lh', 'tumor-rh']
    tgt_hemis = ['lh', 'rh']
    for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
                                           should_be_in, tgt_names):
        assert (np.any(label.vertices == seed))
        assert (np.all(np.in1d(sh, label.vertices)))
        assert_equal(label.hemi, hemi)
        assert_equal(label.name, name)
    # grow labels with and without overlap
    seeds = [57532, [58887, 6304]]
    l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
    seeds = [57532, [58887, 6304]]
    l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                           overlap=False)
    # test label naming
    assert_equal(l01.name, 'Label_0-lh')
    assert_equal(l02.name, 'Label_1-lh')
    assert_equal(l11.name, 'Label_0-lh')
    assert_equal(l12.name, 'Label_1-lh')
    # test color assignment (defaults come from _n_colors)
    l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                               overlap=False, colors=None)
    assert_equal(l11_c.color, _n_colors(2)[0])
    assert_equal(l12_c.color, _n_colors(2)[1])
    # explicit per-label colors
    lab_colors = np.array([[0, 0, 1, 1], [1, 0, 0, 1]])
    l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                               overlap=False, colors=lab_colors)
    assert_array_equal(l11_c.color, lab_colors[0, :])
    assert_array_equal(l12_c.color, lab_colors[1, :])
    # a single color is applied to all labels
    lab_colors = np.array([.1, .2, .3, .9])
    l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                               overlap=False, colors=lab_colors)
    assert_array_equal(l11_c.color, lab_colors)
    assert_array_equal(l12_c.color, lab_colors)
    # make sure set 1 does not overlap
    overlap = np.intersect1d(l11.vertices, l12.vertices, True)
    assert_array_equal(overlap, [])
    # make sure both sets cover the same vertices
    l0 = l01 + l02
    l1 = l11 + l12
    assert_array_equal(l1.vertices, l0.vertices)
@testing.requires_testing_data
def test_random_parcellation():
    """Test generation of random cortical parcellation.

    Checks that the parcellation has the requested number of labels and
    that per hemisphere the labels are non-empty, disjoint, and cover
    the full surface.
    """
    hemi = 'both'
    n_parcel = 50
    surface = 'sphere.reg'
    subject = 'sample_ds'
    rng = np.random.RandomState(0)
    # Parcellation
    labels = random_parcellation(subject, n_parcel, hemi, subjects_dir,
                                 surface=surface, random_state=rng)
    # test number of labels
    assert_equal(len(labels), n_parcel)
    if hemi == 'both':
        hemi = ['lh', 'rh']
    hemis = np.atleast_1d(hemi)
    for hemi in set(hemis):
        vertices_total = []
        for label in labels:
            if label.hemi == hemi:
                # test that labels are not empty
                assert (len(label.vertices) > 0)
                # vertices of hemi covered by labels
                vertices_total = np.append(vertices_total, label.vertices)
        # test that labels don't intersect
        assert_equal(len(np.unique(vertices_total)), len(vertices_total))
        surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
                             surface)
        vert, _ = read_surface(surf_fname)
        # Test that labels cover whole surface
        assert_array_equal(np.sort(vertices_total), np.arange(len(vert)))
@testing.requires_testing_data
def test_label_sign_flip():
    """Test label sign flip computation.

    Sets known normals on the source space and verifies the computed
    flips align with them, for single- and bi-hemisphere labels.
    """
    src = read_source_spaces(src_fname)
    label = Label(vertices=src[0]['vertno'][:5], hemi='lh')
    src[0]['nn'][label.vertices] = np.array(
        [[1., 0., 0.],
         [0., 1., 0.],
         [0, 0, 1.],
         [1. / np.sqrt(2), 1. / np.sqrt(2), 0.],
         [1. / np.sqrt(2), 1. / np.sqrt(2), 0.]])
    known_flips = np.array([1, 1, np.nan, 1, 1])
    idx = [0, 1, 3, 4]  # indices that are usable (third row is orthogonal)
    flip = label_sign_flip(label, src)
    assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), len(idx))
    bi_label = label + Label(vertices=src[1]['vertno'][:5], hemi='rh')
    # negated rh normals: overall agreement should cancel to zero
    src[1]['nn'][src[1]['vertno'][:5]] = -src[0]['nn'][label.vertices]
    flip = label_sign_flip(bi_label, src)
    known_flips = np.array([1, 1, np.nan, 1, 1, 1, 1, np.nan, 1, 1])
    idx = [0, 1, 3, 4, 5, 6, 8, 9]
    assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), 0.)
    # flipping the rh normals back restores full agreement
    src[1]['nn'][src[1]['vertno'][:5]] *= -1
    flip = label_sign_flip(bi_label, src)
    assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), len(idx))
@testing.requires_testing_data
def test_label_center_of_mass():
    """Test computing the center of mass of a label.

    Compares against the STC center of mass, checks restrict_vertices
    variants, value validation, and degenerate inputs.
    """
    stc = read_source_estimate(stc_fname)
    stc.lh_data[:] = 0
    vertex_stc = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
    assert_equal(vertex_stc, 124791)
    label = Label(stc.vertices[1], pos=None, values=stc.rh_data.mean(axis=1),
                  hemi='rh', subject='sample')
    vertex_label = label.center_of_mass(subjects_dir=subjects_dir)
    assert_equal(vertex_label, vertex_stc)
    labels = read_labels_from_annot('sample', parc='aparc.a2009s',
                                    subjects_dir=subjects_dir)
    src = read_source_spaces(src_fname)
    # Try a couple of random ones, one from left and one from right
    # Visually verified in about the right place using mne_analyze
    for label, expected in zip([labels[2], labels[3], labels[-5]],
                               [141162, 145221, 55979]):
        # non-positive values must be rejected
        label.values[:] = -1
        pytest.raises(ValueError, label.center_of_mass,
                      subjects_dir=subjects_dir)
        label.values[:] = 0
        pytest.raises(ValueError, label.center_of_mass,
                      subjects_dir=subjects_dir)
        label.values[:] = 1
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir), expected)
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
                                          restrict_vertices=label.vertices),
                     expected)
        # restrict to source space
        idx = 0 if label.hemi == 'lh' else 1
        # this simple nearest version is not equivalent, but is probably
        # close enough for many labels (including the test ones):
        pos = label.pos[np.where(label.vertices == expected)[0][0]]
        pos = (src[idx]['rr'][src[idx]['vertno']] - pos)
        pos = np.argmin(np.sum(pos * pos, axis=1))
        src_expected = src[idx]['vertno'][pos]
        # see if we actually get the same one
        src_restrict = np.intersect1d(label.vertices, src[idx]['vertno'])
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
                                          restrict_vertices=src_restrict),
                     src_expected)
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
                                          restrict_vertices=src),
                     src_expected)
    # degenerate cases
    pytest.raises(ValueError, label.center_of_mass, subjects_dir=subjects_dir,
                  restrict_vertices='foo')
    pytest.raises(TypeError, label.center_of_mass, subjects_dir=subjects_dir,
                  surf=1)
    pytest.raises(IOError, label.center_of_mass, subjects_dir=subjects_dir,
                  surf='foo')
# Allow running this module as a script to execute its tests.
# NOTE(review): this call appears before test_select_sources is defined, so
# script-mode execution would not pick up that test -- confirm placement.
run_tests_if_main()
@testing.requires_testing_data
def test_select_sources():
    """Test the selection of sources for simulation.

    Checks extent=0 single-source selection, monotone growth with
    extent, and the grow_outside flag.
    """
    subject = 'sample'
    label_file = op.join(subjects_dir, subject, 'label', 'aparc',
                         'temporalpole-rh.label')
    # Regardless of other parameters, using extent 0 should always yield
    # a single source.
    tp_label = read_label(label_file)
    tp_label.values[:] = 1
    labels = ['lh', tp_label]
    locations = ['random', 'center']
    for label, location in product(labels, locations):
        label = select_sources(
            subject, label, location, extent=0, subjects_dir=subjects_dir)
        assert (len(label.vertices) == 1)
    # As we increase the extent, the new region should contain the previous
    # one.
    label = select_sources(subject, 'lh', 0, extent=0,
                           subjects_dir=subjects_dir)
    for extent in range(1, 3):
        new_label = select_sources(subject, 'lh', 0, extent=extent * 2,
                                   subjects_dir=subjects_dir)
        assert (set(new_label.vertices) > set(label.vertices))
        assert (new_label.hemi == 'lh')
        label = new_label
    # With a large enough extent and not allowing growing outside the label,
    # every vertex of the label should be in the region.
    label = select_sources(subject, tp_label, 0, extent=30,
                           grow_outside=False, subjects_dir=subjects_dir)
    assert (set(label.vertices) == set(tp_label.vertices))
    # Without this restriction, we should get new vertices.
    label = select_sources(subject, tp_label, 0, extent=30,
                           grow_outside=True, subjects_dir=subjects_dir)
    assert (set(label.vertices) > set(tp_label.vertices))
    # Other parameters are taken into account.
    label = select_sources(subject, tp_label, 0, extent=10,
                           grow_outside=False, subjects_dir=subjects_dir,
                           name='mne')
    assert (label.name == 'mne')
    assert (label.hemi == 'rh')
| bsd-3-clause |
Divergent914/kddcup2015 | modeling.py | 1 | 8187 | #! /usr/local/bin/python3
# -*- utf-8 -*-
"""
Generate model with respect to dataset.
"""
import logging
import sys
import util
import dataset
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
format='%(asctime)s %(name)s %(levelname)s\t%(message)s')
logger = logging.getLogger('modeling')
def auc_score(clf, X, y):
    """Return the ROC-AUC of ``clf`` on (X, y).

    Scores with the classifier's predicted probability of the positive
    class (column 1 of ``predict_proba``).
    """
    from sklearn.metrics import roc_auc_score
    positive_proba = clf.predict_proba(X)[:, 1]
    return roc_auc_score(y, positive_proba)
def to_submission(clf, filename):
    """Write a submission CSV of per-enrollment predicted probabilities.

    The file is placed under ``submission/``; names not ending in
    ``.csv`` are suffixed with ``.not-submitted.csv`` so they cannot be
    uploaded by accident.
    """
    path = filename
    if not path.startswith('submission/'):
        path = 'submission/' + path
    if not path.endswith('.csv'):
        path += '.not-submitted.csv'
    enroll_ids = util.load_enrollment_test()['enrollment_id']
    X_test = dataset.load_test()
    proba = clf.predict_proba(X_test)[:, 1]
    # One "enrollment_id,probability" row per test enrollment.
    with open(path, 'w') as f:
        f.writelines('%d,%f\n' % pair for pair in zip(enroll_ids, proba))
def lr():
    """Logistic regression baseline on raw features.

    Submission: lr_0618.csv
    E_val: <missing>
    E_in: <missing>
    E_out: 0.8119110960575004
    """
    from sklearn.linear_model import LogisticRegressionCV
    train_X = util.fetch(util.cache_path('train_X_before_2014-08-01_22-00-47'))
    train_y = util.fetch(util.cache_path('train_y_before_2014-08-01_22-00-47'))
    # 10-fold CV over the regularization path, selecting on ROC-AUC.
    clf = LogisticRegressionCV(cv=10, scoring='roc_auc', n_jobs=-1)
    clf.fit(train_X, train_y)
    print(auc_score(clf, train_X, train_y))
    to_submission(clf, 'lr_0618_xxx')
def lr_with_scale():
    """Logistic regression on standardized features.

    Submission: lr_with_scale_0620_04.csv
    E_val: <missing>
    E_in: 0.857351105162
    E_out: 0.854097855439904
    """
    from sklearn.linear_model import LogisticRegressionCV
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    train_X = util.fetch(util.cache_path('train_X_before_2014-08-01_22-00-47'))
    train_y = util.fetch(util.cache_path('train_y_before_2014-08-01_22-00-47'))
    # Standardize features before fitting the cross-validated model.
    scaler = StandardScaler()
    scaler.fit(train_X)
    scaled_X = scaler.transform(train_X)
    clf = LogisticRegressionCV(cv=10, scoring='roc_auc', n_jobs=-1)
    clf.fit(scaled_X, train_y)
    print(auc_score(clf, scaled_X, train_y))
    # Ship scaler + classifier together so test data is scaled identically.
    model = Pipeline([('scale_raw', scaler), ('lr', clf)])
    to_submission(model, 'lr_with_scale_0620_04')
def lr_with_fs():
    """Logistic regression on RFE-selected, re-standardized features.

    Submission: lr_with_fs_0620_02.csv
    E_val: <missing>
    E_in: 0.856252488379
    E_out: 0.8552577388980213
    """
    from sklearn.linear_model import LogisticRegressionCV
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    X = util.fetch(util.cache_path('train_X_before_2014-08-01_22-00-47'))
    y = util.fetch(util.cache_path('train_y_before_2014-08-01_22-00-47'))

    raw_scaler = StandardScaler()
    X_scaled = raw_scaler.fit_transform(X)

    # Reuse the RFE selector fitted (and cached) by svc_1().
    rfe = util.fetch(util.cache_path('feature_selection.RFE.21'))
    X_pruned = rfe.transform(X_scaled)

    new_scaler = StandardScaler()
    X_new = new_scaler.fit_transform(X_pruned)

    clf = LogisticRegressionCV(cv=10, scoring='roc_auc', n_jobs=-1)
    clf.fit(X_new, y)
    print(auc_score(clf, X_new, y))

    to_submission(Pipeline([('scale_raw', raw_scaler),
                            ('rfe', rfe),
                            ('scale_new', new_scaler),
                            ('lr', clf)]), 'lr_with_fs_0620_02')
def svc_1():
    """
    Linear SVC on RFE-selected features, probability-calibrated.

    Side effects: caches the fitted RFE selector, the best SVC and the
    calibrated classifier via util.dump() for reuse by other models.

    Submission: svc_1_0620_01.csv
    E_val: 0.866856950449
    E_in: 0.855948
    E_out: 0.8546898189645258
    """
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import LinearSVC
    from sklearn.cross_validation import StratifiedKFold
    from sklearn.feature_selection import RFE
    from sklearn.grid_search import RandomizedSearchCV
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.linear_model import LogisticRegression
    from scipy.stats import expon

    logger.debug('svc_1')

    X = util.fetch(util.cache_path('train_X_before_2014-08-01_22-00-47'))
    y = util.fetch(util.cache_path('train_y_before_2014-08-01_22-00-47'))

    # Standardize the raw features before feature selection.
    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)

    # Recursive feature elimination down to 21 features, driven by a
    # class-weighted logistic regression; cached for reuse (lr_with_fs, sgd).
    rfe = RFE(estimator=LogisticRegression(class_weight='auto'), step=1,
              n_features_to_select=21)
    rfe.fit(X_scaled, y)
    util.dump(rfe, util.cache_path('feature_selection.RFE.21'))
    X_pruned = rfe.transform(X_scaled)

    logger.debug('Features selected.')

    # Re-standardize the pruned feature matrix.
    new_scaler = StandardScaler()
    new_scaler.fit(X_pruned)
    X_new = new_scaler.transform(X_pruned)

    # Random search over the SVC regularization strength, C ~ Exp(1).
    svc = LinearSVC(dual=False, class_weight='auto')
    rs = RandomizedSearchCV(svc, n_iter=50, scoring='roc_auc', n_jobs=-1,
                            cv=StratifiedKFold(y, 5),
                            param_distributions={'C': expon()})
    rs.fit(X_new, y)

    logger.debug('Got best SVC.')
    logger.debug('Grid scores: %s', rs.grid_scores_)
    logger.debug('Best score (E_val): %s', rs.best_score_)
    logger.debug('Best params: %s', rs.best_params_)

    svc = rs.best_estimator_
    util.dump(svc, util.cache_path('new_data.SVC'))

    # Calibrate decision values into probabilities (predict_proba is
    # needed both for auc_score and for the submission output).
    isotonic = CalibratedClassifierCV(svc, cv=StratifiedKFold(y, 5),
                                      method='isotonic')
    isotonic.fit(X_new, y)
    util.dump(isotonic,
              util.cache_path('new_data.CalibratedClassifierCV.isotonic'))
    logger.debug('Got best isotonic CalibratedClassifier.')
    logger.debug('E_in (isotonic): %f', auc_score(isotonic, X_new, y))

    to_submission(Pipeline([('scale_raw', raw_scaler),
                            ('rfe', rfe),
                            ('scale_new', new_scaler),
                            ('svc', isotonic)]), 'svc_1_0620_01')
def sgd():
    """
    SGD classifier on the RFE-selected features, grid-searched over the
    loss function only.

    Submission: sgd_0620_03.csv
    E_val: 0.863628
    E_in: 0.854373
    E_out:
    """
    from sklearn.linear_model import SGDClassifier
    from sklearn.preprocessing import StandardScaler
    from sklearn.pipeline import Pipeline
    from sklearn.grid_search import GridSearchCV
    from sklearn.cross_validation import StratifiedKFold

    X = util.fetch(util.cache_path('train_X_before_2014-08-01_22-00-47'))
    y = util.fetch(util.cache_path('train_y_before_2014-08-01_22-00-47'))

    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)

    # Reuse the RFE selector fitted and cached by svc_1().
    rfe = util.fetch(util.cache_path('feature_selection.RFE.21'))
    X_pruned = rfe.transform(X_scaled)

    new_scaler = StandardScaler()
    new_scaler.fit(X_pruned)
    X_new = new_scaler.transform(X_pruned)

    # Grid-search only over the loss; other SGD params are left default.
    sgd = SGDClassifier(n_iter=50, n_jobs=-1)
    params = {
        'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge',
                 'perceptron', 'squared_loss', 'huber', 'epsilon_insensitive',
                 'squared_epsilon_insensitive']
    }
    grid = GridSearchCV(sgd, param_grid=params, cv=StratifiedKFold(y, 5),
                        scoring='roc_auc', n_jobs=-1)
    grid.fit(X_new, y)

    logger.debug('Best score (E_val): %f', grid.best_score_)

    sgd = grid.best_estimator_
    logger.debug('E_in: %f', auc_score(sgd, X_new, y))

    to_submission(Pipeline([('scale_raw', raw_scaler),
                            ('rfe', rfe),
                            ('scale_new', new_scaler),
                            ('sgd', sgd)]), 'sgd_0620_03')
def dt():
    """Depth-limited decision tree, mainly for interpretability.

    Submission: dt_0620_05.csv
    E_val: 0.820972
    E_in: 0.835177
    E_out:
    Comment: {'max_depth': 5}
    """
    from sklearn.tree import DecisionTreeClassifier, export_graphviz

    X = util.fetch(util.cache_path('train_X_before_2014-08-01_22-00-47'))
    y = util.fetch(util.cache_path('train_y_before_2014-08-01_22-00-47'))

    tree = DecisionTreeClassifier(max_depth=5, class_weight='auto')
    tree.fit(X, y)
    # Dump the fitted tree for graphviz inspection.
    export_graphviz(tree, 'tree.dot')

    logger.debug('E_in: %f', auc_score(tree, X, y))
    to_submission(tree, 'dt_0620_05')
if __name__ == '__main__':
    from inspect import isfunction
    variables = locals()
    if len(sys.argv) > 1:
        # Run each model function named on the command line.
        for fn in sys.argv[1:]:
            if fn not in variables or not isfunction(variables[fn]):
                print('function %s not found' % repr(fn))
                # Bug fix: previously execution fell through and still
                # called variables[fn](), raising KeyError/TypeError for
                # unknown names instead of just reporting them.
                continue
            variables[fn]()
| gpl-2.0 |
martinggww/lucasenlights | MachineLearning/DataScience/SparkKMeans.py | 3 | 1894 | from pyspark.mllib.clustering import KMeans
from numpy import array, random
from math import sqrt
from pyspark import SparkConf, SparkContext
from sklearn.preprocessing import scale
# Number of clusters used both to fabricate the data and to fit KMeans.
K = 5
# Boilerplate Spark stuff:
conf = SparkConf().setMaster("local").setAppName("SparkKMeans")
sc = SparkContext(conf = conf)
#Create fake income/age clusters for N people in k clusters
def createClusteredData(N, k):
    """Fabricate (income, age) samples grouped around k random centroids.

    Returns an array of shape (k * (N // k), 2); the fixed seed makes
    the data reproducible across calls.
    """
    random.seed(10)
    per_cluster = int(float(N) / k)
    points = []
    for _ in range(k):
        income_center = random.uniform(20000.0, 200000.0)
        age_center = random.uniform(20.0, 70.0)
        points.extend(
            [random.normal(income_center, 10000.0),
             random.normal(age_center, 2.0)]
            for _ in range(per_cluster))
    return array(points)
# Load the data; note I am normalizing it with scale() - very important!
data = sc.parallelize(scale(createClusteredData(100, K)))
# Build the model (cluster the data)
clusters = KMeans.train(data, K, maxIterations=10,
                        runs=10, initializationMode="random")
# Print out the cluster assignments (cache: the RDD is reused below).
resultRDD = data.map(lambda point: clusters.predict(point)).cache()
print "Counts by value:"
counts = resultRDD.countByValue()
print counts
print "Cluster assignments:"
results = resultRDD.collect()
print results
def error(point):
    """Distance from *point* to the center of the cluster it falls in."""
    nearest = clusters.centers[clusters.predict(point)]
    delta = point - nearest
    return sqrt(sum([d ** 2 for d in delta]))
# Evaluate clustering: total within-set distance, summed over all points.
WSSSE = data.map(lambda point: error(point)).reduce(lambda x, y: x + y)
print("Within Set Sum of Squared Error = " + str(WSSSE))
# Things to try:
# What happens to WSSSE as you increase or decrease K? Why?
# What happens if you don't normalize the input data before clustering?
# What happens if you change the maxIterations or runs parameters?
| cc0-1.0 |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False

# Shell command chaining differs between Windows and POSIX shells.
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'

def dvipng_hack_alpha():
    """Return True when the installed dvipng is older than 1.6.

    Those versions bake the background into the RGB channels instead of
    producing a real alpha channel; get_grey() works around this.
    """
    stdin, stdout = os.popen4('dvipng -version')
    for line in stdout:
        if line.startswith('dvipng '):
            version = line.split()[-1]
            mpl.verbose.report('Found dvipng version %s'% version,
                               'helpful')
            version = distutils.version.LooseVersion(version)
            return version < distutils.version.LooseVersion('1.6')
    raise RuntimeError('Could not obtain dvipng version')
class TexManager:
    """
    Convert strings to dvi files using TeX, caching the results to a
    working dir
    """

    # Migrate a cache dir from the pre-configdir location, if present.
    oldpath = mpl.get_home()
    if oldpath is None: oldpath = mpl.get_data_path()
    oldcache = os.path.join(oldpath, '.tex.cache')

    configdir = mpl.get_configdir()
    texcache = os.path.join(configdir, 'tex.cache')

    if os.path.exists(oldcache):
        print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
        shutil.move(oldcache, texcache)
    if not os.path.exists(texcache):
        os.mkdir(texcache)

    # Whether the installed dvipng needs the alpha-channel workaround.
    _dvipng_hack_alpha = dvipng_hack_alpha()

    # mappable cache of
    rgba_arrayd = {}
    grey_arrayd = {}
    postscriptd = {}
    pscnt = 0

    # (tex font code, required preamble) per generic family.
    serif = ('cmr', '')
    sans_serif = ('cmss', '')
    monospace = ('cmtt', '')
    cursive = ('pzc', r'\usepackage{chancery}')
    font_family = 'serif'
    font_families = ('serif', 'sans-serif', 'cursive', 'monospace')

    # Map of known font names to (tex font code, preamble command).
    font_info = {'new century schoolbook': ('pnc',
                                            r'\renewcommand{\rmdefault}{pnc}'),
                 'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
                 'times': ('ptm', r'\usepackage{mathptmx}'),
                 'palatino': ('ppl', r'\usepackage{mathpazo}'),
                 'zapf chancery': ('pzc', r'\usepackage{chancery}'),
                 'cursive': ('pzc', r'\usepackage{chancery}'),
                 'charter': ('pch', r'\usepackage{charter}'),
                 'serif': ('cmr', ''),
                 'sans-serif': ('cmss', ''),
                 'helvetica': ('phv', r'\usepackage{helvet}'),
                 'avant garde': ('pag', r'\usepackage{avant}'),
                 'courier': ('pcr', r'\usepackage{courier}'),
                 'monospace': ('cmtt', ''),
                 'computer modern roman': ('cmr', ''),
                 'computer modern sans serif': ('cmss', ''),
                 'computer modern typewriter': ('cmtt', '')}

    # rcParams watched by get_font_config() to trigger re-initialization.
    _rc_cache = None
    _rc_cache_keys = ('text.latex.preamble', )\
                     + tuple(['font.'+n for n in ('family', ) + font_families])

    def __init__(self):
        if not os.path.isdir(self.texcache):
            os.mkdir(self.texcache)
        # Pick the active font family from rcParams, defaulting to serif.
        ff = rcParams['font.family'].lower()
        if ff in self.font_families:
            self.font_family = ff
        else:
            mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
            self.font_family = 'serif'

        # For each generic family, find the first rcParams font we know
        # a LaTeX incantation for; fall back to the family default.
        fontconfig = [self.font_family]
        for font_family, font_family_attr in \
            [(ff, ff.replace('-', '_')) for ff in self.font_families]:
            for font in rcParams['font.'+font_family]:
                if font.lower() in self.font_info:
                    found_font = self.font_info[font.lower()]
                    setattr(self, font_family_attr,
                            self.font_info[font.lower()])
                    if DEBUG:
                        print 'family: %s, font: %s, info: %s'%(font_family,
                                    font, self.font_info[font.lower()])
                    break
                else:
                    # NOTE(review): '$s' looks like a typo for a '%s'
                    # interpolation of the font name -- confirm upstream.
                    if DEBUG: print '$s font is not compatible with usetex'
            else:
                mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
                setattr(self, font_family_attr, self.font_info[font_family])
            fontconfig.append(getattr(self, font_family_attr)[0])
        self._fontconfig = ''.join(fontconfig)

        # The following packages and commands need to be included in the latex
        # file's preamble:
        cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
        if self.font_family == 'cursive': cmd.append(self.cursive[1])
        # type1cm is added once, unconditionally, below.
        while r'\usepackage{type1cm}' in cmd:
            cmd.remove(r'\usepackage{type1cm}')
        cmd = '\n'.join(cmd)
        self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
                                         r'\usepackage{textcomp}'])

    def get_basefile(self, tex, fontsize, dpi=None):
        """
        returns a filename based on a hash of the string, fontsize, and dpi
        """
        s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
                     self.get_custom_preamble(), str(dpi or '')])
        # make sure hash is consistent for all strings, regardless of encoding:
        bytes = unicode(s).encode('utf-8')
        return os.path.join(self.texcache, md5(bytes).hexdigest())

    def get_font_config(self):
        """Reinitializes self if relevant rcParams on have changed."""
        if self._rc_cache is None:
            self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
        changed = [par for par in self._rc_cache_keys if rcParams[par] != \
                   self._rc_cache[par]]
        if changed:
            if DEBUG: print 'DEBUG following keys changed:', changed
            for k in changed:
                if DEBUG:
                    print 'DEBUG %-20s: %-10s -> %-10s' % \
                            (k, self._rc_cache[k], rcParams[k])
                # deepcopy may not be necessary, but feels more future-proof
                self._rc_cache[k] = copy.deepcopy(rcParams[k])
            if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
            self.__init__()
        if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
        return self._fontconfig

    def get_font_preamble(self):
        """
        returns a string containing font configuration for the tex preamble
        """
        return self._font_preamble

    def get_custom_preamble(self):
        """returns a string containing user additions to the tex preamble"""
        return '\n'.join(rcParams['text.latex.preamble'])

    def _get_shell_cmd(self, *args):
        """
        On windows, changing directories can be complicated by the presence of
        multiple drives. get_shell_cmd deals with this issue.
        """
        if sys.platform == 'win32':
            command = ['%s'% os.path.splitdrive(self.texcache)[0]]
        else:
            command = []
        command.extend(args)
        return ' && '.join(command)

    def make_tex(self, tex, fontsize):
        """
        Generate a tex file to render the tex string at a specific font size

        returns the file name
        """
        basefile = self.get_basefile(tex, fontsize)
        texfile = '%s.tex'%basefile
        fh = file(texfile, 'w')
        custom_preamble = self.get_custom_preamble()
        # Wrap the string in the family-selection command for the
        # configured font family (roman is the fallback).
        fontcmd = {'sans-serif' : r'{\sffamily %s}',
                   'monospace'  : r'{\ttfamily %s}'}.get(self.font_family,
                                                         r'{\rmfamily %s}')
        tex = fontcmd % tex

        if rcParams['text.latex.unicode']:
            unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
        else:
            unicode_preamble = ''

        s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
       fontsize, fontsize*1.25, tex)
        if rcParams['text.latex.unicode']:
            fh.write(s.encode('utf8'))
        else:
            try:
                fh.write(s)
            except UnicodeEncodeError, err:
                mpl.verbose.report("You are using unicode and latex, but have "
                                   "not enabled the matplotlib 'text.latex.unicode' "
                                   "rcParam.", 'helpful')
                raise
        fh.close()

        return texfile

    def make_dvi(self, tex, fontsize):
        """
        generates a dvi file containing latex's layout of tex string

        returns the file name
        """
        basefile = self.get_basefile(tex, fontsize)
        dvifile = '%s.dvi'% basefile

        # Cached on disk: only rerun latex when the dvi is missing.
        if DEBUG or not os.path.exists(dvifile):
            texfile = self.make_tex(tex, fontsize)
            outfile = basefile+'.output'
            command = self._get_shell_cmd('cd "%s"'% self.texcache,
                            'latex -interaction=nonstopmode %s > "%s"'\
                            %(os.path.split(texfile)[-1], outfile))
            mpl.verbose.report(command, 'debug')
            exit_status = os.system(command)
            try:
                fh = file(outfile)
                report = fh.read()
                fh.close()
            except IOError:
                report = 'No latex error report available.'
            if exit_status:
                raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
            else: mpl.verbose.report(report, 'debug')
            # Clean up everything latex produced except the .dvi/.tex pair.
            for fname in glob.glob(basefile+'*'):
                if fname.endswith('dvi'): pass
                elif fname.endswith('tex'): pass
                else:
                    try: os.remove(fname)
                    except OSError: pass

        return dvifile

    def make_png(self, tex, fontsize, dpi):
        """
        generates a png file containing latex's rendering of tex string

        returns the filename
        """
        basefile = self.get_basefile(tex, fontsize, dpi)
        pngfile = '%s.png'% basefile

        # see get_rgba for a discussion of the background
        if DEBUG or not os.path.exists(pngfile):
            dvifile = self.make_dvi(tex, fontsize)
            outfile = basefile+'.output'
            command = self._get_shell_cmd('cd "%s"' % self.texcache,
                        'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
                   os.path.split(dvifile)[-1], outfile))
            mpl.verbose.report(command, 'debug')
            exit_status = os.system(command)
            try:
                fh = file(outfile)
                report = fh.read()
                fh.close()
            except IOError:
                report = 'No dvipng error report available.'
            if exit_status:
                raise RuntimeError('dvipng was not able to \
process the flowing file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
            else: mpl.verbose.report(report, 'debug')
            try: os.remove(outfile)
            except OSError: pass

        return pngfile

    def make_ps(self, tex, fontsize):
        """
        generates a postscript file containing latex's rendering of tex string

        returns the file name
        """
        basefile = self.get_basefile(tex, fontsize)
        psfile = '%s.epsf'% basefile

        if DEBUG or not os.path.exists(psfile):
            dvifile = self.make_dvi(tex, fontsize)
            outfile = basefile+'.output'
            command = self._get_shell_cmd('cd "%s"'% self.texcache,
                        'dvips -q -E -o "%s" "%s" > "%s"'\
                        %(os.path.split(psfile)[-1],
                          os.path.split(dvifile)[-1], outfile))
            mpl.verbose.report(command, 'debug')
            exit_status = os.system(command)
            fh = file(outfile)
            # NOTE(review): the error message says 'dvipng' but this
            # branch actually ran dvips -- looks like a copy/paste slip.
            if exit_status:
                raise RuntimeError('dvipng was not able to \
process the flowing file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + fh.read())
            else: mpl.verbose.report(fh.read(), 'debug')
            fh.close()
            os.remove(outfile)

        return psfile

    def get_ps_bbox(self, tex, fontsize):
        """
        returns a list containing the postscript bounding box for latex's
        rendering of the tex string
        """
        psfile = self.make_ps(tex, fontsize)
        ps = file(psfile)
        for line in ps:
            if line.startswith('%%BoundingBox:'):
                return [int(val) for val in line.split()[1:]]
        raise RuntimeError('Could not parse %s'%psfile)

    def get_grey(self, tex, fontsize=None, dpi=None):
        """returns the alpha channel"""
        key = tex, self.get_font_config(), fontsize, dpi
        alpha = self.grey_arrayd.get(key)
        if alpha is None:
            pngfile = self.make_png(tex, fontsize, dpi)
            X = read_png(os.path.join(self.texcache, pngfile))
            # rcParam overrides the auto-detected dvipng workaround flag.
            if rcParams['text.dvipnghack'] is not None:
                hack = rcParams['text.dvipnghack']
            else:
                hack = self._dvipng_hack_alpha
            if hack:
                # hack the alpha channel
                # dvipng assumed a constant background, whereas we want to
                # overlay these rasters with antialiasing over arbitrary
                # backgrounds that may have other figure elements under them.
                # When you set dvipng -bg Transparent, it actually makes the
                # alpha channel 1 and does the background compositing and
                # antialiasing itself and puts the blended data in the rgb
                # channels.  So what we do is extract the alpha information
                # from the red channel, which is a blend of the default dvipng
                # background (white) and foreground (black).  So the amount of
                # red (or green or blue for that matter since white and black
                # blend to a grayscale) is the alpha intensity.  Once we
                # extract the correct alpha information, we assign it to the
                # alpha channel properly and let the users pick their rgb.  In
                # this way, we can overlay tex strings on arbitrary
                # backgrounds with antialiasing
                #
                # red = alpha*red_foreground + (1-alpha)*red_background
                #
                # Since the foreground is black (0) and the background is
                # white (1) this reduces to red = 1-alpha or alpha = 1-red
                #alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
                alpha = 1-X[:,:,0]
            else:
                alpha = X[:,:,-1]
            self.grey_arrayd[key] = alpha
        return alpha

    def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
        """
        Returns latex's rendering of the tex string as an rgba array
        """
        if not fontsize: fontsize = rcParams['font.size']
        if not dpi: dpi = rcParams['savefig.dpi']
        r,g,b = rgb
        key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
        Z = self.rgba_arrayd.get(key)

        if Z is None:
            # Constant color planes; the text shape lives in the alpha plane.
            alpha = self.get_grey(tex, fontsize, dpi)

            Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)

            Z[:,:,0] = r
            Z[:,:,1] = g
            Z[:,:,2] = b
            Z[:,:,3] = alpha
            self.rgba_arrayd[key] = Z

        return Z
| agpl-3.0 |
namvo88/Thesis-Quadrotor-Code | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349  # in m
UTM_NORTH0 = 4824583  # in m
UTM_ZONE0 = 31
ALT0 = 147.000  # in m

utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)

# Chain UTM -> LLA -> ECEF to get the local tangent plane definition.
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)

# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))

# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()

# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
    # March diagonally (north-east) away from the origin in UTM.
    utm = UtmCoor_d()
    utm.north = i * dist_points + utm_origin.north
    utm.east = i * dist_points + utm_origin.east
    utm.alt = utm_origin.alt
    utm.zone = utm_origin.zone
    #print(utm)
    utm_res[i, 0] = utm.east - utm_origin.east
    utm_res[i, 1] = utm.north - utm_origin.north
    lla = utm.to_lla()
    #print(lla)
    ecef = lla.to_ecef()
    enu = ecef.to_enu(ltp_origin)
    enu_res[i, 0] = enu.x
    enu_res[i, 1] = enu.y
    # Rotate ENU by the convergence angle to compare against grid north.
    enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
    enu_res_c[i, 0] = enu_c.x
    enu_res_c[i, 1] = enu_c.y
    #print(enu)

dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)

plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
dhhjx880713/GPy | GPy/plotting/plotly_dep/plot_definitions.py | 4 | 16743 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.matplot_dep.plot_definitions nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from ..abstract_plotting_library import AbstractPlottingLibrary
from .. import Tango
from . import defaults
import plotly
from plotly import tools
from plotly.graph_objs import Scatter, Scatter3d, Line,\
Marker, ErrorX, ErrorY, Bar, Heatmap, Trace,\
Annotations, Annotation, Contour, Font, Surface
from plotly.exceptions import PlotlyDictKeyError
# Translate matplotlib single-character marker codes to plotly symbol names.
SYMBOL_MAP = {
    'o': 'dot',
    'v': 'triangle-down',
    '^': 'triangle-up',
    '<': 'triangle-left',
    '>': 'triangle-right',
    's': 'square',
    '+': 'cross',
    'x': 'x',
    '*': 'x',  # no star yet in plotly!!
    'D': 'diamond',
    'd': 'diamond',
}
class PlotlyPlotsBase(AbstractPlottingLibrary):
    """GPy plotting backend that renders through plotly figures."""
    def __init__(self):
        super(PlotlyPlotsBase, self).__init__()
        # Plotly-specific default styles, plus per-figure mutable state.
        self._defaults = defaults.__dict__
        self.current_states = dict()
def figure(self, rows=1, cols=1, specs=None, is_3d=False, **kwargs):
if specs is None:
specs = [[{'is_3d': is_3d}]*cols]*rows
figure = tools.make_subplots(rows, cols, specs=specs, **kwargs)
return figure
    def new_canvas(self, figure=None, row=1, col=1, projection='2d',
                   xlabel=None, ylabel=None, zlabel=None,
                   title=None, xlim=None,
                   ylim=None, zlim=None, **kwargs):
        """Return ((figure, row, col), kwargs) — the canvas traces draw onto.

        Axis labels/limits are accepted for API compatibility with the
        other backends; they are not applied here.
        """
        #if 'filename' not in kwargs:
        #    print('PlotlyWarning: filename was not given, this may clutter your plotly workspace')
        #    filename = None
        #else:
        #    filename = kwargs.pop('filename')
        if figure is None:
            figure = self.figure(is_3d=projection=='3d')
        figure.layout.font = Font(family="Raleway, sans-serif")
        if projection == '3d':
            # Center the legend and grey its background so it reads
            # well over 3d scenes.
            figure.layout.legend.x=.5
            figure.layout.legend.bgcolor='#DCDCDC'
        return (figure, row, col), kwargs
    def add_to_canvas(self, canvas, traces, legend=False, **kwargs):
        """Attach *traces* (a trace, Annotations, dict, list or tuple) to *canvas*.

        Containers are walked recursively, so arbitrarily nested plot
        results can be added in a single call.
        """
        figure, row, col = canvas
        def append_annotation(a, xref, yref):
            # Annotations need an explicit axis reference; default to
            # the subplot this canvas points at.
            if 'xref' not in a:
                a['xref'] = xref
            if 'yref' not in a:
                a['yref'] = yref
            figure.layout.annotations.append(a)
        def append_trace(t, row, col):
            figure.append_trace(t, row, col)
        def recursive_append(traces):
            if isinstance(traces, Annotations):
                xref, yref = figure._grid_ref[row-1][col-1]
                for a in traces:
                    append_annotation(a, xref, yref)
            # elif isinstance(traces, (Trace)): # doesn't work
            # elif type(traces) in [v for k,v in go.__dict__.iteritems()]:
            elif isinstance(traces, (Scatter, Scatter3d, ErrorX,
                                     ErrorY, Bar, Heatmap, Trace, Contour, Surface)):
                try:
                    append_trace(traces, row, col)
                except PlotlyDictKeyError:
                    # Its a dictionary of plots:
                    for t in traces:
                        recursive_append(traces[t])
            elif isinstance(traces, (dict)):
                for t in traces:
                    recursive_append(traces[t])
            elif isinstance(traces, (tuple, list)):
                for t in traces:
                    recursive_append(t)
        recursive_append(traces)
        figure.layout['showlegend'] = legend
        return canvas
def show_canvas(self, canvas, filename=None, **kwargs):
return NotImplementedError
    def scatter(self, ax, X, Y, Z=None, color=Tango.colorsHex['mediumBlue'], cmap=None, label=None, marker='o', marker_kwargs=None, **kwargs):
        """Marker trace: Scatter3d when *Z* is given, Scatter otherwise."""
        try:
            # Accept matplotlib-style marker codes, mapped to plotly names.
            marker = SYMBOL_MAP[marker]
        except:
            #not matplotlib marker
            pass
        marker_kwargs = marker_kwargs or {}
        if 'symbol' not in marker_kwargs:
            marker_kwargs['symbol'] = marker
        X, Y = np.squeeze(X), np.squeeze(Y)
        if Z is not None:
            Z = np.squeeze(Z)
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             showlegend=label is not None,
                             marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
                             name=label, **kwargs)
        return Scatter(x=X, y=Y, mode='markers', showlegend=label is not None,
                       marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
                       name=label, **kwargs)
def plot(self, ax, X, Y, Z=None, color=None, label=None, line_kwargs=None, **kwargs):
if 'mode' not in kwargs:
kwargs['mode'] = 'lines'
X, Y = np.squeeze(X), np.squeeze(Y)
if Z is not None:
Z = np.squeeze(Z)
return Scatter3d(x=X, y=Y, z=Z, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
return Scatter(x=X, y=Y, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
def plot_axis_lines(self, ax, X, color=Tango.colorsHex['mediumBlue'], label=None, marker_kwargs=None, **kwargs):
if X.shape[1] == 1:
annotations = Annotations()
for i, row in enumerate(X):
annotations.append(
Annotation(
text='',
x=row[0], y=0,
yref='paper',
ax=0, ay=20,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor=color,
showarrow=True,
#showlegend=i==0,
#label=label,
))
return annotations
elif X.shape[1] == 2:
marker_kwargs.setdefault('symbol', 'diamond')
opacity = kwargs.pop('opacity', .8)
return Scatter3d(x=X[:, 0], y=X[:, 1], z=np.zeros(X.shape[0]),
mode='markers',
projection=dict(z=dict(show=True, opacity=opacity)),
marker=Marker(color=color, **marker_kwargs or {}),
opacity=0,
name=label,
showlegend=label is not None, **kwargs)
def barplot(self, canvas, x, height, width=0.8, bottom=0, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
figure, _, _ = canvas
if 'barmode' in kwargs:
figure.layout['barmode'] = kwargs.pop('barmode')
return Bar(x=x, y=height, marker=Marker(color=color), name=label)
    def xerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
        """Invisible markers carrying horizontal error bars.

        *error* is either one array (symmetric) or a (2, n) array of
        (minus, plus) extents.
        """
        error_kwargs = error_kwargs or {}
        if (error.shape[0] == 2) and (error.ndim == 2):
            error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
        else:
            error_kwargs.update(dict(array=error, symmetric=True))
        X, Y = np.squeeze(X), np.squeeze(Y)
        if Z is not None:
            Z = np.squeeze(Z)
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             error_x=ErrorX(color=color, **error_kwargs or {}),
                             marker=Marker(size='0'), name=label,
                             showlegend=label is not None, **kwargs)
        return Scatter(x=X, y=Y, mode='markers',
                       error_x=ErrorX(color=color, **error_kwargs or {}),
                       marker=Marker(size='0'), name=label,
                       showlegend=label is not None,
                       **kwargs)
    def yerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
        """Invisible markers carrying vertical error bars.

        *error* is either one array (symmetric) or a (2, n) array of
        (minus, plus) extents.
        """
        error_kwargs = error_kwargs or {}
        if (error.shape[0] == 2) and (error.ndim == 2):
            error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
        else:
            error_kwargs.update(dict(array=error, symmetric=True))
        X, Y = np.squeeze(X), np.squeeze(Y)
        if Z is not None:
            Z = np.squeeze(Z)
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             error_y=ErrorY(color=color, **error_kwargs or {}),
                             marker=Marker(size='0'), name=label,
                             showlegend=label is not None, **kwargs)
        return Scatter(x=X, y=Y, mode='markers',
                       error_y=ErrorY(color=color, **error_kwargs or {}),
                       marker=Marker(size='0'), name=label,
                       showlegend=label is not None,
                       **kwargs)
def imshow(self, ax, X, extent=None, label=None, vmin=None, vmax=None, **imshow_kwargs):
if not 'showscale' in imshow_kwargs:
imshow_kwargs['showscale'] = False
return Heatmap(z=X, name=label,
x0=extent[0], dx=float(extent[1]-extent[0])/(X.shape[0]-1),
y0=extent[2], dy=float(extent[3]-extent[2])/(X.shape[1]-1),
zmin=vmin, zmax=vmax,
showlegend=label is not None,
hoverinfo='z',
**imshow_kwargs)
    def imshow_interact(self, ax, plot_function, extent=None, label=None, resolution=None, vmin=None, vmax=None, **imshow_kwargs):
        # TODO stream interaction?
        # Not supported by this backend; defer to the abstract base.
        super(PlotlyPlotsBase, self).imshow_interact(ax, plot_function)
def annotation_heatmap(self, ax, X, annotation, extent=None, label='Gradient', imshow_kwargs=None, **annotation_kwargs):
    """Heatmap of ``X`` overlaid with per-cell text annotations.

    Bug fix: ``imshow_kwargs`` defaults to None but was dereferenced
    unconditionally with ``setdefault``, so every call without an explicit
    dict raised AttributeError.  It is now normalised to ``{}`` first.

    Returns
    -------
    (heatmap_trace, Annotations)
    """
    imshow_kwargs = imshow_kwargs or {}
    imshow_kwargs.setdefault('label', label)
    imshow_kwargs.setdefault('showscale', True)
    imshow = self.imshow(ax, X, extent, **imshow_kwargs)
    # Rescale a copy of X into [-1, 1]; the magnitude decides whether a
    # cell is dark enough to need white annotation text.
    X = X - X.min()
    X /= X.max() / 2.
    X -= 1
    x = np.linspace(extent[0], extent[1], X.shape[0])
    y = np.linspace(extent[2], extent[3], X.shape[1])
    annotations = Annotations()
    for n, row in enumerate(annotation):
        for m, val in enumerate(row):
            var = X[n][m]
            annotations.append(
                Annotation(
                    text=str(val),
                    x=x[m], y=y[n],
                    xref='x1', yref='y1',
                    # White text on extreme (dark) cells, black elsewhere.
                    font=dict(color='white' if np.abs(var) > 0.8 else 'black', size=10),
                    opacity=.5,
                    showarrow=False,
                ))
    return imshow, annotations
def annotation_heatmap_interact(self, ax, plot_function, extent, label=None, resolution=15, imshow_kwargs=None, **annotation_kwargs):
    """Interactive annotated heatmap is not supported here; defer to the base class."""
    super(PlotlyPlotsBase, self).annotation_heatmap_interact(ax, plot_function, extent)
def contour(self, ax, X, Y, C, levels=20, label=None, **kwargs):
    """Contour plot of ``C`` over the (X, Y) grid."""
    # NOTE(review): 'levels' is currently unused; the commented line below
    # would honour it -- confirm intended behaviour before enabling.
    #ncontours=levels, contours=Contours(start=C.min(), end=C.max(), size=(C.max()-C.min())/levels),
    return Contour(x=X, y=Y, z=C, name=label, **kwargs)
def surface(self, ax, X, Y, Z, color=None, label=None, **kwargs):
    """3-D surface of ``Z`` over (X, Y).  ``color`` is accepted for
    interface compatibility but not forwarded to plotly."""
    return Surface(x=X, y=Y, z=Z, name=label, showlegend=label is not None, **kwargs)
def fill_between(self, ax, X, lower, upper, color=Tango.colorsHex['mediumBlue'], label=None, line_kwargs=None, **kwargs):
    """Filled band between the ``lower`` and ``upper`` curves.

    Fixes: idiomatic ``'line' not in kwargs`` membership test, the
    ambiguous single-letter local ``l`` renamed, and the duplicated
    legendgroup expression hoisted into one variable.

    Returns
    -------
    (lower_trace, upper_trace) : two Scatter traces sharing a legendgroup.
    """
    if 'line' not in kwargs:
        kwargs['line'] = Line(**(line_kwargs or {}))
    else:
        kwargs['line'].update(line_kwargs or {})
    # Hex colours are converted to rgba so the fill honours 'opacity'.
    if color.startswith('#'):
        fcolor = 'rgba({c[0]}, {c[1]}, {c[2]}, {alpha})'.format(c=Tango.hex2rgb(color), alpha=kwargs.get('opacity', 1.0))
    else:
        fcolor = color
    legendgroup = '{}_fill_({},{})'.format(label, ax[1], ax[2])
    upper_trace = Scatter(x=X, y=upper, fillcolor=fcolor, showlegend=label is not None, name=label,
                          fill='tonextx', legendgroup=legendgroup, **kwargs)
    #fcolor = '{}, {alpha})'.format(','.join(fcolor.split(',')[:-1]), alpha=0.0)
    lower_trace = Scatter(x=X, y=lower, fillcolor=fcolor, showlegend=False, name=label,
                          legendgroup=legendgroup, **kwargs)
    return lower_trace, upper_trace
def fill_gradient(self, canvas, X, percentiles, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
    """Stack of translucent filled bands, one per percentile curve, fading
    towards the extreme percentiles.

    Bug fixes:
    - ``map(float(...))`` was called with a single argument (TypeError),
      and on Python 3 the resulting map object has no ``len()``; the
      'rgb(a)(...)' string is now parsed explicitly.
    - ``opacity`` was unbound for plain 'rgb(r, g, b)' inputs; it now
      defaults to 1.0.
    - An unused nested ``pairwise`` helper has been removed.
    """
    if color.startswith('#'):
        colarray = Tango.hex2rgb(color)
        opacity = .9
    else:
        # Parse 'rgb(r, g, b)' / 'rgba(r, g, b, a)' component strings.
        colarray = [float(c) for c in color.strip(')').split('(')[1].split(',')]
        opacity = 1.0
        if len(colarray) == 4:
            colarray, opacity = colarray[:3], colarray[3]
    # Per-band alpha: strongest in the middle, fading towards the edges.
    alpha = opacity * (1. - np.abs(np.linspace(-1, 1, len(percentiles) - 1)))
    polycol = []
    for i, (y1, a) in enumerate(zip(percentiles, alpha)):
        fcolor = 'rgba({}, {}, {}, {alpha})'.format(*colarray, alpha=a)
        # NOTE(review): with true division, i == len(percentiles)/2 never
        # holds for an odd number of percentiles, so no band carries the
        # legend entry in that case -- confirm whether // was intended.
        if i == len(percentiles) / 2:
            polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=True,
                                   name=label, line=Line(width=0, smoothing=0), mode='none', fill='tonextx',
                                   legendgroup='density', hoverinfo='none', **kwargs))
        else:
            polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=False,
                                   name=None, line=Line(width=1, smoothing=0, color=fcolor), mode='none', fill='tonextx',
                                   legendgroup='density', hoverinfo='none', **kwargs))
    return polycol
class PlotlyPlotsOnline(PlotlyPlotsBase):
    """Plotly backend that renders figures through the plot.ly web service."""

    def __init__(self):
        super(PlotlyPlotsOnline, self).__init__()

    def show_canvas(self, canvas, filename=None, **kwargs):
        """Upload and display the figure; returns the plotly plot handle/URL."""
        figure, _, _ = canvas
        if len(figure.data) == 0:
            # plotly rejects an empty figure, so add an invisible mock trace.
            figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
        from ..gpy_plot.plot_util import in_ipynb
        if in_ipynb():
            return plotly.plotly.iplot(figure, filename=filename, **kwargs)
        return plotly.plotly.plot(figure, filename=filename, **kwargs)
class PlotlyPlotsOffline(PlotlyPlotsBase):
    """Plotly backend that renders figures locally (offline mode)."""

    def __init__(self):
        super(PlotlyPlotsOffline, self).__init__()

    def show_canvas(self, canvas, filename=None, **kwargs):
        """Display the figure inline in a notebook, or write an HTML file."""
        figure, _, _ = canvas
        if len(figure.data) == 0:
            # plotly rejects an empty figure, so add an invisible mock trace.
            figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
        from ..gpy_plot.plot_util import in_ipynb
        if in_ipynb():
            plotly.offline.init_notebook_mode(connected=True)
            return plotly.offline.iplot(figure, filename=filename, **kwargs)
        return plotly.offline.plot(figure, filename=filename, **kwargs)
| bsd-3-clause |
anugrah-saxena/pycroscopy | pycroscopy/io/translators/beps_data_generator.py | 1 | 26908 | """
Utility functions for the Fake BEPS generator
"""
import os
import numpy as np
from PIL import Image
from sklearn.utils import gen_batches
from skimage.measure import block_reduce
# Pycroscopy imports
from ..io_hdf5 import ioHDF5
from ..hdf_utils import calc_chunks, getH5DsetRefs, link_as_main, get_attr, buildReducedSpec
from ..io_utils import realToCompound, compound_to_scalar
from .utils import build_ind_val_dsets, generate_dummy_main_parms
from .translator import Translator
from ..microdata import MicroDataGroup, MicroDataset
from ...analysis.utils.be_loop import loop_fit_function
from ...analysis.utils.be_sho import SHOfunc
from ...analysis.be_sho_model import sho32
from ...analysis.be_loop_model import loop_fit32
from .df_utils.beps_gen_utils import get_noise_vec, beps_image_folder
from .df_utils.io_image import read_image, no_bin
class FakeDataGenerator(Translator):
    """
    Translator that synthesises a fake BEPS dataset, deriving the loop
    coefficients from a folder of template images.
    """
    # TODO: Add other cycle fractions
    # TODO: Add support for other VS_modes
    # TODO: Add support for other field modes

    def __init__(self, *args, **kwargs):
        """Initialise every generator parameter to an unset default."""
        super(FakeDataGenerator, self).__init__(*args, **kwargs)
        # Image grid geometry (filled in by _read_data)
        self.N_x = None
        self.N_y = None
        self.n_pixels = None
        # Spectroscopic axis parameters (filled in by translate)
        self.n_steps = None
        self.n_bins = None
        self.start_freq = None
        self.end_freq = None
        self.n_cycles = None
        self.forc_cycles = None
        self.forc_repeats = None
        # Loop-shape coefficients and acquisition modes
        self.loop_a = None
        self.loop_b = None
        self.data_type = None
        self.mode = None
        self.field_mode = None
        # Derived sizes
        self.n_loops = None
        self.n_sho_bins = None
        self.n_spec_bins = None
        self.n_fields = None
        # Default: no image binning
        self.binning_func = no_bin
def _read_data(self, folder):
    """Load every template image from ``folder`` as greyscale, apply the
    configured binning, and record the resulting grid shape on self.

    Returns
    -------
    list of numpy.ndarray
        One (possibly binned) 2-D image per file.
    """
    images = list()
    for image_file in self._parse_file_path(folder, self.image_ext):
        image, _ = read_image(os.path.join(folder, image_file), as_grey=True)
        image = self.binning_func(image, self.bin_factor, self.bin_func)
        images.append(image)
    # Grid shape is taken from the (last) image read.
    self.N_x, self.N_y = image.shape
    self.n_pixels = self.N_x * self.N_y
    return images
@staticmethod
def _parse_file_path(path, ftype='all'):
    """
    Return a list of files in the directory given by ``path``.

    Fix: the previous docstring falsely documented a second return value
    (``numfiles``); only the file list is returned.

    Parameters
    ----------
    path : str or None
        Absolute path to the directory containing the files.  If None, a
        default directory of bundled generator images is used.
    ftype : str, optional
        File extension (including the dot) to filter on, or 'all' for no
        filtering.  Default 'all'.

    Returns
    -------
    file_list : list of str
        Matching entries.  NOTE(review): with ftype='all' the bare entry
        names are returned, whereas a specific ftype yields full joined
        paths -- confirm this asymmetry is intentional before relying on it.
    """
    # Make sure we have a proper path to the images to use
    if path is None:
        path = os.path.join(os.getcwd(), 'df_utils/beps_data_gen_images')
    else:
        path = os.path.abspath(path)
    # Get all files in directory
    file_list = os.listdir(path)
    # If no file type specified, return full list
    if ftype == 'all':
        return file_list
    # Keep only regular files whose extension matches the requested type.
    new_file_list = []
    for this_thing in file_list:
        full_path = os.path.join(path, this_thing)
        if not os.path.isfile(full_path):
            continue
        if os.path.splitext(this_thing)[1] == ftype:
            new_file_list.append(full_path)
    return new_file_list
def translate(self, h5_path, n_steps, n_bins, start_freq, end_freq,
              data_type='BEPSData', mode='DC modulation mode', field_mode='in and out-of-field',
              n_cycles=1, FORC_cycles=1, FORC_repeats=1, loop_a=1, loop_b=4,
              cycle_frac='full', image_folder=beps_image_folder, bin_factor=None,
              bin_func=np.mean, image_type='.tif'):
    """
    Generate the fake BEPS dataset and write it to a new HDF5 file.

    Parameters
    ----------
    h5_path : str
        Desired path to write the new HDF5 file
    n_steps : uint
        Number of voltage steps
    n_bins : uint
        Number of frequency bins
    start_freq : float
        Starting frequency in Hz
    end_freq : float
        Final frequency in Hz
    data_type : str, optional
        Type of data to generate.  Options - 'BEPSData', 'BELineData'.
        Default - 'BEPSData'
    mode : str, optional
        Modulation mode.  Options - 'DC modulation mode',
        'AC modulation mode'.  Default - 'DC modulation mode'
    field_mode : str, optional
        Field mode.  Options - 'in-field', 'out-of-field',
        'in and out-of-field'.  Default - 'in and out-of-field'
    n_cycles : uint, optional
        Number of cycles.  Default - 1
    FORC_cycles : uint, optional
        Number of FORC cycles.  Default - 1
    FORC_repeats : uint, optional
        Number of FORC repeats.  Default - 1
    loop_a : float, optional
        Loop coefficient a.  Default - 1
    loop_b : float, optional
        Loop coefficient b.  Default - 4
    cycle_frac : str
        Cycle fraction parameter.  Default - 'full'
    image_folder : str
        Path to the images used to generate the loop coefficients; must
        contain 11 images named '1.tif' ... '11.tif'.
    bin_factor : array_like of uint, optional
        Downsampling factor for each dimension.  Default is None.
    bin_func : callable, optional
        Function used to reduce each block (must accept an ``axis``
        keyword, e.g. numpy.mean).  Ignored if bin_factor is None.
    image_type : str
        File extension of images to be read.  Default '.tif'

    Returns
    -------
    str
        Path of the written HDF5 file.
    """
    # Setup shared parameters
    self.n_steps = n_steps
    self.n_bins = n_bins
    self.start_freq = start_freq
    self.end_freq = end_freq
    self.n_cycles = n_cycles
    self.forc_cycles = FORC_cycles
    self.forc_repeats = FORC_repeats
    self.loop_a = loop_a
    self.loop_b = loop_b
    self.data_type = data_type
    self.mode = mode
    self.field_mode = field_mode
    self.cycle_fraction = cycle_frac
    self.bin_factor = bin_factor
    self.bin_func = bin_func
    if field_mode == 'in and out-of-field':
        self.n_fields = 2
    else:
        self.n_fields = 1
    self.n_loops = FORC_cycles * FORC_repeats * n_cycles * self.n_fields
    self.n_sho_bins = n_steps * self.n_loops
    self.n_spec_bins = n_bins * self.n_sho_bins
    self.h5_path = h5_path
    self.image_ext = image_type
    '''
    Check if a bin_factor is given.  Set up binning objects if it is.
    '''
    if bin_factor is not None:
        self.rebin = True
        if isinstance(bin_factor, int):
            self.bin_factor = (bin_factor, bin_factor)
        elif len(bin_factor) == 2:
            self.bin_factor = tuple(bin_factor)
        else:
            raise ValueError('Input parameter `bin_factor` must be a length 2 array_like or an integer.\n' +
                             '{} was given.'.format(bin_factor))
        self.binning_func = block_reduce
        self.bin_func = bin_func
    images = self._read_data(image_folder)
    # BUG FIX: the 'n_steps' key was previously misspelled as 'n_steps;:',
    # which wrote a garbage attribute name into the HDF5 file.
    data_gen_parms = {'N_x': self.N_x, 'N_y': self.N_y, 'n_steps': n_steps,
                      'n_bins': n_bins, 'start_freq': start_freq,
                      'end_freq': end_freq, 'n_cycles': n_cycles,
                      'forc_cycles': FORC_cycles, 'forc_repeats': FORC_repeats,
                      'loop_a': loop_a, 'loop_b': loop_b, 'data_type': data_type,
                      'VS_mode': mode, 'field_mode': field_mode, 'num_udvs_steps': self.n_spec_bins,
                      'VS_cycle_fraction': cycle_frac}
    # Build the hdf5 file and get the datasets to write the data to
    self._setup_h5(data_gen_parms)
    # Calculate the loop parameters
    coef_mat = self.calc_loop_coef_mat(images)
    # In- and out-of-field coefficients.
    # BUG FIX: previously each matrix was only assigned inside a
    # conditional, so the single-field modes crashed with NameError.
    # Both are now always defined; the in-field copy is shifted as before.
    coef_OF_mat = np.copy(coef_mat)
    coef_IF_mat = np.copy(coef_mat)
    if field_mode != 'out-of-field':
        coef_IF_mat[:, 4] -= 0.05
    # Calculate the SHO fit and guess from the loop coefficients
    self._calc_sho(coef_OF_mat, coef_IF_mat)
    # Save the loop guess and fit to file
    coef_OF_mat = np.hstack((coef_OF_mat[:, :9], np.ones([coef_OF_mat.shape[0], 1])))
    coef_IF_mat = np.hstack((coef_IF_mat[:, :9], np.ones([coef_IF_mat.shape[0], 1])))
    coef_mat = np.hstack([coef_IF_mat, coef_OF_mat])
    self.h5_loop_fit[:] = np.tile(realToCompound(coef_mat, loop_fit32),
                                  [1, int(self.n_loops / self.n_fields)])
    self.h5_loop_guess[:] = np.tile(realToCompound(coef_mat * get_noise_vec(coef_mat.shape, 0.1),
                                                   loop_fit32),
                                    [1, int(self.n_loops / self.n_fields)])
    self._calc_raw()
    self.h5_file.flush()
    self.h5_file.close()
    return self.h5_path
def _build_ancillary_datasets(self):
    """
    Build the position and spectroscopic ancillary MicroDatasets that
    describe the pixel grid and the voltage/frequency axes of the data.

    Parameters
    ----------
    None

    Returns
    -------
    ds_pos_inds : MicroDataset
        Position Indices
    ds_pos_vals : MicroDataset
        Position Values
    ds_spec_inds : MicroDataset
        Spectroscopic Indices
    ds_spec_vals : MicroDataset
        Spectroscopic Values
    """
    # create spectrogram at each pixel from the coefficients
    spec_step = np.arange(0, 1, 1 / self.n_steps)
    # Triangular bipolar DC waveform spanning +/- 10 V.
    V_vec = 10 * np.arcsin(np.sin(self.n_fields * np.pi * spec_step)) * 2 / np.pi
    # build DC vector for typical BEPS
    Vdc_mat = np.vstack((V_vec, np.full(np.shape(V_vec), np.nan)))  # Add out-of-field values
    IF_vec = Vdc_mat.T.flatten()  # Base DC vector
    IF_vec = np.tile(IF_vec, self.n_cycles)  # Now with Cycles
    IF_vec = np.dot(1 + np.arange(self.forc_cycles)[:, None], IF_vec[None, :])  # Do a single FORC
    IF_vec = np.tile(IF_vec.flatten(), self.forc_repeats)  # Repeat the FORC
    # NaN entries mark out-of-field steps; the DC offset there is zero.
    IF_inds = np.logical_not(np.isnan(IF_vec))
    Vdc_vec = np.where(IF_inds, IF_vec, 0)
    # build AC vector
    # NOTE(review): Vac_vec is computed but never used in this method.
    Vac_vec = np.ones(np.shape(Vdc_vec))
    # Build the Spectroscopic Values matrix
    spec_dims = [self.n_bins, self.n_fields, self.n_steps, self.n_cycles, self.forc_cycles, self.forc_repeats]
    spec_labs = ['Frequency', 'Field', 'DC_Offset', 'Cycle', 'FORC', 'FORC_repeat']
    spec_units = ['Hz', '', 'V', '', '', '']
    spec_start = [self.start_freq, 0, 0, 0, 0, 0]
    spec_steps = [(self.end_freq - self.start_freq) / self.n_bins, 1, 1, 1, 1, 1]
    # Remove dimensions with single values
    real_dims = np.argwhere(np.array(spec_dims) != 1).squeeze()
    spec_dims = [spec_dims[idim] for idim in real_dims]
    spec_labs = [spec_labs[idim] for idim in real_dims]
    spec_units = [spec_units[idim] for idim in real_dims]
    spec_start = [spec_start[idim] for idim in real_dims]
    spec_steps = [spec_steps[idim] for idim in real_dims]
    spec_inds, spec_vals = build_ind_val_dsets(spec_dims,
                                               labels=spec_labs,
                                               units=spec_units,
                                               initial_values=spec_start,
                                               steps=spec_steps)
    # Replace the dummy DC values with the correct ones
    spec_vals.data[spec_labs.index('DC_Offset'), :] = np.repeat(Vdc_vec, self.n_bins)
    # Position grid: N_x x N_y pixels spanning [-5, 5] um in each direction.
    position_ind_mat, position_val_mat = build_ind_val_dsets([self.N_x, self.N_y], False,
                                                             steps=[10 / self.N_x, 10 / self.N_y],
                                                             initial_values=[-5, -5],
                                                             labels=['X', 'Y'],
                                                             units=['um', 'um'])
    return position_ind_mat, position_val_mat, spec_inds, spec_vals
def _setup_h5(self, data_gen_parms):
    """
    Setups up the hdf5 file structure before doing the actual generation.

    Builds the full tree (Measurement -> Channel -> Raw_Data plus the
    derived SHO-fit and loop-fit groups), writes it to ``self.h5_path``
    and caches handles to every dataset filled in by later steps.

    Parameters
    ----------
    data_gen_parms : dict
        Dictionary containing the parameters to write to the Measurement Group as attributes

    Returns
    -------
    None
    """
    '''
    Build the group structure down to the channel group
    '''
    # Set up the basic group structure
    root_grp = MicroDataGroup('')
    root_parms = generate_dummy_main_parms()
    root_parms['translator'] = 'FAKEBEPS'
    root_parms['data_type'] = data_gen_parms['data_type']
    root_grp.attrs = root_parms
    meas_grp = MicroDataGroup('Measurement_')
    chan_grp = MicroDataGroup('Channel_')
    meas_grp.attrs.update(data_gen_parms)
    # Create the Position and Spectroscopic datasets for the Raw Data
    ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()
    # Chunk so that one pixel's full frequency sweep is contiguous.
    raw_chunking = calc_chunks([self.n_pixels,
                                self.n_spec_bins],
                               np.complex64(0).itemsize,
                               unit_chunks=[1, self.n_bins])
    ds_raw_data = MicroDataset('Raw_Data', data=[],
                               maxshape=[self.n_pixels, self.n_spec_bins],
                               dtype=np.complex64,
                               compression='gzip',
                               chunking=raw_chunking,
                               parent=meas_grp)
    chan_grp.addChildren([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,
                          ds_raw_data])
    meas_grp.addChildren([chan_grp])
    root_grp.addChildren([meas_grp])
    hdf = ioHDF5(self.h5_path)
    # Remove any pre-existing file at this path before writing.
    hdf.delete()
    h5_refs = hdf.writeData(root_grp)
    # Delete the MicroDatasets to save memory
    del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals
    # Get the file and Raw_Data objects
    h5_raw = getH5DsetRefs(['Raw_Data'], h5_refs)[0]
    h5_chan_grp = h5_raw.parent
    # Get the Position and Spectroscopic dataset objects
    h5_pos_inds = getH5DsetRefs(['Position_Indices'], h5_refs)[0]
    h5_pos_vals = getH5DsetRefs(['Position_Values'], h5_refs)[0]
    h5_spec_inds = getH5DsetRefs(['Spectroscopic_Indices'], h5_refs)[0]
    h5_spec_vals = getH5DsetRefs(['Spectroscopic_Values'], h5_refs)[0]
    # Link the Position and Spectroscopic datasets as attributes of Raw_Data
    link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)
    '''
    Build the SHO Group
    '''
    sho_grp = MicroDataGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)
    # Build the Spectroscopic datasets for the SHO Guess and Fit
    # (the SHO datasets collapse the Frequency dimension).
    sho_spec_starts = np.where(h5_spec_inds[0] == 0)[0]
    sho_spec_labs = get_attr(h5_spec_inds, 'labels')
    ds_sho_spec_inds, ds_sho_spec_vals = buildReducedSpec(h5_spec_inds,
                                                          h5_spec_vals,
                                                          keep_dim=sho_spec_labs != 'Frequency',
                                                          step_starts=sho_spec_starts)
    sho_chunking = calc_chunks([self.n_pixels,
                                self.n_sho_bins],
                               sho32.itemsize,
                               unit_chunks=[1, 1])
    ds_sho_fit = MicroDataset('Fit', data=[],
                              maxshape=[self.n_pixels, self.n_sho_bins],
                              dtype=sho32,
                              compression='gzip',
                              chunking=sho_chunking,
                              parent=sho_grp)
    ds_sho_guess = MicroDataset('Guess', data=[],
                                maxshape=[self.n_pixels, self.n_sho_bins],
                                dtype=sho32,
                                compression='gzip',
                                chunking=sho_chunking,
                                parent=sho_grp)
    sho_grp.addChildren([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])
    # Write the SHO group and datasets to the file and delete the MicroDataset objects
    h5_sho_refs = hdf.writeData(sho_grp)
    del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals
    # Get the dataset handles for the fit and guess
    h5_sho_fit = getH5DsetRefs(['Fit'], h5_sho_refs)[0]
    h5_sho_guess = getH5DsetRefs(['Guess'], h5_sho_refs)[0]
    # Get the dataset handles for the SHO Spectroscopic datasets
    h5_sho_spec_inds = getH5DsetRefs(['Spectroscopic_Indices'], h5_sho_refs)[0]
    h5_sho_spec_vals = getH5DsetRefs(['Spectroscopic_Values'], h5_sho_refs)[0]
    # Link the Position and Spectroscopic datasets as attributes of the SHO Fit and Guess
    link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)
    link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)
    '''
    Build the loop group
    '''
    loop_grp = MicroDataGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)
    # Build the Spectroscopic datasets for the loops
    # (the loop datasets additionally collapse the DC_Offset dimension).
    loop_spec_starts = np.where(h5_sho_spec_inds[0] == 0)[0]
    loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')
    ds_loop_spec_inds, ds_loop_spec_vals = buildReducedSpec(h5_sho_spec_inds,
                                                            h5_sho_spec_vals,
                                                            keep_dim=loop_spec_labs != 'DC_Offset',
                                                            step_starts=loop_spec_starts)
    # Create the loop fit and guess MicroDatasets
    loop_chunking = calc_chunks([self.n_pixels, self.n_loops],
                                loop_fit32.itemsize,
                                unit_chunks=[1, 1])
    ds_loop_fit = MicroDataset('Fit', data=[],
                               maxshape=[self.n_pixels, self.n_loops],
                               dtype=loop_fit32,
                               compression='gzip',
                               chunking=loop_chunking,
                               parent=loop_grp)
    ds_loop_guess = MicroDataset('Guess', data=[],
                                 maxshape=[self.n_pixels, self.n_loops],
                                 dtype=loop_fit32,
                                 compression='gzip',
                                 chunking=loop_chunking,
                                 parent=loop_grp)
    # Add the datasets to the loop group then write it to the file
    loop_grp.addChildren([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])
    h5_loop_refs = hdf.writeData(loop_grp)
    # Delete the MicroDatasets
    del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit
    # Get the handles to the datasets
    h5_loop_fit = getH5DsetRefs(['Fit'], h5_loop_refs)[0]
    h5_loop_guess = getH5DsetRefs(['Guess'], h5_loop_refs)[0]
    h5_loop_spec_inds = getH5DsetRefs(['Spectroscopic_Indices'], h5_loop_refs)[0]
    h5_loop_spec_vals = getH5DsetRefs(['Spectroscopic_Values'], h5_loop_refs)[0]
    # Link the Position and Spectroscopic datasets to the Loop Guess and Fit
    link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)
    link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)
    # Cache all handles for the generation steps that follow.
    self.h5_raw = h5_raw
    self.h5_sho_guess = h5_sho_guess
    self.h5_sho_fit = h5_sho_fit
    self.h5_loop_guess = h5_loop_guess
    self.h5_loop_fit = h5_loop_fit
    self.h5_spec_vals = h5_spec_vals
    self.h5_spec_inds = h5_spec_inds
    self.h5_sho_spec_inds = h5_sho_spec_inds
    self.h5_sho_spec_vals = h5_sho_spec_vals
    self.h5_loop_spec_inds = h5_loop_spec_inds
    self.h5_loop_spec_vals = h5_loop_spec_vals
    self.h5_file = h5_raw.file
    return
def calc_loop_coef_mat(self, image_list):
    """
    Build the loop coefficient matrix by rescaling each template image
    into the physical range of its coefficient.

    Parameters
    ----------
    image_list : list of numpy.ndarray
        Images that will be used to generate the coefficients

    Returns
    -------
    coef_mat : numpy.ndarray
        (n_pixels, 11) array of loop coefficients
    """
    # (min, max) limits for each of the 11 loop-fit coefficients.
    coef_limits = [
        [-1.0, -0.4],                        # 0 - loop bottom edge
        [0.5, 2.0],                          # 1 - loop height
        [3.0, 5.0],                          # 2 - loop crossing 1
        [-5.0, -3.0],                        # 3 - loop crossing 2
        [-0.001, 0.0],                       # 4 - loop slope
        [self.loop_a, self.loop_b],          # 5 - loop corner sharpness 1
        [self.loop_a / 4, self.loop_b / 4],  # 6 - loop corner sharpness 2
        [self.loop_a / 4, self.loop_b / 4],  # 7 - loop corner sharpness 3
        [self.loop_a, self.loop_b],          # 8 - loop corner sharpness 4
        [275E3, 325E3],                      # 9 - resonant frequency
        [100.0, 150.0],                      # 10 - Q factor
    ]
    # build loop coef matrix
    coef_mat = np.zeros([self.n_pixels, 11])
    for coef_ind, (coef_min, coef_max) in enumerate(coef_limits):
        # Map image values (in [0, 1]) linearly onto [coef_min, coef_max].
        scaled = image_list[coef_ind] * (coef_max - coef_min) + coef_min
        coef_mat[:, coef_ind] = scaled.flatten()
    return coef_mat
def _calc_sho(self, coef_OF_mat, coef_IF_mat, amp_noise=0.1, phase_noise=0.1, q_noise=0.2, resp_noise=0.01):
    """
    Build the SHO dataset from the coefficient matrices.

    Parameters
    ----------
    coef_OF_mat : numpy.ndarray
        Out-of-field coefficients
    coef_IF_mat : numpy.ndarray
        In-field coefficients
    amp_noise : float
        Noise factor for amplitude parameter
    phase_noise : float
        Noise factor for phase parameter
    q_noise : float
        Noise factor for Q-value parameter
    resp_noise : float
        Noise factor for w0 parameter

    Returns
    -------
    None
    """
    # NOTE(review): indexing the spectroscopic-values dataset with its own
    # attribute value assumes the attrs hold row labels/indices -- confirm.
    vdc_vec = self.h5_sho_spec_vals[self.h5_sho_spec_vals.attrs['DC_Offset']].squeeze()
    field = self.h5_sho_spec_vals[self.h5_sho_spec_vals.attrs['Field']].squeeze()
    of_inds = field == 0
    if_inds = field == 1
    # determine how many pixels can be read at once
    mem_per_pix = vdc_vec.size * np.float32(0).itemsize
    free_mem = self.max_ram - vdc_vec.size * vdc_vec.dtype.itemsize * 6
    batch_size = int(free_mem / mem_per_pix)
    batches = gen_batches(self.n_pixels, batch_size)
    for pix_batch in batches:
        # Evaluate the hysteresis-loop response for every pixel in the batch,
        # separately for out-of-field and in-field voltage steps.
        R_OF = np.array([loop_fit_function(vdc_vec[of_inds], coef) for coef in coef_OF_mat[pix_batch]])
        R_IF = np.array([loop_fit_function(vdc_vec[if_inds], coef) for coef in coef_IF_mat[pix_batch]])
        # Interleave IF/OF responses per voltage step into one row per pixel.
        R_mat = np.stack([R_IF, R_OF], axis=2).reshape(-1, self.n_sho_bins)
        del R_OF, R_IF
        amp = np.abs(R_mat)
        # Resonance frequency and Q come from out-of-field coefficients 9/10.
        resp = coef_OF_mat[pix_batch, 9, None] * np.ones_like(R_mat)
        q_val = coef_OF_mat[pix_batch, 10, None] * np.ones_like(R_mat)
        phase = np.sign(R_mat) * np.pi / 2
        self.h5_sho_fit[pix_batch, :] = realToCompound(np.hstack([amp,
                                                                  resp,
                                                                  q_val,
                                                                  phase,
                                                                  np.ones_like(R_mat)]),
                                                       sho32)
        # The guess is the fit with multiplicative noise on each parameter.
        self.h5_sho_guess[pix_batch, :] = realToCompound(np.hstack([amp * get_noise_vec(self.n_sho_bins, amp_noise),
                                                                    resp * get_noise_vec(self.n_sho_bins, resp_noise),
                                                                    q_val * get_noise_vec(self.n_sho_bins, q_noise),
                                                                    phase * get_noise_vec(self.n_sho_bins, phase_noise),
                                                                    np.ones_like(R_mat)]),
                                                         sho32)
    self.h5_file.flush()
    return
def _calc_raw(self):
    """
    Synthesise the complex raw BE spectra from the stored SHO fits,
    processing pixels in batches sized to fit within ``self.max_ram``.
    """
    # Memory per pixel: one row of SHO fits plus one row of raw spectra.
    mem_per_pix = self.n_sho_bins * self.h5_sho_fit.dtype.itemsize + self.n_spec_bins * self.h5_raw.dtype.itemsize
    batch_size = int(self.max_ram / mem_per_pix)
    # Frequency axis: the first n_bins spectroscopic frequency values.
    w_vec = self.h5_spec_vals[get_attr(self.h5_spec_vals, 'Frequency')].squeeze()[:self.n_bins]
    for pix_batch in gen_batches(self.n_pixels, batch_size):
        sho_chunk = self.h5_sho_fit[pix_batch, :].flatten()
        raw_data = np.zeros([sho_chunk.shape[0], self.n_bins], dtype=np.complex64)
        for iparm, sho_parms in enumerate(sho_chunk):
            raw_data[iparm, :] = SHOfunc(sho_parms, w_vec)
        self.h5_raw[pix_batch, :] = raw_data.reshape([-1, self.n_spec_bins])
    # NOTE(review): flush placement reconstructed from the flattened
    # source; assumed to follow the loop as in _calc_sho -- confirm.
    self.h5_file.flush()
return | mit |
zhoulingjun/zipline | tests/modelling/test_modelling_algo.py | 9 | 7105 | """
Tests for Algorithms running the full FFC stack.
"""
from unittest import TestCase
from os.path import (
dirname,
join,
realpath,
)
from numpy import (
array,
full_like,
nan,
)
from numpy.testing import assert_almost_equal
from pandas import (
concat,
DataFrame,
DatetimeIndex,
Panel,
read_csv,
Series,
Timestamp,
)
from six import iteritems
from testfixtures import TempDirectory
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
# add_filter,
add_factor,
get_datetime,
)
from zipline.assets import AssetFinder
# from zipline.data.equities import USEquityPricing
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarReader,
DailyBarWriterFromCSVs,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
USEquityPricingLoader,
)
# from zipline.modelling.factor import CustomFactor
from zipline.modelling.factor.technical import VWAP
from zipline.utils.test_utils import (
make_simple_asset_info,
str_to_seconds,
)
from zipline.utils.tradingcalendar import trading_days
TEST_RESOURCE_PATH = join(
dirname(dirname(realpath(__file__))), # zipline_repo/tests
'resources',
'modelling_inputs',
)
def rolling_vwap(df, length):
    """Simple rolling vwap implementation for testing.

    Returns a Series aligned with ``df.index``; the first ``length - 1``
    entries are NaN because no full window exists there.
    """
    closes = df['close'].values
    volumes = df['volume'].values
    weighted = closes * volumes
    out = full_like(closes, nan)
    for end in range(length, len(closes) + 1):
        start = end - length
        # Volume-weighted average price over the trailing window.
        out[end - 1] = weighted[start:end].sum() / volumes[start:end].sum()
    return Series(out, index=df.index)
class FFCAlgorithmTestCase(TestCase):
    @classmethod
    def setUpClass(cls):
        """Create the shared asset metadata, bcolz bar store and adjustment
        database used by every test in this case."""
        cls.AAPL = 1
        cls.MSFT = 2
        cls.BRK_A = 3
        cls.assets = [cls.AAPL, cls.MSFT, cls.BRK_A]
        asset_info = make_simple_asset_info(
            cls.assets,
            Timestamp('2014'),
            Timestamp('2015'),
            ['AAPL', 'MSFT', 'BRK_A'],
        )
        cls.asset_finder = AssetFinder(asset_info)
        cls.tempdir = tempdir = TempDirectory()
        tempdir.create()
        try:
            cls.raw_data, cls.bar_reader = cls.create_bar_reader(tempdir)
            cls.adj_reader = cls.create_adjustment_reader(tempdir)
            cls.ffc_loader = USEquityPricingLoader(
                cls.bar_reader, cls.adj_reader
            )
        except:
            # tearDownClass never runs when setup fails, so clean up here.
            cls.tempdir.cleanup()
            raise
        cls.dates = cls.raw_data[cls.AAPL].index.tz_localize('UTC')
@classmethod
def create_bar_reader(cls, tempdir):
    """Load the CSV fixtures, write them into a bcolz daily-bar store
    under ``tempdir`` and return (raw frames keyed by sid, reader)."""
    csv_paths = {
        cls.AAPL: join(TEST_RESOURCE_PATH, 'AAPL.csv'),
        cls.MSFT: join(TEST_RESOURCE_PATH, 'MSFT.csv'),
        cls.BRK_A: join(TEST_RESOURCE_PATH, 'BRK-A.csv'),
    }
    raw_data = {
        sid: read_csv(path, parse_dates=['day']).set_index('day')
        for sid, path in iteritems(csv_paths)
    }
    # Add 'price' column as an alias because all kinds of stuff in zipline
    # depends on it being present. :/
    for frame in raw_data.values():
        frame['price'] = frame['close']
    writer = DailyBarWriterFromCSVs(csv_paths)
    table = writer.write(tempdir.getpath('testdata.bcolz'), trading_days, cls.assets)
    return raw_data, BcolzDailyBarReader(table)
@classmethod
def create_adjustment_reader(cls, tempdir):
    """Write a single AAPL 1:7 split (plus empty mergers/dividends) to a
    SQLite adjustments database and return a reader over it."""
    dbpath = tempdir.getpath('adjustments.sqlite')
    writer = SQLiteAdjustmentWriter(dbpath)
    splits = DataFrame.from_records([
        {
            'effective_date': str_to_seconds('2014-06-09'),
            'ratio': (1 / 7.0),
            'sid': cls.AAPL,
        }
    ])
    # Hackery to make the dtypes correct on an empty frame.
    empty = DataFrame(
        {
            'effective_date': array([], dtype=int),
            'ratio': array([], dtype=float),
            'sid': array([], dtype=int),
        },
        index=DatetimeIndex([], tz='UTC'),
        columns=['effective_date', 'ratio', 'sid'],
    )
    mergers = dividends = empty
    writer.write(splits, mergers, dividends)
    return SQLiteAdjustmentReader(dbpath)
@classmethod
def tearDownClass(cls):
    """Remove the temporary directory created in setUpClass."""
    cls.tempdir.cleanup()
def make_source(self):
    """Return the raw frames as a UTC-localised Panel data source."""
    return Panel(self.raw_data).tz_localize('UTC', axis=1)
def test_handle_adjustment(self):
    """Run a full algorithm and check the VWAP factor values -- including
    the effect of the AAPL 1:7 split adjustment -- against a pandas
    reference implementation, in both handle_data and
    before_trading_start."""
    AAPL, MSFT, BRK_A = assets = self.AAPL, self.MSFT, self.BRK_A
    raw_data = self.raw_data
    adjusted_data = {k: v.copy() for k, v in iteritems(raw_data)}
    AAPL_split_date = Timestamp("2014-06-09", tz='UTC')
    split_loc = raw_data[AAPL].index.get_loc(AAPL_split_date)
    # Our view of AAPL's history changes after the split.
    ohlc = ['open', 'high', 'low', 'close']
    adjusted_data[AAPL].ix[:split_loc, ohlc] /= 7.0
    adjusted_data[AAPL].ix[:split_loc, ['volume']] *= 7.0
    window_lengths = [1, 2, 5, 10]
    # length -> asset -> expected vwap
    vwaps = {length: {} for length in window_lengths}
    vwap_keys = {}
    for length in window_lengths:
        vwap_keys[length] = "vwap_%d" % length
        for asset in AAPL, MSFT, BRK_A:
            raw = rolling_vwap(raw_data[asset], length)
            adj = rolling_vwap(adjusted_data[asset], length)
            # Expected series: unadjusted before the split, adjusted after.
            vwaps[length][asset] = concat(
                [
                    raw[:split_loc],
                    adj[split_loc:]
                ]
            )

    def initialize(context):
        # Register one VWAP factor per window length under a stable name.
        context.vwaps = []
        for length, key in iteritems(vwap_keys):
            context.vwaps.append(VWAP(window_length=length))
            add_factor(context.vwaps[-1], name=key)

    def handle_data(context, data):
        # Compare every computed factor value to the pandas expectation.
        today = get_datetime()
        factors = data.factors
        for length, key in iteritems(vwap_keys):
            for asset in assets:
                computed = factors.loc[asset, key]
                expected = vwaps[length][asset].loc[today]
                # Only having two places of precision here is a bit
                # unfortunate.
                assert_almost_equal(computed, expected, decimal=2)

    # Do the same checks in before_trading_start
    before_trading_start = handle_data

    algo = TradingAlgorithm(
        initialize=initialize,
        handle_data=handle_data,
        before_trading_start=before_trading_start,
        data_frequency='daily',
        ffc_loader=self.ffc_loader,
        asset_finder=self.asset_finder,
        start=self.dates[max(window_lengths)],
        end=self.dates[-1],
    )

    algo.run(
        source=self.make_source(),
        # Yes, I really do want to use the start and end dates I passed to
        # TradingAlgorithm.
        overwrite_sim_params=False,
    )
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
    """L1-penalised linear models should select a strict subset of the
    features, and a subsequent L2 fit on that subset should still score
    reasonably on iris."""
    classifiers = (LogisticRegression(C=0.1),
                   LinearSVC(C=0.01, dual=False),
                   SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
                                 random_state=0))
    for clf in classifiers:
        for thresh in (None, ".09*mean", "1e-5 * median"):
            for func in (np.array, sp.csr_matrix):
                data = func(iris.data)
                clf.set_params(penalty="l1")
                clf.fit(data, iris.target)
                reduced = clf.transform(data, thresh)
                if isinstance(clf, SGDClassifier):
                    assert_true(reduced.shape[1] <= data.shape[1])
                else:
                    assert_less(reduced.shape[1], data.shape[1])
                # Refit with l2 on the selected features and sanity-check
                # the accuracy.
                clf.set_params(penalty="l2")
                clf.fit(reduced, iris.target)
                pred = clf.predict(reduced)
                assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
    """transform must reject threshold strings that cannot be evaluated."""
    clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
    clf.fit(iris.data, iris.target)
    for bad_thresh in ("gobbledigook", ".5 * gobbledigook"):
        assert_raises(ValueError, clf.transform, iris.data, bad_thresh)
| bsd-3-clause |
zaxtax/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)

# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Mathieu Blondel <mathieu@mblondel.org>
#          Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler

digits = datasets.load_digits()

X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)

# classify small against large digits
# NOTE(review): np.int is removed in NumPy >= 1.24; fine for the vintage
# scikit-learn this example targets, but would need `int` on modern stacks.
y = (y > 4).astype(np.int)

# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
    # turn down tolerance for short training time
    clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
    clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
    clf_l1_LR.fit(X, y)
    clf_l2_LR.fit(X, y)

    coef_l1_LR = clf_l1_LR.coef_.ravel()
    coef_l2_LR = clf_l2_LR.coef_.ravel()

    # coef_l1_LR contains zeros due to the
    # L1 sparsity inducing norm
    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100

    print("C=%.2f" % C)
    print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
    print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
    print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
    print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))

    # One row of subplots per value of C: L1 model left, L2 model right.
    l1_plot = plt.subplot(3, 2, 2 * i + 1)
    l2_plot = plt.subplot(3, 2, 2 * (i + 1))
    if i == 0:
        l1_plot.set_title("L1 penalty")
        l2_plot.set_title("L2 penalty")

    l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    plt.text(-8, 3, "C = %.2f" % C)
    l1_plot.set_xticks(())
    l1_plot.set_yticks(())
    l2_plot.set_xticks(())
    l2_plot.set_yticks(())

plt.show()
| bsd-3-clause |
datacommonsorg/tools | stat_var_renaming/stat_var_renaming.py | 1 | 27748 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
This file performs the renaming of all statistical variables present in the Data
Commons Knowledge Graph. Human-intelligible StatVar names are useful for end
users as they may be pulled from both the Python API or Google Sheets API by
name.
1) Base Schema: The basic schema for any human readable statistical variable is
mprop_popType_v1_v2_v3... For example, Count_Person_BornInStateOfResidence
2) Optional inclusion of StatType: statType is included when the StatType is not
measuredValue or Unknown. For example, instead of Age_Person, we output
MedianAge_Person
3) Certain data sets are blacklisted: for example, all bio data sets and a few
miscellaneous ones are excluded. This blacklist was created by tjann.
4) Dependent variables are removed. dependent variables are constraints that are
inherently included, but not really necessary. For example, a person earning
an income of 10k to 15k USD may only be measured by the US Census if they are
older than 15 and have an income. For example,
"Count_Person_Years15Onwards_IncomeOfUSDollar10000To14999_WithIncome" becomes
"Count_Person_IncomeOfUSDollar10000To14999" after accounting for the
unnecessary variables. These dependent variables are defined in the textproto
stat vars config file.
5) Boolean constraints are replaced by their populations: for example, p1 =
   isInternetUser and v1=True/False becomes v1=isInternetUser/notInternetUser.
6) Measurement properties are stripped from constraints: for example,
   p1 = employment and v1 = USC_Unemployed becomes v1=Unemployed.
7) NAICS industry codes are replaced by industry names: we have a combination of
   NAICS specific and overview codes. In both cases, we replace the industry
   code (e.g. NAICS/23) with the industry. An example statistical variable is
   WagesAnnual_Establishment_NAICSConstruction.
8) Cause of death properties are renamed: e.g., p1 = causeOfDeath and
   v1="ICD10/E00-E89" becomes v1="EndocrineNutritionalMetabolicDiseases". These
   names are generated directly from the ICD10 names stored in BigQuery.
   Exceptionally long or confusing names were manually renamed.
9) DEA drug names are renamed: e.g., p1="drugPrescribed" and v1="drug/dea/9250"
   becomes v1="Methadone". These are manually renamed. Some drug names are
   intentionally left as their codes. For example, dea/7444 corresponds to
   "4-Hydroxy-3-methoxy-methamphetamine", which does not have a common name.
   Both the codes and drug names will be valid constraints.
10) Certain variables have text prepended or appended to their constraints to
    improve readability: for example p1 = childSchoolEnrollment and
    v1=EnrolledInPublicSchool is changed to v1="ChildEnrolledInPublicSchool".
    These mappings are applied to ~15 variables.
11) Miscellaneous changes: a) MeasuredProp InsuredUnemploymentRate changed to
    Rate_InsuredUnemployment to match the existing formula.
"""
from absl import app
from google.protobuf import text_format
from google.cloud import bigquery
from google.colab import auth
import re
import os
import pandas as pd
import numpy as np
import stat_var_renaming_constants as svrc
import stat_var_renaming_functions as svrf
# Constants
# Max total number of constraints of a variable to include (Dependent
# variables excluded).
_MAX_CONSTRAINTS = 3
_MAX_CONSTRAINTS_WITH_DPV = 6
# If true, no new statistical variables will be introduced.
ONLY_REGENERATE_OUTPUT = False
def authenticate_bq_client():
    """ Authenticates and returns a BigQuery client connection. By default this
    code assumes it will be run in Google Colab which handles BigQuery
    authentication. To run this code elsewhere this method needs to be updated
    to properly authenticate a BigQuery client.

    Returns:
        An authenticated SQL client with a function called query that given a
        SQL query returns a response object that can be converted into a
        dataframe.
    """
    # Users should update the authentication method if not using Google CoLab.
    auth.authenticate_user()
    # Create and return client.
    # NOTE(review): the project is hard-coded to the internal
    # datcom-store-dev project; change this when running against a
    # different BigQuery project.
    project_id = "google.com:datcom-store-dev"
    return bigquery.Client(project=project_id)
def download_stat_vars(client):
    """ Queries unique list of statistical variables from BigQuery.

    Creates a join across statistical populations and observations to generate
    distinct list of statistical variables. Certain datasets like bio are
    excluded. The original constraint columns are preserved in new orig_*
    columns so the output MCF can be built after renaming.

    Args:
        client: An authenticated BigQuery SQL client.
    Returns:
        stat_vars: Pandas dataframe containing unique information for all
        potential stat vars in the database.
    Raises:
        Query failure: If improper authentication is given.
    """
    # Dynamically create query for constraints in SQL query. Each pN/vN pair
    # becomes a projected column in the SELECT clause.
    constraint_string = ""
    pop_string = ""
    for num in range(1, _MAX_CONSTRAINTS + 1):
        constraint_string += f"SP.v{num} as v{num},\n"
        pop_string += f"SP.p{num} as p{num},\n"
    # Dynamically create list of blacklisted provenances, as a quoted,
    # comma-separated string for the SQL IN clause.
    blacklist = [
        '"%s"' % prov_id
        for prov_id in frozenset().union(*[svrc._MISC_DATASETS,
                                           svrc._BIO_DATASETS])
    ]
    blacklist_str = ', '.join(blacklist) if blacklist else '""'
    # Input information into SQL template and perform the query.
    query_for_all_stat_vars = (svrc.QUERY_FOR_ALL_STAT_VARS.replace(
        "{CONSTRAINTS}",
        constraint_string).replace("{POPULATIONS}", pop_string).replace(
            "{comma_sep_prov_blacklist}",
            blacklist_str).replace("{MAX_CONTRAINTS}", str(_MAX_CONSTRAINTS)))
    stat_vars = client.query(query_for_all_stat_vars).to_dataframe()
    # Make a pristine copy of constraint names for output MCF.
    # NOTE(review): this copies p1..p6/v1..v6 although only p1..p3 are built
    # above — the extra columns must come from the SQL template; confirm.
    for c in range(1, _MAX_CONSTRAINTS_WITH_DPV + 1):
        stat_vars[f"orig_p{c}"] = stat_vars[f"p{c}"]
        stat_vars[f"orig_v{c}"] = stat_vars[f"v{c}"]
    return stat_vars
### Variable renaming scripts
def addPropertyRemapping(remapper, prop, function):
    """ Helper function to register a renaming function for a property.

    Args:
        remapper: Dictionary with mapping from properties to renaming functions.
        prop: Property to perform the remapping on.
        function: Renaming function that takes three arguments
        (prop, constraint, popType) and returns the new name for the constraint.
    """
    # setdefault creates the list on first registration, then appends.
    remapper.setdefault(prop, []).append(function)
def remap_constraint_from_prop(row, prop_remap):
    """ Applies every registered property remapping to a row's constraints.

    Args:
        row: Pandas row (or dict-like) to apply function to.
        prop_remap: Dictionary of renaming functions for each property.
    Returns:
        The row with each constraint value rewritten by the renaming
        functions registered for its property, in registration order.
    """
    for idx in range(1, 1 + row['numConstraints']):
        renamers = prop_remap.get(row[f"p{idx}"])
        if renamers is None:
            continue
        # Several functions may be chained on a single property.
        for renamer in renamers:
            row[f"v{idx}"] = renamer(row[f"p{idx}"], row[f"v{idx}"],
                                     row['populationType'])
    return row
def generate_dependent_constraint_list():
    """ Generates a list of dependent variables.

    Using an OS system call, a protobuf definition is compiled. A definition
    file is then read in and used to generate a pandas dataframe of dependent
    variable definitions.

    Returns:
        Pandas dataframe with one row per observation spec that has at least
        one dependent variable, with orig_p{i}/orig_v{i}/p{i}_is_dpv columns
        matching the stat_vars dataframe for merging.
    """
    # Generate population observation spec. Creates a new python file.
    # NOTE(review): shells out to protoc; requires protoc on PATH and write
    # access to the current working directory.
    os.system("protoc -I=. --python_out=. pop_obs_spec_common.proto")
    # Load newly created protobuf class definition. Imported here (not at
    # module top) because the module only exists after protoc has run.
    import pop_obs_spec_common_pb2
    obs_spec_list = pop_obs_spec_common_pb2.PopObsSpecList()
    # Load in PV list from spec proto. Note that covid cases was temporarily
    # added as a DPV for display, but shouldn't truly be one.
    with open("pop_obs_spec_nocovid.textproto") as f:
        counts = f.read()
        text_format.Parse(counts, obs_spec_list)
    # Create a dataframe that matches the greater stat_vars from DB for merging.
    dpvs = pd.DataFrame()
    for spec in obs_spec_list.spec:
        # Get universal props.
        new_row = {}
        new_row['populationType'] = spec.pop_type
        new_row['measuredProp'] = spec.mprop
        new_row['statType'] = spec.stat_type
        # Get independent variables (no fixed value, not dependent).
        variables = []
        for name in spec.cprop:
            variables.append((name, "", False))
        # Get dependent variables which depend on the value of the
        # constraint.
        for name in spec.dpv:
            variables.append((name.prop, name.val, True))
        # Variables are sorted alphabetically so column numbering matches
        # the ordering used for the stat_vars dataframe.
        variables = sorted(variables)
        # Add as a row to entire dataframe.
        for index, variable in enumerate(variables):
            var_name, constraint, is_dpv_var = variable
            new_row[f"orig_p{index + 1}"] = var_name
            new_row[f"p{index + 1}_is_dpv"] = is_dpv_var
            if is_dpv_var:
                new_row[f"orig_v{index + 1}"] = constraint
        # NOTE(review): DataFrame.append is deprecated in newer pandas;
        # collect rows in a list and build the frame once when upgrading.
        dpvs = dpvs.append(new_row, ignore_index=True)
    # Only return statistical variables with at least one dependent variable.
    query_string = ""
    for c in range(1, _MAX_CONSTRAINTS + 1):
        query_string += f"p{c}_is_dpv == 1 or "
    return dpvs.query(f"{query_string} False")
def remove_dependent_constraints(stat_vars):
    """ Removes all dependent constraints from list of stat vars.

    Args:
        stat_vars: Pandas dataframe holding all stat vars.
    Returns:
        stat_vars with all dependent constraints imputed in place for all
        rows, holes left-filled, and the merge-suffixed orig_v columns
        renamed back.
    """
    # Generate list of dependent constraints from protobuf config.
    dpvs = generate_dependent_constraint_list()
    # Merge across common columns shared with dependent variable list.
    # BUG FIX: this previously evaluated "orig_p" + x with an int x (a
    # TypeError) and started the range at 0, although the orig_p* columns
    # are numbered from 1.
    common_cols = (['measuredProp', 'populationType', 'statType'] +
                   [f"orig_p{c}"
                    for c in range(1, _MAX_CONSTRAINTS_WITH_DPV + 1)])
    stat_vars = pd.merge(stat_vars, dpvs, on=common_cols, how='left')
    # Replace any dependent variables and their value with nan.
    for c in range(1, _MAX_CONSTRAINTS + 1):
        dpv_match = stat_vars.query(f"p{c}_is_dpv == 1")
        # Ensure that constraint {c} exists in both tables (merge adds the
        # _x/_y suffixes only where both sides carry the column).
        if f"orig_v{c}_x" in dpv_match and f"orig_v{c}_y" in dpv_match:
            # Only remove dependent constraints where the value matches.
            dpv_match = dpv_match.query(f"orig_v{c}_x == orig_v{c}_y")
            stat_vars.loc[dpv_match.index, f"p{c}"] = np.nan
            stat_vars.loc[dpv_match.index, f"v{c}"] = np.nan
            stat_vars.loc[dpv_match.index, "numConstraints"] = (
                stat_vars.loc[dpv_match.index,
                              "numConstraints"].apply(lambda x: x - 1))
    # Left shift all imputed columns to remove holes.
    stat_vars = stat_vars.apply(left_fill_columns, axis=1)
    # Rename constraints from merge.
    for c in range(1, _MAX_CONSTRAINTS + 1):
        stat_vars = stat_vars.rename({f"orig_v{c}_x": f"orig_v{c}"},
                                     axis=1)
    return stat_vars
def left_fill_columns(row):
    """ Removes holes in constraints after imputing dependent constraints.

    Shifts the surviving (p, v) pairs left so that p1..pN are contiguous.
    A single forward-moving search pointer is shared across the outer loop,
    so each candidate column is examined at most once per row.

    Args:
        row: Row of dataframe with or without holes present between constraints.
    Returns:
        Row of dataframe without holes present between constraints.
    """
    row_constraints = min(row['numConstraints'], _MAX_CONSTRAINTS_WITH_DPV)
    # Keep track of the search location to look to pull columns from.
    search_location = 2
    for base_col in range(1, row_constraints + 1):
        # If current population is null then search for the next non-null column.
        if pd.isna(row[f"p{base_col}"]):
            # The donor column must lie strictly to the right of the hole.
            search_location = max(search_location, base_col + 1)
            # NOTE(review): the scan stops at _MAX_CONSTRAINTS (3) even
            # though up to _MAX_CONSTRAINTS_WITH_DPV (6) columns may exist —
            # confirm this bound is intended.
            while search_location <= _MAX_CONSTRAINTS:
                # Swap first non-null with the current null location.
                if not pd.isna(row[f"p{search_location}"]):
                    row[f"p{base_col}"] = row[f"p{search_location}"]
                    row[f"v{base_col}"] = row[f"v{search_location}"]
                    row[f"p{search_location}"] = np.nan
                    row[f"v{search_location}"] = np.nan
                    search_location += 1
                    break
                search_location += 1
    return row
def row_to_human_readable(row):
    """ Generates a human readable name for a dataframe row.

    Args:
        row: A preprocessed dataframe row with dependent variables removed
        and all constraints, values, and populations renamed.
    Returns:
        Human readable dcid for the row in the format:
        <?statType>_<mProp>_<popType>_<v1>_<v2>_..._<mQual>_<mDenom>
    """
    # Add measured property and population type. e.g. Count_InsuranceClaim.
    human_string = (f"{svrc.capitalizeFirst(row['measuredProp'])}" +
                    f"_{svrc.capitalizeFirst(row['populationType'])}")
    # StatType (e.g. median) is prepended if the stat type is not measuredValue.
    stat_type = row['statType']
    if stat_type != "measuredValue" and stat_type != "Unknown":
        # "medianValue" -> "Median", etc.
        human_string = (svrc.capitalizeFirst(stat_type.replace("Value", ""))
                        + "_" + human_string)
    # Append renamed constraint fields (property names themselves are not
    # part of the name, only the renamed values).
    row_constraints = min(row['numConstraints'], _MAX_CONSTRAINTS_WITH_DPV)
    for num in range(1, row_constraints + 1):
        if not pd.isna(row[f"v{num}"]):
            human_string += "_" + row[f"v{num}"]
    # Append measurement qualifier if it exists.
    measurement_qualifier = row['measurementQualifier']
    if not pd.isna(measurement_qualifier):
        human_string = f"{human_string}_{measurement_qualifier}"
    # Append measurement denominator if it exists.
    measurement_denominator = row['measurementDenominator']
    if not pd.isna(measurement_denominator):
        # Special Case: PerCapita which is directly appended.
        if measurement_denominator == "PerCapita":
            human_string = f"{human_string}_PerCapita"
        # MDoms that are properties (all lower case) are added as Per(Mdom).
        elif measurement_denominator[0].islower():
            human_string = (
                f"{human_string}_Per{svrc.capitalizeFirst(measurement_denominator)}"
            )
        # Everything else is AsAFractionOf.
        else:
            human_string = f"{human_string}_AsAFractionOf{measurement_denominator}"
    return human_string
def ensure_no_overlapping_stat_vars(stat_vars):
    """ Ensures no collisions between statistical variables.

    This function ensures that there are not two distinct statistical variables
    (e.g. different constraint, values, etc) that have the same name. There may
    be two rows in the dataframe with the same name that have differences that
    do not result in different statistical variables. E.g. same properties
    except for measurement method.

    Args:
        stat_vars: Dataframe of stat_vars with human readable name generated.
    Returns:
        stat_vars dataframe with no duplicate rows by name.
    Raises:
        Assertion error if two distinct statistical variables evaluate to the
        same name.
    """
    # Consider the subset of statistical variables that have at least one overlap.
    overlapping_stat_var_names = (stat_vars[stat_vars.duplicated(
        'HumanReadableName', keep=False)])
    # Check to make sure that none of these are actually different stat vars.
    # TODO(tjann): update this list--measurementDenominator and scalingFactor
    # are no longer SV props.
    properties_of_statistical_variables = [
        'populationType', 'measuredProp', 'measurementQualifier',
        'measurementDenominator', 'statType', 'scalingFactor'
    ]
    for c in range(1, _MAX_CONSTRAINTS + 1):
        properties_of_statistical_variables.append(f"orig_p{c}")
        properties_of_statistical_variables.append(f"orig_v{c}")
    # See if there are any duplicates outside of this set.
    bad_overlaps = overlapping_stat_var_names[
        overlapping_stat_var_names.duplicated(
            properties_of_statistical_variables, keep=False) == False]
    # Print out error if there are real collisions.
    bad_overlaps = bad_overlaps.sort_values(['v1', 'v2', 'v3'])
    bad_overlaps_str = ""
    for index, row in bad_overlaps.iterrows():
        bad_overlaps_str += row_to_stat_var_mcf(row) + ","
    # BUG FIX: the assertion message previously concatenated a string with
    # the DataFrame itself (a TypeError that masked the real failure);
    # bad_overlaps_str was built for this purpose but never used.
    assert bad_overlaps.shape[0] == 0, ("Duplicate StatVars!: " +
                                        bad_overlaps_str)
    # No issues so remove these duplicate names.
    return stat_vars.drop_duplicates("HumanReadableName")
def build_original_constraints(row):
    """ Builds list of original properties and constraints in MCF Format.

    Helper method for output MCF generation from a statistical variable row.

    Args:
        row: Row of pandas dataframe with renamed constraint and value fields.
    Returns:
        Multiline string of constraints and values in the format of
        p1:dcs:v1
        p2:dcs:v2...
    """
    lines = []
    for idx in range(1, _MAX_CONSTRAINTS + 1):
        # Constraints are contiguous: stop at the first missing property.
        if pd.isna(row[f"orig_p{idx}"]):
            break
        lines.append(f"{row[f'orig_p{idx}']}:dcs:{row[f'orig_v{idx}']}\n")
    return "".join(lines)
def row_to_stat_var_mcf(row):
    """ Creates MCF Statistical Variable node from a dataframe row.

    To integrate these new statistical variables into the Data Commons graph,
    they need to be uploaded as MCF nodes with a new dcid but with the same
    properties. This function creates that MCF as a text string from a row of
    the dataframe.

    Args:
        row: Statistical variable dataframe row with HumanReadableName.
    Returns:
        Multiline string of the new MCF node for that statistical variable.
    """
    # NOTE(review): reads row['orig_populationType'], a column not created in
    # this file's visible code — presumably produced by the SQL template;
    # confirm it exists before running.
    new_stat_var = (svrc.TEMPLATE_STAT_VAR
        .replace("{human_readable_dcid}", row['HumanReadableName'])\
        .replace("{populationType}", row['orig_populationType'])\
        .replace("{statType}", row['statType'])\
        .replace("{measuredProperty}", row['measuredProp'])\
        .replace("{CONSTRAINTS}", build_original_constraints(row)))
    # Add optional fields.
    if not pd.isna(row['measurementQualifier']):
        new_stat_var += f"measurementQualifier: dcs:{row['measurementQualifier']}\n"
    if not pd.isna(row['measurementDenominator']):
        new_stat_var += (
            f"measurementDenominator: dcs:{row['measurementDenominator']}\n")
    return new_stat_var
def remove_new_stat_vars(stat_vars, client):
    """ Removes Statistical Variables that do not already exist.

    Pulls the existing StatVar list from production and makes sure that no new
    statistical variables are added. This function is used in the event that
    you need to refresh a file dependent on this generation, but do not want
    to fully refresh the statistical variable list.

    Args:
        stat_vars: The dataframe of statistical variable with
        HumanReadableNames generated.
        client: An authenticated BigQuery client.
    Returns:
        The subset of stat_vars whose HumanReadableName already exists in the
        production instance table.
    """
    # NOTE(review): the dataset name pins a specific 2020-07-12 KG snapshot;
    # update it when regenerating against a newer knowledge graph.
    stat_vars_query = """
    SELECT distinct id FROM
    `google.com:datcom-store-dev.dc_kg_2020_07_12_01_01_43.Instance`
    WHERE type = "StatisticalVariable"
    """
    existing_stat_vars = client.query(stat_vars_query).to_dataframe()['id']
    return stat_vars[stat_vars['HumanReadableName'].isin(existing_stat_vars)]
def create_human_readable_names(stat_vars, client):
    """ Handles generating human readable statistical variables from BQ
    response.

    This function handles renaming constraints, populations, and values;
    removing dependent constraints; and adding a column for the
    HumanReadableName.

    Args:
        stat_vars: Raw dataframe of statistical variables as returned by the
        download_stat_vars function.
        client: Authenticated BigQuery client.
    Returns:
        stat_vars dataframe with new HumanReadableName column.
    """
    # Build constraint remappings (each helper registers renaming functions
    # keyed by property into prop_remap).
    prop_remap = {}
    svrf.rename_naics_codes(prop_remap, client)
    svrf.rename_dea_drugs(prop_remap, client)
    svrf.rename_isic_codes(prop_remap, client)
    svrf.cause_of_death_remap(prop_remap, client)
    svrf.prefix_strip(prop_remap)
    svrf.rename_boolean_variables(prop_remap, stat_vars)
    svrf.remap_numerical_quantities(prop_remap)
    svrf.prepend_and_append_text(prop_remap)
    svrf.misc_mappings(prop_remap)
    # Drop erroneous constraints.
    for c in range(1, _MAX_CONSTRAINTS + 1):
        stat_vars = stat_vars.query(f"v{c} != 'NAICSUnknown'")
    # Apply constraint renamings.
    stat_vars = stat_vars.apply(
        lambda row: remap_constraint_from_prop(row, prop_remap), axis=1)
    # Remove dependent constraints.
    stat_vars = remove_dependent_constraints(stat_vars)
    # BUG FIX: left_fill_columns is a per-row function; calling it on the
    # whole dataframe cannot work (it does min(row['numConstraints'], 6) on
    # a Series). Apply it row-wise instead.
    stat_vars = stat_vars.apply(left_fill_columns, axis=1)
    # Generate human readable names.
    # BUG FIX: DataFrame.apply defaults to axis=0 (per column); the name
    # builder needs one row at a time, so axis=1 is required.
    stat_vars['HumanReadableName'] = stat_vars.apply(row_to_human_readable,
                                                     axis=1)
    # Manually rename special case.
    stat_vars.loc[stat_vars['HumanReadableName'] == 'Count_Death_Medicare',
                  'HumanReadableName'] = "Count_Death_MedicareEnrollee"
    return stat_vars
def output_stat_var_documentation(stat_vars):
    """ Outputs markdown file of Statistical Variable list for documentation.

    Outputs Statistical Variable list as a dropdown menu organized by verticals
    and population type. Some verticals have no sub-population type grouping.
    These groupings are defined in STAT_VAR_POPULATION_GROUPINGS. The markdown
    file is output to statistical_variables.md.

    Args:
        stat_vars: The dataframe of statistical variable with
        HumanReadableNames generated.
    """
    # Dynamically generate list of disasters (any populationType containing
    # the substring "Event").
    natural_disasters = []
    for popType in stat_vars['populationType'].unique():
        if "Event" in popType:
            natural_disasters.append(popType)
    # (False, True) -> Only group disaster StatVar by populationType
    # if there are more than 1 statistical variables for that group.
    # NOTE(review): SVPopGroup is invoked with one 4-tuple argument; if it is
    # a namedtuple with four fields (vertical, popTypes, subgroupAllPops,
    # subgroupIfMoreThanOne — all read below), this should unpack the tuple:
    # svrc.SVPopGroup(*(...)). Confirm against stat_var_renaming_constants.
    svrc.STAT_VAR_POPULATION_GROUPINGS.append(
        svrc.SVPopGroup(("Disasters", natural_disasters, False, True)))
    # Assert that all population types belong to a category.
    used = []
    for _, group, _, _ in svrc.STAT_VAR_POPULATION_GROUPINGS:
        used.extend(group)
    for popType in stat_vars['populationType'].unique():
        assert popType in used, (f"{popType} not sorted!")
    # Output markdown file grouped by population type.
    with open('statistical_variables.md', 'w', newline='') as f_out:
        # Output heading.
        f_out.write(svrc.DOCUMENTATION_BASE_MARKDOWN)
        # Output each vertical group. Some verticals are always
        # nested by population.
        # If nested_grouping is True, and, if nested_grouping is false, then
        # stat var groups larger than 1 are grouped
        # if condense_big_groups is true.
        for sv_grouping in svrc.STAT_VAR_POPULATION_GROUPINGS:
            f_out.write(
                svrc.DOCUMENTATION_HEADER_START.replace("{HEADER}",
                                                        sv_grouping.vertical))
            for population_type in sv_grouping.popTypes:
                # GroupBy popType if nested or condensed and more one stat var.
                stat_vars_for_pop_type = stat_vars.query(
                    f"populationType == '{population_type}'"
                )['HumanReadableName']
                group_pop_type = (sv_grouping.subgroupAllPops
                                  or (sv_grouping.subgroupIfMoreThanOne
                                      and len(stat_vars_for_pop_type) > 1))
                if group_pop_type:
                    f_out.write(
                        svrc.DOCUMENTATION_DROPDOWN_START.replace(
                            "{POPULATION_TYPE}", population_type))
                # Output individual statistical variable as a link to DC graph.
                for stat_var in stat_vars_for_pop_type:
                    f_out.write(
                        f"  <li><a href=\"https://browser.datacommons.org/kg?dcid={stat_var}\">{stat_var}</a></li>\n"
                    )
                # End popType group html.
                if group_pop_type:
                    f_out.write("  </ul>\n")
                    f_out.write("</details>\n")
            f_out.write("</details>\n")
def main(argv):
    """ Executes the downloading, preprocessing, renaming, and output of MCF
    stat var renaming.

    Outputs:
        renamed_stat_vars.mcf: MCF file to manifest into graph with the newly
        created names for the statistical variables.
        statistical_variables.md: A markdown file for use in documentation
        that is grouped by population type.
        statistical_variables.csv: A CSV, for debugging, that contains
        information about each newly created statistical variable.
    """
    # Authenticate a BigQuery client with production access.
    client = authenticate_bq_client()
    # Query for stat vars by combining population and observation tables.
    stat_vars = download_stat_vars(client)
    # Create human readable names.
    stat_vars = create_human_readable_names(stat_vars, client)
    # Limit to only existing statistical variables if chosen.
    if ONLY_REGENERATE_OUTPUT:
        stat_vars = remove_new_stat_vars(stat_vars, client)
    stat_vars = stat_vars.query("statType != 'Unknown'")
    # Apply filters for special cases as requested by various human reviewers.
    stat_vars = stat_vars.query("numConstraints <= 3")
    stat_vars = stat_vars.query("measuredProp != 'cohortScaleAchievement'")
    stat_vars = stat_vars.query(
        "measuredProp != 'gradeCohortScaleAchievement'")
    stat_vars = stat_vars.query("populationType != 'AcademicAssessmentEvent'")
    # Sort final output for markdown and CSV output.
    # NOTE(review): sorts on p4..p6/v4..v6, which must therefore exist in
    # the queried dataframe — confirm the SQL template produces them.
    stat_vars = stat_vars.sort_values([
        'populationType', 'numConstraints', 'statType', 'measuredProp', 'p1',
        'p2', 'p3', 'p4', 'p5', 'p6', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6',
        'measurementDenominator', 'measurementQualifier'
    ])
    # Check to make sure that there are no statistical variable collisions.
    stat_vars = ensure_no_overlapping_stat_vars(stat_vars)
    # Output statistical variable markdown file.
    output_stat_var_documentation(stat_vars)
    # Output statistical variable MCF.
    with open('renamed_stat_vars.mcf', 'w', newline='') as f_out:
        for _, row in stat_vars.iterrows():
            f_out.write(row_to_stat_var_mcf(row))
    # Output CSV for debugging.
    stat_vars = stat_vars.fillna("None")
    stat_vars[[
        'numConstraints', 'populationType', 'measuredProp', 'p1', 'v1', 'p2',
        'v2', 'p3', 'v3', 'HumanReadableName', 'orig_p1', 'orig_v1', 'orig_p2',
        'orig_v2', 'orig_p3', 'orig_v3', 'orig_p4', 'orig_v4'
    ]].to_csv("statistical_variables.csv", index=False)
# Entry point: delegate to absl.app so command-line flags are parsed
# before main() runs.
if __name__ == '__main__':
    app.run(main)
| apache-2.0 |
haisland0909/Denoising-Dirty-Documents | script/classify.py | 1 | 4675 | '''
Created on 2015/08/28
@author: haisland0909
'''
from sklearn.pipeline import FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation
from sklearn import preprocessing
from sklearn.metrics import mean_absolute_error
import sklearn.linear_model
import sklearn.ensemble
import img_to_pickle as i_p
import features as f
import numpy as np
import pandas as pd
# Registry of candidate models for cross_validation_model. Each entry maps a
# short code to its display name, an estimator instance, and the
# hyper-parameter grid handed to GridSearchCV.
# NOTE: the key "paramteters" (sic) is spelled this way everywhere it is
# read in this module — do not "fix" it in isolation.
clf_dict = {
    'LR': {
        "name": 'L2 Logistic Regression',
        "clf": sklearn.linear_model.LogisticRegression(penalty='l2', dual=False),
        "paramteters": {'C': [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0]}
    },
    'GB2': {
        "name": 'Gradient Boosting New',
        "clf": sklearn.ensemble.GradientBoostingClassifier(random_state=1),
        "paramteters": {
            'learning_rate': [0.005, 0.01, 0.1],
            'n_estimators': [50, 250, 500],
            'subsample': [1.0, 0.8],
            'max_features': [1.0, 0.8],
            'min_samples_split': [2],
            'min_samples_leaf': [1, 2],
            'max_depth': [2, 5, 8]
        }
    }
}
def get_data():
    '''
    Build the full training design matrix and flattened pixel labels.

    :rtype: tuple
    '''
    _, _, _, train_gray_data, _, _, labels = i_p.load_data()
    frame = f.make_data_df(train_gray_data, labels)
    union = FeatureUnion(transformer_list=f.feature_transformer_rule)
    features = union.fit_transform(frame)
    # Each label is a 2-D image; flatten every one and join them into a
    # single per-pixel target vector aligned with the feature rows.
    targets = np.concatenate(frame["label"].apply(lambda img: img.flatten()))

    return (features, targets)
def get_data_Kfold(mode):
    '''
    Return the data needed for K-fold training or the raw test images.

    :param mode: "train" returns (data_df, keys, kf) for 5-fold CV over
                 whole images; "test" returns the test image dict.
    :rtype: tuple or dict
    '''
    if mode == "train":
        _, _, _, train_gray_data, _, _, labels = i_p.load_data()
        data_df = f.make_data_df(train_gray_data, labels)
        data_df = data_df.reset_index()
        data_df.columns = ["pngname", "input", "label"]
        # Folds are made over image names so pixels of one image never
        # appear in both the train and validation splits.
        keys = np.asarray(train_gray_data.keys())
        kf = cross_validation.KFold(n=len(keys), n_folds=5)

        return data_df, keys, kf

    elif mode == "test":
        _, _, _, _, test_gray_data, _, _ = i_p.load_data()

        return test_gray_data

    else:
        # Python 2 print statements — this module targets Python 2.
        print "mode error!"
        print "set \"train\" or \"test\""
        quit()
def set_validdata(df, keys):
    """Build the standardized (X, y) validation matrices for the images
    named in keys."""
    fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
    Std = preprocessing.StandardScaler()
    # Concatenate the rows of every validation image into one dataframe.
    # NOTE(review): growing a dataframe with pd.concat inside a loop is
    # quadratic; collecting the parts in a list and concatenating once
    # would be cheaper.
    for i in xrange(len(keys)):
        if i == 0:
            valid_df = df[(df["pngname"] == keys[i])]
        else:
            valid_df = pd.concat([valid_df, df[(df["pngname"] == keys[i])]])
    valid_df = valid_df.drop("pngname", axis=1).reset_index()
    X = fu.fit_transform(valid_df)
    y = np.concatenate(valid_df["label"].apply(lambda x: x.flatten()))
    # NOTE(review): fit_transform on validation data refits the scaler
    # instead of reusing the training statistics — confirm intent.
    X = Std.fit_transform(X)

    return (X, y)
def set_traindata(df, key):
    """Build the standardized (X, y) training matrices from df.

    NOTE(review): the `key` parameter is unused — callers pass a single png
    name but the entire dataframe is transformed; confirm intent.
    """
    fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
    Std = preprocessing.StandardScaler()
    X = fu.fit_transform(df)
    y = np.concatenate(df["label"].apply(lambda x: x.flatten()))
    X = Std.fit_transform(X)

    return (X, y)
def kfold_validation_model(model_name="LR"):
    """Run 5-fold, image-level cross-validation with an SGD regressor
    trained incrementally via partial_fit.

    NOTE(review): model_name is accepted for symmetry with
    cross_validation_model but is unused here — confirm intent.
    """
    data_df, keys, kf = get_data_Kfold("train")
    """
    SGD Regression model with stochastic gradient descent
    Prnalty : L2
    """
    scores = []
    cnt = 1
    for train_index, valid_index in kf:
        print cnt
        cnt += 1
        # Fresh regressor per fold; partial_fit accumulates one image at a
        # time so the full training set never has to fit in memory at once.
        clf = sklearn.linear_model.SGDRegressor(penalty='l2')
        train_keys = keys[train_index]
        valid_keys = keys[valid_index]
        for i in xrange(len(train_keys)):
            train_X, train_y = set_traindata(data_df, train_keys[i])
            clf.partial_fit(train_X, train_y)
        valid_X, valid_y = set_validdata(data_df, valid_keys)
        # predict_prova = clf.predict(valid_X)
        predict_y = clf.predict(valid_X)
        # Per-fold metric: mean absolute error over pixels (lower is better).
        score = mean_absolute_error(valid_y, predict_y)
        scores.append(score)
    print scores
    print "Score_Average:", np.average(np.asarray(scores))
def cross_validation_model(model_name="LR"):
    """Grid-search the configured classifier and print 5-fold CV accuracy.

    :param model_name: key into clf_dict selecting estimator and grid.
    """
    X, y = get_data()
    clf = GridSearchCV(estimator=clf_dict[model_name]["clf"],
                       param_grid=clf_dict[model_name]["paramteters"],
                       n_jobs=3, scoring="accuracy")
    scores = cross_validation.cross_val_score(clf, X, y, cv=5)
    print scores
def downsampling_data(X, y, ratio=0.5, random_state=1):
    """Randomly subsample a fraction of rows from (X, y) without replacement.

    The global NumPy RNG is reseeded, so a given random_state always selects
    the same rows. X rows and y entries stay aligned.
    """
    np.random.seed(random_state)
    assert X.shape[0] == y.size
    n_rows = X.shape[0]
    n_keep = int(n_rows * ratio)
    keep = np.random.choice(range(0, n_rows), n_keep, replace=False)

    return (X[keep, :], y[keep])
# Script entry point: run the incremental K-fold evaluation by default.
if __name__ == '__main__':
    # cross_validation_model()
    kfold_validation_model()
| apache-2.0 |
h2educ/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#         Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA


###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors

np.random.seed(0)
# 40 training points drawn uniformly from [0, 5), sorted for clean plotting.
X = np.sort(5 * np.random.rand(40, 1), axis=0)
# Dense grid of 500 query points for the prediction curve.
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()

# Add noise to targets: every 5th sample (8 of 40) is shifted by a value
# in (-0.5, 0.5].
y[::5] += 1 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression model
n_neighbors = 5

# One subplot per weighting scheme: uniform vs inverse-distance weights.
for i, weights in enumerate(['uniform', 'distance']):
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    y_ = knn.fit(X, y).predict(T)

    plt.subplot(2, 1, i + 1)
    plt.scatter(X, y, c='k', label='data')
    plt.plot(T, y_, c='g', label='prediction')
    plt.axis('tight')
    plt.legend()
    plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
                                                                weights))

plt.show()
| bsd-3-clause |
maweigert/biobeam | tests/test_core/test_dn_mode.py | 1 | 1287 | """
mweigert@mpi-cbg.de
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from biobeam import Bpm3d
from six.moves import zip
import matplotlib.pyplot as plt
def test_plane():
    """Propagate a plane wave through a uniform index offset and report,
    for each dn-mean correction mode, the mean deviation of the on-axis
    field from the analytic plane-wave solution."""
    dx = .02
    lam = .5
    Nx, Ny, Nz = 128, 256, 400
    dn0 = .1

    # Uniform refractive-index offset over the whole volume.
    dn = dn0 * np.ones((Nz, Ny, Nx))
    m = Bpm3d(dn=dn, n0=1., units=(dx,) * 3, lam=lam)

    # Analytic on-axis phase of a plane wave in a medium of index 1 + dn0.
    z = np.arange(Nz)
    u0 = np.exp(2.j * np.pi / lam * (1. + dn0) * z * dx)

    modes = ["none", "global", "local"]
    fields = [m.propagate(dn_mean_method=mode) for mode in modes]
    for mode, u in zip(modes, fields):
        print("diff (%s):\t%.3g" % (mode, np.mean(np.abs(u[:, Ny // 2, Nx // 2] - u0))))
if __name__ == '__main__':
    # Interactive demo: propagate through a cube-shaped index perturbation
    # and plot the on-axis intensity for each dn-mean correction mode.
    dx = .02
    lam = .5
    Nx = 128
    Ny = 256
    Nz = 400
    dn0 = .4
    # Index offset only inside the central third of the volume (a cube).
    dn = np.zeros((Nz,Ny,Nx))
    dn[Nz//3:2*Nz//3,Ny//3:2*Ny//3,Nx//3:2*Nx//3] = dn0
    m = Bpm3d(dn = dn,
              n0 =1.,
              units = (dx,)*3,lam = lam)
    modes = ["none", "global", "local"]
    us = [m.propagate(dn_mean_method=mode) for mode in modes]
    plt.figure(1)
    plt.clf()
    # One curve per correction mode, sampled along the optical axis.
    for _u,t in zip(us,modes):
        plt.plot(_u[:,Ny//2, Nx//2], label = t)
    plt.legend()
    plt.show()
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/decomposition/__init__.py | 76 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
# Public API of the decomposition module; names not listed here are private.
__all__ = ['DictionaryLearning',
           'FastICA',
           'IncrementalPCA',
           'KernelPCA',
           'MiniBatchDictionaryLearning',
           'MiniBatchSparsePCA',
           'NMF',
           'PCA',
           'ProjectedGradientNMF',
           'RandomizedPCA',
           'SparseCoder',
           'SparsePCA',
           'dict_learning',
           'dict_learning_online',
           'fastica',
           'non_negative_factorization',
           'randomized_svd',
           'sparse_encode',
           'FactorAnalysis',
           'TruncatedSVD',
           'LatentDirichletAllocation']
| bsd-3-clause |
JapuDCret/RocketMap-Do | pogom/geofence.py | 14 | 5762 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import timeit
import logging
from .utils import get_args
log = logging.getLogger(__name__)
args = get_args()
# Trying to import matplotlib, which is not compatible with all hardware.
# Matlplotlib is faster for big calculations.
try:
from matplotlib.path import Path
except ImportError as e:
# Pass as this is an optional requirement. We're going to check later if it
# was properly imported and only use it if it's installed.
pass
class Geofences:
    """Filter scan coordinates against include ("geofenced") and exclude areas.

    Areas are simple polygons read from plain-text files (see
    ``parse_geofences_file``).  A coordinate is kept when it is inside at
    least one geofenced area (or when no geofenced areas are configured at
    all) and inside no excluded area.  Point-in-polygon tests use
    matplotlib's ``Path`` when the optional import at the top of this
    module succeeded, otherwise a pure-Python ray-casting fallback.
    """

    def __init__(self):
        self.geofenced_areas = []
        self.excluded_areas = []
        # Only use matplotlib if the optional top-of-module import worked.
        self.use_matplotlib = 'matplotlib' in sys.modules
        if args.geofence_file or args.geofence_excluded_file:
            log.info('Loading geofenced or excluded areas.')
            self.geofenced_areas = self.parse_geofences_file(
                args.geofence_file, excluded=False)
            self.excluded_areas = self.parse_geofences_file(
                args.geofence_excluded_file, excluded=True)
            log.info('Loaded %d geofenced and %d excluded areas.',
                     len(self.geofenced_areas),
                     len(self.excluded_areas))

    def is_enabled(self):
        """Return a truthy value when any include or exclude area is loaded."""
        return (self.geofenced_areas or self.excluded_areas)

    def get_geofenced_coordinates(self, coordinates):
        """Return the subset of `coordinates` that passes all area checks.

        Parameters
        ----------
        coordinates : list
            Either (lat, lng) tuples or dicts with 'lat'/'lng' keys when
            spawnpoint scanning is enabled (see ``_in_area``).
        """
        log.info('Using matplotlib: %s.', self.use_matplotlib)
        log.info('Found %d coordinates to geofence.', len(coordinates))
        geofenced_coordinates = []
        startTime = timeit.default_timer()
        for c in coordinates:
            # Coordinate is not valid if in one excluded area.
            if self._is_excluded(c):
                continue
            # Coordinate is geofenced if in one geofenced area.
            if self.geofenced_areas:
                for va in self.geofenced_areas:
                    if self._in_area(c, va):
                        geofenced_coordinates.append(c)
                        break
            else:
                # No include areas configured: keep everything not excluded.
                geofenced_coordinates.append(c)
        elapsedTime = timeit.default_timer() - startTime
        log.info('Geofenced to %s coordinates in %.2fs.',
                 len(geofenced_coordinates), elapsedTime)
        return geofenced_coordinates

    def _is_excluded(self, coordinate):
        """Return True when the coordinate lies inside any excluded area."""
        for ea in self.excluded_areas:
            if self._in_area(coordinate, ea):
                return True
        return False

    def _in_area(self, coordinate, area):
        """Test one coordinate against one area's polygon."""
        # Spawnpoint scanning passes dicts keyed 'lat'/'lng'; other modes
        # pass (lat, lon) sequences.
        if args.spawnpoint_scanning:
            point = {'lat': coordinate['lat'], 'lon': coordinate['lng']}
        else:
            point = {'lat': coordinate[0], 'lon': coordinate[1]}
        polygon = area['polygon']
        if self.use_matplotlib:
            return self.is_point_in_polygon_matplotlib(point, polygon)
        else:
            return self.is_point_in_polygon_custom(point, polygon)

    @staticmethod
    def parse_geofences_file(geofence_file, excluded):
        """Parse a geofence file into a list of named polygon areas.

        File format: a ``[name]`` header line starts each area, followed by
        one ``lat,lon`` pair per line.  Blank lines are ignored.

        Parameters
        ----------
        geofence_file : str or None
            Path to the file; a falsy value yields an empty list.
        excluded : bool
            Stored on each parsed area to mark it as an exclusion zone.
        """
        geofences = []
        # Read coordinates of excluded areas from file.
        if geofence_file:
            with open(geofence_file) as f:
                for line in f:
                    line = line.strip()
                    if len(line) == 0:  # Empty line.
                        continue
                    elif line.startswith("["):  # Name line.
                        name = line.replace("[", "").replace("]", "")
                        geofences.append({
                            'excluded': excluded,
                            'name': name,
                            'polygon': []
                        })
                        log.debug('Found geofence: %s.', name)
                    else:  # Coordinate line.
                        # BUGFIX: a coordinate line appearing before any
                        # "[name]" header used to crash with an IndexError
                        # on geofences[-1]; skip such malformed lines with
                        # a warning instead.
                        if not geofences:
                            log.warning('Geofence file "%s" has a coordinate'
                                        ' line before any [name] header,'
                                        ' skipping: %s',
                                        geofence_file, line)
                            continue
                        lat, lon = line.split(",")
                        LatLon = {'lat': float(lat), 'lon': float(lon)}
                        geofences[-1]['polygon'].append(LatLon)
        return geofences

    @staticmethod
    def is_point_in_polygon_matplotlib(point, polygon):
        """Point-in-polygon test via matplotlib's Path (fast for batches)."""
        pointTuple = (point['lat'], point['lon'])
        polygonTupleList = []
        for c in polygon:
            coordinateTuple = (c['lat'], c['lon'])
            polygonTupleList.append(coordinateTuple)
        # Close the polygon by repeating the first vertex.
        polygonTupleList.append(polygonTupleList[0])
        path = Path(polygonTupleList)
        return path.contains_point(pointTuple)

    @staticmethod
    def is_point_in_polygon_custom(point, polygon):
        """Pure-Python ray-casting point-in-polygon test.

        Returns False fast when the point falls outside the polygon's
        bounding box; otherwise counts edge crossings of a horizontal ray.
        """
        # Initialize first coordinate as default.
        maxLat = polygon[0]['lat']
        minLat = polygon[0]['lat']
        maxLon = polygon[0]['lon']
        minLon = polygon[0]['lon']
        for coords in polygon:
            maxLat = max(coords['lat'], maxLat)
            minLat = min(coords['lat'], minLat)
            maxLon = max(coords['lon'], maxLon)
            minLon = min(coords['lon'], minLon)
        # Cheap bounding-box rejection before the full edge walk.
        if ((point['lat'] > maxLat) or (point['lat'] < minLat) or
                (point['lon'] > maxLon) or (point['lon'] < minLon)):
            return False
        inside = False
        lat1, lon1 = polygon[0]['lat'], polygon[0]['lon']
        N = len(polygon)
        for n in range(1, N+1):
            # n % N wraps around so the last edge closes the polygon.
            lat2, lon2 = polygon[n % N]['lat'], polygon[n % N]['lon']
            if (min(lon1, lon2) < point['lon'] <= max(lon1, lon2) and
                    point['lat'] <= max(lat1, lat2)):
                if lon1 != lon2:
                    latIntersection = (
                        (point['lon'] - lon1) *
                        (lat2 - lat1) / (lon2 - lon1) +
                        lat1)
                if lat1 == lat2 or point['lat'] <= latIntersection:
                    # Each crossing toggles the inside/outside state.
                    inside = not inside
            lat1, lon1 = lat2, lon2
        return inside
| agpl-3.0 |
lweasel/piquant | test/test_tpms.py | 1 | 7975 | import numpy as np
import numpy.testing as npt
import pandas as pd
import piquant.tpms as t
# Paired per-transcript fixtures: ground-truth ("real") TPM values and the
# corresponding estimated ("calculated") TPM values, index-aligned.
REAL_TPMS_VALS = [0.05, 0.02, 15, 2, 10, 30, 11]
CALC_TPMS_VALS = [0.03, 20, 3, 0.01, 5, 20, 10]
# Group label per transcript, stored under the GROUP_TEST_COL column.
GROUPS = [0, 1, 0, 1, 0, 1, 1]
GROUP_TEST_COL = "group_test"
# A TPM must strictly exceed this cutoff to count as "present".
NOT_PRESENT_CUTOFF = 0.1
def _get_test_tpms():
    """Build the shared test DataFrame of real/calculated TPMs plus groups."""
    columns = {
        t.REAL_TPM: REAL_TPMS_VALS,
        t.CALCULATED_TPM: CALC_TPMS_VALS,
        GROUP_TEST_COL: GROUPS,
    }
    return pd.DataFrame(columns)
def _get_test_tp_tpms():
    """Return only the rows where both TPM values exceed the presence cutoff."""
    tpms = _get_test_tpms()
    real_present = tpms[t.REAL_TPM] > NOT_PRESENT_CUTOFF
    calc_present = tpms[t.CALCULATED_TPM] > NOT_PRESENT_CUTOFF
    return tpms[real_present & calc_present]
def _true_positive(real_tpm, calculated_tpm):
    """Both the real and the calculated TPM are above the presence cutoff."""
    return (real_tpm > NOT_PRESENT_CUTOFF
            and calculated_tpm > NOT_PRESENT_CUTOFF)
def _true_negative(real_tpm, calculated_tpm):
    """Both the real and the calculated TPM are below the presence cutoff."""
    return (real_tpm < NOT_PRESENT_CUTOFF
            and calculated_tpm < NOT_PRESENT_CUTOFF)
def _false_negative(real_tpm, calculated_tpm):
    """Really present (above cutoff) but estimated as absent (below cutoff)."""
    return (real_tpm > NOT_PRESENT_CUTOFF
            and calculated_tpm < NOT_PRESENT_CUTOFF)
def _false_positive(real_tpm, calculated_tpm):
    """Really absent (below cutoff) but estimated as present (above cutoff)."""
    return (real_tpm < NOT_PRESENT_CUTOFF
            and calculated_tpm > NOT_PRESENT_CUTOFF)
class _DummyStatistic:
def __init__(self, name, true_positives):
self.name = name
self.true_positives = true_positives
def calculate(self, tpms, tp_tpms):
df = tp_tpms if self.true_positives else tpms
return len(df)
def calculate_grouped(self, grouped, summary, tp_grouped, tp_summary):
df = tp_summary if self.true_positives else summary
return df[t.REAL_TPM].unstack()["count"]
class _DummyClassifier:
def __init__(self, name, value_func=lambda x: x[t.REAL_TPM]):
self.name = name
self.value_func = value_func
def get_column_name(self):
return self.name
def get_classification_value(self, x):
return self.value_func(x)
def test_mark_positives_negatives_marks_correct_entries_as_true_positive():
    """TRUE_POSITIVE is set exactly where both TPMs exceed the cutoff."""
    tpms = _get_test_tpms()
    t.mark_positives_and_negatives(NOT_PRESENT_CUTOFF, tpms)
    for _, row in tpms.iterrows():
        if not _true_positive(row[t.REAL_TPM], row[t.CALCULATED_TPM]):
            assert not row[t.TRUE_POSITIVE]
        else:
            assert row[t.TRUE_POSITIVE]
            assert not row[t.FALSE_POSITIVE]
            assert not row[t.TRUE_NEGATIVE]
            assert not row[t.FALSE_NEGATIVE]


def test_mark_positives_negatives_marks_correct_entries_as_false_positive():
    """FALSE_POSITIVE is set exactly where only the estimate exceeds it."""
    tpms = _get_test_tpms()
    t.mark_positives_and_negatives(NOT_PRESENT_CUTOFF, tpms)
    for _, row in tpms.iterrows():
        if not _false_positive(row[t.REAL_TPM], row[t.CALCULATED_TPM]):
            assert not row[t.FALSE_POSITIVE]
        else:
            assert row[t.FALSE_POSITIVE]
            assert not row[t.TRUE_POSITIVE]
            assert not row[t.TRUE_NEGATIVE]
            assert not row[t.FALSE_NEGATIVE]


def test_mark_positives_negatives_marks_correct_entries_as_true_negative():
    """TRUE_NEGATIVE is set exactly where both TPMs are below the cutoff."""
    tpms = _get_test_tpms()
    t.mark_positives_and_negatives(NOT_PRESENT_CUTOFF, tpms)
    for _, row in tpms.iterrows():
        if not _true_negative(row[t.REAL_TPM], row[t.CALCULATED_TPM]):
            assert not row[t.TRUE_NEGATIVE]
        else:
            assert row[t.TRUE_NEGATIVE]
            assert not row[t.FALSE_POSITIVE]
            assert not row[t.TRUE_POSITIVE]
            assert not row[t.FALSE_NEGATIVE]


def test_mark_positives_negatives_marks_correct_entries_as_false_negative():
    """FALSE_NEGATIVE is set exactly where only the real TPM exceeds it."""
    tpms = _get_test_tpms()
    t.mark_positives_and_negatives(NOT_PRESENT_CUTOFF, tpms)
    for _, row in tpms.iterrows():
        if not _false_negative(row[t.REAL_TPM], row[t.CALCULATED_TPM]):
            assert not row[t.FALSE_NEGATIVE]
        else:
            assert row[t.FALSE_NEGATIVE]
            assert not row[t.FALSE_POSITIVE]
            assert not row[t.TRUE_NEGATIVE]
            assert not row[t.TRUE_POSITIVE]
def test_get_true_positives_returns_correct_number_of_entries():
    """get_true_positives keeps exactly the rows above both cutoffs."""
    tpms = _get_test_tpms()
    t.mark_positives_and_negatives(NOT_PRESENT_CUTOFF, tpms)
    tp_tpms = t.get_true_positives(tpms)
    expected = len([x for x, y in zip(CALC_TPMS_VALS, REAL_TPMS_VALS)
                    if x > NOT_PRESENT_CUTOFF and y > NOT_PRESENT_CUTOFF])
    assert len(tp_tpms) == expected


def test_calculate_percent_error_calculates_correct_values():
    """Percent error is 100 * (calculated - real) / real, row by row."""
    tpms = _get_test_tpms()
    t.calculate_percent_error(tpms)
    for idx, row in tpms.iterrows():
        expected = 100 * ((CALC_TPMS_VALS[idx] - REAL_TPMS_VALS[idx])
                          / float(REAL_TPMS_VALS[idx]))
        npt.assert_approx_equal(row[t.PERCENT_ERROR], expected)


def test_calculate_log_ratios_calculates_correct_values():
    """Log ratio is log10(calculated / real), row by row."""
    tpms = _get_test_tpms()
    t.calculate_log_ratios(tpms)
    for idx, row in tpms.iterrows():
        expected = np.log10(CALC_TPMS_VALS[idx] / float(REAL_TPMS_VALS[idx]))
        npt.assert_approx_equal(row[t.LOG10_RATIO], expected)
def test_apply_classifiers_adds_correct_columns():
    """apply_classifiers creates one column per classifier name."""
    classifier_names = ["a", "b", "c"]
    classifiers = [_DummyClassifier(cname) for cname in classifier_names]
    tpms = _get_test_tpms()
    t.apply_classifiers(tpms, classifiers)
    for cname in classifier_names:
        assert tpms[cname] is not None


def test_apply_classifiers_calculates_correct_values():
    """Classifier values are derived from each row via the value function."""
    name = "dummy"
    val = 5
    classifiers = [_DummyClassifier(name, lambda x: x[t.CALCULATED_TPM] + val)]
    tpms = _get_test_tpms()
    t.apply_classifiers(tpms, classifiers)
    for idx, row in tpms.iterrows():
        assert row[name] == CALC_TPMS_VALS[idx] + val
def test_get_stats_returns_correct_number_of_statistics():
    """get_stats emits one column per supplied statistic."""
    num_statistics = 5
    statistics = [_DummyStatistic(str(i), False)
                  for i in range(num_statistics)]
    tpms = _get_test_tpms()
    tp_tpms = _get_test_tp_tpms()
    computed = t.get_stats(tpms, tp_tpms, statistics)
    assert len(computed.columns) == num_statistics


def test_get_stats_returns_correct_column_names():
    """Output columns are named after the statistics that produced them."""
    first, second = "dummy1", "dummy2"
    statistics = [_DummyStatistic(first, False), _DummyStatistic(second, False)]
    tpms = _get_test_tpms()
    tp_tpms = _get_test_tp_tpms()
    computed = t.get_stats(tpms, tp_tpms, statistics)
    assert first in computed.columns
    assert second in computed.columns


def test_get_stats_calculates_correct_values():
    """Each statistic sees the full frame or only the true positives."""
    first, second = "dummy1", "dummy2"
    statistics = [_DummyStatistic(first, False), _DummyStatistic(second, True)]
    tpms = _get_test_tpms()
    tp_tpms = _get_test_tp_tpms()
    computed = t.get_stats(tpms, tp_tpms, statistics)
    assert computed[first].ix[0] == len(tpms)
    assert computed[second].ix[0] == len(tp_tpms)
def test_get_grouped_stats_returns_correct_number_of_statistics():
    """get_grouped_stats emits one column per supplied statistic."""
    num_statistics = 5
    statistics = [_DummyStatistic("c" + str(i), False)
                  for i in range(num_statistics)]
    tpms = _get_test_tpms()
    tp_tpms = _get_test_tp_tpms()
    computed = t.get_grouped_stats(tpms, tp_tpms, GROUP_TEST_COL, statistics)
    assert len(computed.columns) == num_statistics


def test_get_grouped_stats_returns_correct_column_names():
    """Grouped output columns carry the statistics' names."""
    first, second = "dummy1", "dummy2"
    statistics = [_DummyStatistic(first, False), _DummyStatistic(second, False)]
    tpms = _get_test_tpms()
    tp_tpms = _get_test_tp_tpms()
    computed = t.get_grouped_stats(tpms, tp_tpms, GROUP_TEST_COL, statistics)
    assert first in computed.columns
    assert second in computed.columns


def test_get_grouped_stats_calculates_correct_values():
    """Per-group values count the (true-positive) rows of each group."""
    first, second = "dummy1", "dummy2"
    statistics = [_DummyStatistic(first, False), _DummyStatistic(second, True)]
    tpms = _get_test_tpms()
    tp_tpms = _get_test_tp_tpms()
    computed = t.get_grouped_stats(tpms, tp_tpms, GROUP_TEST_COL, statistics)
    for group in set(GROUPS):
        in_group = tpms[tpms[GROUP_TEST_COL] == group]
        tp_in_group = tp_tpms[tp_tpms[GROUP_TEST_COL] == group]
        assert computed[first].ix[group] == len(in_group)
        assert computed[second].ix[group] == len(tp_in_group)
| mit |
cadowd/proppy | plane_estimate.py | 1 | 4272 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 10 12:04:48 2016
Flying wing drag polar estimator for low Re model scales
@author: c.dowd
"""
import numpy as np
import consumption_functions
import matplotlib.pyplot as plt
def getSpans(U_stall, plane, atmosphere):
    """Size a wing planform (area, span, chords) for a target stall speed.

    NOTE(review): despite the original docstring, this function does NOT
    compute drag and returns nothing -- it only computes local planform
    variables (S_ref, b, c_tip, ...) that are never exposed.  It also
    overwrites its ``U_stall`` argument below and reads several names
    (``payload``, ``payload_frac``, ``theta_sweep``, ``body_length``,
    ``plane_mass``, ``AR_factor``) that are only defined in the
    ``__main__`` block of this script -- confirm intended usage before
    calling it from anywhere else.
    """
    # Total mass implied by the payload and the assumed payload fraction.
    mass=payload/payload_frac
    aero_factor=plane['aero_factor']
    rho=atmosphere['rho']
#    mass=plane['mass']
    # Required lift (N) to support the aircraft's weight.
    L=9.8*mass
    tc_ratio=0.2
    CL_max=1.0+aero_factor #range from 1 to 2 (best possible with flaps)
    # Sweep reduces the effective maximum lift coefficient.
    CL_max_swept=CL_max*np.cos(theta_sweep)
    U_thrown_min=5
    static_acc_min=5
    allowable_drop=0.75
    drop_time=np.sqrt(2*allowable_drop/9.81)
    # NOTE(review): the U_stall argument is discarded here and recomputed
    # from the hand-launch model (throw speed + acceleration during drop).
    U_stall=U_thrown_min+static_acc_min*drop_time
    print(plane_mass)
    # Reference wing area from L = 0.5 * rho * U^2 * S * CL.
    S_ref=2*L/(rho*U_stall**2*CL_max_swept)
    print(S_ref)
    c_root=body_length
    AR_max=4*S_ref/c_root**2 #50.0
    AR_min=S_ref/c_root**2 #2
    b_min=np.sqrt(S_ref*AR_min)
    b_max=np.sqrt(S_ref*AR_max)
#    print(b_max/AR_max)
    # Interpolate the aspect ratio between the feasible extremes.
    AR=AR_min + AR_factor*(AR_max-AR_min)
    b=np.sqrt(S_ref*AR)
    c_root=body_length
    # Tip chord from the trapezoidal-wing area formula S = b*(c_root+c_tip)/2.
    c_tip=2*S_ref/b-c_root
def dragFunc(U, plane, atmosphere):
    """Estimate total drag (N) at flight speed(s) `U` via a parasitic +
    lift-induced drag build-up.

    NOTE(review): this function reads many names (``b``, ``S_ref``,
    ``theta_sweep``, ``tc_ratio``, ``c_tip``, ``c_root``, ``AR``,
    ``body_height``, ``rho``, ``L``, ``Q``, ``Sref``) that are either
    locals of ``getSpans`` or module globals set only in ``__main__``.
    Calling it standalone (as ``__main__`` does) raises NameError on
    ``b`` unless those globals exist -- confirm the intended data flow.
    """
    print(b)
    Z=2*np.cos(theta_sweep)
    K=1+Z*(tc_ratio)+100*(tc_ratio)**4 #Form factor of wing, Shevell 1989
    print(K)
    Re=U*np.sqrt(S_ref)*rho/atmosphere['mu'] #Reynolds number
    C_f=0.455/(np.log10(Re)**2.58) #Schlichting Prandtl skin friction
#    print(C_f)
    # Wetted area: both wing surfaces plus a 2% allowance.
    S_wet=S_ref*2*1.02
    Cd_p=Q/Sref*K*C_f*S_wet
    print(Cd_p)
    #Add for fuselage and vertical surfaces
    lambda_t=c_tip/c_root - 0.357 + 0.45*np.exp(0.0375*theta_sweep) #Taper ratio with sweep correction
    func_lambda=0.0524*lambda_t**4 - 0.15*lambda_t**3 + 0.1659*lambda_t**2 - 0.0706*lambda_t + 0.0119
    # Theoretical Oswald efficiency from the taper-ratio polynomial fit.
    e_th=1/(1+func_lambda*AR)
    #Correction factors for the Oswald form factor
    ke_D0=0.9 #Rough estimation for general aviation
    ke_WL=1.00 #Should be accounted for properly maybe
    ke_f= 1-2*(body_height/b) #Accounts for fuselage
    e_total=e_th*ke_f*ke_D0*ke_WL #total Oswald form factor
    print(e_total)
    # Lift coefficient required at speed U, then induced-drag factor.
    Cl=2*L/(rho*U**2*S_ref)
    Cd_if=1/(np.pi*e_total*AR)
    print(Cd_if)
    C_d=Cd_p + Cd_if*Cl**2
#    C_d=0.0174 + 0.1156*Cl**2
#    Re=U*np.sqrt(plane['ref_area'])*atmosphere['rho']/atmosphere['mu'] #Reynolds number
    drag=C_d*0.5*rho*plane['ref_area']*U**2
    return drag
if __name__ == "__main__":
    # Script driver: defines the module-level parameters that dragFunc and
    # getSpans read as globals, then compares the estimated drag polar with
    # the reference implementation from consumption_functions.
    L=0.7 #Wing length
    c=0.2 #Wing chord length
    Sref=0.3259#+0.2
    b_t= 1.2 #total span of craft
    Q=1.2 #to 1.3 depends on other protruding parts
    payload=0.15 #payload mass
    theta_sweep=30*np.pi/180
    body_length=0.4
    body_height=0.07
#    rho=1.15
    payload_frac=0.125 #RESEARCH
    aero_factor= 1.0 #0 to 1
    AR_factor=0.2 #0 to 1
    # Quadratic drag-polar model of the aircraft used by both estimators.
    plane1={
    'C_d':0.04,
    'ref_area':0.3259,
    'mass': 1.15, #mass in kilos
    'C_1': 0.1156,
    'C_2': -0.0069,
    'C_3': 0.0174,
    'loss_factor': 1.0, #Assumed to scale with reynolds
    'loss_ref_length': 1,
    'theta_sweep': 30,
    'body_length': 0.4,
    'aero_factor':0.5
    }
    atmosphere={
    'rho':1.10, #
    'mu':1.8e-5, #dynamic viscosity
    'Ta': 25, #Ambient temperature
    }
    plane_mass=payload/payload_frac
    # Sweep flight speeds and plot estimated vs. reference drag.
    U_range=np.linspace(5,20,100)
    drag=dragFunc(U_range,plane1,atmosphere)
    drag_real=consumption_functions.dragFunc(U_range,plane1,atmosphere)
    plt.plot(U_range, drag, label='Estimate')
    plt.plot(U_range, drag_real, label='real')
    plt.legend(loc=4)
    plt.show()
| gpl-3.0 |
rahuldhote/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
    """Transforms lists of feature-value mappings to vectors.
    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.
    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".
    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.
    Read more in the :ref:`User Guide <dict_feature_extraction>`.
    Parameters
    ----------
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator: string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse: boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort: boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
        True by default.
    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").
    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    >>> v.inverse_transform(X) == \
        [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[ 0.,  0.,  4.]])
    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """
    def __init__(self, dtype=np.float64, separator="=", sparse=True,
                 sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort
    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.
        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)
        Returns
        -------
        self
        """
        feature_names = []
        vocab = {}
        for x in X:
            for f, v in six.iteritems(x):
                # String values are one-hot expanded into "name=value" features.
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                if f not in vocab:
                    feature_names.append(f)
                    vocab[f] = len(vocab)
        if self.sort:
            feature_names.sort()
            # Re-derive indices so they follow the sorted name order.
            vocab = dict((f, i) for i, f in enumerate(feature_names))
        self.feature_names_ = feature_names
        self.vocabulary_ = vocab
        return self
    def _transform(self, X, fitting):
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")
        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_
        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X
        # CSR building blocks: column indices and per-row pointers.
        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []
        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                    # One-hot coding: the expanded feature's value is 1.
                    v = 1
                if f in vocab:
                    indices.append(vocab[f])
                    values.append(dtype(v))
                else:
                    # Unknown features are only added while fitting; during
                    # plain transform they are silently dropped.
                    if fitting:
                        feature_names.append(f)
                        vocab[f] = len(vocab)
                        indices.append(vocab[f])
                        values.append(dtype(v))
            indptr.append(len(indices))
        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")
        indices = frombuffer_empty(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))
        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)
        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # Permute the matrix columns to match the sorted feature order.
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]
        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()
        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab
        return result_matrix
    def fit_transform(self, X, y=None):
        """Learn a list of feature name -> indices mappings and transform X.
        Like fit(X) followed by transform(X), but does not require
        materializing X in memory.
        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)
        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X, fitting=True)
    def inverse_transform(self, X, dict_type=dict):
        """Transform array or sparse matrix X back to feature mappings.
        X must have been produced by this DictVectorizer's transform or
        fit_transform method; it may only have passed through transformers
        that preserve the number of features and their order.
        In the case of one-hot/one-of-K coding, the constructed feature
        names and values are returned rather than the original ones.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Sample matrix.
        dict_type : callable, optional
            Constructor for feature mappings. Must conform to the
            collections.Mapping API.
        Returns
        -------
        D : list of dict_type objects, length = n_samples
            Feature mappings for the samples in X.
        """
        # COO matrix is not subscriptable
        X = check_array(X, accept_sparse=['csr', 'csc'])
        n_samples = X.shape[0]
        names = self.feature_names_
        dicts = [dict_type() for _ in xrange(n_samples)]
        if sp.issparse(X):
            # Sparse path: walk only the stored nonzeros.
            for i, j in zip(*X.nonzero()):
                dicts[i][names[j]] = X[i, j]
        else:
            # Dense path: scan each row, keeping only nonzero entries.
            for i, d in enumerate(dicts):
                for j, v in enumerate(X[i, :]):
                    if v != 0:
                        d[names[j]] = X[i, j]
        return dicts
    def transform(self, X, y=None):
        """Transform feature->value dicts to array or sparse matrix.
        Named features not encountered during fit or fit_transform will be
        silently ignored.
        Parameters
        ----------
        X : Mapping or iterable over Mappings, length = n_samples
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)
        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        if self.sparse:
            return self._transform(X, fitting=False)
        else:
            # Dense path: fill a preallocated zero matrix directly.
            dtype = self.dtype
            vocab = self.vocabulary_
            X = _tosequence(X)
            Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
            for i, x in enumerate(X):
                for f, v in six.iteritems(x):
                    if isinstance(v, six.string_types):
                        f = "%s%s%s" % (f, self.separator, v)
                        v = 1
                    try:
                        Xa[i, vocab[f]] = dtype(v)
                    except KeyError:
                        # Feature not seen during fit: silently ignored.
                        pass
            return Xa
    def get_feature_names(self):
        """Returns a list of feature names, ordered by their indices.
        If one-of-K coding is applied to categorical features, this will
        include the constructed feature names but not the original ones.
        """
        return self.feature_names_
    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.
        This function modifies the estimator in-place.
        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : boolean, optional
            Whether support is a list of indices.
        Returns
        -------
        self
        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names()
        ['bar', 'baz', 'foo']
        >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
        DictVectorizer(dtype=..., separator='=', sort=True,
                sparse=True)
        >>> v.get_feature_names()
        ['bar', 'foo']
        """
        if not indices:
            support = np.where(support)[0]
        names = self.feature_names_
        # Rebuild the vocabulary with indices compacted to 0..len(support)-1.
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)
        self.vocabulary_ = new_vocab
        self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                    key=itemgetter(1))]
        return self
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/api/radar_chart.py | 3 | 6539 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
    """Create a radar chart with `num_vars` axes.

    Registers a matplotlib projection named ``'radar'`` and returns the
    spoke angles to plot against.

    Parameters
    ----------
    num_vars : int
        Number of variables (spokes) on the chart.
    frame : {'circle', 'polygon'}
        Shape of the chart's outer frame.

    Returns
    -------
    theta : ndarray
        Evenly spaced axis angles in radians, rotated so the first axis
        points straight up.

    Raises
    ------
    ValueError
        If `frame` is not one of the supported shapes.
    """
    # calculate evenly-spaced axis angles
    theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
    # rotate theta such that the first axis is at the top
    theta += np.pi/2

    def draw_poly_frame(self, x0, y0, r):
        # TODO: use transforms to convert (x, y) to (r, theta)
        verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
        return plt.Polygon(verts, closed=True, edgecolor='k')

    def draw_circle_frame(self, x0, y0, r):
        return plt.Circle((x0, y0), r)

    frame_dict = {'polygon': draw_poly_frame, 'circle': draw_circle_frame}
    if frame not in frame_dict:
        # BUGFIX: the original used the Python-2-only statement form
        # ``raise ValueError, '...'`` which is a SyntaxError on Python 3;
        # the call form below is valid on both Python 2 and 3.
        raise ValueError('unknown value for `frame`: %s' % frame)

    class RadarAxes(PolarAxes):
        """Class for creating a radar chart (a.k.a. a spider or star chart)
        http://en.wikipedia.org/wiki/Radar_chart
        """
        name = 'radar'
        # use 1 line segment to connect specified points
        RESOLUTION = 1
        # define draw_frame method
        draw_frame = frame_dict[frame]

        def fill(self, *args, **kwargs):
            """Override fill so that line is closed by default"""
            closed = kwargs.pop('closed', True)
            return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)

        def plot(self, *args, **kwargs):
            """Override plot so that line is closed by default"""
            lines = super(RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                self._close_line(line)

        def _close_line(self, line):
            x, y = line.get_data()
            # FIXME: markers at x[0], y[0] get doubled-up
            if x[0] != x[-1]:
                x = np.concatenate((x, [x[0]]))
                y = np.concatenate((y, [y[0]]))
                line.set_data(x, y)

        def set_varlabels(self, labels):
            self.set_thetagrids(theta * 180/np.pi, labels)

        def _gen_axes_patch(self):
            x0, y0 = (0.5, 0.5)
            r = 0.5
            return self.draw_frame(x0, y0, r)

    register_projection(RadarAxes)
    return theta
if __name__ == '__main__':
    #The following data is from the Denver Aerosol Sources and Health study.
    #See  doi:10.1016/j.atmosenv.2008.12.017
    #
    #The data are pollution source profile estimates for five modeled pollution
    #sources (e.g., cars, wood-burning, etc) that emit 7-9 chemical species.
    #The radar charts are experimented with here to see if we can nicely
    #visualize how the modeled source profiles change across four scenarios:
    #  1) No gas-phase species present, just seven particulate counts on
    #     Sulfate
    #     Nitrate
    #     Elemental Carbon (EC)
    #     Organic Carbon fraction 1 (OC)
    #     Organic Carbon fraction 2 (OC2)
    #     Organic Carbon fraction 3 (OC3)
    #     Pyrolized Organic Carbon (OP)
    #  2)Inclusion of gas-phase specie carbon monoxide (CO)
    #  3)Inclusion of gas-phase specie ozone (O3).
    #  4)Inclusion of both gas-phase speciesis present...
    N = 9
    theta = radar_factory(N)
    spoke_labels = ['Sulfate', 'Nitrate', 'EC', 'OC1', 'OC2', 'OC3', 'OP', 'CO',
                    'O3']
    # Factor profiles (f1..f5) for each of the four scenarios; one value per
    # chemical species in spoke_labels order.
    f1_base = [0.88, 0.01, 0.03, 0.03, 0.00, 0.06, 0.01, 0.00, 0.00]
    f1_CO = [0.88, 0.02, 0.02, 0.02, 0.00, 0.05, 0.00, 0.05, 0.00]
    f1_O3 = [0.89, 0.01, 0.07, 0.00, 0.00, 0.05, 0.00, 0.00, 0.03]
    f1_both = [0.87, 0.01, 0.08, 0.00, 0.00, 0.04, 0.00, 0.00, 0.01]
    f2_base = [0.07, 0.95, 0.04, 0.05, 0.00, 0.02, 0.01, 0.00, 0.00]
    f2_CO = [0.08, 0.94, 0.04, 0.02, 0.00, 0.01, 0.12, 0.04, 0.00]
    f2_O3 = [0.07, 0.95, 0.05, 0.04, 0.00, 0.02, 0.12, 0.00, 0.00]
    f2_both = [0.09, 0.95, 0.02, 0.03, 0.00, 0.01, 0.13, 0.06, 0.00]
    f3_base = [0.01, 0.02, 0.85, 0.19, 0.05, 0.10, 0.00, 0.00, 0.00]
    f3_CO = [0.01, 0.01, 0.79, 0.10, 0.00, 0.05, 0.00, 0.31, 0.00]
    f3_O3 = [0.01, 0.02, 0.86, 0.27, 0.16, 0.19, 0.00, 0.00, 0.00]
    f3_both = [0.01, 0.02, 0.71, 0.24, 0.13, 0.16, 0.00, 0.50, 0.00]
    f4_base = [0.02, 0.01, 0.07, 0.01, 0.21, 0.12, 0.98, 0.00, 0.00]
    f4_CO = [0.00, 0.02, 0.03, 0.38, 0.31, 0.31, 0.00, 0.59, 0.00]
    f4_O3 = [0.01, 0.03, 0.00, 0.32, 0.29, 0.27, 0.00, 0.00, 0.95]
    f4_both = [0.01, 0.03, 0.00, 0.28, 0.24, 0.23, 0.00, 0.44, 0.88]
    f5_base = [0.01, 0.01, 0.02, 0.71, 0.74, 0.70, 0.00, 0.00, 0.00]
    f5_CO = [0.02, 0.02, 0.11, 0.47, 0.69, 0.58, 0.88, 0.00, 0.00]
    f5_O3 = [0.02, 0.00, 0.03, 0.37, 0.56, 0.47, 0.87, 0.00, 0.00]
    f5_both = [0.02, 0.00, 0.18, 0.45, 0.64, 0.55, 0.86, 0.00, 0.16]
    fig = plt.figure(figsize=(9,9))
    # adjust spacing around the subplots
    fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
    title_list = ['Basecase', 'With CO', 'With O3', 'CO & O3']
    data = {'Basecase': [f1_base, f2_base, f3_base, f4_base, f5_base],
            'With CO': [f1_CO, f2_CO, f3_CO, f4_CO, f5_CO],
            'With O3': [f1_O3, f2_O3, f3_O3, f4_O3, f5_O3],
            'CO & O3': [f1_both, f2_both, f3_both, f4_both, f5_both]}
    colors = ['b', 'r', 'g', 'm', 'y']
    # chemicals range from 0 to 1
    radial_grid = [0.2, 0.4, 0.6, 0.8]
    # If you don't care about the order, you can loop over data_dict.items()
    # One radar subplot per scenario, one colored polygon per factor.
    for n, title in enumerate(title_list):
        ax = fig.add_subplot(2, 2, n+1, projection='radar')
        plt.rgrids(radial_grid)
        ax.set_title(title, weight='bold', size='medium', position=(0.5, 1.1),
                     horizontalalignment='center', verticalalignment='center')
        for d, color in zip(data[title], colors):
            ax.plot(theta, d, color=color)
            ax.fill(theta, d, facecolor=color, alpha=0.25)
        ax.set_varlabels(spoke_labels)
    # add legend relative to top-left plot
    plt.subplot(2,2,1)
    labels = ('Factor 1', 'Factor 2', 'Factor 3', 'Factor 4', 'Factor 5')
    legend = plt.legend(labels, loc=(0.9, .95), labelspacing=0.1)
    plt.setp(legend.get_texts(), fontsize='small')
    plt.figtext(0.5, 0.965, '5-Factor Solution Profiles Across Four Scenarios',
                ha='center', color='black', weight='bold', size='large')
    plt.show()
| gpl-2.0 |
gigglesninja/senior-design | MissionPlanner/Lib/site-packages/scipy/signal/fir_filter_design.py | 53 | 18572 | """Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
    """Compute the Kaiser parameter `beta`, given the attenuation `a`.

    Parameters
    ----------
    a : float
        The desired attenuation in the stopband and maximum ripple in
        the passband, in dB.  This should be a *positive* number.

    Returns
    -------
    beta : float
        The `beta` parameter to be used in the formula for a Kaiser window.

    References
    ----------
    Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
    """
    # Kaiser's empirical piecewise fit, selected by attenuation regime (dB).
    if a > 50:
        return 0.1102 * (a - 8.7)
    if a > 21:
        excess = a - 21
        return 0.5842 * excess ** 0.4 + 0.07886 * excess
    # Below 21 dB a rectangular window already suffices.
    return 0.0
def kaiser_atten(numtaps, width):
    """Compute the attenuation of a Kaiser FIR filter.

    Given the number of taps `numtaps` and the transition width `width`,
    compute the attenuation `a` in dB, given by Kaiser's formula:

        a = 2.285 * (numtaps - 1) * pi * width + 7.95

    Parameters
    ----------
    numtaps : int
        The number of taps in the FIR filter.
    width : float
        The desired width of the transition region between passband and
        stopband (or, in general, at any discontinuity) for the filter,
        expressed as a fraction of the Nyquist rate.

    Returns
    -------
    a : float
        The attenuation of the ripple, in dB.

    See Also
    --------
    kaiserord, kaiser_beta
    """
    # Kaiser's formula relates filter order (numtaps - 1), transition
    # width and achievable stopband attenuation.
    a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
    return a
def kaiserord(ripple, width):
    """Design a Kaiser window to limit ripple and width of transition region.

    Parameters
    ----------
    ripple : float
        Positive number specifying maximum ripple in passband (dB) and minimum
        ripple in stopband.
    width : float
        Width of transition region (normalized so that 1 corresponds to pi
        radians / sample).

    Returns
    -------
    numtaps : int
        The length of the Kaiser window.
    beta : float
        The beta parameter for the Kaiser window.

    Notes
    -----
    There are several ways to obtain the Kaiser window:

        signal.kaiser(numtaps, beta, sym=0)
        signal.get_window(beta, numtaps)
        signal.get_window(('kaiser', beta), numtaps)

    The empirical equations discovered by Kaiser are used.

    See Also
    --------
    kaiser_beta, kaiser_atten

    References
    ----------
    Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
    """
    A = abs(ripple)  # in case somebody is confused as to what's meant
    if A < 8:
        # Formula for N is not valid in this range.
        raise ValueError("Requested maximum ripple attenuation %f is too "
                         "small for the Kaiser formula." % A)
    beta = kaiser_beta(A)

    # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
    # order, so we have to add 1 to get the number of taps.
    numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1

    return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
           scale=True, nyq=1.0):
    """
    FIR filter design using the window method.

    This function computes the coefficients of a finite impulse response
    filter.  The filter will have linear phase; it will be Type I if
    `numtaps` is odd and Type II if `numtaps` is even.

    Type II filters always have zero response at the Nyquist rate, so a
    ValueError exception is raised if firwin is called with `numtaps` even
    and having a passband whose right end is at the Nyquist rate.

    Parameters
    ----------
    numtaps : int
        Length of the filter (number of coefficients, i.e. the filter
        order + 1).  `numtaps` must be even if a passband includes the
        Nyquist frequency.
    cutoff : float or 1D array_like
        Cutoff frequency of filter (expressed in the same units as `nyq`)
        OR an array of cutoff frequencies (that is, band edges).  In the
        latter case, the frequencies in `cutoff` should be positive and
        monotonically increasing between 0 and `nyq`.  The values 0 and
        `nyq` must not be included in `cutoff`.
    width : float or None
        If `width` is not None, then assume it is the approximate width of
        the transition region (expressed in the same units as `nyq`) for
        use in Kaiser FIR filter design.  In this case, the `window`
        argument is ignored.
    window : string or tuple of string and parameter values
        Desired window to use.  See `scipy.signal.get_window` for a list
        of windows and required parameters.
    pass_zero : bool
        If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
        Otherwise the DC gain is 0.
    scale : bool
        Set to True to scale the coefficients so that the frequency
        response is exactly unity at a certain frequency.  That frequency
        is either: 0 (DC) if the first passband starts at 0 (i.e.
        pass_zero is True); `nyq` (the Nyquist rate) if the first passband
        ends at `nyq` (i.e the filter is a single band highpass filter);
        center of first passband otherwise.
    nyq : float
        Nyquist frequency.  Each frequency in `cutoff` must be between 0
        and `nyq`.

    Returns
    -------
    h : 1D ndarray
        Coefficients of length `numtaps` FIR filter.

    Raises
    ------
    ValueError
        If any value in `cutoff` is less than or equal to 0 or greater
        than or equal to `nyq`, if the values in `cutoff` are not strictly
        monotonically increasing, or if `numtaps` is even but a passband
        includes the Nyquist frequency.

    See also
    --------
    scipy.signal.firwin2
    """
    # The major enhancements to this function added in November 2010 were
    # developed by Tom Krauss (see ticket #902).

    # Normalize the cutoff(s) onto the [0, 1] Nyquist-relative scale.
    cutoff = np.atleast_1d(cutoff) / float(nyq)

    # Check for invalid input.
    if cutoff.ndim > 1:
        raise ValueError("The cutoff argument must be at most one-dimensional.")
    if cutoff.size == 0:
        raise ValueError("At least one cutoff frequency must be given.")
    if cutoff.min() <= 0 or cutoff.max() >= 1:
        raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
    if np.any(np.diff(cutoff) <= 0):
        raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")

    if width is not None:
        # A width was given.  Find the beta parameter of the Kaiser window
        # and set `window`.  This overrides the value of `window` passed in.
        atten = kaiser_atten(numtaps, float(width)/nyq)
        beta = kaiser_beta(atten)
        window = ('kaiser', beta)

    # An odd number of band edges means the final band is open-ended; XOR
    # with pass_zero tells us whether that band reaches Nyquist with gain 1.
    pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
    if pass_nyquist and numtaps % 2 == 0:
        raise ValueError("A filter with an even number of coefficients must "
                         "have zero response at the Nyquist rate.")

    # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
    # is even, and each pair in cutoff corresponds to passband.
    # (list * bool multiplication: [0.0]*True == [0.0], [0.0]*False == [].)
    cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))

    # `bands` is a 2D array; each row gives the left and right edges of
    # a passband.
    bands = cutoff.reshape(-1,2)

    # Build up the coefficients by superposing ideal (sinc) band responses,
    # each centered on the filter midpoint `alpha` for linear phase.
    alpha = 0.5 * (numtaps-1)
    m = np.arange(0, numtaps) - alpha
    h = 0
    for left, right in bands:
        h += right * sinc(right * m)
        h -= left * sinc(left * m)

    # Get and apply the window function.
    # NOTE(review): module-local (Python 2 style implicit-relative) import;
    # works only when signaltools is importable as a top-level module.
    from signaltools import get_window
    win = get_window(window, numtaps, fftbins=False)
    h *= win

    # Now handle scaling if desired.
    if scale:
        # Get the first passband.
        left, right = bands[0]
        if left == 0:
            scale_frequency = 0.0
        elif right == 1:
            scale_frequency = 1.0
        else:
            scale_frequency = 0.5 * (left + right)
        # Normalize so the response at scale_frequency is exactly unity.
        c = np.cos(np.pi * m * scale_frequency)
        s = np.sum(h * c)
        h /= s

    return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
    """FIR filter design using the window method.

    From the given frequencies `freq` and corresponding gains `gain`,
    this function constructs an FIR filter with linear phase and
    (approximately) the given frequency response.

    Parameters
    ----------
    numtaps : int
        The number of taps in the FIR filter.  `numtaps` must be less than
        `nfreqs`.  If the gain at the Nyquist rate, `gain[-1]`, is not 0,
        then `numtaps` must be odd.
    freq : array-like, 1D
        The frequency sampling points.  Typically 0.0 to 1.0 with 1.0 being
        Nyquist.  The Nyquist frequency can be redefined with the argument
        `nyq`.  The values in `freq` must be nondecreasing.  A value can be
        repeated once to implement a discontinuity.  The first value in
        `freq` must be 0, and the last value must be `nyq`.  The input
        sequence is not modified.
    gain : array-like
        The filter gains at the frequency sampling points.
    nfreqs : int, optional
        The size of the interpolation mesh used to construct the filter.
        For most efficient behavior, this should be a power of 2 plus 1
        (e.g, 129, 257, etc).  The default is one more than the smallest
        power of 2 that is not less than `numtaps`.  `nfreqs` must be
        greater than `numtaps`.
    window : string or (string, float) or float, or None, optional
        Window function to use.  Default is "hamming".  See
        `scipy.signal.get_window` for the complete list of possible values.
        If None, no window function is applied.
    nyq : float
        Nyquist frequency.  Each frequency in `freq` must be between 0 and
        `nyq` (inclusive).

    Returns
    -------
    taps : numpy 1D array of length `numtaps`
        The filter coefficients of the FIR filter.

    Raises
    ------
    ValueError
        If `freq` and `gain` differ in length, `numtaps` >= `nfreqs`,
        the endpoints of `freq` are not 0 and `nyq`, `freq` is not
        nondecreasing, a value in `freq` occurs more than twice, or
        `numtaps` is even while `gain[-1]` is nonzero.

    See also
    --------
    scipy.signal.firwin

    Notes
    -----
    From the given set of frequencies and gains, the desired response is
    constructed in the frequency domain.  The inverse FFT is applied to the
    desired response to create the associated convolution kernel, and the
    first `numtaps` coefficients of this kernel, scaled by `window`, are
    returned.

    The FIR filter will have linear phase.  The filter is Type I if
    `numtaps` is odd and Type II if `numtaps` is even.  Because Type II
    filters always have a zero at the Nyquist frequency, `numtaps` must be
    odd if `gain[-1]` is not zero.

    .. versionadded:: 0.9.0

    References
    ----------
    .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
       Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
       (See, for example, Section 7.4.)
    .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
       Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
    """
    if len(freq) != len(gain):
        raise ValueError('freq and gain must be of same length.')

    if nfreqs is not None and numtaps >= nfreqs:
        raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
                         'called with ntaps=%d and nfreqs=%s' % (numtaps, nfreqs))

    if freq[0] != 0 or freq[-1] != nyq:
        raise ValueError('freq must start with 0 and end with `nyq`.')
    d = np.diff(freq)
    if (d < 0).any():
        raise ValueError('The values in freq must be nondecreasing.')
    d2 = d[:-1] + d[1:]
    if (d2 == 0).any():
        raise ValueError('A value in freq must not occur more than twice.')

    if numtaps % 2 == 0 and gain[-1] != 0.0:
        raise ValueError("A filter with an even number of coefficients must "
                         "have zero gain at the Nyquist rate.")

    if nfreqs is None:
        nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))

    # BUG FIX: work on a private copy of `freq`.  The repeated-value tweak
    # below used to modify the caller's sequence in place.
    freq = np.array(freq, copy=True, dtype=float)

    # Tweak any repeated values in freq so that interp works.
    eps = np.finfo(float).eps
    for k in range(len(freq)):
        if k < len(freq) - 1 and freq[k] == freq[k + 1]:
            freq[k] = freq[k] - eps
            freq[k + 1] = freq[k + 1] + eps

    # Linearly interpolate the desired response on a uniform mesh `x`.
    x = np.linspace(0.0, nyq, nfreqs)
    fx = np.interp(x, freq, gain)

    # Adjust the phases of the coefficients so that the first `ntaps` of the
    # inverse FFT are the desired filter coefficients.
    shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
    fx2 = fx * shift

    # Use irfft to compute the inverse FFT.
    out_full = irfft(fx2)

    if window is not None:
        # Create the window to apply to the filter coefficients.
        # NOTE(review): module-local (Python 2 style implicit-relative)
        # import; only hit when a window is requested.
        from signaltools import get_window
        wind = get_window(window, numtaps, fftbins=False)
    else:
        wind = 1

    # Keep only the first `numtaps` coefficients in `out`, and multiply by
    # the window.
    out = out_full[:numtaps] * wind

    return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
          maxiter=25, grid_density=16):
    """
    Calculate the minimax optimal filter using the Remez exchange algorithm.

    Calculate the filter-coefficients for the finite impulse response
    (FIR) filter whose transfer function minimizes the maximum error
    between the desired gain and the realized gain in the specified
    frequency bands using the Remez exchange algorithm.

    Parameters
    ----------
    numtaps : int
        The desired number of taps in the filter.  The number of taps is
        the number of terms in the filter, or the filter order plus one.
    bands : array_like
        A monotonic sequence containing the band edges in Hz.
        All elements must be non-negative and less than half the sampling
        frequency as given by `Hz`.
    desired : array_like
        A sequence half the size of bands containing the desired gain
        in each of the specified bands.
    weight : array_like, optional
        A relative weighting to give to each band region.  The length of
        `weight` has to be half the length of `bands`.  Defaults to
        uniform weighting.
    Hz : scalar, optional
        The sampling frequency in Hz.  Default is 1.
    type : {'bandpass', 'differentiator', 'hilbert'}, optional
        The type of filter:
          'bandpass' : flat response in bands.  This is the default.
          'differentiator' : frequency proportional response in bands.
          'hilbert' : filter with odd symmetry, that is, type III
                      (for even order) or type IV (for odd order)
                      linear phase filters.
    maxiter : int, optional
        Maximum number of iterations of the algorithm.  Default is 25.
    grid_density : int, optional
        Grid density.  The dense grid used in `remez` is of size
        ``(numtaps + 1) * grid_density``.  Default is 16.

    Returns
    -------
    out : ndarray
        A rank-1 array containing the coefficients of the optimal
        (in a minimax sense) filter.

    Raises
    ------
    ValueError
        If `type` is not one of the three recognized filter types.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.

    References
    ----------
    .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
           design of optimum FIR linear phase digital filters",
           IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
    .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
           Program for Designing Optimum FIR Linear Phase Digital
           Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
           pp. 506-525, 1973.
    """
    # Map the filter type name to the numeric code sigtools expects.
    type_codes = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}
    if type not in type_codes:
        raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
    tnum = type_codes[type]

    # Default to uniform weighting across all specified bands.
    if weight is None:
        weight = [1] * len(desired)

    # Copy so the C routine cannot modify the caller's band edges.
    bands = np.asarray(bands).copy()
    return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
                           maxiter, grid_density)
| gpl-2.0 |
anthrotype/freetype-py | examples/glyph-outline.py | 3 | 1282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Glyph outline rendering
'''
from freetype import *
if __name__ == '__main__':
    # Demo: stroke the outline of a glyph and display the resulting bitmap.
    import numpy
    import matplotlib.pyplot as plt

    # Load the face and set the character size (26.6 fixed-point units:
    # 4*48*64 presumably selects a large rendering size -- confirm against
    # the freetype-py docs).
    face = Face('./Vera.ttf')
    face.set_char_size( 4*48*64 )
    # Load the outline only; FT_LOAD_NO_BITMAP avoids embedded bitmaps.
    flags = FT_LOAD_DEFAULT | FT_LOAD_NO_BITMAP
    face.load_char('S', flags )
    slot = face.glyph

    # Stroke the glyph outline with round caps/joins, radius 64 units.
    glyph = slot.get_glyph()
    stroker = Stroker( )
    stroker.set(64, FT_STROKER_LINECAP_ROUND, FT_STROKER_LINEJOIN_ROUND, 0 )
    glyph.stroke( stroker )
    # Rasterize the stroked outline into an 8-bit grayscale bitmap.
    blyph = glyph.to_bitmap(FT_RENDER_MODE_NORMAL, Vector(0,0))
    bitmap = blyph.bitmap
    width, rows, pitch = bitmap.width, bitmap.rows, bitmap.pitch
    top, left = blyph.top, blyph.left

    # The bitmap rows are `pitch` bytes apart; keep only `width` bytes per
    # row, then reshape into a 2D array for display.
    data = []
    for i in range(rows):
        data.extend(bitmap.buffer[i*pitch:i*pitch+width])
    Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)

    plt.figure(figsize=(6,8))
    plt.imshow(Z, interpolation='nearest', cmap=plt.cm.gray_r, origin='lower')
    plt.show()
| bsd-3-clause |
kylerbrown/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data: two 2-feature views of the Boston housing data.
X1 = load_boston()['data'][:, [8, 10]]  # two clusters
X2 = load_boston()['data'][:, [5, 12]]  # "banana"-shaped

# Define "classifiers" to be used
classifiers = {
    "Empirical Covariance": EllipticEnvelope(support_fraction=1.,
                                             contamination=0.261),
    "Robust Covariance (Minimum Covariance Determinant)":
    EllipticEnvelope(contamination=0.261),
    "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}

# Learn a frontier for outlier detection with several classifiers.
# The zero level set of each decision function is drawn as a contour.
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    plt.figure(1)
    clf.fit(X1)
    Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
    Z1 = Z1.reshape(xx1.shape)
    legend1[clf_name] = plt.contour(
        xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
    plt.figure(2)
    clf.fit(X2)
    Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
    Z2 = Z2.reshape(xx2.shape)
    legend2[clf_name] = plt.contour(
        xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])

legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())

# Plot the results (= shape of the data points cloud)
plt.figure(1)  # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
             xycoords="data", textcoords="data",
             xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")

legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())

plt.figure(2)  # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
# BUG FIX: the legend labels must be the classifier names (the dict keys),
# not the contour objects (the dict values), matching the first figure.
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
catalyst-cooperative/pudl | src/pudl/analysis/service_territory.py | 1 | 19410 | """
Compile historical utility and balancing area territories.
Use the mapping of utilities to counties, and balancing areas to utilities, available
within the EIA 861, in conjunction with the US Census geometries for counties, to
infer the historical spatial extent of utility and balancing area territories. Output
the resulting geometries for use in other applications.
"""
import argparse
import logging
import math
import sys
import coloredlogs
import contextily as ctx
import pandas as pd
import sqlalchemy as sa
from matplotlib import pyplot as plt
import pudl
logger = logging.getLogger(__name__)
################################################################################
# Coordinate Reference Systems used in different contexts
################################################################################
MAP_CRS = "EPSG:3857" # For mapping w/ OSM baselayer tiles
CALC_CRS = "ESRI:102003" # For accurate area calculations
def get_all_utils(pudl_out):
    """
    Compile IDs and Names of all known EIA Utilities.

    Grab all EIA utility names and IDs from both the EIA 861 Service Territory
    table and the EIA 860 Utility entity table. This is a temporary function
    that's only needed because we haven't integrated the EIA 861 information
    into the entity harvesting process and PUDL database yet.

    Args:
        pudl_out (pudl.output.pudltabl.PudlTabl): The PUDL output object which
            should be used to obtain PUDL data.

    Returns:
        pandas.DataFrame: Having 2 columns ``utility_id_eia`` and
        ``utility_name_eia``.
    """
    id_cols = ["utility_id_eia", "utility_name_eia"]
    # Pull utility IDs/names from both sources, then de-dupe on the ID.
    utils_eia860 = pudl_out.utils_eia860()[id_cols]
    utils_eia861 = pudl_out.service_territory_eia861()[id_cols]
    all_utils = pd.concat([utils_eia860, utils_eia861])
    all_utils = all_utils.dropna(subset=["utility_id_eia"])
    return all_utils.drop_duplicates(subset=["utility_id_eia"])
################################################################################
# Functions that compile geometries based on EIA 861 data tables:
################################################################################
def get_territory_fips(ids, assn, assn_col, st_eia861, limit_by_state=True):
    """
    Compile county FIPS codes associated with an entity's service territory.

    For each entity identified by ``ids``, look up the set of counties
    associated with that entity on an annual basis. Optionally limit the set
    of counties to those within states where the selected entities reported
    activity elsewhere within the EIA 861 data.

    Args:
        ids (iterable of ints): A collection of EIA utility or balancing
            authority IDs.
        assn (pandas.DataFrame): Association table, relating ``report_date``,
            ``state``, and ``utility_id_eia`` to each other, as well as the
            column indicated by ``assn_col`` -- if it's not ``utility_id_eia``.
        assn_col (str): Label of the dataframe column in ``assn`` that contains
            the ID of the entities of interest. Should probably be either
            ``balancing_authority_id_eia`` or ``utility_id_eia``.
        st_eia861 (pandas.DataFrame): The EIA 861 Service Territory table.
        limit_by_state (bool): Whether to require that the counties associated
            with the balancing authority are inside a state that has also been
            seen in association with the balancing authority and the utility
            whose service territory contains the county.

    Returns:
        pandas.DataFrame: A table associating the entity IDs with a collection
        of counties annually, identifying counties both by name and
        county_id_fips (both state and state_id_fips are included for
        clarity).
    """
    # Keep only the association records for the requested entities.
    relevant_assn = assn[assn[assn_col].isin(ids)]
    if not limit_by_state:
        # Without the state column, the inner merge below keys only on the
        # remaining shared columns, pulling in counties from any state.
        relevant_assn = relevant_assn.drop("state", axis="columns")
    keep_cols = [
        "report_date", assn_col,
        "state", "county",
        "state_id_fips", "county_id_fips",
    ]
    merged = pd.merge(relevant_assn, st_eia861, how="inner")
    return merged.loc[:, keep_cols].drop_duplicates()
def add_geometries(df, census_gdf, dissolve=False, dissolve_by=None):
    """
    Merge census geometries into dataframe on county_id_fips, optionally dissolving.

    Merge the US Census county-level geospatial information into the DataFrame
    df based on the the column county_id_fips (in df), which corresponds to the
    column GEOID10 in census_gdf. Also bring in the population and area of the
    counties, summing as necessary in the case of dissolved geometries.

    Args:
        df (pandas.DataFrame): A DataFrame containing a county_id_fips column.
        census_gdf (geopandas.GeoDataFrame): A GeoDataFrame based on the US
            Census demographic profile (DP1) data at county resolution, with
            the original column names as published by US Census.
        dissolve (bool): If True, dissolve individual county geometries into
            larger service territories.
        dissolve_by (list): The columns to group by in the dissolve. For
            example, dissolve_by=["report_date", "utility_id_eia"] might
            provide annual utility service territories, while
            ["report_date", "balancing_authority_id_eia"] would provide annual
            balancing authority territories.

    Returns:
        geopandas.GeoDataFrame
    """
    out_gdf = (
        # Select only the Census columns we need and give them PUDL names.
        census_gdf[["geoid10", "namelsad10", "dp0010001", "geometry"]]
        .rename(columns={
            "geoid10": "county_id_fips",
            "namelsad10": "county_name_census",
            "dp0010001": "population",
        })
        # Calculate county areas using cylindrical equal area projection:
        .assign(area_km2=lambda x: x.geometry.to_crs(epsg=6933).area / 1e6)
        # Right merge keeps every row of `df`, joining on the shared
        # county_id_fips column.
        .merge(df, how="right")
    )
    if dissolve is True:
        # Don't double-count duplicated counties, if any.
        out_gdf = out_gdf.drop_duplicates(subset=dissolve_by + ["county_id_fips", ])
        # Sum these numerical columns so we can merge with dissolved geometries
        summed = (
            out_gdf.groupby(dissolve_by)[["population", "area_km2"]]
            .sum().reset_index()
        )
        # Dissolve the county polygons into one geometry per group, drop the
        # now-meaningless per-county columns, and re-attach the summed
        # population/area totals.
        out_gdf = (
            out_gdf.dissolve(by=dissolve_by)
            .drop([
                "county_id_fips",
                "county",
                "county_name_census",
                "state",
                "state_id_fips",
                "population",
                "area_km2",
            ], axis="columns")
            .reset_index()
            .merge(summed)
        )
    return out_gdf
def get_territory_geometries(ids,
                             assn,
                             assn_col,
                             st_eia861,
                             census_gdf,
                             limit_by_state=True,
                             dissolve=False):
    """
    Compile service territory geometries based on county_id_fips.

    Calls :func:`get_territory_fips` to generate the list of counties
    associated with each entity identified by ``ids``, and then merges in the
    corresponding county geometries from the US Census DP1 data passed in via
    ``census_gdf``.

    Optionally dissolve all of the county level geometries into a single
    geometry for each combination of entity and year.

    Note:
        Dissolving geometries is a costly operation, and may take half an hour
        or more if you are processing all entities for all years. Dissolving
        also means that all the per-county information will be lost, rendering
        the output inappropriate for use in many analyses. Dissolving is
        mostly useful for generating visualizations.

    Args:
        ids (iterable of ints): A collection of EIA balancing authority IDs.
        assn (pandas.DataFrame): Association table, relating ``report_date``,
            ``state``, and ``utility_id_eia`` to each other, as well as the
            column indicated by ``assn_col`` -- if it's not ``utility_id_eia``.
        assn_col (str): Label of the dataframe column in ``assn`` that contains
            the ID of the entities of interest. Should probably be either
            ``balancing_authority_id_eia`` or ``utility_id_eia``.
        st_eia861 (pandas.DataFrame): The EIA 861 Service Territory table.
        census_gdf (geopandas.GeoDataFrame): The US Census DP1 county-level
            geometries as returned by
            pudl.output.censusdp1tract.get_layer("county").
        limit_by_state (bool): Whether to require that the counties associated
            with the balancing authority are inside a state that has also been
            seen in association with the balancing authority and the utility
            whose service territory contains the county.
        dissolve (bool): If False, each record in the compiled territory will
            correspond to a single county, with a county-level geometry, and
            there will be many records enumerating all the counties associated
            with a given balancing_authority_id_eia in each year. If
            dissolve=True, all of the county-level geometries for each entity
            in each year will be merged together ("dissolved") resulting in a
            single geometry and record per entity-year.

    Returns:
        geopandas.GeoDataFrame
    """
    # First compile the annual entity -> county FIPS associations...
    county_fips = get_territory_fips(
        ids=ids,
        assn=assn,
        assn_col=assn_col,
        st_eia861=st_eia861,
        limit_by_state=limit_by_state,
    )
    # ...then attach (and optionally dissolve) the Census county geometries.
    return add_geometries(
        county_fips,
        census_gdf,
        dissolve=dissolve,
        dissolve_by=["report_date", assn_col],
    )
def compile_geoms(pudl_out,
                  census_counties,
                  entity_type,  # "ba" or "util"
                  dissolve=False,
                  limit_by_state=True,
                  save=True):
    """
    Compile all available utility or balancing authority geometries.

    Args:
        pudl_out (pudl.output.pudltabl.PudlTabl): A PUDL output object, used
            to extract and cache the EIA 861 tables.
        census_counties (geopandas.GeoDataFrame): County-level US Census DP1
            data and county geometries.
        entity_type (str): Which kind of service territory to compile. Must
            be either "ba" (balancing authority) or "util" (utility).
        dissolve (bool): If True, merge the county-level geometries into a
            single geometry per entity per year; otherwise keep one record
            per county.
        limit_by_state (bool): Whether to limit included counties to those
            with observed EIA 861 data in association with the state and the
            utility/balancing authority.
        save (bool): If True, also write the compiled GeoDataFrame out as a
            GeoParquet file before returning. Especially useful for dissolved
            geometries, which are computationally expensive.

    Raises:
        ValueError: if ``entity_type`` is neither "ba" nor "util".

    Returns:
        geopandas.GeoDataFrame
    """
    logger.info(
        "Compiling %s geometries with dissolve=%s and limit_by_state=%s.",
        entity_type, dissolve, limit_by_state
    )
    # Reject unknown entity types up front:
    if entity_type not in ("ba", "util"):
        raise ValueError(f"Got {entity_type=}, but need either 'ba' or 'util'")
    # Identify all entity IDs with service territory information, and the
    # association table / ID column appropriate to the entity type:
    if entity_type == "ba":
        entity_ids = (
            pudl_out.balancing_authority_eia861()
            .balancing_authority_id_eia.unique()
        )
        association = pudl_out.balancing_authority_assn_eia861()
        entity_id_col = "balancing_authority_id_eia"
    else:
        entity_ids = get_all_utils(pudl_out).utility_id_eia.unique()
        association = pudl_out.utility_assn_eia861()
        entity_id_col = "utility_id_eia"
    territories = get_territory_geometries(
        ids=entity_ids,
        assn=association,
        assn_col=entity_id_col,
        st_eia861=pudl_out.service_territory_eia861(),
        census_gdf=census_counties,
        limit_by_state=limit_by_state,
        dissolve=dissolve,
    )
    if save:
        _save_geoparquet(
            territories,
            entity_type=entity_type,
            dissolve=dissolve,
            limit_by_state=limit_by_state,
        )
    return territories
def _save_geoparquet(gdf, entity_type, dissolve, limit_by_state):
    """
    Save compiled territory geometries as a GeoParquet file in the CWD.

    The output filename encodes the arguments used to compile the
    geometries, e.g. ``ba_geom_limited_dissolved.pq``.

    Args:
        gdf (geopandas.GeoDataFrame): The compiled geometries to save.
        entity_type (str): "ba" or "util"; used in the output filename.
        dissolve (bool): Whether the geometries were dissolved.
        limit_by_state (bool): Whether counties were limited by state.

    Returns:
        None
    """
    # For filenames based on input args:
    dissolved = "_dissolved" if dissolve else ""
    if not dissolve:
        # States & counties only remain at this point if we didn't dissolve.
        # Work on a copy so that filling in NA values below doesn't mutate
        # the caller's dataframe as a side effect.
        gdf = gdf.copy()
        for col in ("county_id_fips", "state_id_fips"):
            # pandas.NA values are not compatible with Parquet Strings yet.
            gdf[col] = gdf[col].fillna("")
    limited = "_limited" if limit_by_state else ""
    # Save the geometries to a GeoParquet file
    fn = f"{entity_type}_geom{limited+dissolved}.pq"
    gdf.to_parquet(fn, index=False)
################################################################################
# Functions for visualizing the service territory geometries
################################################################################
def plot_historical_territory(gdf, id_col, id_val):
    """
    Plot all the historical geometries defined for the specified entity.

    This is useful for exploring how a particular entity's service territory
    has evolved over time, or for identifying individual missing or
    inaccurate territories.

    Args:
        gdf (geopandas.GeoDataFrame): A geodataframe containing geometries
            pertaining electricity planning areas. Can be broken down by
            county FIPS code, or have a single record containing a geometry
            for each combination of report_date and the column being used to
            select planning areas (see below).
        id_col (str): The label of a column in gdf that identifies the
            planning area to be visualized, like utility_id_eia,
            balancing_authority_id_eia, or balancing_authority_code_eia.
        id_val (str or int): The ID of the entity whose historical
            territories should be plotted.

    Raises:
        ValueError: if ``id_col`` is not a column in ``gdf``.

    Returns:
        None
    """
    if id_col not in gdf.columns:
        raise ValueError(f"The input id_col {id_col} doesn't exist in this GDF.")
    logger.info("Plotting historical territories for %s==%s.", id_col, id_val)
    # Pare down the GDF so this all goes faster. Take an explicit copy so the
    # report_year assignment below doesn't write through a view of (or raise
    # SettingWithCopyWarning about) the caller's dataframe.
    entity_gdf = gdf[gdf[id_col] == id_val].copy()
    if "county_id_fips" in entity_gdf.columns:
        entity_gdf = entity_gdf.drop_duplicates(
            subset=["report_date", "county_id_fips"])
    entity_gdf["report_year"] = entity_gdf.report_date.dt.year
    logger.info(
        "Plotting service territories of %s %s records.",
        len(entity_gdf), id_col
    )
    # Create a grid of subplots sufficient to hold all the years:
    years = entity_gdf.report_year.sort_values().unique()
    ncols = 5
    nrows = math.ceil(len(years) / ncols)
    fig, axes = plt.subplots(
        ncols=ncols, nrows=nrows, figsize=(15, 3 * nrows),
        sharex=True, sharey=True, facecolor="white")
    fig.suptitle(f"{id_col} == {id_val}")
    # zip() stops at the shorter sequence, so any extra axes in the grid are
    # simply left blank.
    for year, ax in zip(years, axes.flat):
        ax.set_title(f"{year}")
        ax.set_xticks([])
        ax.set_yticks([])
        year_gdf = entity_gdf.loc[entity_gdf.report_year == year]
        year_gdf.plot(ax=ax, linewidth=0.1)
    plt.show()
def plot_all_territories(gdf,
                         report_date,
                         respondent_type=("balancing_authority", "utility"),
                         color="black",
                         alpha=0.25,
                         basemap=True):
    """
    Plot all of the planning areas of a given type for a given report date.

    Todo:
        This function needs to be made more general purpose, and less
        entangled with the FERC 714 data.

    Args:
        gdf (geopandas.GeoDataFrame): GeoDataFrame containing planning area
            geometries, organized by respondent_id_ferc714 and report_date.
        report_date (datetime): A Datetime indicating what year's planning
            areas should be displayed.
        respondent_type (str or iterable): Type of respondent whose planning
            areas should be displayed. Either "utility" or
            "balancing_authority" or an iterable collection containing both.
        color (str): Color to use for the planning areas.
        alpha (float): Transparency to use for the planning areas.
        basemap (bool): If true, use the OpenStreetMap tiles for context.

    Returns:
        matplotlib.axes.Axes
    """
    # Respondents excluded from the plot: Alaska & Hawaii (off the CONUS
    # basemap) and duplicate PJM records. The name is referenced via @ inside
    # the df.query() string below, hence the noqa.
    unwanted_respondent_ids = (  # noqa: F841 variable is used, in df.query() below
        112,  # Alaska
        133,  # Alaska
        178,  # Hawaii
        301,  # PJM Dupe
        302,  # PJM Dupe
        303,  # PJM Dupe
        304,  # PJM Dupe
        305,  # PJM Dupe
        306,  # PJM Dupe
    )
    # Allow a bare string for a single respondent type:
    if isinstance(respondent_type, str):
        respondent_type = (respondent_type, )
    plot_gdf = (
        gdf.query("report_date==@report_date")
        .query("respondent_id_ferc714 not in @unwanted_respondent_ids")
        .query("respondent_type in @respondent_type")
    )
    ax = plot_gdf.plot(figsize=(20, 20), color=color, alpha=alpha, linewidth=1)
    plt.title(f"FERC 714 {', '.join(respondent_type)} planning areas for {report_date}")
    if basemap:
        # NOTE(review): ctx appears to be contextily; fetching basemap tiles
        # requires network access -- confirm.
        ctx.add_basemap(ax)
    plt.show()
    return ax
################################################################################
# Functions that provide a CLI to the service territory module
################################################################################
def parse_command_line(argv):
    """
    Parse script command line arguments. See the -h option.

    Args:
        argv (list): command line arguments including caller file name.

    Returns:
        argparse.Namespace: the parsed arguments, with a boolean ``dissolve``
        attribute (False unless -d/--dissolve was given).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-d",
        "--dissolve",
        # dest="dissolve" is inferred from the long option name, and
        # action="store_true" already implies default=False.
        action="store_true",
        help="Dissolve county level geometries to utility or balancing authorities",
    )
    return parser.parse_args(argv[1:])
def main():
    """Compile historical utility and balancing area territories.

    Compiles service territory geometries for both utilities and balancing
    authorities, with and without state-based limiting. Each result is saved
    to a GeoParquet file by compile_geoms (save defaults to True), so the
    returned geodataframes are discarded here. Dissolving is controlled by
    the -d/--dissolve command line flag.
    """
    # Display logged output from the PUDL package:
    pudl_logger = logging.getLogger("pudl")
    log_format = '%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s'
    coloredlogs.install(fmt=log_format, level='INFO', logger=pudl_logger)
    args = parse_command_line(sys.argv)
    # Connect to the PUDL DB and wrap it in a tabular output object:
    pudl_settings = pudl.workspace.setup.get_defaults()
    pudl_engine = sa.create_engine(pudl_settings['pudl_db'])
    pudl_out = pudl.output.pudltabl.PudlTabl(pudl_engine)
    # Load the US Census DP1 county data:
    county_gdf = pudl.output.censusdp1tract.get_layer(
        layer="county", pudl_settings=pudl_settings
    )
    # Every combination of entity type and state limiting:
    kwargs_dicts = [
        {"entity_type": "util", "limit_by_state": False},
        {"entity_type": "util", "limit_by_state": True},
        {"entity_type": "ba", "limit_by_state": True},
        {"entity_type": "ba", "limit_by_state": False},
    ]
    for kwargs in kwargs_dicts:
        _ = compile_geoms(
            pudl_out,
            census_counties=county_gdf,
            dissolve=args.dissolve,
            **kwargs,
        )
if __name__ == "__main__":
    # main() returns None, so this exits with status code 0 on success.
    sys.exit(main())
| mit |
weaponsjtu/Kaggle_xBle | gen_ensemble.py | 1 | 19995 | ###
# ensemble.py
# author: Weipeng Zhang
#
#
# 1. check each weight by hyperopt
# 2. apply the weight to train/test
###
from sklearn.metrics import mean_squared_error as MSE
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge
import cPickle as pickle
import numpy as np
import pandas as pd
import sys,os,time
import multiprocessing
from hyperopt import fmin, tpe, hp, Trials, STATUS_OK, pyll
from hyperopt.mongoexp import MongoTrials
from param import config
from utils import *
def add_prior_models(model_library):
    # Blend the predictions of two xgboost variants (label-encoded vs.
    # dict-vectorized features) into several "prior" pseudo-models, write the
    # blended predictions to disk alongside the real models' predictions, and
    # append the new model names to model_library.
    # NOTE: this is Python 2 code (dict.has_key); comments only added here.
    #prior_models = {
    #        'xgboost-art@1': {
    #            'weight': 0.463,
    #            'pow_weight': 0.01,
    #            },
    #        'xgboost-art@2': {
    #            'weight': 0.463,
    #            'pow_weight': 0.8,
    #            },
    #        'xgboost-art@3': {
    #            'weight': 0.463,
    #            'pow_weight': 0.045,
    #            'pow_weight1': 0.055,
    #            },
    #        'xgboost-art@4': {
    #            'weight': 0.463,
    #            'pow_weight': 0.98,
    #            },
    #        'xgboost-art@5': {
    #            'weight': 0.463,
    #            'pow_weight': 1,
    #            },
    #        'xgboost-art@6': {
    #            'weight': 0.47,
    #            'pow_weight': 1,
    #            },
    #        }
    # Generate four blends with a fixed weight and increasing power weights:
    prior_models = {}
    for i in range(1, 5):
        model = 'xgboost-art@%d'%i
        prior_models[model] = {'weight': 0.463, 'pow_weight': 0.01 * i}
    feat_names = config.feat_names
    model_list = config.model_list
    # Per-fold cross-validation predictions:
    for iter in range(config.kiter):
        for fold in range(config.kfold):
            path = "%s/iter%d/fold%d" %(config.data_folder, iter, fold)
            with open("%s/label_encode_xgboost-art.pred.pkl" %path, 'rb') as f:
                p1 = pickle.load(f)
            with open("%s/dictvec_xgboost-art.pred.pkl" %path, 'rb') as f:
                p2 = pickle.load(f)
            for model in prior_models.keys():
                weight = prior_models[model]['weight']
                pow_weight1 = prior_models[model]['pow_weight']
                pow_weight2 = pow_weight1
                # Optional separate exponent for the second prediction:
                if prior_models[model].has_key('pow_weight1'):
                    pow_weight2 = prior_models[model]['pow_weight1']
                # Weighted combination of power-transformed predictions:
                pred = weight * (p1**pow_weight1) + (1-weight) * (p2**pow_weight2)
                with open("%s/%s.pred.pkl" %(path, model), 'wb') as f:
                    pickle.dump(pred, f, -1)
    # Same blending on the full-training-set ("all") predictions:
    path = "%s/all" %(config.data_folder)
    with open("%s/label_encode_xgboost-art.pred.pkl" %path, 'rb') as f:
        p1 = pickle.load(f)
    with open("%s/dictvec_xgboost-art.pred.pkl" %path, 'rb') as f:
        p2 = pickle.load(f)
    for model in prior_models.keys():
        weight = prior_models[model]['weight']
        pow_weight1 = prior_models[model]['pow_weight']
        pow_weight2 = pow_weight1
        if prior_models[model].has_key('pow_weight1'):
            pow_weight2 = prior_models[model]['pow_weight1']
        pred = weight * (p1**pow_weight1) + (1-weight) * (p2**pow_weight2)
        with open("%s/%s.pred.pkl" %(path, model), 'wb') as f:
            pickle.dump(pred, f, -1)
    # Register the new pseudo-models in the (mutated) model library:
    for model in prior_models.keys():
        model_library.append(model)
    return model_library
def ensemble_algorithm(p1, p2, weight):
    """Blend two prediction arrays via a convex (weighted linear) combination.

    Returns ``weight * p1 + (1 - weight) * p2``. Alternative blending schemes
    (harmonic mean, log-space average) were tried historically and abandoned.
    """
    complement = 1 - weight
    return p1 * weight + p2 * complement
def ensemble_selection_obj(param, model1_pred, model2_pred, labels, num_valid_matrix):
    """Hyperopt objective for greedy ensemble selection.

    Blends the current ensemble predictions (model1_pred) with a candidate
    model's predictions (model2_pred) at the weight carried in ``param``,
    scores the blend on every CV iteration/fold, and returns the negated
    mean score (hyperopt's fmin minimizes, we want to maximize).
    """
    blend_weight = param['weight']
    scores = np.zeros((config.kiter, config.kfold), dtype=float)
    for it in range(config.kiter):
        for fd in range(config.kfold):
            # Only the first num_valid_matrix[it, fd] entries are real
            # predictions; the rest of the buffer is zero padding.
            n_valid = num_valid_matrix[it, fd]
            blended = ensemble_algorithm(
                model1_pred[it, fd, :n_valid],
                model2_pred[it, fd, :n_valid],
                blend_weight,
            )
            scores[it][fd] = ml_score(labels[it, fd, :n_valid], blended)
    return -np.mean(scores)
def ensemble_selection():
    # Greedy forward ensemble selection over the model library:
    #   1. Score every model's out-of-fold predictions and start from the best.
    #   2. Repeatedly try blending each model into the current ensemble,
    #      tuning the blend weight with hyperopt, and keep the best addition.
    #   3. Stop when no model improves the CV score; persist the selection to
    #      "<data_folder>/best_model_list" after every accepted addition.
    # NOTE: this is Python 2 code (print statements); comments only added here.
    # load feat, labels and pred
    feat_names = config.feat_names
    model_list = config.model_list
    # load model library
    model_library = gen_model_library()
    model_num = len(model_library)
    print model_library
    # num valid matrix: per (iter, fold) count of validation rows
    num_valid_matrix = np.zeros((config.kiter, config.kfold), dtype=int)
    # load valid labels (padded to a fixed 50000-wide buffer per fold)
    valid_labels = np.zeros((config.kiter, config.kfold, 50000), dtype=float)
    for iter in range(config.kiter):
        for fold in range(config.kfold):
            path = "%s/iter%d/fold%d" % (config.data_folder, iter, fold)
            label_file = "%s/valid.%s.feat.pkl" %(path, feat_names[0])
            with open(label_file, 'rb') as f:
                [x_val, y_true] = pickle.load(f)
            valid_labels[iter, fold, :y_true.shape[0]] = y_true
            num_valid_matrix[iter][fold] = y_true.shape[0]
    maxNumValid = np.max(num_valid_matrix)
    # load all predictions, cross validation
    # compute model's gini cv score
    gini_cv = []
    model_valid_pred = np.zeros((model_num, config.kiter, config.kfold, maxNumValid), dtype=float)
    for mid in range(model_num):
        gini_cv_tmp = np.zeros((config.kiter, config.kfold), dtype=float)
        for iter in range(config.kiter):
            for fold in range(config.kfold):
                path = "%s/iter%d/fold%d" % (config.data_folder, iter, fold)
                pred_file = "%s/%s.pred.pkl" %(path, model_library[mid])
                with open(pred_file, 'rb') as f:
                    y_pred = pickle.load(f)
                model_valid_pred[mid, iter, fold, :num_valid_matrix[iter, fold]] = y_pred
                score = ml_score(valid_labels[iter, fold, :num_valid_matrix[iter, fold]], y_pred)
                gini_cv_tmp[iter][fold] = score
        gini_cv.append(np.mean(gini_cv_tmp))
    # sort the model by their cv mean score
    gini_cv = np.array(gini_cv)
    sorted_model = gini_cv.argsort()[::-1] # large to small
    for mid in sorted_model:
        print model_library[mid]
    print len(sorted_model)
    # boosting ensemble
    # 1. initialization, use the max score model
    model_pred_tmp = np.zeros((config.kiter, config.kfold, maxNumValid), dtype=float)
    for iter in range(config.kiter):
        for fold in range(config.kfold):
            model_pred_tmp[iter, fold, :num_valid_matrix[iter][fold]] = model_valid_pred[sorted_model[0], iter, fold, :num_valid_matrix[iter][fold]]
    print "Init with best model, ml_score %f, Model %s" %(np.max(gini_cv), model_library[sorted_model[0]])
    # 2. greedy search
    best_model_list = []
    best_weight_list = []
    if config.nthread > 1:
        # Multi-process mode: share the running best (score, weight, model)
        # via Manager lists so worker processes can update them.
        manager = multiprocessing.Manager()
        best_gini_tmp = manager.list()
        best_gini_tmp.append( np.max(gini_cv) )
        best_weight_tmp = manager.list()
        best_weight_tmp.append(-1)
        best_model_tmp = manager.list()
        best_model_tmp.append(-1)
    else:
        best_gini = np.max(gini_cv)
        best_weight = None
        best_model = None
    ensemble_iter = 0
    while True:
        iter_time = time.time()
        ensemble_iter += 1
        if config.nthread > 1:
            # -1 is the sentinel for "no improving model found this round".
            best_model_tmp[0] = -1
            model_id = 0
            # Evaluate candidates in batches of config.max_core processes:
            while model_id < len(sorted_model):
                mp_list = []
                for i in range(model_id, min(len(sorted_model), model_id + config.max_core)):
                    mp = EnsembleProcess(ensemble_iter, sorted_model[i] , model_library, sorted_model, model_pred_tmp, model_valid_pred, valid_labels, num_valid_matrix, best_gini_tmp, best_weight_tmp, best_model_tmp)
                    mp_list.append(mp)
                model_id += config.max_core
                for mp in mp_list:
                    mp.start()
                for mp in mp_list:
                    mp.join()
            best_gini = best_gini_tmp[0]
            best_weight = best_weight_tmp[0]
            best_model = best_model_tmp[0]
            if best_model == -1:
                best_model = None
            # TODO
        else:
            # Single-process mode: try blending every model into the current
            # ensemble, tuning the blend weight with hyperopt.
            for model in sorted_model:
                print "ensemble iter %d, model (%d, %s)" %(ensemble_iter, model, model_library[model])
                # jump for the first max model
                #if ensemble_iter == 1 and model == sorted_model[0]:
                #    continue
                obj = lambda param: ensemble_selection_obj(param, model_pred_tmp, model_valid_pred[model], valid_labels, num_valid_matrix)
                param_space = {
                    'weight': hp.quniform('weight', 0, 1, 0.01),
                }
                trials = Trials()
                #trials = MongoTrials('mongo://172.16.13.7:27017/ensemble/jobs', exp_key='exp%d_%d'%(ensemble_iter, model))
                best_param = fmin(obj,
                        space = param_space,
                        algo = tpe.suggest,
                        max_evals = config.ensemble_max_evals,
                        trials = trials)
                best_w = best_param['weight']
                # Re-score the blend at the tuned weight:
                gini_cv_tmp = np.zeros((config.kiter, config.kfold), dtype=float)
                for iter in range(config.kiter):
                    for fold in range(config.kfold):
                        p1 = model_pred_tmp[iter, fold, :num_valid_matrix[iter, fold]]
                        p2 = model_valid_pred[model, iter, fold, :num_valid_matrix[iter, fold]]
                        y_true = valid_labels[iter, fold, :num_valid_matrix[iter, fold]]
                        y_pred = ensemble_algorithm(p1, p2, best_w)
                        score = ml_score(y_true, y_pred)
                        gini_cv_tmp[iter, fold] = score
                print "Iter %d, ml_score %f, Model %s, Weight %f" %(ensemble_iter, np.mean(gini_cv_tmp), model_library[model], best_w)
                # Keep the candidate only on a meaningful improvement:
                if (np.mean(gini_cv_tmp) - best_gini) >= 0.000001:
                    best_gini, best_model, best_weight = np.mean(gini_cv_tmp), model, best_w
            #### single process
        # No model improved the ensemble -> stop.
        if best_model == None: #or best_weight > 0.9:
            break
        print "Best for Iter %d, ml_score %f, Model %s, Weight %f" %(ensemble_iter, best_gini, model_library[best_model], best_weight)
        best_weight_list.append(best_weight)
        best_model_list.append(best_model)
        # reset the valid pred: fold the accepted model into the ensemble
        for iter in range(config.kiter):
            for fold in range(config.kfold):
                p1 = model_pred_tmp[iter, fold, :num_valid_matrix[iter, fold]]
                p2 = model_valid_pred[best_model, iter, fold, :num_valid_matrix[iter, fold]]
                model_pred_tmp[iter, fold, :num_valid_matrix[iter][fold]] = ensemble_algorithm(p1, p2, best_weight)
        best_model = None
        print 'ensemble iter %d done!!!, cost time is %s' % (ensemble_iter, time.time() - iter_time)
        # save best model list, every iteration
        with open("%s/best_model_list" % config.data_folder, 'wb') as f:
            pickle.dump([model_library, sorted_model, best_model_list, best_weight_list], f, -1)
def ensemble_prediction():
    # Replay the saved greedy-selection result on the full-training-set
    # ("all") predictions to build the final submission.
    # NOTE: this is Python 2 code (print statements); comments only added.
    # load best model list (written by ensemble_selection)
    with open("%s/best_model_list" % config.data_folder, 'rb') as f:
        [model_library, sorted_model, best_model_list, best_weight_list] = pickle.load(f)
    # prediction, generate submission file
    path = "%s/all" % config.data_folder
    print "Init with (%s)" %(model_library[sorted_model[0]])
    with open("%s/%s.pred.pkl" %(path, model_library[sorted_model[0]]), 'rb') as f:
        y_pred = pickle.load(f)
    # generate best single model submission
    gen_subm(y_pred, 'sub/best_single.csv')
    # Blend in each selected model with its tuned weight, in order:
    for i in range(len(best_model_list)):
        model = best_model_list[i]
        weight = best_weight_list[i]
        print "(%s), %f" %(model_library[model], weight)
        with open("%s/%s.pred.pkl" %(path, model_library[model]), 'rb') as f:
            y_pred_tmp = pickle.load(f)
        y_pred = ensemble_algorithm(y_pred, y_pred_tmp, weight)
    # generate ensemble submission finally
    gen_subm(y_pred)
class EnsembleProcess(multiprocessing.Process):
    # Worker process for one candidate model in an ensemble-selection round.
    # Tunes a blend weight with hyperopt, scores the blend, and updates the
    # shared best-so-far state (multiprocessing.Manager lists) on improvement.
    # NOTE(review): the read-compare-write on best_gini/best_model/best_weight
    # below is not atomic across concurrent workers -- a race could lose an
    # update; confirm whether this is acceptable for this workload.
    def __init__(self, ensemble_iter, model, model_library, sorted_model, model_pred_tmp, model_valid_pred, valid_labels, num_valid_matrix, best_gini, best_weight, best_model):
        multiprocessing.Process.__init__(self)
        self.ensemble_iter = ensemble_iter
        self.model = model
        self.model_library = model_library
        self.sorted_model = sorted_model
        self.model_pred_tmp = model_pred_tmp
        self.model_valid_pred = model_valid_pred
        self.valid_labels = valid_labels
        self.num_valid_matrix = num_valid_matrix
        # Shared one-element Manager lists holding the running best:
        self.best_gini = best_gini
        self.best_weight = best_weight
        self.best_model = best_model
    def run(self):
        print "ensemble iter %d, model (%d, %s)" %(self.ensemble_iter, self.model, self.model_library[self.model])
        # jump for the first max model
        #if self.ensemble_iter == 1 and self.model == self.sorted_model[0]:
        #    return
        # Tune the blend weight between the current ensemble and this model:
        obj = lambda param: ensemble_selection_obj(param, self.model_pred_tmp, self.model_valid_pred[self.model], self.valid_labels, self.num_valid_matrix)
        param_space = {
            'weight': hp.quniform('weight', 0, 1, 0.01),
        }
        trials = Trials()
        #trials = MongoTrials('mongo://172.16.13.7:27017/ensemble/jobs', exp_key='exp%d_%d'%(ensemble_iter, model))
        best_param = fmin(obj,
                space = param_space,
                algo = tpe.suggest,
                max_evals = config.ensemble_max_evals,
                trials = trials)
        best_w = best_param['weight']
        # Re-score the blend at the tuned weight over all CV folds:
        gini_cv_tmp = np.zeros((config.kiter, config.kfold), dtype=float)
        for iter in range(config.kiter):
            for fold in range(config.kfold):
                p1 = self.model_pred_tmp[iter, fold, :self.num_valid_matrix[iter, fold]]
                p2 = self.model_valid_pred[self.model, iter, fold, :self.num_valid_matrix[iter, fold]]
                y_true = self.valid_labels[iter, fold, :self.num_valid_matrix[iter, fold]]
                y_pred = ensemble_algorithm(p1, p2, best_w)
                score = ml_score(y_true, y_pred)
                gini_cv_tmp[iter, fold] = score
        print "Iter %d, ml_score %f, Model %s, Weight %f" %(self.ensemble_iter, np.mean(gini_cv_tmp), self.model_library[self.model], best_w)
        # Publish the result only on a meaningful improvement:
        if (np.mean(gini_cv_tmp) - self.best_gini[0]) >= 0.000001:
            self.best_gini[0], self.best_model[0], self.best_weight[0] = np.mean(gini_cv_tmp), self.model, best_w
def ensemble_rank_average():
    # Rank/mean-average ensembling over a hand-picked model list.
    # WARNING: the unconditional `return 0` below makes everything after it
    # dead code -- as written, this function only dumps each listed model's
    # "all" predictions to per-model CSV submissions and exits.
    # NOTE: this is Python 2 code (print statements); comments only added.
    # load feat, labels and pred
    feat_names = config.feat_names
    model_list = config.model_list
    # load model library
    #model_library = gen_model_library()
    model_library = ['label_xgb_linear_fix@6', 'label_xgb_fix@6', 'label_xgb_count@6', 'label_xgb_rank@6', 'label_lasagne@6', 'label_extratree@6', 'label_ridge@6', 'label_rgf@6', 'label_logistic@6']
    # Dump each model's full-training predictions as its own submission:
    for model in model_library:
        with open('%s/all/%s.pred.pkl'%(config.data_folder, model), 'rb') as f:
            y_pred = pickle.load(f)
        gen_subm(y_pred, 'sub/final/%s.csv'%model)
    return 0
    # --- dead code below this point (see WARNING above) ---
    #label_xgb_fix_log@6 0.999108398851
    #label_xgb_linear_fix@6 0.979159034293
    #label_xgb_fix@6 1.0
    #label_xgb_fix@1 0.980509196355
    #label_xgb_tree_log@2 0.979845717419
    #label_xgb_tree_log@1 0.978312937118
    #label_xgb_count@6 0.977797411688
    #label_xgb_rank@6 0.928070928806
    model_num = len(model_library)
    print model_library
    print model_num
    # num valid matrix
    num_valid_matrix = np.zeros((config.kiter, config.kfold), dtype=int)
    # load valid labels
    valid_labels = np.zeros((config.kiter, config.kfold, 50000), dtype=float)
    for iter in range(config.kiter):
        for fold in range(config.kfold):
            path = "%s/iter%d/fold%d" % (config.data_folder, iter, fold)
            label_file = "%s/valid.%s.feat.pkl" %(path, feat_names[0])
            with open(label_file, 'rb') as f:
                [x_val, y_true] = pickle.load(f)
            valid_labels[iter, fold, :y_true.shape[0]] = y_true
            num_valid_matrix[iter][fold] = y_true.shape[0]
    maxNumValid = np.max(num_valid_matrix)
    print "valid labels done!!!"
    # load all predictions, cross validation
    # compute model's gini cv score
    gini_cv = []
    model_valid_pred = np.zeros((model_num, config.kiter, config.kfold, maxNumValid), dtype=float)
    for mid in range(model_num):
        gini_cv_tmp = np.zeros((config.kiter, config.kfold), dtype=float)
        for iter in range(config.kiter):
            for fold in range(config.kfold):
                path = "%s/iter%d/fold%d" % (config.data_folder, iter, fold)
                pred_file = "%s/%s.pred.pkl" %(path, model_library[mid])
                with open(pred_file, 'rb') as f:
                    y_pred = pickle.load(f)
                model_valid_pred[mid, iter, fold, :num_valid_matrix[iter, fold]] = y_pred
                score = ml_score(valid_labels[iter, fold, :num_valid_matrix[iter, fold]], y_pred)
                gini_cv_tmp[iter][fold] = score
        gini_cv.append(np.mean(gini_cv_tmp))
    print "gini cv done!!!"
    # sort the model by their cv mean score
    gini_cv = np.array(gini_cv)
    sorted_model = gini_cv.argsort()[::-1]
    ### rank average ###
    # Average the top-k models for every prefix k of the sorted list and keep
    # the prefix with the best CV score:
    best_model_end = 0
    best_gini = 0
    for model_end_id in range(len(sorted_model)):
        gini_cv_tmp = np.zeros((config.kiter, config.kfold), dtype=float)
        for iter in range(config.kiter):
            for fold in range(config.kfold):
                pred_tmp = np.zeros((num_valid_matrix[iter, fold]), dtype=float)
                #pred_tmp = np.ones((num_valid_matrix[iter, fold]), dtype=float)
                for mid in range(model_end_id+1):
                    y_pred = model_valid_pred[sorted_model[mid], iter, fold, :num_valid_matrix[iter, fold]]
                    #pred_tmp += (y_pred.argsort() + 1)*1.0 / len(y_pred)
                    pred_tmp += y_pred # log mean, harmonic mean
                    #pred_tmp *= y_pred
                #pred_tmp = (model_end_id + 1)*1.0 / pred_tmp
                pred_tmp /= (model_end_id + 1)
                #pred_tmp = np.power(pred_tmp, 1.0/(model_end_id + 1))
                gini_cv_tmp[iter, fold] = ml_score( valid_labels[iter, fold, :num_valid_matrix[iter, fold]], pred_tmp)
        if np.mean(gini_cv_tmp) > best_gini:
            best_model_end = model_end_id
            best_gini = np.mean(gini_cv_tmp)
        print "model end id %d, best_gini %f" %(model_end_id, np.mean(gini_cv_tmp))
    print best_model_end
    print best_gini
    #path = "%s/all/%s.pred.pkl" %(config.data_folder, model_library[ sorted_model[0] ])
    #with open(path, 'rb') as f:
    #    y_pred = pickle.load(f)
    #    y_pred = (y_pred.argsort() + 1)*1.0 / len(y_pred)
    #for mid in range(1, best_model_end + 1):
    #    path = "%s/all/%s.pred.pkl" %(config.data_folder, model_library[ sorted_model[mid] ])
    #    with open(path, 'rb') as f:
    #        y_pred_tmp = pickle.load(f)
    #    y_pred += (y_pred_tmp.argsort() + 1)*1.0 / len(y_pred)
    #    #y_pred += y_pred_tmp
    #y_pred = y_pred * 1.0 / (best_model_end + 1)
    #gen_subm(y_pred, 'sub/model_rank_avg.csv')
    #TODO
if __name__ == "__main__":
    # CLI dispatch: run one of the ensembling stages selected by argv[1]
    # ("ensemble" | "submission" | "rankavg"). Python 2 code; comments only.
    start_time = time.time()
    flag = sys.argv[1]
    print "start ", flag
    print "Code start at %s" %time.ctime()
    if flag == "ensemble":
        ensemble_selection()
    if flag == "submission":
        ensemble_prediction()
    if flag == "rankavg":
        ensemble_rank_average()
    end_time = time.time()
    # NOTE(review): time.time() differences are already in seconds; dividing
    # by 1000 reports kilo-seconds -- this looks like a mistaken ms
    # conversion. Confirm intent before relying on the printed figure.
    print "cost time %f" %( (end_time - start_time)/1000 )
| gpl-2.0 |
wzbozon/statsmodels | statsmodels/examples/ex_kernel_semilinear_dgp.py | 33 | 4969 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
# Example script: fit a semilinear (partially linear) kernel regression model
# to simulated data, then print a variance decomposition and draw several
# diagnostic plots. Everything runs inside the __main__ guard.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    #from statsmodels.nonparametric.api import KernelReg
    import statsmodels.sandbox.nonparametric.kernel_extras as smke
    import statsmodels.sandbox.nonparametric.dgp_examples as dgp
    class UnivariateFunc1a(dgp.UnivariateFunc1):
        # Override the parent DGP's heteroscedastic noise with a constant
        # scale of 0.5.
        def het_scale(self, x):
            return 0.5
    # Fix the RNG seed for reproducibility (the random draw above is
    # immediately overwritten by the hand-picked seed below):
    seed = np.random.randint(999999)
    #seed = 430973
    #seed = 47829
    seed = 648456 #good seed for het_scale = 0.5
    print(seed)
    np.random.seed(seed)
    # Nonparametric part: single index xb built from k_vars regressors.
    nobs, k_vars = 300, 3
    x = np.random.uniform(-2, 2, size=(nobs, k_vars))
    xb = x.sum(1) / 3 #beta = [1,1,1]
    # Linear part: k_vars_lin additional regressors entering linearly.
    k_vars_lin = 2
    x2 = np.random.uniform(-2, 2, size=(nobs, k_vars_lin))
    funcs = [#dgp.UnivariateFanGijbels1(),
             #dgp.UnivariateFanGijbels2(),
             #dgp.UnivariateFanGijbels1EU(),
             #dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
             UnivariateFunc1a(x=xb)
            ]
    res = []
    fig = plt.figure()
    for i,func in enumerate(funcs):
        #f = func()
        f = func
        # Observed response = nonparametric signal + linear part:
        y = f.y + x2.sum(1)
        # 'ccc': all three index regressors treated as continuous.
        model = smke.SemiLinear(y, x2, x, 'ccc', k_vars_lin)
        mean, mfx = model.fit()
        ax = fig.add_subplot(1, 1, i+1)
        f.plot(ax=ax)
        # Estimated linear index from the fitted coefficients:
        xb_est = np.dot(model.exog, model.b)
        sortidx = np.argsort(xb_est) #f.x)
        ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, label='est. mean')
        #ax.plot(f.x, mean0, color='g', lw=2, label='est. mean')
        ax.legend(loc='upper left')
        res.append((model, mean, mfx))
        print('beta', model.b)
        print('scale - est', (y - (xb_est+mean)).std())
        print('scale - dgp realised, true', (y - (f.y_true + x2.sum(1))).std(), \
                    2 * f.het_scale(1))
        fittedvalues = xb_est + mean
        resid = np.squeeze(model.endog) - fittedvalues
        print('corrcoef(fittedvalues, resid)', np.corrcoef(fittedvalues, resid)[0,1])
        print('variance of components, var and as fraction of var(y)')
        print('fitted values', fittedvalues.var(), fittedvalues.var() / y.var())
        print('linear ', xb_est.var(), xb_est.var() / y.var())
        print('nonparametric', mean.var(), mean.var() / y.var())
        print('residual ', resid.var(), resid.var() / y.var())
        print('\ncovariance decomposition fraction of var(y)')
        print(np.cov(fittedvalues, resid) / model.endog.var(ddof=1))
        print('sum', (np.cov(fittedvalues, resid) / model.endog.var(ddof=1)).sum())
        print('\ncovariance decomposition, xb, m, resid as fraction of var(y)')
        print(np.cov(np.column_stack((xb_est, mean, resid)), rowvar=False) / model.endog.var(ddof=1))
    fig.suptitle('Kernel Regression')
    fig.show()
    # Diagnostic plot: observed vs. true vs. estimated mean, sorted by the
    # estimated index.
    alpha = 0.7
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(f.x[sortidx], f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')
    # Total fit (linear + nonparametric) against the observations:
    sortidx = np.argsort(xb_est + mean)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(f.x[sortidx], y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(f.x[sortidx], (xb_est + mean)[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')
    ax.set_title('Semilinear Model - observed and total fitted')
    fig = plt.figure()
    #ax = fig.add_subplot(1, 2, 1)
    #ax.plot(f.x, f.y, 'o', color='b', lw=2, alpha=alpha, label='observed')
    #ax.plot(f.x, f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    #ax.plot(f.x, mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    #ax.legend(loc='upper left')
    # Same comparison sorted by the *true* index xb:
    sortidx0 = np.argsort(xb)
    ax = fig.add_subplot(1, 2, 1)
    ax.plot(f.y[sortidx0], 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.y_true[sortidx0], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(mean[sortidx0], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')
    ax.set_title('Single Index Model (sorted by true xb)')
    # Nonparametric component vs. the linearly-detrended observations:
    ax = fig.add_subplot(1, 2, 2)
    ax.plot(y - xb_est, 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')
    ax.set_title('Single Index Model (nonparametric)')
    plt.figure()
    plt.plot(y, xb_est+mean, '.')
    plt.title('observed versus fitted values')
    plt.show()
| bsd-3-clause |
avistous/QSTK | qstkstrat/strategies.py | 2 | 9521 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Sep 27, 2011
@author: John Cornwell
@contact: JohnWCornwellV@gmail.com
@summary: Various simple trading strategies to generate allocations.
'''
''' Python imports '''
import datetime as dt
from math import sqrt
''' 3rd party imports '''
import numpy as np
import pandas as pand
''' QSTK imports '''
import qstkutil.tsutil as tsu
def stratGiven( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Simplest strategy, weights are provided through args.
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function; requires
                      'dmPrice' (price DataMatrix) and 'lfWeights' (list of
                      per-symbol allocation weights).
    @return DataFrame corresponding to the portfolio allocations, or None
            (with an error printed) if a required argument is missing.
    """
    # NOTE: Python 2 code (print statements, has_key); comments only added.
    if not dFuncArgs.has_key('dmPrice'):
        print 'Error: Strategy requires dmPrice information'
        return
    if not dFuncArgs.has_key('lfWeights'):
        print 'Error: Strategy requires weight information'
        return
    dmPrice = dFuncArgs['dmPrice']
    lfWeights = dFuncArgs['lfWeights']
    ''' Generate two allocations, one for the start day, one for the end '''
    naAlloc = np.array( lfWeights ).reshape(1,-1)
    dfAlloc = pand.DataFrame( index=[dtStart], data=naAlloc, columns=(dmPrice.columns) )
    dfAlloc = dfAlloc.append( pand.DataMatrix(index=[dtEnd], data=naAlloc, columns=dmPrice.columns))
    # No cash held; everything allocated to the provided weights.
    dfAlloc['_CASH'] = 0.0
    return dfAlloc
def strat1OverN( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Evenly distributed strategy: 1/N of the portfolio is allocated
             to each of the N symbols in dmPrice.
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function; requires
                      'dmPrice' (price DataMatrix).
    @return DataFrame corresponding to the portfolio allocations, or None
            (with an error printed) if dmPrice is missing.
    """
    # NOTE: Python 2 code (print statements, has_key); comments only added.
    if not dFuncArgs.has_key('dmPrice'):
        print 'Error: Strategy requires dmPrice information'
        return
    dmPrice = dFuncArgs['dmPrice']
    lNumSym = len(dmPrice.columns)
    ''' Generate two allocations, one for the start day, one for the end '''
    naAlloc = (np.array( np.ones(lNumSym) ) * (1.0 / lNumSym)).reshape(1,-1)
    dfAlloc = pand.DataMatrix( index=[dtStart], data=naAlloc, columns=(dmPrice.columns) )
    dfAlloc = dfAlloc.append( pand.DataMatrix(index=[dtEnd], data=naAlloc, columns=dmPrice.columns))
    # No cash held; fully invested at equal weights.
    dfAlloc['_CASH'] = 0.0
    return dfAlloc
def stratMark( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Markovitz strategy, generates a curve and then chooses a point on it.
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function.
           Required: 'dmPrice', 'sPeriod' (rebalance time rule),
           'lLookback' (days of history per frontier), 'sMarkPoint'
           (one of 'Sharpe', 'MinVar', 'MaxRet', 'MinRet').
           Optional: 'bAddAlpha' (mix in future returns, i.e. look-ahead).
    @return DataFrame corresponding to the portfolio allocations,
            or None if a required argument is missing / sMarkPoint invalid
    """
    # Required arguments; print an error and return None if any is missing.
    if not dFuncArgs.has_key('dmPrice'):
        print 'Error:', stratMark.__name__, 'requires dmPrice information'
        return
    if not dFuncArgs.has_key('sPeriod'):
        print 'Error:', stratMark.__name__, 'requires rebalancing period'
        return
    if not dFuncArgs.has_key('lLookback'):
        print 'Error:', stratMark.__name__, 'requires lookback'
        return
    if not dFuncArgs.has_key('sMarkPoint'):
        print 'Error:', stratMark.__name__, 'requires markowitz point to choose'
        return

    ''' Optional variables '''
    if not dFuncArgs.has_key('bAddAlpha'):
        bAddAlpha = False
    else:
        bAddAlpha = dFuncArgs['bAddAlpha']

    dmPrice = dFuncArgs['dmPrice']
    sPeriod = dFuncArgs['sPeriod']
    lLookback = dFuncArgs['lLookback']
    sMarkPoint = dFuncArgs['sMarkPoint']

    ''' Select rebalancing dates '''
    # Rebalance on each period boundary, shifted to 16:00 (market close).
    drNewRange = pand.DateRange(dtStart, dtEnd, timeRule=sPeriod) + pand.DateOffset(hours=16)
    dfAlloc = pand.DataMatrix()

    ''' Go through each rebalance date and calculate an efficient frontier for each '''
    for i, dtDate in enumerate(drNewRange):
        # NOTE(review): the dtStart parameter is rebound here as the
        # lookback-window start; the original start date is not needed
        # again after drNewRange was built above.
        dtStart = dtDate - pand.DateOffset(days=lLookback)
        if( dtStart < dmPrice.index[0] ):
            print 'Error, not enough data to rebalance'
            continue

        # Lookback window of prices -> per-period returns, cleaned of NaNs.
        naRets = dmPrice.ix[ dtStart:dtDate ].values.copy()
        tsu.returnize1(naRets)
        tsu.fillforward(naRets)
        tsu.fillbackward(naRets)

        ''' Add alpha to returns '''
        if bAddAlpha:
            # Look ahead to the next rebalance window (not possible for the
            # last window) and blend 5% of its average return into history.
            if i < len(drNewRange) - 1:
                naFutureRets = dmPrice.ix[ dtDate:drNewRange[i+1] ].values.copy()
                tsu.returnize1(naFutureRets)
                tsu.fillforward(naFutureRets)
                tsu.fillbackward(naFutureRets)
                naAvg = np.mean( naFutureRets, axis=0 )
                ''' make a mix of past/future rets '''
                # NOTE(review): this loop rebinds the outer loop index ``i``;
                # harmless only because enumerate() resets ``i`` at the top
                # of each outer iteration.
                for i in range( naRets.shape[0] ):
                    naRets[i,:] = (naRets[i,:] + (naAvg*0.05)) / 1.05

        ''' Generate the efficient frontier '''
        (lfReturn, lfStd, lnaPortfolios) = getFrontier( naRets, fUpper=0.2, fLower=0.01 )
        lInd = 0
        '''
        plt.clf()
        plt.plot( lfStd, lfReturn)'''

        # Pick one point on the frontier according to sMarkPoint.
        if( sMarkPoint == 'Sharpe'):
            ''' Find portfolio with max sharpe '''
            fMax = -1E300
            for i in range( len(lfReturn) ):
                # Sharpe-like ratio: excess return (over 1.0) per unit std.
                fShrp = (lfReturn[i]-1) / (lfStd[i])
                if fShrp > fMax:
                    fMax = fShrp
                    lInd = i
            '''
            plt.plot( [lfStd[lInd]], [lfReturn[lInd]], 'ro')
            plt.draw()
            time.sleep(2)
            plt.show()'''
        elif( sMarkPoint == 'MinVar'):
            ''' use portfolio with minimum variance '''
            fMin = 1E300
            for i in range( len(lfReturn) ):
                if lfStd[i] < fMin:
                    fMin = lfStd[i]
                    lInd = i
        elif( sMarkPoint == 'MaxRet'):
            ''' use Portfolio with max returns (not really markovitz) '''
            lInd = len(lfReturn)-1
        elif( sMarkPoint == 'MinRet'):
            ''' use Portfolio with min returns (not really markovitz) '''
            lInd = 0
        else:
            # Unknown selection point: abandon the whole run (returns None,
            # discarding any allocations built so far).
            print 'Warning: invalid sMarkPoint'''
            return

        ''' Generate allocation based on selected portfolio '''
        naAlloc = (np.array( lnaPortfolios[lInd] ).reshape(1,-1) )
        dmNew = pand.DataMatrix( index=[dtDate], data=naAlloc, columns=(dmPrice.columns) )
        dfAlloc = dfAlloc.append( dmNew )

    dfAlloc['_CASH'] = 0.0
    return dfAlloc
def stratMarkSharpe( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Markowitz strategy selecting the max-Sharpe frontier point.
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function
    @return DataFrame corresponding to the portfolio allocations
    """
    # Delegate to stratMark after selecting the frontier point to use.
    dFuncArgs.update(sMarkPoint='Sharpe')
    return stratMark(dtStart, dtEnd, dFuncArgs)
def stratMarkLowVar( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Markowitz strategy selecting the minimum-variance frontier point.
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function
    @return DataFrame corresponding to the portfolio allocations
    """
    # Delegate to stratMark after selecting the frontier point to use.
    dFuncArgs.update(sMarkPoint='MinVar')
    return stratMark(dtStart, dtEnd, dFuncArgs)
def stratMarkMaxRet( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Markowitz strategy selecting the maximum-return frontier point.
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function
    @return DataFrame corresponding to the portfolio allocations
    """
    # Delegate to stratMark after selecting the frontier point to use.
    dFuncArgs.update(sMarkPoint='MaxRet')
    return stratMark(dtStart, dtEnd, dFuncArgs)
def stratMarkMinRet( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Markowitz strategy selecting the minimum-return frontier point.
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function
    @return DataFrame corresponding to the portfolio allocations
    """
    # Delegate to stratMark after selecting the frontier point to use.
    dFuncArgs.update(sMarkPoint='MinRet')
    return stratMark(dtStart, dtEnd, dFuncArgs)
def stratMarkSharpeAlpha( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Max-Sharpe Markowitz strategy with look-ahead alpha: future
             returns are blended into the history (uses future knowledge).
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function
    @return DataFrame corresponding to the portfolio allocations
    """
    # Delegate to stratMark with both the frontier point and alpha flag set.
    dFuncArgs.update(sMarkPoint='Sharpe', bAddAlpha=True)
    return stratMark(dtStart, dtEnd, dFuncArgs)
def stratMarkMaxRetAlpha( dtStart, dtEnd, dFuncArgs ):
    """
    @summary Max-return Markowitz strategy with look-ahead alpha: future
             returns are blended into the history (uses future knowledge).
    @param dtStart: Start date for portfolio
    @param dtEnd: End date for portfolio
    @param dFuncArgs: Dict of function args passed to the function
    @return DataFrame corresponding to the portfolio allocations
    """
    # Delegate to stratMark with both the frontier point and alpha flag set.
    dFuncArgs.update(sMarkPoint='MaxRet', bAddAlpha=True)
    return stratMark(dtStart, dtEnd, dFuncArgs)
| bsd-3-clause |
storpipfugl/airflow | airflow/hooks/presto_hook.py | 37 | 2626 | from builtins import str
from pyhive import presto
from pyhive.exc import DatabaseError
from airflow.hooks.dbapi_hook import DbApiHook
import logging
logging.getLogger("pyhive").setLevel(logging.INFO)
class PrestoException(Exception):
    """Raised when a Presto query fails; wraps the server's error message."""
    pass
class PrestoHook(DbApiHook):
    """
    Interact with Presto through PyHive!

    >>> ph = PrestoHook()
    >>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
    >>> ph.get_records(sql)
    [[340698]]
    """

    conn_name_attr = 'presto_conn_id'
    default_conn_name = 'presto_default'

    def get_conn(self):
        """Returns a connection object"""
        db = self.get_connection(self.presto_conn_id)
        return presto.connect(
            host=db.host,
            port=db.port,
            username=db.login,
            catalog=db.extra_dejson.get('catalog', 'hive'),
            schema=db.schema)

    @staticmethod
    def _strip_sql(sql):
        """Strip surrounding whitespace and a trailing semicolon from *sql*."""
        return sql.strip().rstrip(';')

    @staticmethod
    def _get_presto_exception(e):
        """
        Convert a pyhive DatabaseError into a PrestoException.

        pyhive stringifies the Presto error payload as a Python dict
        literal; parse it with ast.literal_eval. SECURITY FIX: the
        previous eval() call executed arbitrary expressions contained
        in the server's error text.
        """
        import ast
        obj = ast.literal_eval(str(e))
        return PrestoException(obj['message'])

    def get_records(self, hql, parameters=None):
        """
        Get a set of records from Presto.

        :param hql: the SQL to run (a trailing ';' is stripped)
        :param parameters: optional parameters to substitute into the query
        :raises PrestoException: on any Presto-side DatabaseError
        """
        try:
            return super(PrestoHook, self).get_records(
                self._strip_sql(hql), parameters)
        except DatabaseError as e:
            raise self._get_presto_exception(e)

    def get_first(self, hql, parameters=None):
        """
        Returns only the first row, regardless of how many rows the query
        returns.

        :raises PrestoException: on any Presto-side DatabaseError
        """
        try:
            return super(PrestoHook, self).get_first(
                self._strip_sql(hql), parameters)
        except DatabaseError as e:
            raise self._get_presto_exception(e)

    def get_pandas_df(self, hql, parameters=None):
        """
        Get a pandas dataframe from a sql query.

        Column names are taken from the cursor description; an empty
        result yields an empty DataFrame with no columns.
        """
        import pandas
        cursor = self.get_cursor()
        cursor.execute(self._strip_sql(hql), parameters)
        try:
            data = cursor.fetchall()
        except DatabaseError as e:
            raise self._get_presto_exception(e)
        column_descriptions = cursor.description
        if data:
            df = pandas.DataFrame(data)
            # DB-API description rows are (name, type, ...); keep the names.
            df.columns = [c[0] for c in column_descriptions]
        else:
            df = pandas.DataFrame()
        return df

    def run(self, hql, parameters=None):
        """
        Execute the statement against Presto. Can be used to create views.
        """
        return super(PrestoHook, self).run(self._strip_sql(hql), parameters)

    def insert_rows(self):
        # BUG FIX: ``raise NotImplemented()`` raised a TypeError because
        # NotImplemented is a value, not an exception type. Raise the
        # proper NotImplementedError instead.
        raise NotImplementedError()
| apache-2.0 |
poldrack/myconnectome | myconnectome/taskfmri/encoding_model.py | 2 | 3311 | """
do encoding model across sessions
"""
import os,glob,sys,ctypes
import nibabel.gifti.giftiio
import numpy
import sklearn.linear_model
from myconnectome.utils.array_to_gifti import array_to_gifti_32k
basedir = os.environ['MYCONNECTOME_DIR']
datadir='/corral-repl/utexas/poldracklab/data/selftracking'
def get_codes():
    """
    Parse contrast_annotation.txt into a contrast-coding lookup.

    The file is tab-separated with a header row. The first three columns
    are task number, contrast number and task name; the remaining columns
    are integer codes (blank or non-numeric entries count as 0).

    Returns:
        (coding, names): coding[tasknum][contrastnum] is the list of
        integer codes for that contrast; names is the list of code column
        names taken from the header (columns 4 onward).
    """
    coding = {}
    # 'with' guarantees the handle is closed (the original leaked it).
    with open('contrast_annotation.txt') as f:
        header = f.readline().strip().split('\t')
        nvars = len(header) - 3
        for line in f:
            if not line.strip():
                # Tolerate blank/trailing lines (the original crashed here).
                continue
            fields = line.strip().split('\t')
            tasknum = int(fields[0])
            contrastnum = int(fields[1])
            codes = []
            for i in range(nvars):
                try:
                    codes.append(int(fields[i + 3]))
                except (ValueError, IndexError):
                    # Non-numeric or missing entry -> code 0 (matches the
                    # original blanket except without hiding real bugs).
                    codes.append(0)
            coding.setdefault(tasknum, {})[contrastnum] = codes
    return coding, header[3:]
def get_files(coding, datadir):
    """Collect zstat GIFTI files for every (task, contrast) pair in coding.

    Returns (files, taskcodes), where taskcodes[i] is the [task, contrast]
    pair that produced files[i].
    """
    files = []
    taskcodes = []
    pattern = ('sub*/model/model%03d/task%03d*333.feat/'
               'stats_pipeline/zstat%03d.R.smoothed.func.gii')
    for task in coding.keys():
        for contrast in coding[task].keys():
            matched = glob.glob(os.path.join(datadir, pattern % (task, task, contrast)))
            files.extend(matched)
            # One [task, contrast] entry per matched file, kept in parallel.
            taskcodes.extend([task, contrast] for _ in matched)
    return files, taskcodes
def load_data(files):
    """Load left+right hemisphere GIFTI data, one row per input file.

    *files* are right-hemisphere ('.R.') paths; the matching left-hemisphere
    file is derived by substituting '.L.'. Each output row is the left
    hemisphere followed by the right (2 * 32492 vertex values).
    """
    contrastdata = numpy.zeros((len(files), 32492 * 2))
    for i, fname in enumerate(files):
        rh = nibabel.gifti.giftiio.read(fname).darrays[0].data
        lh = nibabel.gifti.giftiio.read(fname.replace('.R.', '.L.')).darrays[0].data
        contrastdata[i, :] = numpy.hstack((lh, rh))
    return contrastdata
def get_design_matrix(coding, taskcodes):
    """Stack the code vector of each (task, contrast) pair into a matrix.

    Row i of the result is coding[task][contrast] for taskcodes[i].
    """
    rows = [coding[task][contrast] for task, contrast in taskcodes]
    return numpy.array(rows)
if __name__=="__main__":
    # Fit, per surface vertex, a Lasso and an OLS encoding model that
    # predicts the vertex's contrast z-stats from the contrast codes.
    coding,names=get_codes()
    files,taskcodes=get_files(coding,datadir)
    contrastdata=load_data(files)
    desmtx=get_design_matrix(coding,taskcodes)
    # Mean-center the design matrix columns.
    desmtx=desmtx-numpy.mean(desmtx,0)
    # Residual degrees of freedom: observations minus predictors.
    df = desmtx.shape[0] - desmtx.shape[1]
    # One statistic per (predictor, vertex); 32492 vertices per hemisphere.
    tstat_lasso=numpy.zeros((desmtx.shape[1],32492*2))
    tstat=numpy.zeros((desmtx.shape[1],32492*2))
    badctr=0
    lm=sklearn.linear_model.LinearRegression()
    lr=sklearn.linear_model.Lasso(alpha=0.01)
    ctr=0
    ctr2=0
    for i in range(contrastdata.shape[1]):
        # Progress output: ctr2 counts vertices processed and is printed
        # roughly every 100 iterations.
        if ctr==100:
            ctr=0
            ctr2+=1
            print ctr2
        else:
            ctr+=1
            ctr2+=1
        # Mean-center this vertex's data, fit both models, and scale the
        # coefficients by the residual variance.
        # NOTE(review): coef_/sse is not a standard t statistic (no sqrt,
        # no design covariance term) -- confirm this scaling is intended.
        y=contrastdata[:,i]-numpy.mean(contrastdata[:,i])
        lr.fit(desmtx,y)
        resid=y-desmtx.dot(lr.coef_)
        sse=numpy.dot(resid,resid)/float(df)
        tstat_lasso[:,i]=lr.coef_/sse
        lm.fit(desmtx,y)
        resid=y-desmtx.dot(lm.coef_)
        sse=numpy.dot(resid,resid)/float(df)
        tstat[:,i]=lm.coef_/sse
    # Zero out NaNs (constant vertices give sse == 0 -> division artifacts).
    tstat[numpy.isnan(tstat)]=0
    tstat_lasso[numpy.isnan(tstat_lasso)]=0
    # Save raw arrays plus GIFTI surface maps for visualization.
    numpy.save(os.path.join(basedir,'task/encoding_tstat_lasso.npy'),tstat_lasso)
    numpy.save(os.path.join(basedir,'task/encoding_tstat.npy'),tstat)
    array_to_gifti_32k(tstat_lasso,os.path.join(basedir,'task/encoding_tstat_lasso'),names)
    array_to_gifti_32k(tstat,os.path.join(basedir,'task/encoding_tstat'),names)
stulp/dmpbbo | demos_cpp/dynamicalsystems/demoExponentialSystemWrapper.py | 1 | 2205 | # This file is part of DmpBbo, a set of libraries and programs for the
# black-box optimization of dynamical movement primitives.
# Copyright (C) 2014 Freek Stulp, ENSTA-ParisTech
#
# DmpBbo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DmpBbo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DmpBbo. If not, see <http://www.gnu.org/licenses/>.
## \file demoExponentialSystem.py
## \author Freek Stulp
## \brief Visualizes results of demoExponentialSystem.cpp
##
## \ingroup Demos
## \ingroup DynamicalSystems
import matplotlib.pyplot as plt
import numpy
import os, sys
lib_path = os.path.abspath('../')
sys.path.append(lib_path)
from executeBinary import executeBinary
lib_path = os.path.abspath('../../python/')
sys.path.append(lib_path)
from dynamicalsystems.dynamicalsystems_plotting import *
if __name__=='__main__':
    # Run the compiled C++ demo, which writes its integration results
    # into 'directory'.
    executable = "./demoExponentialSystem"
    directory = "./demoExponentialSystemDataTmp"
    executeBinary(executable, directory)

    def _plot_run(fig_number, basename):
        """Load one result file and plot it on a fresh two-panel figure."""
        figure = plt.figure(fig_number)
        data = numpy.loadtxt(directory + "/" + basename + ".txt")
        plotDynamicalSystem(data, [figure.add_subplot(1, 2, 1), figure.add_subplot(1, 2, 2)])
        plt.title(basename)
        return data

    # Figures 1 and 2: the analytical and numerical integrations.
    data_ana = _plot_run(1, 'analytical')
    data_num = _plot_run(2, 'numerical')

    # Figure 3: both runs overlaid (top row) and their difference (bottom).
    figure = plt.figure(3)
    axs = [figure.add_subplot(2, 2, 1), figure.add_subplot(2, 2, 2)]
    axs_diff = [figure.add_subplot(2, 2, 3), figure.add_subplot(2, 2, 4)]
    plotDynamicalSystemComparison(data_ana, data_num, 'analytical', 'numerical', axs, axs_diff)
    axs[1].legend()
    plt.show()
| lgpl-2.1 |
araichev/make_gtfs | tests/test_main.py | 1 | 6169 | import pandas as pd
import gtfs_kit as gk
import shapely.geometry as sg
import geopandas as gpd
from .context import make_gtfs, DATA_DIR
from make_gtfs import *
# Load test ProtoFeed
pfeed = read_protofeed(DATA_DIR / "auckland")
def test_get_duration():
    # Four minutes elapse between 01:01:01 and 01:05:01.
    assert get_duration("01:01:01", "01:05:01", units="min") == 4
def test_build_routes():
    routes = build_routes(pfeed)
    # Must be a DataFrame with one row per distinct route_short_name
    # and the four expected columns.
    assert isinstance(routes, pd.DataFrame)
    n_names = pfeed.frequencies.drop_duplicates("route_short_name").shape[0]
    assert routes.shape == (n_names, 4)
def test_build_shapes():
    shapes = build_shapes(pfeed)
    # Must be a DataFrame.
    assert isinstance(shapes, pd.DataFrame)
    # One shape per direction; a bidirectional flag of 0 still yields one.
    expect_nshapes = sum(1 if d == 0 else d for d in pfeed.shapes_extra.values())
    assert shapes.groupby("shape_id").ngroups == expect_nshapes
    assert shapes.shape[1] == 4
def test_build_stops():
    # Without explicit stops, stops are generated from the shapes:
    # at most two (start/end) per shape, with four columns.
    pfeed_stopless = pfeed.copy()
    pfeed_stopless.stops = None
    shapes = build_shapes(pfeed_stopless)
    stops = build_stops(pfeed_stopless, shapes)
    assert isinstance(stops, pd.DataFrame)
    assert stops.shape[0] <= 2 * shapes.shape_id.nunique()
    assert stops.shape[1] == 4

    # With explicit stops given, they pass through with the same shape.
    stops = build_stops(pfeed)
    assert isinstance(stops, pd.DataFrame)
    assert stops.shape == pfeed.stops.shape
def test_build_trips():
    routes = build_routes(pfeed)
    __, service_by_window = build_calendar_etc(pfeed)
    shapes = build_shapes(pfeed)
    trips = build_trips(pfeed, routes, service_by_window)

    # Should be a data frame
    assert isinstance(trips, pd.DataFrame)

    # Should have correct shape: recompute the expected trip count from
    # the frequencies table (trips = frequency * window hours, per direction).
    f = pd.merge(routes[["route_id", "route_short_name"]], pfeed.frequencies)
    f = pd.merge(f, pfeed.service_windows)
    # NOTE(review): 'shapes' is rebound here but never used afterwards.
    shapes = set(shapes["shape_id"].unique())
    expect_ntrips = 0
    for index, row in f.iterrows():
        # Get number of trips corresponding to this row
        # and add it to the total
        frequency = row["frequency"]
        if not frequency:
            continue
        start, end = row[["start_time", "end_time"]].values
        duration = get_duration(start, end, "h")
        direction = row["direction"]
        if direction == 0:
            # Direction 0 still contributes one set of trips.
            trip_mult = 1
        else:
            trip_mult = direction
        expect_ntrips += int(duration * frequency) * trip_mult
    expect_ncols = 5
    assert trips.shape == (expect_ntrips, expect_ncols)
def test_buffer_side():
    seg = sg.LineString([[0, 0], [1, 0]])
    buff = 5
    # Each one-sided buffer must lie on the requested side of the segment
    # (positive y = left, negative y = right) and cover at least the
    # expected area; 'both' covers both sides, hence twice the area.
    for side in ["left", "right", "both"]:
        b = buffer_side(seg, side, buff)
        y = b.representative_point().coords[0][1]
        if side == "left":
            assert b.area >= buff
            assert y > 0
        elif side == "right":
            assert b.area >= buff
            assert y < 0
        else:
            assert b.area >= 2 * buff
def test_get_nearby_stops():
    geom = sg.LineString([[0, 0], [2, 0]])
    stops = gpd.GeoDataFrame(
        [["a", sg.Point([1, 1])], ["b", sg.Point([1, -1])]],
        columns=["stop_code", "geometry"],
    )
    # Stop 'a' sits to the left of the line, 'b' to the right; 'both'
    # should pick up the pair.
    expected = {"left": {"a"}, "right": {"b"}, "both": {"a", "b"}}
    for side, codes in expected.items():
        n = get_nearby_stops(stops, geom, side, 1)
        assert n.shape[0] == len(codes)
        assert set(n.stop_code.values) == codes
def test_build_stop_times():
    # Test stopless version first
    pfeed_stopless = pfeed.copy()
    pfeed_stopless.stops = None
    routes = build_routes(pfeed_stopless)
    shapes = build_shapes(pfeed_stopless)
    __, service_by_window = build_calendar_etc(pfeed_stopless)
    stops = build_stops(pfeed_stopless, shapes)
    trips = build_trips(pfeed_stopless, routes, service_by_window)
    stop_times = build_stop_times(pfeed_stopless, routes, shapes, stops, trips)
    # Should be a data frame
    assert isinstance(stop_times, pd.DataFrame)
    # Should have correct shape.
    # Number of stop times is at most twice the number of trips,
    # because each trip has at most two stops
    assert stop_times.shape[0] <= 2 * trips.shape[0]
    assert stop_times.shape[1] == 6

    # Test with stops
    routes = build_routes(pfeed)
    shapes = build_shapes(pfeed)
    stops = build_stops(pfeed)
    __, service_by_window = build_calendar_etc(pfeed)
    trips = build_trips(pfeed, routes, service_by_window)
    stop_times = build_stop_times(pfeed, routes, shapes, stops, trips)
    # Should be a data frame
    assert isinstance(stop_times, pd.DataFrame)
    # Should have correct shape.
    # Number of stop times is at least twice the number of trips,
    # because each trip has two stops
    assert stop_times.shape[0] >= 2 * trips.shape[0]
    assert stop_times.shape[1] == 6

    # Test with stops and tiny buffer so that no stop times are built
    # (no stop lies within distance 0 of any shape).
    stop_times = build_stop_times(pfeed, routes, shapes, stops, trips, buffer=0)
    # Should be a data frame
    assert isinstance(stop_times, pd.DataFrame)
    # Should be empty
    assert stop_times.empty
def test_build_feed():
    feed = build_feed(pfeed)
    # Should be a gtfs_kit Feed object.
    assert isinstance(feed, gk.Feed)
    # Every core GTFS table should be present as an attribute.
    for name in [
        "agency",
        "calendar",
        "routes",
        "shapes",
        "stops",
        "stop_times",
        "trips",
    ]:
        assert hasattr(feed, name)
    # Validation should report no errors (warnings are tolerated).
    report = feed.validate()
    print(report)
    assert "error" not in report.type.values
| mit |
eclee25/flu-SDI-exploratory-age | scripts/Znorm_OR_relative.py | 1 | 9452 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 4/9/14
###Function: calculate average z-ORs where the early warning and classification periods are defined as relative dates
## early warning period begins on the week after Thanksgiving and the two subsequent weeks (see My's data file: ThanksgivingWeekData_Revised.csv)
## retrospective period is based on the first two weeks of the beginning of the epidemic period (this will have to be decided from incidence charts? a file with the week data should probably be created)
### base Znorm_OR.py description
## Z-normalize (subtract mean and divide by SD) OR time series -- time series with average values greater than 1 during the classification periods are mild seasons -- time series with average values less than -1 during the classification periods are severe seasons
## Z-normalize OR time series based on mean and SD of first 7 or 10 weeks of flu season -- can first 7 or 10 weeks tell you about severity of flu season in first few weeks of second year?
###Import data:
###Command Line: python
##############################################
### notes ###
### packages/modules ###
import csv
import numpy as np
import matplotlib.pyplot as plt
import sys
from collections import defaultdict
## local modules ##
import ORgenerator as od
### data structures ###
# ORdict_znorm[week] = OR_znorm (z-normalized odds ratio per week)
ORdict_znorm = {} # all data
ORdict_znorm2 = {} # offices/OP (outpatient) only
# retrodict[season] = [retro period week date 1, retro period week date 2, ...]
retrodict_cum = defaultdict(list) # retro period anchored on cumulative incidence
retrodict_pk = defaultdict(list) # retro period anchored on peak week
retrodict_cum2 = defaultdict(list) # as above, offices/OP data
retrodict_pk2 = defaultdict(list)

### parameters ###
USchild = 20348657 + 20677194 + 22040343 # US child popn from 2010 Census
USadult = 21585999 + 21101849 + 19962099 + 20179642 + 20890964 + 22708591 + 22298125 + 19664805 # US adult popn from 2010 Census
seasons = range(2,11) # seasons for which ORs will be generated
# Season 2 = 2001-2, Season 3 = 2002-3, etc
normwks = 7 # normalize by weeks 40-47 in season
early_period = 2 # early warning period endures 2 weeks starting with the week after Thanksgiving
retro_period = 2 # retrospective period endures 2 weeks at beginning of epidemic

### functions ###
### import data ###
# NOTE(review): these handles are never closed; acceptable for a one-shot
# analysis script.
datain=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks.csv','r')
data=csv.reader(datain, delimiter=',')
# 4/23/14 added outpatient data
data2in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
data2=csv.reader(data2in, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')

### program ###
ilidict, wkdict, weeks = od.import_dwk(data, 0, 1, 2, 3)
# ilidict[(week, age marker)] = ILI
# wkdict[week] = seasonnum
ORdict, ARdict = od.ORgen_wk(ilidict, weeks)
# ORdict[week] = OR
# ARdict[week] = attack rate per 10000
Thxdict = od.import_relative_early_period(thanks, 14, 13)
# Thxdict[seasonnum] = Sunday of Thanksgiving week date

# offices/op data only
# NOTE(review): 'weeks' is overwritten here by the outpatient import;
# presumably both datasets cover the same weeks -- confirm.
ilidict2, wkdict2, weeks = od.import_dwk(data2, 0, 1, 2, 3)
# ilidict2[(week, age marker)] = ILI
# wkdict2[week] = seasonnum
ORdict2, ARdict2 = od.ORgen_wk(ilidict2, weeks)
# ORdict2[week] = OR
# ARdict2[week] = attack rate per 10000

## processing step: z-normalization ##
for s in seasons:
    # wkdummy will represent list of weeks in season to use as key for OR dict
    wkdummy = [key for key in sorted(weeks) if wkdict[key] == int(s)]
    wkdummy = list(sorted(set(wkdummy)))
    # z-score each season's OR series against the mean/SD of its first
    # 'normwks' weeks.
    s_mean = np.mean([ORdict[wk] for wk in sorted(wkdummy)[:normwks]])
    s_sd = np.std([ORdict[wk] for wk in sorted(wkdummy)[:normwks]])
    dictdummyls = [(ORdict[wk]-s_mean)/s_sd for wk in sorted(wkdummy)]
    for w, z in zip(sorted(wkdummy), dictdummyls):
        ORdict_znorm[w] = z
    # dictionary with retro period weeks for each season
    print 'season', s
    retrodict_cum[s] = od.import_relative_retro_period(wkdummy, ARdict, .15, 'cum_incid', retro_period) # any value before 0.15 cumulative incidence should be in the growth phase of the epidemic
    retrodict_pk[s] = od.import_relative_retro_period(wkdummy, ARdict, 3, 'peak_wk', retro_period) # 2 wks prior to the peak week is safely within the growth phase of the epidemic curve
    # offices/op data: same normalization against the outpatient-only ORs
    s_mean2 = np.mean([ORdict2[wk] for wk in sorted(wkdummy)[:normwks]])
    s_sd2 = np.std([ORdict2[wk] for wk in sorted(wkdummy)[:normwks]])
    dictdummyls2 = [(ORdict2[wk]-s_mean2)/s_sd2 for wk in sorted(wkdummy)]
    # print 'Office data: s_mean, s_sd, s_cv:', s_mean2, s_sd2, s_sd2/s_mean2
    for w, z in zip(sorted(wkdummy), dictdummyls2):
        ORdict_znorm2[w] = z
    # dictionary with retro period weeks for each season, office/op data
    retrodict_cum2[s] = od.import_relative_retro_period(wkdummy, ARdict2, .15, 'cum_incid', retro_period)
    retrodict_pk2[s] = od.import_relative_retro_period(wkdummy, ARdict2, 3, 'peak_wk', retro_period)

# (An equivalent analysis block for the all-claims data -- identical in
# structure to the office/outpatient loop below, but reading ORdict_znorm
# and retrodict_cum/retrodict_pk and writing zOR_avgs_relative.csv -- was
# left commented out in the original.)

# office/OP data
# open file to write zOR averages
# fwriter = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/zOR_avgs_relative_outpatient.csv', 'w+')
# fwriter.write('season,retro_mn,early_mn\n')

# add 53rd week to season data if needed
for s in seasons:
    wkdummy = [key for key in sorted(weeks) if wkdict[key] == int(s)]
    wkdummy = list(sorted(set(wkdummy)))
    if s == 1:
        # Season 1 starts partway through; offset the plotting weeks by 13.
        chartORs = [ORdict_znorm2[wk] for wk in sorted(wkdummy)]
        chartwks = xrange(13, 13 + len(sorted(wkdummy)))
        print "season number and num weeks", s, len(wkdummy), len(chartORs)
    elif len(wkdummy) == 53:
        # Already a 53-week season; nothing to interpolate.
        chartORs = [ORdict_znorm2[wk] for wk in sorted(wkdummy)]
        chartwks = xrange(len(sorted(wkdummy)))
        print "season number and num weeks", s, len(wkdummy), len(chartORs)
    elif s == 9:
        # Season 9 (2008-09): interpolate a week 53, then truncate to 29
        # weeks -- presumably to exclude the 2009 pandemic wave (confirm).
        chartOR_dummy = [ORdict_znorm2[wk] for wk in sorted(wkdummy)]
        avg53 = (chartOR_dummy[12] + chartOR_dummy[13])/2
        chartOR_dummy.insert(13, avg53)
        chartORs = chartOR_dummy[:29]
        chartwks = xrange(len(chartORs))
        print "season number and num weeks", s, len(wkdummy)
    else:
        # 52-week seasons: insert an interpolated week 53 so all align.
        chartORs = [ORdict_znorm2[wk] for wk in sorted(wkdummy)]
        avg53 = (chartORs[12] + chartORs[13])/2
        chartORs.insert(13, avg53)
        chartwks = xrange(len(sorted(wkdummy)) + 1)
        print "season number and num weeks", s, len(wkdummy), len(chartORs)

    # processing: grab average z-OR during early warning and retrospective periods (after adding week 53 to all seasons)
    # early warning period is week after Thanksgiving week plus 1 subsequent week
    early_mn = np.mean([ORdict_znorm2[wk] for wk in sorted(wkdummy) if wk > Thxdict[s]][1:early_period+1])
    # retrospective period: two weeks after 15% cumulative incidence, or two
    # weeks starting 3 weeks before the peak incidence week
    retro_mn_cum = np.mean([ORdict_znorm2[wk] for wk in retrodict_cum2[s]])
    retro_mn_pk = np.mean([ORdict_znorm2[wk] for wk in retrodict_pk2[s]])

    # view results in terminal
    print 'season', s, early_mn, retro_mn_cum, retro_mn_pk

    #
    # fwriter.write('%s,%s,%s\n' % (s, retro_mn_pk, early_mn))
# fwriter.close()
| mit |
ChinaQuants/pyfolio | setup.py | 1 | 2520 | #!/usr/bin/env python
from setuptools import setup
import versioneer
# Package metadata passed to setup() below.
DISTNAME = 'pyfolio'
DESCRIPTION = "pyfolio is a Python library for performance and risk analysis of financial portfolios"
LONG_DESCRIPTION = """pyfolio is a Python library for performance and risk analysis of
financial portfolios developed by `Quantopian Inc`_. It works well with the
`Zipline`_ open source backtesting library.
At the core of pyfolio is a so-called tear sheet that consists of
various individual plots that provide a comprehensive performance
overview of a portfolio.
.. _Quantopian Inc: https://www.quantopian.com
.. _Zipline: http://zipline.io
"""
MAINTAINER = 'Quantopian Inc'
MAINTAINER_EMAIL = 'opensource@quantopian.com'
AUTHOR = 'Quantopian Inc'
AUTHOR_EMAIL = 'opensource@quantopian.com'
URL = "https://github.com/quantopian/pyfolio"
LICENSE = "Apache License, Version 2.0"
# NOTE(review): VERSION appears unused -- setup() below takes its version
# from versioneer.get_version(); confirm before relying on this constant.
VERSION = "0.3.1"

# Trove classifiers: supported Python versions, audience and topic.
classifiers = ['Development Status :: 4 - Beta',
               'Programming Language :: Python',
               'Programming Language :: Python :: 2',
               'Programming Language :: Python :: 3',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3.4',
               'License :: OSI Approved :: Apache Software License',
               'Intended Audience :: Science/Research',
               'Topic :: Scientific/Engineering',
               'Topic :: Scientific/Engineering :: Mathematics',
               'Operating System :: OS Independent']

# Runtime dependencies (minimum versions).
install_reqs = [
    'funcsigs>=0.4',
    'matplotlib>=1.4.0',
    'mock>=1.1.2',
    'numpy>=1.9.1',
    'pandas>=0.15.0',
    'pyparsing>=2.0.3',
    'python-dateutil>=2.4.2',
    'pytz>=2014.10',
    'scipy>=0.14.0',
    'seaborn>=0.6.0',
    'pandas-datareader>=0.2',
]

# Optional feature sets, installable as extras (pip install pyfolio[bayesian]).
extras_reqs = {
    'bayesian': ['pymc3']
}

# Dependencies needed only to run the test suite.
test_reqs = ['nose>=1.3.7', 'nose-parameterized>=0.5.0', 'runipy>=0.1.3']
if __name__ == "__main__":
    # Build/install entry point; version and build commands come from
    # versioneer rather than the VERSION constant above.
    setup(
        name=DISTNAME,
        cmdclass=versioneer.get_cmdclass(),
        version=versioneer.get_version(),
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        description=DESCRIPTION,
        license=LICENSE,
        url=URL,
        long_description=LONG_DESCRIPTION,
        packages=['pyfolio', 'pyfolio.tests'],
        # Ship the bundled sample data files with the package.
        package_data={'pyfolio': ['data/*.*']},
        classifiers=classifiers,
        install_requires=install_reqs,
        # BUG FIX: the setuptools keyword is 'extras_require';
        # 'extras_requires' was silently ignored, so the optional
        # [bayesian] extra was never installable.
        extras_require=extras_reqs,
        tests_require=test_reqs,
        test_suite='nose.collector',
    )
| apache-2.0 |
bmazin/ARCONS-pipeline | examples/Pal2012-sdss/curve_average.py | 1 | 2036 | import numpy as np
import matplotlib.pyplot as plt
from util import utils
t08 = np.load('/home/pszypryt/sdss_data/20121208/Blue-Fit.npz')
t10 = np.load('/home/pszypryt/sdss_data/20121210/Blue10-Fit.npz')
t11 = np.load('/home/pszypryt/sdss_data/20121211/seq5Blue-Fit.npz')
params08 = t08['params']
params10 = t10['params']
params11 = t11['params']
jd08 = t08['jd']
jd10 = t10['jd']
jd11 = t11['jd']
params = np.vstack([params08,params10,params11])
jd = np.append(jd08,jd10)
jd = np.append(jd,jd11)
amps = params[:,1]
widths = params[:,4]
xpos = params[:,2]
ypos = params[:,3]
jd2 = (jd/0.01966127)%1.
iPeriod = np.array(jd/0.01966127,dtype=np.int)
iPeriod -= iPeriod[0]
fig = plt.figure()
ax = fig.add_subplot(111)
curve = amps*widths**2
curve1 =np.append(curve[0:510], curve[780:])
curve =np.append(curve1[0:510]/np.average(curve1[0:510]), curve1[510:1170]/np.average(curve1[510:1170]))
curve = np.append(curve,curve1[1170:]/np.average(curve1[1170:]))
jd2=np.append(jd2[0:510],jd2[780:])
#curve /= np.median(curve)
#amps /= np.median(amps)
fwhm = 2*np.sqrt(2*np.log(2))*widths#pixels
fwhm = 0.5*fwhm #arcsec
fwhm = widths
#medFwhm = utils.median_filterNaN(fwhm,size=5)
#meanFwhm = utils.mean_filterNaN(fwhm,size=5)
meanXpos = utils.mean_filterNaN(xpos,size=7)
meanYpos = utils.mean_filterNaN(ypos,size=7)
#curve/=np.median(curve)
fwhm/=np.median(fwhm)
numbins=155
binwidth = 1/float(numbins)
average_array = []
for i in range(numbins):
out_values = np.where(np.logical_and(jd2 >= i*binwidth,jd2 < (i+1)*binwidth))[0]
iCurve = curve[out_values]
iCurve = iCurve[iCurve != 0]
# iCurve = iCurve[iCurve < 700]
bin_average = np.median(iCurve)
average_array.append(bin_average)
x=np.linspace(0,1,numbins)
ax.plot(x-0.54,average_array,'k.')
plt.xlabel('Phase')
plt.ylabel('Scaled Photon Count')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(20)
plt.show()
#np.savez('fix.npz',widths=medFwhm,x=meanXpos,y=meanYpos)
| gpl-2.0 |
Caranarq/01_Dmine | 07_Movilidad/P0706/P0706.py | 1 | 3153 | # -*- coding: utf-8 -*-
"""
Started on tue, feb 21st, 2018
@author: carlos.arana
"""
# Librerias utilizadas
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar
"""
Las librerias locales utilizadas renglones arriba se encuentran disponibles en las siguientes direcciones:
SCRIPT: | DISPONIBLE EN:
------ | ------------------------------------------------------------------------------------
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""
# Documentacion del Parametro ---------------------------------------------------------------------------------------
# Descripciones del Parametro
M = Meta
M.ClaveParametro = 'P0706'
M.NombreParametro = 'Vialidades con guarnición'
M.DescParam = 'Numero de manzanas segun disponibilidad de guarnición en sus vialidades'
M.UnidadesParam = 'Numero de manzanas'
M.TituloParametro = 'Mguarni' # Para nombrar la columna del parametro
M.PeriodoParam = '2014'
M.TipoInt = 1
# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C' # (Tipos de Variable: [C]ontinua, [D]iscreta [O]rdinal, [B]inaria o [N]ominal)
M.array = []
M.TipoAgr = 'sum'
# Descripciones del proceso de Minería
M.nomarchivodataset = M.ClaveParametro
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Manzanas por municipio segun disponibilidad de guarnición en sus vialidades'
M.ClaveDataset = 'CLEU'
M.ActDatos = '2014'
M.Agregacion = 'Se clasificó el total de manzanas por municipio segun la disponibilidad de guarnición en alguna de ' \
'sus vialidades. Se agregaron a una sola columna las manzanas en donde alguna o todas sus ' \
'vialidades disponen de guarnición. Para la agregación de datos' \
'municipales a ciudades del SUN, se suma el numero de manzanas que disponen de banqueta en todos los ' \
'municipios que integran cada ciudad del SUN'
# Descripciones generadas desde la clave del parámetro
M.getmetafromds = 1
Meta.fillmeta(M)
# Construccion del Parámetro -----------------------------------------------------------------------------------------
# Cargar dataset inicial
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
sheetname='DATOS', dtype={'CVE_MUN': 'str'})
dataset.set_index('CVE_MUN', inplace=True)
dataset = dataset.rename_axis('CVE_MUN')
dataset.head(2)
# Generar dataset para parámetro y Variable de Integridad
gt = 'Guarnicion en Todas las vialidades'
ga = 'Guarnicion en Alguna vialidad'
par_dataset = dataset[gt]+dataset[ga]
par_dataset = par_dataset.to_frame(name = M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)
# Compilacion
compilar(M, dataset, par_dataset, variables_dataset)
| gpl-3.0 |
terkkila/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)

import numpy as np
from sklearn import cross_validation, datasets, svm

# Load the digits data and set up a linear SVM.
digits = datasets.load_digits()
X, y = digits.data, digits.target
svc = svm.SVC(kernel='linear')

# Cross-validate once per candidate regularization value C and record the
# mean and standard deviation of the fold scores.
C_s = np.logspace(-10, 0, 10)
scores = []
scores_std = []
for C in C_s:
    svc.C = C
    fold_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
    scores.append(np.mean(fold_scores))
    scores_std.append(np.std(fold_scores))

# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()

mean_scores = np.array(scores)
std_scores = np.array(scores_std)
plt.semilogx(C_s, mean_scores)
plt.semilogx(C_s, mean_scores + std_scores, 'b--')
plt.semilogx(C_s, mean_scores - std_scores, 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, ["%g" % loc for loc in locs])
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
CDNoyes/EDL-Py | EntryGuidance/Simulation.py | 1 | 40755 | import sys
from os import path
# sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
sys.path.append("./")
sys.path.append("../")
from Utils.RK4 import RK4
from Utils import DA as da
import pandas as pd
import numpy as np
from scipy.integrate import odeint, trapz
from scipy import linalg
from scipy.interpolate import interp1d
from scipy.io import savemat, loadmat
import logging
from transitions import Machine, State
from EntryEquations import Entry, System
from Planet import Planet
from EntryVehicle import EntryVehicle
# Graphing specific imports
from transitions.extensions import GraphMachine as MGraph
import webbrowser
class Cycle(object):
    """Guidance cycle timing.

    The cycle is defined by either *duration* (seconds per cycle) or
    *freq* (cycles per second); the other quantity is derived as the
    reciprocal. When *freq* is given it takes precedence.
    """

    def __init__(self, duration=1, freq=None):
        if freq is not None:
            self.rate = freq
            self.duration = 1./freq
        else:
            self.rate = 1./duration
            self.duration = duration
class Simulation(Machine):
'''
Defines a simulation class. The class is initialized to create its finite-state machine.
Methods:
run - Runs the current simulation from a given state acting under a series of controllers and a realization of the uncertainty space.
getRef - Returns a dictionary of interpolation objects
plot - Plots a set of standard graphs. Does not show them, use Simulation.show() to bring them up. This can be useful to plot multiple trajectories before calling show.
Members:
'''
def __init__(self, states, conditions, cycle=None, output=True, find_transitions=True, use_da=False, final_state="Complete"):
    """Build the finite-state machine that drives the simulation.

    Parameters
    ----------
    states : list of str
        Name of each simulation phase, in order.
    conditions : list of callable
        One trigger per phase; a phase ends when its condition returns True.
    cycle : Cycle, optional
        Guidance cycle; defaults to Cycle() (1 s period).
    output : bool
        Print progress/diagnostic information.
    find_transitions : bool
        Refine the state at each phase transition by interpolation.
    use_da : bool
        Use differential-algebra variables instead of floats.
    final_state : str
        Name of the terminal FSM state appended after `states`.
    """
    if len(states) != len(conditions):
        raise ValueError("Number of fsm states must equal number of transition conditions.")

    if cycle is None:
        if output:
            print("Simulation using default guidance cycle.")
        cycle = Cycle()

    # Work on a copy so the caller's list is not mutated by the final-state
    # append below (self._states aliases the copy, so, as before, it will
    # include final_state after the append).
    states = list(states)

    self._conditions = conditions
    self._states = states
    self._output = output
    self._find_transitions = find_transitions
    self._use_da = use_da

    self.cycle = cycle          # Guidance cycle; logging/control updates occur every cycle.duration s
    self.time = 0.0             # Current simulation time
    self.times = []             # Times at which the state history is logged
    self.index = 0              # Index of the current phase
    self.sample = None          # Uncertainty sample to be run
    self.x = None               # Current state vector
    self.history = []           # Collection of state vectors
    self.u = None               # Previous controls
    self.control_history = []   # Collection of controls
    self.ie = [0]               # Indices of event transitions
    self.edlModel = None        # The dynamics and other functions associated with EDL
    self.fullEDL = None         # EDL model type: ideal (perfect knowledge) or full (truth/nav/filters)
    self.triggerInput = None    # Input passed to triggers and controllers
    self.simulations = 0        # Number of simulations run

    states.append(final_state)
    transitions = [{'trigger': 'advance', 'source': states[i-1], 'dest': states[i],
                    'conditions': 'integrate'} for i in range(1, len(states))]

    # A PreEntry phase ends with a bank reversal; an SRP phase begins with
    # ignition. (Explicit membership tests replace the original bare
    # try/except blocks, which silently swallowed every exception.)
    if 'PreEntry' in states:
        transitions[0]['after'] = 'bank_reversal'
    if 'SRP' in states:
        iSRP = states.index('SRP')
        if iSRP:
            transitions[iSRP-1]['after'] = 'ignite'

    Machine.__init__(self, model='self', states=states, initial=states[0], transitions=transitions, auto_transitions=False, after_state_change='printState')
def set_output(self, boolean):
    """Enable or disable console output from the simulation."""
    self._output = boolean
def integrate(self):
    """Propagate the dynamics until the current phase's trigger fires.

    Used as the FSM transition condition; always returns True so the
    machine advances once the phase's integration is complete.
    """
    while not self._conditions[self.index](da.const_dict(self.triggerInput)):
        if self._output and not (len(self.history)-1*self.cycle.rate) % int(10*self.cycle.rate):
            print("current simulation time = {} s".format(int(self.time)))  # Should define a pretty print function and call that here
        temp = self.__step()  # Advance the numerical simulation, save resulting states for next check etc
    return True
def __step(self):
    """Advance the simulation by one guidance cycle.

    Queries the active phase's controller for the command (bank angle for
    unpowered flight; throttle/pitch/yaw when powered), optionally applies
    a bank-rate limit, integrates the dynamics over one cycle, and logs
    the result via update().
    """
    if self.edlModel.powered:
        throttle, mu, zeta = self.control[self.index](**self.triggerInput)
        sigma = 0.
    else:
        sigma = self.control[self.index](**self.triggerInput)
        throttle = 0.  # fraction of max thrust
        mu = 0.  # pitch angle
        zeta = 0.  # yaw angle
    if not self._use_da and self._time_constant and len(self.control_history) > 1:
        # sigma = self.u[0] + (sigma - self.u[0])/self._time_constant * self.cycle.duration
        # what if instead of time constant we use a rate limit
        # NOTE(review): despite its name, a nonzero _time_constant currently
        # just enables a fixed 10 deg/s bank-rate limit.
        limit = np.radians(10)*self.cycle.duration
        if np.abs(sigma-self.u[0]) > limit:  # apply rate limiting
            s = np.sign(sigma-self.u[0])  # increasing or decreasing command
            sigma = self.u[0] + s*limit
    if self._use_da:
        X = RK4(self.edlModel.dynamics((sigma, throttle, mu)), self.x, np.linspace(self.time,self.time+self.cycle.duration,self.spc),())
    else:
        X = odeint(self.edlModel.dynamics((sigma, throttle, mu)), self.x, np.linspace(self.time,self.time+self.cycle.duration,self.spc))
    self.update(X, self.cycle.duration, np.asarray([sigma, throttle, mu]))
def run(self, InitialState, Controllers, InputSample=None, FullEDL=False, AeroRatios=(1,1), StepsPerCycle=10, TimeConstant=0):
    """Run the simulation from `InitialState` with one controller per phase.

    InputSample is an uncertainty sample (CD, CL, rho0, scale height) passed
    to the vehicle/planet models; defaults to zeros. FullEDL selects the
    truth+nav System model instead of the ideal Entry model. Returns the
    post-processed data array, or None if post-processing fails.
    """
    self.reset()
    self.spc = StepsPerCycle
    self._time_constant = TimeConstant
    if InputSample is None:
        InputSample = np.zeros(4)
    CD,CL,rho0,sh = InputSample
    self.sample = InputSample
    self.fullEDL = FullEDL
    if self.fullEDL:
        self.edlModel = System(InputSample=InputSample)  # Need to eventually pass knowledge error here
        if self._output:
            # DA variables cannot be formatted with float specs, hence the fallback.
            try:
                print("L/D: {:.2f}".format(self.edlModel.truth.vehicle.LoD))
                print("BC : {:.1f} kg/m^2".format(self.edlModel.truth.vehicle.BC(InitialState[6])))
            except TypeError:
                print("L/D: {}".format(self.edlModel.truth.vehicle.LoD))
                print("BC : {} kg/m^2".format(self.edlModel.truth.vehicle.BC(InitialState[6])))
    else:
        # Fewer than 7 states means the reduced longitudinal model.
        Longitudinal = len(InitialState) < 7
        self.edlModel = Entry(PlanetModel=Planet(rho0=rho0, scaleHeight=sh, da=self._use_da), VehicleModel=EntryVehicle(CD=CD, CL=CL), DifferentialAlgebra=self._use_da, Longitudinal=Longitudinal)
        self.edlModel.update_ratios(LR=AeroRatios[0], DR=AeroRatios[1])
        if self._output:
            try:
                print("L/D: {:.3f}".format(self.edlModel.vehicle.LoD))
                print("BC : {:.1f} kg/m^2".format(self.edlModel.vehicle.BC(InitialState[-1])))
            except TypeError:  # Da variables
                print("L/D: {}".format(self.edlModel.vehicle.LoD))
                print("BC : {} kg/m^2".format(self.edlModel.vehicle.BC(InitialState[-1])))
    self.update(np.asarray(InitialState),0.0,np.asarray([0]*3))
    self.control = Controllers
    # Drive the FSM; each advance() integrates one phase to its trigger.
    while not self.is_Complete():
        temp = self.advance()
    self.history = np.vstack(self.history)  # So that we can work with the data more easily than a list of arrays
    self.control_history.append(self.u)  # So that the control history has the same length as the data;
    self.control_history = np.vstack(self.control_history[1:])
    self.simulations += 1
    if not self.simulations % 10:
        print("{} simulations complete.".format(self.simulations))
    # print self.x[0]
    try:
        return self.postProcess()
    except:
        # NOTE(review): bare except intentionally(?) swallows all
        # post-processing failures — consider narrowing the exception type.
        print("Could not post process (not implemented for longitudinal model)")
        return None
def update(self, x, dt, u):
    """Log an integration result and advance the simulation clock by dt.

    x may be a single state vector or an array with one row per sub-step;
    u is the control held over the interval (None to skip control logging).
    Refreshes self.triggerInput from the new current state.
    """
    if len(x.shape) == 1:
        self.x = x
        N = 1
    else:
        N = x.shape[0]
        self.x = x[-1, :]  # last row is the current state
    if u is not None:
        self.u = u
        if N == 1:
            self.control_history.append(self.u)
        else:
            # Repeat the (constant) control for each logged sub-step.
            U = np.tile(u[:,None], (N,)).T
            self.control_history.extend(U)
    if N == 1:
        self.history.append(self.x)
        self.times.append(self.time)
    else:
        self.history.extend(x)
        self.times.extend(np.linspace(self.time, self.time+dt, self.spc))
    self.time += dt
    self.triggerInput = self.getDict()
def printState(self):
    """FSM after-state-change callback: refine and report the transition.

    Optionally interpolates the exact trigger crossing, prints the trigger
    condition and inputs, then bumps the phase index and records the event.
    """
    # find nearest endpoint here - the trigger was met in the last ten steps
    if self._find_transitions:
        self.findTransition()
    if self._output:
        print('Transitioning from state {} to {} because the following condition was met:'.format(self._states[self.index], self.state))
        self._conditions[self.index].dump()
        for key,value in self.triggerInput.items():
            if key not in ('vehicle', 'current_state', 'planet'):  # skip the bulky objects
                print('{} : {}\n'.format(key, value))
    self.index += 1
    self.ie.append(len(self.history)-1)
def getDict(self):
    """Assemble the input dictionary passed to triggers and controllers.

    The layout differs between the full (truth+nav) model — where values
    are drawn from the navigated portion of the state — and the ideal
    model, which also supports a 5-element longitudinal state vector.
    """
    if self.fullEDL:
        # NOTE(review): this branch indexes nav states at x[7:14] with ratios
        # at x[14:16], while postProcess() reads nav states at x[8:16] — the
        # two appear to assume different full-state layouts; confirm which
        # matches the System model.
        L,D = self.edlModel.nav.aeroforces(np.array([self.x[8]]),np.array([self.x[11]]),np.array([self.x[15]]))
        d = {
            'time' : self.time,
            'altitude' : self.edlModel.nav.altitude(self.x[7]),
            'longitude' : self.x[8],
            'latitude' : self.x[9],
            'velocity' : self.x[10],
            'fpa' : self.x[11],
            'heading' : self.x[12],
            # 'rangeToGo' : self.x[14], # has to be computed
            'mass' : self.x[13],
            'drag' : D[0],
            'lift' : L[0],
            'vehicle' : self.edlModel.nav.vehicle,
            'planet' : self.edlModel.nav.planet,
            'current_state' : self.x[7:14],
            'aero_ratios' : self.x[14:16],
            'bank' : self.x[16], # Should this be the current command or the current state?
            'energy' : self.edlModel.nav.energy(self.x[8],self.x[11],Normalized=False), # Estimated energy
        }
    else:
        if np.size(self.x, 0) == 7:
            # Full 3-DOF state: r, lon, lat, v, fpa, heading, mass.
            r,theta,phi,v,gamma,psi,m = self.x
            L,D = self.edlModel.aeroforces(self.x[0],self.x[3],self.x[6])
            E = self.edlModel.energy(self.x[0],self.x[3],Normalized=False)
            rtg = 0
            s = theta * 3396.2 # approximation
        else:
            # Longitudinal state: r, range, v, fpa, mass.
            r,s,v,gamma,m = self.x
            L,D = self.edlModel.aeroforces(self.x[0],self.x[2],self.x[4])
            E = self.edlModel.energy(self.x[0],self.x[2],Normalized=False)
            phi = 0
            theta = s/3396.2e3
            psi = 0
            rtg = 0
            # rtg = self.x[2]*self.edlModel.planet.radius #self.edlModel.planet.range() # TODO: Compute this
        d = {
            'time' : self.time,
            'altitude' : self.edlModel.altitude(self.x[0]),
            'longitude' : theta,
            'latitude' : phi,
            'velocity' : v,
            'fpa' : gamma,
            'heading' : psi,
            'rangeToGo' : rtg,
            'range' : s,
            'mass' : m,
            'drag' : D,
            'lift' : L,
            'vehicle' : self.edlModel.vehicle,
            'planet' : self.edlModel.planet,
            'current_state' : self.x,
            'aero_ratios' : (self.edlModel.lift_ratio, self.edlModel.drag_ratio),
            'bank' : self.u[0],
            'energy' : E,
        }
    return d
def ignite(self):
    """FSM callback: switch the EDL model to powered (SRP) flight."""
    self.edlModel.ignite()
def bank_reversal(self):
    """FSM callback: flip the sign of the current bank command and
    refresh the trigger inputs accordingly."""
    self.u[0] *= -1
    self.triggerInput = self.getDict()
def viz(self, **kwargs):
    """3-D trajectory visualization: downrange vs crossrange vs altitude.

    Requires postProcess() to have populated self.df; kwargs are
    forwarded to TrajPlot.
    """
    from TrajPlot import TrajPlot
    h = self.df['altitude'].values
    lat = np.radians(self.df['latitude'].values)
    long = np.radians(self.df['longitude'].values)
    # Spherical-to-Cartesian alternative (disabled):
    # z = h*np.sin(lat)
    # x = h*np.cos(lat)*np.cos(long)
    # y = h*np.cos(lat)*np.sin(long)
    z = h
    y = self.df['crossrange']
    x = self.df['downrange']
    TrajPlot(x, y, z, **kwargs)
def plot(self, plotEvents=True, compare=True, legend=True, plotEnergy=False):
    """Generate the standard trajectory figures (call show() to display).

    For the full EDL model the truth history is drawn and, when `compare`
    is set, overlaid with the navigated history; the aerodynamic filter
    ratios get their own figure.
    """
    import matplotlib.pyplot as plt
    from Utils import DA
    # To do: replace calls to self.history etc with data that can be passed in; If data=None, data = self.postProcess()
    if self.fullEDL:
        fignum = simPlot(self.edlModel.truth, self.times, self.history[:,0:7], self.history[:,16], plotEvents, self._states, self.ie, fignum=1, legend=legend, plotEnergy=plotEnergy)
        if compare:
            fignum = simPlot(self.edlModel.nav, self.times, self.history[:,7:14], self.control_history[:,0], plotEvents, self._states, self.ie, fignum=1, legend=legend, plotEnergy=False) # Use same fignum for comparisons, set fignum > figures for new ones
        # else:
        #     fignum = simPlot(self.edlModel.nav, self.times, self.history[:,8:16], self.control_history[:,0], plotEvents, self._states, self.ie, fignum=fignum, label="Navigated ")
        plt.figure(fignum)
        plt.plot(self.times, self.history[:,14],label='Lift')
        plt.plot(self.times, self.history[:,15], label='Drag')
        if legend:
            plt.legend(loc='best')
        plt.title('Aerodynamic Filter Ratios')
    else:
        simPlot(self.edlModel, self.times, DA.const(self.history), DA.const(self.control_history[:,0]), plotEvents, self._states, self.ie, fignum=1, plotEnergy=plotEnergy, legend=legend)
def show(self):
    """Display any figures created by plot()."""
    import matplotlib.pyplot as plt
    plt.show()
def postProcess(self):
    """Convert the raw histories into a DataFrame (self.df) and a packed
    array (self.output); returns the array.

    Angles are converted to degrees, altitude to km, and down/cross range
    are computed relative to the initial state. For the full model both
    truth and navigated quantities are emitted (the latter suffixed
    '_nav' in the DataFrame columns).
    """
    if self._use_da:
        from Utils.DA import degrees, radians
    else:
        from numpy import degrees, radians
    self.control_history = self.control_history.astype(float)
    self.history = self.history.astype(float)
    if self.fullEDL:
        bank_cmd = degrees(self.control_history[:,0])
        r,theta,phi = self.history[:,0], degrees(self.history[:,1]), degrees(self.history[:,2])
        v,gamma,psi = self.history[:,3], degrees(self.history[:,4]), degrees(self.history[:,5])
        m = self.history[:,6]
        r_nav,theta_nav,phi_nav = self.history[:,8], degrees(self.history[:,9]), degrees(self.history[:,10])
        v_nav,gamma_nav,psi_nav = self.history[:,11], degrees(self.history[:,12]), degrees(self.history[:,13])
        m_nav = self.history[:,15]
        RL,RD = self.history[:,16], self.history[:,17]
        bank, bank_rate = degrees(self.history[:,18]), degrees(self.history[:,19])
        x0 = self.history[0,:]
        # Down/cross range relative to the initial position and heading.
        range = [self.edlModel.truth.planet.range(*x0[[1,2,5]],lonc=radians(lon),latc=radians(lat),km=True) for lon,lat in zip(theta,phi)]
        range_nav = [self.edlModel.nav.planet.range(*x0[[9,10,13]],lonc=radians(lon),latc=radians(lat),km=True) for lon,lat in zip(theta_nav,phi_nav)]
        energy = self.edlModel.truth.energy(r, v, Normalized=False)
        energy_nav = self.edlModel.nav.energy(r_nav, v_nav, Normalized=False)
        h = [self.edlModel.truth.altitude(R,km=True) for R in r]
        h_nav = [self.edlModel.nav.altitude(R,km=True) for R in r_nav]
        L,D = self.edlModel.truth.aeroforces(r,v,m)
        L_nav,D_nav = self.edlModel.nav.aeroforces(r_nav,v_nav,m_nav)
        data = np.c_[self.times, energy, bank_cmd, h, r, theta, phi, v, gamma, psi, range, L, D,
                     energy_nav, bank, h_nav, r_nav, theta_nav, phi_nav, v_nav, gamma_nav, psi_nav, range_nav, L_nav, D_nav]
        vars = ['energy','bank','altitude','radius','longitude','latitude','velocity','fpa','heading','downrange','crossrange','lift','drag']
        all = ['time'] + vars + [var + '_nav' for var in vars ]
        self.df = pd.DataFrame(data, columns=all)
    else:
        bank_cmd = degrees(self.control_history[:,0])
        r,theta,phi = self.history[:,0], degrees(self.history[:,1]), degrees(self.history[:,2])
        v,gamma,psi = self.history[:,3], degrees(self.history[:,4]), degrees(self.history[:,5])
        m = self.history[:,6]
        x0 = self.history[0,:]
        range = [self.edlModel.planet.range(*x0[[1,2,5]],lonc=radians(lon),latc=radians(lat),km=True) for lon,lat in zip(theta,phi)]
        energy = self.edlModel.energy(r, v, Normalized=False)
        h = [self.edlModel.altitude(R, km=True) for R in r]
        if self._use_da:
            # DA aeroforces must be evaluated point by point.
            L,D = np.array([self.edlModel.aeroforces(ri,vi,mi) for ri,vi,mi in zip(r,v,m)]).T
        else:
            L,D = self.edlModel.aeroforces(r,v,m)
        data = np.c_[self.times, energy, bank_cmd, h, r, theta, phi, v, gamma, psi, range, L, D, m]
        self.df = pd.DataFrame(data, columns=['time','energy','bank','altitude','radius','longitude','latitude','velocity','fpa','heading','downrange','crossrange','lift','drag','mass'])
    self.output = data
    return data
def reset(self):
    """ Resets all simulation states to prepare for the 'run' method to be used again.
    The only exception is the .simulations member, whose purpose is to record the
    number of times 'run' has been used for data reporting in e.g. Monte Carlo
    simulations.
    """
    if self._output:
        print("Resetting simulation states.\n")
    self.set_state(self._states[0])  # rewind the FSM to the first phase
    self.time = 0.0
    self.times = []
    self.index = 0
    self.sample = None  # Input uncertainty sample
    self.x = None  # Current State vector
    self.history = []  # Collection of State Vectors
    self.u = None
    self.control_history = []  # Collection of Control Vectors
    self.ie = [0]
    self.edlModel = None
    self.triggerInput = None
    self.control = None
    self.output = None
def getRef(self):
    """ Computes a reference object for use in tracking based guidance.

    Returns a dict of scipy interp1d objects keyed by quantity name, using
    velocity or energy as the independent variable. Arrays are flipped so
    the abscissa is increasing (interp1d requirement), and interpolation is
    restricted to the post-peak-velocity portion so the abscissa is
    monotonic. Requires postProcess() to have populated self.output.
    """
    ref = {}
    vel = np.flipud(self.output[:,7])  # Flipped to be increasing for interp1d limitation
    alt = np.flipud(self.output[:,3])  # km
    radius = np.flipud(self.output[:,4])  # m
    range = np.flipud(self.output[-1,10]*1e3-self.output[:,10]*1e3)  # Range to go
    drag = np.flipud(self.output[:,13])
    # NOTE(review): drag_rate (from np.diff) is one element shorter than the
    # other arrays — confirm the [:i_vmax] slices below line up as intended.
    drag_rate = np.flipud(np.diff(self.output[:,13])/np.diff(self.output[:,0]))
    dragcos = np.flipud(self.output[:,13]/np.cos(np.radians(self.output[:,8])))
    bank = np.flipud(self.output[:,2])
    u = np.cos(np.radians(bank))
    hdot = vel*np.flipud(np.sin(np.radians(self.output[:,8])))
    i_vmax = np.argmax(vel)  # Only interpolate from the maximum downward so the reference is monotonic
    energy = np.flipud(self.output[:,1])
    # i_emax = np.argmax(energy)
    i_emax=i_vmax
    # Should probably use a loop or comprehension at this point...
    # Velocity as independent variable
    ref['drag'] = interp1d(vel[:i_vmax],drag[:i_vmax], fill_value=(drag[0],drag[i_vmax]), assume_sorted=True, bounds_error=False, kind='cubic')
    ref['drag_rate'] = interp1d(vel[:i_vmax],drag_rate[:i_vmax], fill_value=(drag_rate[0],drag_rate[i_vmax]), assume_sorted=True, bounds_error=False, kind='cubic')
    ref['altitude'] = interp1d(vel[:i_vmax],alt[:i_vmax], fill_value=(alt[0],alt[i_vmax]), assume_sorted=True, bounds_error=False, kind='cubic')
    ref['rangeToGo'] = interp1d(vel[:i_vmax],range[:i_vmax], fill_value=(range[0],range[i_vmax]), assume_sorted=True, bounds_error=False)
    ref['bank'] = interp1d(vel[:i_vmax],bank[:i_vmax], fill_value=(bank[0],bank[i_vmax]), assume_sorted=True, bounds_error=False, kind='nearest')
    ref['u'] = interp1d(vel[:i_vmax],u[:i_vmax], fill_value=(u[0],u[i_vmax]), assume_sorted=True, bounds_error=False, kind='nearest')
    fpa = np.radians(self.output[:,8])
    ref['fpa'] = interp1d(vel[:i_vmax],fpa[:i_vmax], fill_value=(fpa[0],fpa[i_vmax]), assume_sorted=True, bounds_error=False, kind='nearest')
    # Range as independent variable
    # import matplotlib.pyplot as plt
    # plt.figure(660)
    # plt.plot(range)
    # plt.show()
    # ref['altitude_range'] = interp1d(range, alt, fill_value=(alt[0],alt[-1]), assume_sorted=True, bounds_error=False, kind='cubic')
    # Energy as independent variable
    ref['dragcos'] = interp1d(energy[:i_emax],dragcos[:i_emax], fill_value=(dragcos[0],dragcos[i_emax]), assume_sorted=True, bounds_error=False, kind='cubic')
    ref['drag_energy'] = interp1d(energy[:i_emax], drag[:i_emax], fill_value=(drag[0],drag[i_emax]), assume_sorted=True, bounds_error=False, kind='cubic')
    ref['drag_rate_energy'] = interp1d(energy[:i_emax],drag_rate[:i_emax], fill_value=(drag_rate[0],drag_rate[i_emax]), assume_sorted=True, bounds_error=False, kind='cubic')
    ref['altitude_rate'] = interp1d(energy[:i_emax], hdot[:i_emax], fill_value=(hdot[0],hdot[i_emax]), assume_sorted=True, bounds_error=False, kind='cubic')
    return ref
def getFBL(self):
    """Build feedback-linearization reference interpolants keyed by energy.

    Returns a dict of interp1d objects for bank angle, the drag-dynamics
    coefficients a and b, and the drag profile with its first and second
    derivatives. Requires postProcess() to have populated self.df.
    """
    from FBL import drag_dynamics, drag_derivatives
    fbl = {}
    df = self.df
    # States
    radius = df['radius'].values  # m
    vel = df['velocity'].values
    fpa = np.radians(df['fpa'].values)
    bank = np.radians(df['bank'].values)
    u = np.cos(bank)
    # Accels
    drag = df['drag'].values
    lift = df['lift'].values
    g = self.edlModel.gravity(radius)
    # Drag derivs
    drag_rate,drag_accel = drag_derivatives(u, lift, drag, g, radius, vel, fpa, self.edlModel.planet.atmosphere(radius-self.edlModel.planet.radius)[0], self.edlModel.planet.scaleHeight)
    a,b = drag_dynamics(drag, drag_rate, g, lift, radius, vel, fpa, self.edlModel.planet.atmosphere(radius-self.edlModel.planet.radius)[0], self.edlModel.planet.scaleHeight)
    # Independent variable
    energy = df['energy'].values
    i_vmax = np.argmax(vel)  # Only interpolate from the maximum downward so the reference is monotonic
    i_emax=i_vmax
    # Interpolation objects (fill values are backward because energy is decreasing)
    fbl['bank'] = interp1d(energy[i_emax:], bank[i_emax:], fill_value=(bank[-1],bank[i_emax]), bounds_error=False, kind='cubic')
    fbl['a'] = interp1d(energy[i_emax:], a[i_emax:], fill_value=(a[-1],a[i_emax]), bounds_error=False, kind='cubic')
    fbl['b'] = interp1d(energy[i_emax:], b[i_emax:], fill_value=(b[-1],b[i_emax]), bounds_error=False, kind='cubic')
    fbl['D'] = interp1d(energy[i_emax:], drag[i_emax:], fill_value=(drag[-1],drag[i_emax]), bounds_error=False, kind='cubic')
    fbl['D1'] = interp1d(energy[i_emax:], drag_rate[i_emax:], fill_value=(drag_rate[-1],drag_rate[i_emax]), bounds_error=False, kind='cubic')
    fbl['D2'] = interp1d(energy[i_emax:], drag_accel[i_emax:], fill_value=(drag_accel[-1],drag_accel[i_emax]), bounds_error=False, kind='cubic')
    return fbl
def findTransition(self):
    """Refine the phase-transition point.

    Walks backwards over the last guidance cycle to bracket the trigger
    crossing, then linearly interpolates between the bracketing states to
    locate a state that (just) satisfies the trigger. The over-shot
    samples are trimmed and replaced with the refined endpoint.
    """
    n = len(self.times)
    if n > 1:
        for i in range(n-2, n-1-self.spc, -1):
            # Find the states to interpolate between:
            # print(i)
            self.time = self.times[i]
            self.x = self.history[i]
            self.u = self.control_history[i]
            self.triggerInput = self.getDict()
            if self._use_da:
                self.triggerInput = da.const_dict(self.triggerInput)
            if not self._conditions[self.index](self.triggerInput):
                break
        N = max(10, int(1000/self.spc))  # Always use at least 10 points
        for j in np.linspace(0.01, 0.99, N):  # The number of points used here will determine the accuracy of the final state
            # Find a better state:
            self.time = ((1-j)*self.times[i] + j*self.times[i+1])
            self.x = ((1-j)*self.history[i] + j*self.history[i+1])
            self.u = ((1-j)*self.control_history[i] + j*self.control_history[i+1])
            self.triggerInput = self.getDict()
            if self._use_da:
                self.triggerInput = da.const_dict(self.triggerInput)
            if self._conditions[self.index](self.triggerInput):
                break
        # Remove the extra states:
        self.history = self.history[0:i+1]
        self.times = self.times[0:i+1]
        self.control_history = self.control_history[0:i+1]
        # Update the final point
        self.history.append(self.x)
        self.control_history.append(self.u)
        self.times.append(self.time)
def gui(self):
    """Stub: builds a unique timestamped filename but does nothing with it.
    NOTE(review): no GUI is implemented yet; the local is unused."""
    import datetime
    uniq_filename = str(datetime.datetime.now().date()) + '_' + str(datetime.datetime.now().time()).replace(':', '.')
def simPlot(edlModel, time, history, control_history, plotEvents, fsm_states, ie, fignum=1, label='', legend=True, plotEnergy=False):
    """Draw the standard set of trajectory figures.

    Parameters
    ----------
    edlModel : object providing altitude/energy/aeroforces/gravity/planet
    time : array of sample times
    history : state history, columns [r, theta, phi, v, gamma, psi, s, m]
    control_history : bank-angle history (rad)
    plotEvents : bool, mark phase-transition points
    fsm_states : phase names used to label events
    ie : indices of phase transitions within `history`
    fignum : first figure number to use; the next unused number is returned
    label : prefix prepended to axis labels (e.g. "Navigated ")
    """
    import matplotlib.pyplot as plt
    #history = [r, theta, phi, v, gamma, psi, s, m, DR, CR]
    # Altitude vs Velocity
    plt.figure(fignum)
    fignum += 1
    plt.plot(history[:,3], edlModel.altitude(history[:,0], km=True), lw = 3)
    if plotEvents:
        for i in ie:
            if legend:
                plt.plot(history[i,3],edlModel.altitude(history[i,0],km=True),'o',label = fsm_states[ie.index(i)], markersize=12)
            else:
                plt.plot(history[i,3],edlModel.altitude(history[i,0],km=True),'o', markersize=12)
    if legend:
        plt.legend(loc='upper left')
    plt.xlabel(label+'Velocity (m/s)')
    plt.ylabel(label+'Altitude (km)')
    e = edlModel.energy(history[:,0],history[:,3],Normalized=False)
    if plotEnergy:  # Draw constant energy contours
        V,R = np.meshgrid(history[:,3], history[:,0])
        E = (np.array([edlModel.energy(r,V[0],Normalized=False) for r in R])-np.max(e))/(np.min(e)-np.max(e))
        V,H = np.meshgrid(history[:,3], edlModel.altitude(history[:,0],km=True))
        levels = (np.linspace(0,1,101))
        CS = plt.contour(V,H,(E),levels=levels,cmap='RdBu')
        plt.colorbar(format="%.2f")
        # plt.clabel(CS, inline=1, fontsize=10)
    if False:  # Draw constant drag contours (disabled)
        V,R = np.meshgrid(history[:,3], history[:,0])
        D_matrix = []
        for r in R:
            L,D = edlModel.aeroforces(r,V[0],history[:,6])
            D_matrix.append(D)
        levels = np.logspace(-5,2.4,11, endpoint=True)
        CS = plt.contour(V,H,(D_matrix),levels=levels,colors='k')
        # plt.clabel(CS, inline=1, fontsize=10)
        plt.clabel(CS)
    if False:  # Energy vs velocity (disabled)
        en = edlModel.energy(history[:,0],history[:,3],Normalized=True)
        plt.figure(fignum)
        fignum += 1
        plt.plot(history[:,3], en, lw = 3)
        if plotEvents:
            for i in ie:
                plt.plot(history[i,3],en[i],'o',label = fsm_states[ie.index(i)], markersize=12)
        if legend:
            plt.legend(loc='upper left')
        plt.xlabel(label+'Velocity (m/s)')
        plt.ylabel(label+'Energy (-)')
    # #Latitude/Longitude
    plt.figure(fignum)
    fignum += 1
    plt.plot(history[:,1]*180/np.pi, history[:,2]*180/np.pi)
    if plotEvents:
        for i in ie:
            if legend:
                plt.plot(history[i,1]*180/np.pi, history[i,2]*180/np.pi,'o',label = fsm_states[ie.index(i)])
            else:
                plt.plot(history[i,1]*180/np.pi, history[i,2]*180/np.pi,'o')
    if legend:
        plt.legend(loc='best')
    plt.xlabel(label+'Longitude (deg)')
    plt.ylabel(label+'Latitude (deg)')
    # plt.legend()
    # Range vs Velocity
    plt.figure(fignum)
    fignum += 1
    plt.plot(history[:,3], (history[0,6]-history[:,6])/1000)
    if plotEvents:
        for i in ie:
            plt.plot(history[i,3],(history[0,6]-history[i,6])/1000,'o',label = fsm_states[ie.index(i)])
    if legend:
        plt.legend(loc='best')
    plt.xlabel(label+'Velocity (m/s)')
    plt.ylabel(label+'Trajectory length (km)')
    # Bank Angle Profile (vs time)
    plt.figure(fignum)
    fignum += 1
    plt.plot(time, np.degrees(control_history[:]))
    for i in ie:
        plt.plot(time[i], np.degrees(control_history[i]),'o',label = fsm_states[ie.index(i)])
    if legend:
        plt.legend(loc='best')
    plt.xlabel(label+'Time (s)')
    plt.ylabel(label+'Bank Angle (deg)')
    # vs energy
    plt.figure(fignum)
    fignum += 1
    plt.plot(e, np.degrees(control_history[:]))
    for i in ie:
        plt.plot(e[i], np.degrees(control_history[i]),'o',label = fsm_states[ie.index(i)])
    if legend:
        plt.legend(loc='best')
    plt.xlabel(label+'Energy (s)')
    plt.ylabel(label+'Bank Angle (deg)')
    # vs velocity
    plt.figure(fignum)
    fignum += 1
    plt.plot(history[:,3], np.degrees(control_history[:]))
    for i in ie:
        plt.plot(history[i,3], np.degrees(control_history[i]),'o',label = fsm_states[ie.index(i)])
    if legend:
        plt.legend(loc='best')
    plt.xlabel(label+'Velocity (m/s)')
    plt.ylabel(label+'Bank Angle (deg)')
    # Control vs Velocity Profile
    # plt.figure(fignum)
    # fignum += 1
    # plt.plot(history[:,3], np.cos(control_history[:]))
    # plt.plot(history[:,3], np.ones_like(control_history[:]),'k--',label='Saturation limit')
    # plt.plot(history[:,3], -np.ones_like(control_history[:]),'k--')
    # for i in ie:
    #     plt.plot(history[i,3], np.cos(control_history[i]),'o',label = fsm_states[ie.index(i)])
    # if legend:
    #     plt.legend(loc='best')
    # plt.axis([300,5505,-1.5,1.5])
    # plt.xlabel(label+'Velocity (m/s)')
    # plt.ylabel(label+'u=cos(sigma) (-)')
    # Downrange vs Crossrange
    # NOTE(review): local 'range' shadows the builtin for the rest of the function.
    range = np.array([edlModel.planet.range(*history[0,[1,2,5]],lonc=lon,latc=lat,km=True) for lon,lat in zip(history[:,1],history[:,2])])
    plt.figure(fignum)
    fignum += 1
    plt.plot(range[:,1], range[:,0])
    for i in ie:
        plt.plot(range[i,1], range[i,0],'o',label = fsm_states[ie.index(i)])
    if legend:
        plt.legend(loc='best')
    plt.xlabel(label+'Cross Range (km)')
    plt.ylabel(label+'Down Range (km)')
    plt.axis('equal')
    # Flight path vs Velocity
    plt.figure(fignum)
    fignum += 1
    plt.plot(history[:,3], history[:,4]*180/np.pi)
    if plotEvents:
        for i in ie:
            plt.plot(history[i,3],history[i,4]*180/np.pi, 'o', label=fsm_states[ie.index(i)])
    if legend:
        plt.legend(loc='best')
    plt.xlabel(label+'Velocity (m/s)')
    plt.ylabel(label+'Flight path angle (deg)')
    # Drag vs Velocity
    L,D = edlModel.aeroforces(history[:,0],history[:,3],history[:,6])
    g = edlModel.gravity(history[:,0])
    plt.figure(fignum)
    fignum += 1
    plt.plot(history[:,3],D)
    if plotEvents:
        for i in ie:
            plt.plot(history[i,3], D[i], 'o', label=fsm_states[ie.index(i)])
    plt.ylabel('Drag (m/s^2)')
    plt.xlabel('Velocity (m/s)')
    if legend:
        plt.legend(loc='best')
    # #########################################################################
    if False:  # Drag-derivative diagnostics (disabled)
        from FBL import drag_derivatives, drag_dynamics
        # u, L, D, g, r, V, gamma, rho, scaleHeight
        Ddot,Dddot = drag_derivatives(np.cos(control_history), L,D,g, history[:,0],history[:,3],history[:,4], edlModel.planet.atmosphere(history[:,0]-edlModel.planet.radius)[0],edlModel.planet.scaleHeight)
        plt.figure(fignum)
        fignum += 1
        plt.plot(time[1:],np.diff(D,1)/time[1],'--')
        plt.plot(time,Ddot)
        if plotEvents:
            for i in ie:
                plt.plot(time[i], Ddot[i],'o',label = fsm_states[ie.index(i)])
        plt.ylabel('Drag Rate (ms^-3)')
        # plt.xlabel('Velocity (m/s)')
        if legend:
            plt.legend(loc='best')
        plt.figure(fignum)
        fignum += 1
        plt.plot(time[2:],np.diff(D,2)/time[1]**2,'--')
        plt.plot(time,Dddot)
        if plotEvents:
            for i in ie:
                plt.plot(time[i], Dddot[i],'o',label = fsm_states[ie.index(i)])
        plt.ylabel('Drag second deriv (ms^-4)')
        # plt.xlabel('Velocity (m/s)')
        if legend:
            plt.legend(loc='best')
        a,b=drag_dynamics(D, Ddot, g, L, history[:,0],history[:,3],history[:,4], edlModel.planet.atmosphere(history[:,0]-edlModel.planet.radius)[0],edlModel.planet.scaleHeight)
        u_test = (Dddot - a)/b
        bank_test = np.arccos(u_test)*np.sign(control_history[:])
        plt.figure(4)
        plt.plot(time, np.degrees(bank_test),'k--')
    # ##########################################################################
    return fignum
# ########################################################## #
# Simple functions to create various simulation combinations #
# ########################################################## #
def SRP():
    """ Defines states and conditions for a trajectory from Pre-Entry through SRP-based EDL.

    Returns a dict of keyword arguments ('states', 'conditions') for the
    Simulation constructor. Phase ends: drag reaches 2 m/s^2 (PreEntry),
    velocity 700 m/s (Entry), velocity 50 m/s (SRP).
    """
    from Triggers import AccelerationTrigger, VelocityTrigger, AltitudeTrigger, MassTrigger

    states = ['PreEntry', 'Entry', 'SRP']

    def combo(inputs):
        # Alternative SRP termination: 2 km altitude OR 6400 kg mass.
        # NOTE(review): currently unused — VelocityTrigger(50) ends SRP below.
        return (AltitudeTrigger(2)(inputs) or MassTrigger(6400)(inputs))
    combo.dump = AltitudeTrigger(2).dump

    conditions = [AccelerationTrigger('drag', 2), VelocityTrigger(700), VelocityTrigger(50)]
    # Renamed from 'input', which shadowed the builtin.
    sim_inputs = {'states': states,
                  'conditions': conditions}
    return sim_inputs
def EntrySim(Vf=500):
    """Build the phase definition for a single guided-entry phase.

    Vf : terminal velocity (m/s) at which the Entry phase ends.
    """
    from Triggers import VelocityTrigger
    return {'states': ['Entry'],
            'conditions': [VelocityTrigger(Vf)]}
def TimedSim(time=30):
    """Phase definition for a single Entry phase terminated after `time` seconds."""
    from Triggers import TimeTrigger
    return {'states': ['Entry'],
            'conditions': [TimeTrigger(time)]}
def testSim():
    """Smoke test: run the SRP mission profile with trivial phase controllers."""
    sim = Simulation(cycle=Cycle(1), **SRP())

    def zero_bank(**kwargs):
        return 0

    def srp_control(**kwargs):
        return (1, 2.88)

    controllers = [zero_bank, zero_bank, srp_control]
    r0, theta0, phi0, v0, gamma0, psi0, s0 = (3540.0e3, np.radians(-90.07), np.radians(-43.90),
                                              5505.0, np.radians(-14.15), np.radians(4.99), 1180e3)
    x0 = np.array([r0, theta0, phi0, v0, gamma0, psi0, s0, 8500.0])
    sim.run(x0, controllers)
    return sim
def testFullSim():
    """Smoke test: run the full-EDL state formulation through a guided entry.

    NOTE(review): the original built an unused 7-element state tuple and carried a
    commented-out manual x0; both were dead code and have been removed — the
    initial state comes entirely from InitialState(full_state=True).
    """
    from InitialState import InitialState
    sim = Simulation(cycle=Cycle(1), output=True, **EntrySim())

    def zero_bank(**kwargs):
        return 0

    def srp_control(**kwargs):
        return (1, 2.88)

    x0 = InitialState(full_state=True)
    sim.run(x0, [zero_bank, zero_bank, srp_control], FullEDL=True)
    return sim
def NMPCSim(options):
    """Build N equal-duration phases spanning [0, T] for receding-horizon control.

    options : dict with keys 'N' (number of phases) and 'T' (horizon length, s).
    """
    # TODO(review): original note says this belongs in MPC.py.
    from Triggers import TimeTrigger
    n_phases = options['N']
    boundaries = np.linspace(0, options['T'], n_phases + 1)
    return {'states': ['State{}'.format(i) for i in range(n_phases)],
            'conditions': [TimeTrigger(t) for t in boundaries[1:]]}
def testNMPCSim():
    """Smoke test: three constant-bank phases over a 120 s horizon."""
    from functools import partial
    sim = Simulation(**NMPCSim({'N': 3, 'T': 120}))
    controllers = [partial(constant, value=v) for v in (0, 1.5, 0.25)]
    r0, theta0, phi0, v0, gamma0, psi0, s0 = (3540.0e3, np.radians(-90.07), np.radians(-43.90),
                                              5505.0, np.radians(-14.15), np.radians(4.99), 1180e3)
    x0 = np.array([r0, theta0, phi0, v0, gamma0, psi0, s0, 8500.0])
    sim.run(x0, controllers)
    return sim
def testDASim(x0, control_profile):
    """Differential-algebra smoke test: propagate an entry with gdual states and
    push an initial covariance through the trajectory STM.

    control_profile is currently unused; a hard-coded bang-bang test profile is
    applied instead.
    """
    from Utils import DA as da
    from pyaudi import gdual_double

    r0, theta0, phi0, v0, gamma0, psi0, s0 = (3540.0e3, np.radians(-90.07), np.radians(-43.90),
                                              5505.0, np.radians(-14.15), np.radians(4.99), 1180e3)
    if x0 is None:
        # Longitudinal-only state: [r, s, V, fpa, m]
        r0 = 3396.2e3 + 39.4497e3
        x0 = np.array([r0, 0, 5461.4, np.radians(-10.604), 5000.0])
    # NOTE(review): names/Vf are assumed valid for any caller-supplied x0 of the
    # same longitudinal form — confirm against the callers.
    names = ['r', 's', 'V', 'fpa', 'm']
    Vf = 480
    dasim = Simulation(cycle=Cycle(1), output=True, use_da=True, **EntrySim(Vf=Vf))
    x0d = da.make(x0, names, 1)
    u = gdual_double(0, 'u', 1)  # first-order control perturbation direction

    def test_profile(velocity, **args):
        # Bang-bang in velocity, perturbed by the DA control variable 'u'.
        if velocity.constant_cf > 3600:
            return 0 + u
        return 1 + u

    print(x0d)
    dasim.run(x0d, [test_profile])
    print(dasim.x)
    STM = da.jacobian(dasim.x, names)
    P0 = np.diag([2500, 10000, 0, np.radians(0.25), 0]) ** 2
    P = STM.dot(P0).dot(STM.T)
    print(np.diag(P) ** 0.5)  # 1-sigma of the propagated state
    return dasim
def constant(value, **kwargs):
    """Constant controller: return `value` regardless of the supplied state kwargs."""
    return value
# #########################
# Visualization Extension #
# #########################
class SimulationGraph(MGraph, Machine):
    """
    Simulation graph class for viewing the simulation's core finite state-machine.
    Each call to show() renders a numbered PNG snapshot and opens it in the browser.
    """

    def __init__(self, *args, **kwargs):
        self.nDrawn = 0  # number of snapshots rendered so far
        super(SimulationGraph, self).__init__(*args, **kwargs)

    def show(self):
        filename = 'SimulationFSM-{}.png'.format(self.nDrawn)
        self.graph.draw(filename, prog='dot')
        webbrowser.open(filename)
        self.nDrawn += 1
def getSim():
    """Construct the SRP-enabled EDL mission FSM graph (no dynamics model attached)."""
    states = ['Pre-Entry', 'Entry', 'Heading\nAlignment', 'SRP']
    transitions = [
        {'trigger': 'begin_entry', 'source': 'Pre-Entry', 'dest': 'Entry'},
        {'trigger': 'align', 'source': 'Entry', 'dest': 'Heading\nAlignment'},
        {'trigger': 'ignite', 'source': 'Heading\nAlignment', 'dest': 'SRP'},
    ]
    return SimulationGraph(model=None,
                           states=states,
                           transitions=transitions,
                           auto_transitions=False,
                           initial=states[0],
                           title='EDL Simulation',
                           show_conditions=True)
def fsmGif(states=range(4)):
    """
    Creates a GIF out of individual images, named after their respective states.

    Inputs:
        states : iterable of state indices/names matching SimulationFSM-<state>.png files
    Outputs:
        SimulationFSM.gif on disk (one second per frame)
    """
    from images2gif import writeGif
    from PIL import Image
    filenames = ['SimulationFSM-{}.png'.format(s) for s in states]
    frames = [Image.open(name) for name in filenames]
    thumb_size = (750, 750)
    for frame in frames:
        frame.thumbnail(thumb_size, Image.ANTIALIAS)  # shrink in place, keep aspect
    writeGif('SimulationFSM.gif', frames, duration=1)
if __name__ == '__main__':
testDASim(None, None)
# sim = testFullSim()
# sim.plot(compare=False)
# sim.show()
| gpl-3.0 |
smartscheduling/scikit-learn-categorical-tree | sklearn/tests/test_pipeline.py | 10 | 14095 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
    """Mock estimator with no fit method, used for parameter-dispatch tests."""

    def __init__(self, a=None, b=None):
        self.a, self.b = a, b


class T(IncorrectT):
    """Mock estimator providing minimal fit/get_params/set_params."""

    def fit(self, X, y):
        return self

    def get_params(self, deep=False):
        return dict(a=self.a, b=self.b)

    def set_params(self, **params):
        # Only 'a' is settable, mirroring the deliberately partial mock contract.
        self.a = params['a']
        return self


class TransfT(T):
    """Mock transformer: identity transform."""

    def transform(self, X, y=None):
        return X


class FitParamT(object):
    """Mock classifier that records the fit parameter it receives."""

    def __init__(self):
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        self.successful = should_succeed

    def predict(self, X):
        return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/series/test_alter_axes.py | 3 | 6584 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import Index, Series
from pandas.core.index import MultiIndex, RangeIndex
from pandas.compat import lrange, range, zip
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAlterAxes(TestData):
def test_setindex(self):
# wrong type
series = self.series.copy()
pytest.raises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
pytest.raises(Exception, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
assert isinstance(series.index, Index)
def test_rename(self):
renamer = lambda x: x.strftime('%Y%m%d')
renamed = self.ts.rename(renamer)
assert renamed.index[0] == renamer(self.ts.index[0])
# dict
rename_dict = dict(zip(self.ts.index, renamed.index))
renamed2 = self.ts.rename(rename_dict)
assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
tm.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))
# index with name
renamer = Series(np.arange(4),
index=Index(['a', 'b', 'c', 'd'], name='name'),
dtype='int64')
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
s = Series(range(5), name='foo')
renamer = Series({1: 10, 2: 20})
result = s.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
s = Series(range(4), index=list('abcd'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
result = s.rename(name)
assert result.name == name
tm.assert_numpy_array_equal(result.index.values, s.index.values)
assert s.name is None
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list('abc'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
s.rename(name, inplace=True)
assert s.name == name
exp = np.array(['a', 'b', 'c'], dtype=np.object_)
tm.assert_numpy_array_equal(s.index.values, exp)
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name='bar')
for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), u"\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name('foo')
assert s2.name == 'foo'
assert s.name is None
assert s is not s2
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
self.ts.rename(renamer, inplace=True)
assert self.ts.index[0] == expected
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
assert s.index.is_all_dates
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
assert 'value' in df
df = ser.reset_index(name='value2')
assert 'value2' in df
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
assert len(rs.columns) == 2
rs = s.reset_index(level=[0, 2], drop=True)
tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
assert isinstance(rs, Series)
def test_reset_index_range(self):
# GH 12071
s = pd.Series(range(2), name='A', dtype='int64')
series_result = s.reset_index()
assert isinstance(series_result.index, RangeIndex)
series_expected = pd.DataFrame([[0, 0], [1, 1]],
columns=['index', 'A'],
index=RangeIndex(stop=2))
assert_frame_equal(series_result, series_expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = Series(range(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels(['L0', 'L0', 'L0'])
assert_series_equal(result, expected)
| mit |
DataReplyUK/datareplyuk | eu_tweet_classifier/train_model.py | 1 | 20044 | # General IMPORTS --------------------------------------------------------------------------------------------------#
import os
import re
import sys
import pickle
import pandas
import random
import itertools
import collections
import matplotlib.pyplot as plt
# NLTK IMPORTS -----------------------------------------------------------------------------------------------------#
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# PYSPARK PATH SETUP AND IMPORTS -----------------------------------------------------------------------------------#
os.environ['SPARK_HOME'] = "/Users/path/to/spark-1.6.1-bin-hadoop2.6" # Path to source folder
# Append pyspark to Python Path
sys.path.append("/Users/path/to/spark-1.6.1-bin-hadoop2.6/python")
sys.path.append("/Users/path/to/spark-1.6.1-bin-hadoop2.6/python/lib/py4j-0.9-src.zip")
try:
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import Row
from pyspark.sql import SQLContext
from pyspark.mllib.linalg import SparseVector
from pyspark.accumulators import AccumulatorParam
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
print ("Successfully imported Spark Modules")
except ImportError as e:
print ("Can not import Spark Modules", e)
sys.exit(1)
# GLOBAL VARIABLES -------------------------------------------------------------------------------------------------#
sc = SparkContext('local[4]', 'EU_Tweet_Sentiment_Analyser') # Instantiate a SparkContext object
sqlContext = SQLContext(sc) # Instantiate a sqlContext object
# SUB-FUNCTIONS-----------------------------------------------------------------------------------------------------#
def filter_tweet(tweet):
    """Strip URLs, a leading retweet marker, @handles/#hashtags and numeric
    tokens from a raw tweet, replacing them with spaces.

    Returns the cleaned string; no whitespace normalisation is performed.
    """
    # Bug fixes vs original: 'htt.* ?' / 'www.*' deleted everything to the end of
    # the tweet; '\S+' removes only the URL token as the comments intended.
    tweet = re.sub(r"htt\S+", " ", tweet)
    tweet = re.sub(r"www\S+", " ", tweet)
    tweet = re.sub(r"^RT ", "", tweet)      # anchored: "leading RTs" only, per comment
    tweet = re.sub(r"[@#]\w+", " ", tweet)  # whole @handle/#hashtag, not just the sigil
    # Numeric clean-up patterns kept verbatim from the original (tuned heuristics):
    tweet = re.sub(r"([/| |'|(|+|-]\d+[\.| |/|;|?|%|:|,|'|(|)|+|-]\d*.?)", " ", tweet)
    tweet = re.sub(r"( \d+.? )", " ", tweet)
    # further abnormalities
    tweet = re.sub(r"([ |.]\d+[-|\.]\d*.? )", " ", tweet)
    tweet = re.sub(r"(\d+-\d+.?)", "", tweet)
    return tweet
def lemmatize(tweet_words):
    """Lower-case and WordNet-lemmatize every token, in place.

    Lower-casing is required by the lemmatizer but strips some semantics
    (e.g. CAPITALS used for shouting). Returns the same (mutated) list.
    """
    lemmatizer = WordNetLemmatizer()
    for idx, word in enumerate(tweet_words):
        tweet_words[idx] = lemmatizer.lemmatize(word.lower())
    return tweet_words
def negation_tokenizer(tweet_words):
# regex to match negation tokens
negation_re = re.compile("""(?x)(?:^(?:never|no|nothing|nowhere|
noone|none|not|havent|hasnt|hadnt|cant|couldnt|shouldnt|wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint)$)|n't""")
alter_re = re.compile("""(?x)(?:^(?:but|however|nevertheless|still|though|tho|yet)$)""")
neg_tweed_words = []
append_neg = False # stores whether to add "_NEG"
for token in tweet_words:
# If append_neg is False
if append_neg == False:
# Check if the current token is a negation
if negation_re.match(token):
append_neg = True
# but if a negation has been previously identified, check if this is an alteration
elif alter_re.match(token):
append_neg = False
# or if another negation appears
elif negation_re.match(token):
append_neg = False
# and if not then append the suffix
else:
token += "_NEG"
# append the new token in the return list
neg_tweed_words.append(token)
return neg_tweed_words
def filter_stop_words(tweet_words):
    """Drop tokens present in the broadcast stop-word list (stop_words_bv)."""
    stop_words = stop_words_bv.value  # fetch the broadcast value once
    return [word for word in tweet_words if word not in stop_words]
def populate_with(number_of_tweets, tweets, label, selected_tweets, labels):
    """Move `number_of_tweets` randomly chosen tweets from `tweets` into
    `selected_tweets`, appending 1 to `labels` for "STAY" and 0 otherwise.

    Mutates all three lists in place (chosen tweets are removed from `tweets`)
    and returns (selected_tweets, labels).
    """
    numeric_label = 1 if label == "STAY" else 0
    for _ in range(number_of_tweets):
        pick = random.randint(0, len(tweets) - 1)
        selected_tweets.append(tweets.pop(pick))
        labels.append(numeric_label)
    return selected_tweets, labels
def tf(tokens):
    """ Compute TF
    Args:
        tokens (list of str): input list of tokens from tokenize
    Returns:
        dictionary: token -> term frequency (count / len(tokens));
        an empty input yields an empty dict.
    """
    total = len(tokens)
    if total == 0:
        return {}
    counts = collections.Counter(tokens)  # C-speed counting instead of a manual loop
    # float() keeps true division under Python 2 (Counter values are ints)
    return {token: count / float(total) for token, count in counts.items()}
def idfs(corpus):
    """ Compute IDF
    Args:
        corpus (RDD): input corpus, one token list per document
    Returns:
        RDD: pairs of (token, N / number-of-documents-containing-token)
    """
    num_docs = corpus.count()
    # De-duplicate tokens within each document, then count documents per token.
    distinct_per_doc = corpus.flatMap(lambda doc: list(set(doc)))
    doc_counts = distinct_per_doc.map(lambda token: (token, 1)) \
                                 .reduceByKey(lambda a, b: a + b)
    return doc_counts.map(lambda pair: (pair[0], float(num_docs) / pair[1]))
def tfidf(tokens, idfs):
    """ Compute TF-IDF
    :param tokens: tokens (list of str): input list of tokens from tokenize.
    :param idfs: dict mapping token to IDF value.
    :return: dictionary of token -> TF * IDF; tokens absent from `idfs` are dropped.
    """
    # NOTE(review): the original pre-built a `tfidfs` counter dict whose values
    # were never read — dead code, removed.
    tfs = tf(tokens)
    return {t: tfs[t] * idfs[t] for t in tfs if t in idfs}
def featurize(tokens_kv):
    """
    :param tokens_kv: dict of (word, tf-idf score)
    :param Dictionary_BV: (broadcast) vocabulary list of n words, read globally
    :return: SparseVector of size n with the tf-idf scores of the known words

    Words not present in the vocabulary are silently dropped (same as before).
    """
    # MUST sort tokens_kv by key (SparseVector expects its indices in order,
    # assuming the broadcast vocabulary is itself sorted — TODO confirm).
    tokens_kv = collections.OrderedDict(sorted(tokens_kv.items()))
    vocab = Dictionary_BV.value
    vector_size = len(vocab)
    # Build the word -> position lookup once: O(T + V) instead of the original
    # O(T * V) rescan of the whole vocabulary for every token.
    # (Assumes vocabulary entries are unique — verify upstream.)
    word_index = {word: i for i, word in enumerate(vocab)}
    non_zero_indexes = []
    index_tfidf_values = []
    for key, value in tokens_kv.iteritems():
        if key in word_index:
            non_zero_indexes.append(word_index[key])
            index_tfidf_values.append(value)
    return SparseVector(vector_size, non_zero_indexes, index_tfidf_values)
def final_form_4_training(SVs, labels):
    """Pair feature vectors with their labels for MLlib training.

    :param SVs: List of SparseVectors (one per sample).
    :param labels: List of labels, index-aligned with `SVs`.
    :return: list of LabeledPoint objects, one per label.
    """
    return [LabeledPoint(labels[idx], SVs[idx]) for idx in range(len(labels))]
# MAIN -------------------------------------------------------------------------------------------------------------#
if __name__ == "__main__":
# LOAD TWEETS --------------------------------------------------------------------------------------------------#
# Load Corpus using Pandas
dataFrame = pandas.read_csv('/Users/path/to/corpus.csv',
header=None,
names=['name', 'screen_name', 'id', 'created_at', 'text', 'label'])
# Load Columns as Arrays (Notice: first element = column name)
tweets = dataFrame['text']
del tweets[0]
labels = dataFrame['label']
del labels[0]
# CREATE TRAINING CORPUS / CROSS-VALIDATION SET / TEST SET ----------------------------------------------------#
# Instantiate Tweet RDDS
labels_RDD = sc.parallelize(labels, 4)
total_tweets = labels_RDD.count()
print "Total tweets: %d" % total_tweets
pos_tweets = labels_RDD.filter(lambda x: x == "Stay").count()
print "Pos tweets: %d" % pos_tweets
neg_tweets = pos_tweets = labels_RDD.filter(lambda x: x == "Leave").count()
print "Neg tweets: %d" % pos_tweets
# Break tweets between positive and negative
pos_tweets = []
neg_tweets = []
for (tweet, label) in itertools.izip(tweets, labels):
if label == "Stay":
pos_tweets.append(tweet)
else:
neg_tweets.append(tweet)
# Divide respectively to 85%-7.5%-7.5%
training_no = int(min(len(pos_tweets), len(neg_tweets)) * 85 / 100)
cross_validation_no = int(min(len(pos_tweets), len(neg_tweets)) * 7.5 / 100)
test_no = min(len(pos_tweets), len(neg_tweets)) - training_no - cross_validation_no
# Training Set
training_set = []
training_labels = []
(training_set, training_labels) = populate_with(training_no, pos_tweets, "STAY", training_set, training_labels)
(training_set, training_labels) = populate_with(training_no, neg_tweets, "LEAVE", training_set, training_labels)
# Cross-Validation Set
cross_validation_set = []
cross_validation_labels = []
(cross_validation_set, cross_validation_labels) = populate_with(cross_validation_no, pos_tweets, "STAY",
cross_validation_set, cross_validation_labels)
(cross_validation_set, cross_validation_labels) = populate_with(cross_validation_no, neg_tweets, "LEAVE",
cross_validation_set, cross_validation_labels)
# Test Set
test_set = []
test_labels = []
(test_set, test_labels) = populate_with(cross_validation_no, pos_tweets, "STAY", test_set, test_labels)
(test_set, test_labels) = populate_with(cross_validation_no, neg_tweets, "LEAVE",test_set, test_labels)
# TOKENIZE TRAINING SET ----------------------------------------------------------------------------------------#
# Instantiate Training RDD
training_RDD = sc.parallelize(training_set, 4)
# Instantiate tokenizer-object
tokenizer = RegexpTokenizer(r'(\w+)') # Removes "RT @" and keeps only [a-zA-Z0-9] and '_'
# Stop_Words set
delete = {'should', 'don', 'again', 'not'} # Make NLTK's stopwords list more sentiment-aware
stopWords = set(stopwords.words('english')).difference(delete)
stop_words_bv = sc.broadcast(stopWords)
# Tokenize through Spark Transformations
wordsByTweet = (training_RDD.map(lambda tweet: tweet.decode("ascii", "ignore").encode("ascii"))
.map(filter_tweet)
.map(tokenizer.tokenize)
.map(lemmatize)
.map(filter_stop_words)
.map(negation_tokenizer)
.cache())
# SHOW CORPUS -------------------------------------------------------------------------------------------------#
corpus_RDD = wordsByTweet.collect()
print '\n'.join(map(lambda x: '{0}'.format(x), corpus_RDD))
# CREATE A DICTIONARY ------------------------------------------------------------------------------------------#
print("---------------------------------------------------------------------------------------------------------")
raw_input("Produce TF-IDF scores...")
dictionary_RDD_IDFs = idfs(wordsByTweet)
unique_token_count = dictionary_RDD_IDFs.count()
print 'There are %s unique tokens in the dataset.' % unique_token_count
IDFS_Tokens_Sample = dictionary_RDD_IDFs.takeOrdered(25, lambda s: s[1])
print("This is a dictionary sample of 25 words:")
print '\n'.join(map(lambda (token, idf_score): '{0}: {1}'.format(token, idf_score), IDFS_Tokens_Sample))
# Create a broadcast variable for the weighted dictionary (sorted)
dictionary_RDD_IDFs_Weights = dictionary_RDD_IDFs.sortBy(lambda (token, score): score).collectAsMap()
IDFS_weights_BV = sc.broadcast(dictionary_RDD_IDFs_Weights)
# Write IDFS_weights_BV as python dictionary to a file
output = open('/Users/path/to/dictionary_RDD_IDFs_Weights.pkl', 'wb')
pickle.dump(dictionary_RDD_IDFs_Weights, output)
output.close()
print IDFS_weights_BV.value
# CREATE A HISTOGRAM -------------------------------------------------------------------------------------------#
print("--------------------------------------------------------------------------------------------------------")
raw_input("Create an IDF-scores histogram...")
IDFs_values = dictionary_RDD_IDFs.map(lambda s: s[1]).collect()
fig = plt.figure(figsize=(8, 3))
plt.hist(IDFs_values, 50, log=True)
plt.show()
# PRE-COMPUTE TF-IDF WEIGHTS: Build Weight Vectors -------------------------------------------------------------#
print("--------------------------------------------------------------------------------------------------------")
raw_input("Produce the TF-IDF scores...")
TFsIDFs_Vector_Weights_RDDs = wordsByTweet.map(lambda tokens: (tfidf(tokens, IDFS_weights_BV.value))).cache()
print '\n'.join(map(lambda words: '{0}'.format(words), TFsIDFs_Vector_Weights_RDDs.take(10)))
# BEGIN CREATION OF FEATURE VECTOR ------------------------------------------------------------------------------#
print("--------------------------------------------------------------------------------------------------------")
raw_input("Create an ordered dictionary for feature extraction...")
# Create an ordered dictionary of the N first words
Dictionary = (dictionary_RDD_IDFs
.sortBy(lambda (token, score): score)
.map(lambda (token, score): token)
.collect()) # N = all-->collect(), otherwise use take(N)
print("This is the complete dictionary, ordered based on idf scores:")
print '\n'.join(map(lambda token: '{0}'.format(token), Dictionary))
print("--------------------------------------------------------------------------------------------------------")
# Create a broadcast variable for the Dictionary
# Dictionary MUST be sorted. If not sparse-vectors in the featurize function will throw exception.
Dictionary_Sorted = sorted(Dictionary)
Dictionary_BV = sc.broadcast(Dictionary_Sorted)
# Save ordered Dictionary
output = open("/Users/path/to/Dictionary.txt", "wb")
output.write("\n".join(map(lambda x: str(x), Dictionary_Sorted)))
output.close()
# Feature Extraction
Training_Set_Vectors = (TFsIDFs_Vector_Weights_RDDs
.map(lambda (tokens): featurize(tokens))
.collect())
# GENERATE LABELEDPOINT PARAMETER TO LOAD TO THE TRAIN METHOD ---------------------------------------------------#
print("--------------------------------------------------------------------------------------------------------")
raw_input("Generate the LabeledPoint parameter... ")
labelled_training_set_RDD = sc.parallelize(final_form_4_training(Training_Set_Vectors, training_labels))
# TRAIN MODEL ---------------------------------------------------------------------------------------------------#
print("--------------------------------------------------------------------------------------------------------")
raw_input("Train the model... ")
model = NaiveBayes.train(labelled_training_set_RDD, 1.0)
# CROSS-VALIDATE MODEL ------------------------ -----------------------------------------------------------------#
print("--------------------------------------------------------------------------------------------------------")
raw_input("Cross-Validate the model... ")
# Instantiate Cross_Validation RDD
CV_RDD = sc.parallelize(cross_validation_set, 4)
# Tokenize through Spark Transformations
CV_wordsByTweet = (CV_RDD.map(lambda tweet_2: tweet_2.decode("ascii", "ignore").encode("ascii"))
.map(filter_tweet)
.map(tokenizer.tokenize)
.map(lemmatize)
.map(filter_stop_words)
.map(negation_tokenizer)
.cache())
print("Cross Validation set loaded and tokenised... ")
# Compute TF-IDF scores
raw_input("Produce the TF-IDF scores for Cross-Validation Set...")
CV_TFsIDFs_Vector_Weights_RDDs = (CV_wordsByTweet
.map(lambda tokens: (tfidf(tokens, IDFS_weights_BV.value)))
.cache())
# Feature Extraction
raw_input("Extract Features for Cross-Validation Set...")
CV_Set_Vectors = (CV_TFsIDFs_Vector_Weights_RDDs
.map(lambda (tokens): featurize(tokens))
.collect())
# Generate labelledppoint parameter...
raw_input("Generate the LabeledPoint parameter... ")
labelled_CV_set_RDD = sc.parallelize(final_form_4_training(CV_Set_Vectors, cross_validation_labels))
# Compute Accuracy
print("--------------------------------------------------------------------------------------------------------")
raw_input("Compute model CV-accuracy...")
predictionAndLabel = labelled_CV_set_RDD.map(lambda x: (model.predict(x.features), x.label))
accuracy = 100.0 * predictionAndLabel.filter(lambda (x, v): x == v).count() / labelled_CV_set_RDD.count()
print ("Model Accuracy is: {0:.2f}%".format(accuracy))
print("--------------------------------------------------------------------------------------------------------")
# TEST MODEL ---------------------------------------------------------------------------------------------------#
print("--------------------------------------------------------------------------------------------------------")
raw_input("Test the model... ")
# Instantiate Cross_Validation RDD
test_RDD = sc.parallelize(test_set, 4)
# Tokenize through Spark Transformations
test_wordsByTweet = (test_RDD.map(lambda tweet_4: tweet_4.decode("ascii", "ignore").encode("ascii"))
.map(filter_tweet)
.map(tokenizer.tokenize)
.map(lemmatize)
.map(filter_stop_words)
.map(negation_tokenizer)
.cache())
print("Test set loaded and tokenised... ")
# Compute TF-IDF scores
raw_input("Produce the TF-IDF scores for Test Set...")
test_TFsIDFs_Vector_Weights_RDDs = (test_wordsByTweet
.map(lambda tokens: (tfidf(tokens, IDFS_weights_BV.value)))
.cache())
# Feature Extraction
raw_input("Extract Features for Cross-Validation Set...")
test_Set_Vectors = (test_TFsIDFs_Vector_Weights_RDDs
.map(lambda (tokens): featurize(tokens))
.collect())
# Generate labelledppoint parameter...
raw_input("Generate the LabeledPoint parameter... ")
labelled_test_set_RDD = sc.parallelize(final_form_4_training(test_Set_Vectors, test_labels))
# Compute Accuracy
print("--------------------------------------------------------------------------------------------------------")
raw_input("Compute model Test-accuracy...")
predictionAndLabel = labelled_test_set_RDD.map(lambda x: (model.predict(x.features), x.label))
accuracy = 100.0 * predictionAndLabel.filter(lambda (x, v): x == v).count() / labelled_test_set_RDD.count()
print ("Model Accuracy is: {0:.2f}%".format(accuracy))
print("--------------------------------------------------------------------------------------------------------")
# SAVE MODEL ----------------------------------------------------------------------------------------------------#
model_path = "/Users/path/to/twitter_analytics/NB_model"
model.save(sc, model_path)
# END OF FILE -------------------------------------------------------------------------------------------------------#
| apache-2.0 |
bsipocz/seaborn | doc/conf.py | 25 | 9149 | # -*- coding: utf-8 -*-
#
# seaborn documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 29 23:25:46 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
import matplotlib as mpl
mpl.use("Agg")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'plot_generator',
'plot_directive',
'numpydoc',
'ipython_directive',
'ipython_console_highlighting',
]
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Include the example source for plots in API docs
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'seaborn'
copyright = u'2012-2015, Michael Waskom'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
sys.path.insert(0, os.path.abspath(os.path.pardir))
import seaborn
version = seaborn.__version__
# The full version, including alpha/beta/rc tags.
release = seaborn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'source_link_position': "footer",
'bootswatch_theme': "flatly",
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [("API", "api"),
("Tutorial", "tutorial"),
("Gallery", "examples/index")],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', 'example_thumbs']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'seaborndoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'seaborn.tex', u'seaborn Documentation',
u'Michael Waskom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'seaborn', u'seaborn Documentation',
[u'Michael Waskom'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'seaborn', u'seaborn Documentation',
u'Michael Waskom', 'seaborn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Add the 'copybutton' javascript, to hide/show the prompt in code
# examples, originally taken from scikit-learn's doc/conf.py
def setup(app):
    """Sphinx extension hook: register custom static assets.

    Called by Sphinx at build time with the application object; adds the
    copybutton script (hide/show the ``>>>`` prompts in code examples)
    and the project's custom stylesheet.
    """
    app.add_javascript('copybutton.js')
    app.add_stylesheet('style.css')
| bsd-3-clause |
dh4gan/oberon | plot/plot_positions.py | 1 | 1034 | '''
Created on 7/3/14
@author: dh4gan
Show the positions of the bodies in the system
'''
from sys import argv
from matplotlib import pyplot as plt
import io_oberon.io_nbody
# Data file path comes from the first command-line argument, or is
# prompted for interactively when no argument is given
if len(argv)==1:
    input_file = raw_input("Enter the datafile: ")  # Python 2 builtin
else:
    input_file = argv[1]
# tmax = 0.0 presumably means "no upper time cut" -- TODO confirm against
# io_oberon.io_nbody.read_nbody_datafile
tmax = 0.0
time, bodyarray, number_bodies = io_oberon.io_nbody.read_nbody_datafile(input_file, tmax)
# One figure per body: top panel is the XY trajectory, bottom panel the
# velocity components against time
for i in xrange(number_bodies):
    fig = plt.figure(i)
    plt.suptitle(str(bodyarray[i].bodytype))
    plt.subplot(211)
    plt.xlabel("X Position [ AU ]")
    plt.ylabel("Y Position [ AU ]")
    plt.plot(bodyarray[i].x, bodyarray[i].y, '.', color='red')
    plt.subplot(212)
    plt.plot(time,bodyarray[i].vx, '.',color='blue', label='$v_x$')
    plt.plot(time, bodyarray[i].vy, '.',color='green', label='$v_y$')
    plt.xlabel("Time [ years ]")
    plt.ylabel("Velocity [ AU / year ]")
    plt.legend(loc='lower right')
# show all figures at once, after the loop has built them
plt.show()
| gpl-3.0 |
nixingyang/Kaggle-Competitions | Customer Analytics/ensemble.py | 3 | 1660 | import file_operations
import glob
import numpy as np
import os
import pandas as pd
import solution
import time
OLD_SUBMISSION_FOLDER_PATH = solution.SUBMISSION_FOLDER_PATH
NEW_SUBMISSION_FOLDER_PATH = "./"
def perform_ensembling(low_threshold, high_threshold):
    """Ensemble previously saved submission files by mean and median.

    Reads every ``*.csv`` in OLD_SUBMISSION_FOLDER_PATH whose file name
    falls lexicographically between ``Aurora_<low>`` and ``Aurora_<high>``
    (the 4-decimal score is embedded in the name), then writes two new
    submissions to NEW_SUBMISSION_FOLDER_PATH: the element-wise mean and
    the element-wise median of the selected predictions.

    :param low_threshold: lower bound of the score window (inclusive)
    :param high_threshold: upper bound of the score window (inclusive)
    """
    print("Reading the submission files from disk ...")
    prediction_list = []
    for submission_file_path in glob.glob(
            os.path.join(OLD_SUBMISSION_FOLDER_PATH, "*.csv")):
        # skip files whose embedded score is outside the requested window
        if os.path.basename(submission_file_path) < "Aurora_{:.4f}".format(low_threshold) or \
                os.path.basename(submission_file_path) > "Aurora_{:.4f}".format(high_threshold):
            continue
        submission_file_content = pd.read_csv(submission_file_path)
        prediction = submission_file_content[
            file_operations.LABEL_COLUMN_NAME_IN_SUBMISSION].as_matrix()
        prediction_list.append(prediction)
    print("Writing the submission files to disk ...")
    # NOTE(review): `submission_file_content` deliberately leaks out of the
    # loop above and is reused as a template for the output files; if the
    # glob matches no file in range this raises NameError -- confirm the
    # score window is never empty before calling
    mean_prediction = np.mean(prediction_list, axis=0)
    median_prediction = np.median(prediction_list, axis=0)
    # `bias` offsets the timestamp so the two output file names differ
    for bias, prediction in enumerate([mean_prediction, median_prediction]):
        submission_file_name = "Ensemble_{:.4f}_to_{:.4f}_{:d}.csv".format(
            low_threshold, high_threshold,
            int(time.time()) + bias)
        submission_file_path = os.path.join(NEW_SUBMISSION_FOLDER_PATH,
                                            submission_file_name)
        submission_file_content[
            file_operations.LABEL_COLUMN_NAME_IN_SUBMISSION] = prediction
        submission_file_content.to_csv(submission_file_path, index=False)
if __name__ == "__main__":
    # Only run the default full-range ensemble when executed as a script;
    # importing this module should not trigger any file I/O.
    perform_ensembling(0, 1)
    print("All done!")
| mit |
INCF/BIDS2ISATab | bids2isatab/main.py | 1 | 25918 | #!/usr/bin/env python
#
# import modules used here -- sys is a very standard one
from __future__ import print_function
import argparse
import logging
from collections import OrderedDict
from glob import glob
import os
from os.path import exists, join as opj, split as psplit
import sys
import nibabel
import json
import pandas as pd
# map column titles to ontology specs
# based on this info the appropriate additional columns in the ISATab
# tables are generated
# Values are either:
#  - a dict mapping raw (lower-cased) values to (term, source, accession)
#    ontology triples (qualitative columns),
#  - a (source, accession, unit) triple for quantitative columns, or
#  - None for columns that are kept verbatim with no ontology annotation.
ontology_term_map = {
    # qualitative information
    "Characteristics[organism]": {
        'homo sapiens': ('Homo sapiens', 'NCBITAXON', 'NCBITaxon:9606'),
    },
    "Characteristics[organism part]": {
        'brain': ('brain', 'UBERON', 'UBERON:0000955'),
    },
    "Characteristics[sex]": {
        'female': ('female', 'PATO', 'PATO:0000383'),
        'f': ('female', 'PATO', 'PATO:0000383'),
        'male': ('male', 'PATO', 'PATO:0000384'),
        'm': ('male', 'PATO', 'PATO:0000384'),
    },
    "Characteristics[handedness]": {
        'right': ('right', 'PATO', 'PATO:0002203'),
        'r': ('right', 'PATO', 'PATO:0002203'),
        'left': ('left', 'PATO', 'PATO:0002202'),
        'l': ('left', 'PATO', 'PATO:0002202'),
        'ambidextrous': ('ambidextrous', 'PATO', 'PATO:0002204'),
        'r;l': ('ambidextrous', 'PATO', 'PATO:0002204'),
        'l;r': ('ambidextrous', 'PATO', 'PATO:0002204'),
    },
    # take as is ...
    'Parameter Value[4d spacing]': None,
    # ...but have dedicated unit column
    'Parameter Unit[4d spacing]': {
        # BUG FIX: ontology label was misspelled 'millimiter'; UO:0000016
        # is 'millimeter' (consistent with the resolution entry below)
        'millimeter': ('millimeter', 'UO', 'UO:0000016'),
        'second': ('second', 'UO', 'UO:0000010'),
        'hertz': ('hertz', 'UO', 'UO:0000106'),
        'hz': ('hertz', 'UO', 'UO:0000106'),
        'ppm': ('parts per million', 'UO', 'UO:0000109'),
        'rad': ('radian', 'UO', 'UO:0000123'),
        'rads': ('radian', 'UO', 'UO:0000123'),
    },
    # quantitative information
    "Characteristics[age at scan]": ('UO', 'UO:0000036', 'year'),
    "Parameter Value[resolution]": ('UO', 'UO:0000016', 'millimeter'),
    "Parameter Value[repetition time]": ('UO', 'UO:0000010', 'second'),
    "Parameter Value[magnetic field strength]": ('UO', 'UO:0000228', 'tesla'),
    "Parameter Value[flip angle]": ('UO', 'UO:0000185', 'degree'),
    "Parameter Value[echo time]": ('UO', 'UO:0000010', 'second'),
    "Parameter Value[sampling frequency]": ('UO', 'UO:0000106', 'hertz'),
    # no associated term, keep but leave untouched
    "Parameter Value[instrument name]": None,
    "Parameter Value[instrument manufacturer]": None,
    "Parameter Value[instrument software version]": None,
    "Parameter Value[coil type]": None,
    "Parameter Value[sequence]": None,
    # TODO next two maybe factor values?
    "Parameter Value[recording label]": None,
    "Parameter Value[acquisition label]": None,
    "Parameter Value[content description]": None,
    # Keep any task factor, and any of the two task term sources
    # of which one will get used (whatever is found first)
    "Factor Value[task]": None,
    'Parameter Value[CogAtlasID]': None,
    'Parameter Value[CogPOID]': None,
    'Protocol REF': None,
    'Sample Name': None,
    'Assay Name': None,
    'Raw Data File': None,
    # modality should get proper terms attached
    'Parameter Value[modality]': None,
    # not sure if there are terms for SENSE and GRAPPA etc. anywhere
    'Parameter Value[parallel acquisition technique]': None,
}
# translate from what we find in BIDS or a DICOM dump into the
# names that ScientificData prefers
# matching will be done on lower case string
# add any synonyms or additions as entries to this dictionary
parameter_name_map = {
    "manufacturermodelname": "instrument name",
    "manufacturer": "instrument manufacturer",
    "hardcopydevicesoftwareversion": "instrument software version",
    # NOTE: "receivecoilname" was listed twice in the original literal
    # (with identical values); the redundant entry was removed
    "receivecoilname": "coil type",
    "magneticfieldstrength": "magnetic field strength",
    "echotime": "echo time",
    "repetitiontime": "repetition time",
    "flipangle": "flip angle",
    "pulsesequencetype": "sequence",
    "parallelacquisitiontechnique": "parallel acquisition technique",
    "samplingfrequency": "sampling frequency",
    "contentdescription": "content description",
}
# standardize columns from participants.tsv
# maps BIDS participants.tsv column names (lower case) to the corresponding
# ISATab study-table column headings
sample_property_name_map = {
    "age": "Characteristics[age at scan]",
    "gender": "Characteristics[sex]",
    "handedness": "Characteristics[handedness]",
    "participant_id": "Sample Name",
    "sex": "Characteristics[sex]",
}
def get_bids_metadata(bids_root, basepath):
    """Query the BIDS meta data JSON file hierarchy.

    Candidate sidecar JSON files are probed from the most general
    (top-level, per-modality) to the most specific (the file's own
    sidecar), and their contents merged so that more specific files
    override more general ones.

    Parameters
    ----------
    bids_root : path
        Path to the root of the BIDS dataset
    basepath : path
        Relative path to the file (filename without extension, e.g. no
        '.nii.gz') for which meta data shall be queried.
    """
    sidecar_json = '{}.json'.format(basepath)
    filename = psplit(sidecar_json)[-1]
    components = filename.split("_")

    # sort the filename components into the levels of the BIDS hierarchy;
    # 'run-*' components belong to no level
    session_components = []
    subject_components = []
    top_components = []
    sub = None
    ses = None
    for comp in components:
        if comp[:3] == "run":
            continue
        session_components.append(comp)
        if comp[:3] == "ses":
            ses = comp
            continue
        subject_components.append(comp)
        if comp[:3] == "sub":
            sub = comp
        else:
            top_components.append(comp)

    # candidate sidecars, ordered general -> specific; the top level gets
    # one probe per single component combined with the modality suffix,
    # plus one with all components combined
    candidates = []
    for comp in top_components[:-1]:
        candidates.append(opj(bids_root, "_".join([comp, top_components[-1]])))
    candidates.append(opj(bids_root, "_".join(top_components)))
    candidates.append(opj(bids_root, sub, "_".join(subject_components)))
    if ses:
        candidates.append(opj(bids_root, sub, ses, "_".join(session_components)))
    candidates.append(sidecar_json)

    merged = {}
    for candidate in candidates:
        if exists(candidate):
            with open(candidate, "r") as json_fp:
                merged.update(json.load(json_fp))
    return merged
def get_chainvalue(chain, src):
    """Follow a sequence of keys through nested dicts.

    Returns the value reached by looking up each key in `chain` in turn,
    or None as soon as any key is missing. An empty chain returns `src`
    itself.
    """
    current = src
    try:
        for step in chain:
            current = current[step]
    except KeyError:
        return None
    return current
def get_keychains(d, dest, prefix):
    """Collect the key paths of all non-empty leaves in a nested dict.

    Recursively walks `d`; every leaf value that is truthy and not the
    'UNDEFINED' placeholder contributes `tuple(prefix + keys)` to the
    returned set. `dest` is not mutated; an augmented set is returned.
    """
    if not isinstance(d, dict):
        # leaf: record the path unless the value is empty or a placeholder
        if d and d != 'UNDEFINED':
            dest = dest.union((tuple(prefix),))
        return dest
    for key in d:
        dest = get_keychains(d[key], dest, prefix + [key])
    return dest
def _get_study_df(bids_directory):
    """Compile the ISATab study table for a BIDS dataset.

    One row per subject (any ``sub-*`` directory under `bids_directory`),
    enriched with the columns of ``participants.tsv`` when present.

    Parameters
    ----------
    bids_directory : path
        Root of the BIDS dataset.

    Returns
    -------
    pandas.DataFrame
    """
    subject_ids = []
    study_dict = OrderedDict()
    for file in glob(opj(bids_directory, "sub-*")):
        if os.path.isdir(file):
            # strip the 'sub-' prefix
            subject_ids.append(psplit(file)[-1][4:])
    subject_ids.sort()
    study_dict["Source Name"] = subject_ids
    study_dict["Characteristics[organism]"] = "homo sapiens"
    study_dict["Characteristics[organism part]"] = "brain"
    study_dict["Protocol REF"] = "Participant recruitment"
    study_dict["Sample Name"] = subject_ids
    df = pd.DataFrame(study_dict)
    participants_file = opj(bids_directory, "participants.tsv")
    if not exists(participants_file):
        return df
    participants_df = pd.read_csv(participants_file, sep="\t")
    rename_rule = sample_property_name_map.copy()
    # remove all mappings that do not match the columns at hand;
    # BUG FIX: iterate over a snapshot -- deleting from a dict while
    # iterating its keys() view raises RuntimeError on Python 3
    for r in list(rename_rule):
        if r not in participants_df.keys():
            del rename_rule[r]
    # turn all unknown properties into comment columns
    for c in participants_df.keys():
        if c not in rename_rule:
            rename_rule[c] = "Comment[{}]".format(c.lower())
    participants_df.rename(columns=rename_rule, inplace=True)
    # simplify sample names by stripping the common 'sub-' prefix
    participants_df["Sample Name"] = \
        [s[4:] for s in list(participants_df["Sample Name"])]
    # merge participant info with study info
    df = pd.merge(
        df,
        participants_df,
        left_on="Sample Name",
        right_on="Sample Name")
    return df
def _describe_file(fpath, bids_directory):
    """Derive assay-table properties from a BIDS file name.

    Splits the file name into its '_'-separated components, records
    sample/assay identifiers, the modality suffix, and any recording,
    acquisition or task labels, and attaches the merged sidecar metadata
    under 'other_fields'.
    """
    fname = psplit(fpath)[-1]
    name_parts = fname.split(".")[0].split('_')
    info = {
        # strip the 'sub-' prefix
        'Sample Name': name_parts[0][4:],
        # assay name is the entire filename except for the modality suffix
        # so that, e.g. simultaneous recordings match wrt to the assay name
        # across assay tables
        'Assay Name': '_'.join(name_parts[:-1]),
        'Raw Data File': fpath[len(bids_directory):],
        'Parameter Value[modality]': name_parts[-1]
    }
    key_values = dict(part.split('-') for part in name_parts[:-1])
    for label in ('rec', 'recording'):
        if label in key_values:
            info['Parameter Value[recording label]'] = key_values[label]
    for label in ('acq', 'acquisition'):
        if label in key_values:
            info['Parameter Value[acquisition label]'] = key_values[label]
    if 'task' in key_values:
        info['Factor Value[task]'] = key_values['task']
    info['other_fields'] = get_bids_metadata(
        bids_directory,
        '_'.join(name_parts)
    )
    return info
def _describe_mri_file(fpath, bids_directory):
    """Build an assay record for an MRI file, incl. resolution metadata.

    Extends _describe_file() with spatial resolution (normalized to mm)
    and 4D spacing parameters read from the NIfTI header via NiBabel.
    Returns the partial record if the file is unreadable (dead symlink).
    """
    info = _describe_file(fpath, bids_directory)
    if not exists(fpath):
        # this could happen in the case of a dead symlink in,
        # e.g., a git-annex repo
        logging.warning(
            "cannot extract meta data from '{}'".format(fpath))
        return info
    header = nibabel.load(fpath).get_header()
    spatial_unit = header.get_xyzt_units()[0]
    # by what factor to multiply by to get to 'mm'
    if spatial_unit == 'unknown':
        logging.warning(
            "unit of spatial resolution for '{}' unknown, assuming 'millimeter'".format(
                fpath))
    spatial_unit_conversion = {
        'unknown': 1,
        'meter': 1000,
        'mm': 1,
        'micron': 0.001}.get(spatial_unit, None)
    if spatial_unit_conversion is None:
        raise RuntimeError("unexpected spatial unit code '{}' from NiBabel".format(
            spatial_unit))
    # voxel size along the three spatial axes, normalized to mm
    info['Parameter Value[resolution]'] = "x".join(
        [str(i * spatial_unit_conversion) for i in header.get_zooms()[:3]])
    if len(header.get_zooms()) > 3:
        # got a 4th dimension
        rts_unit = header.get_xyzt_units()[1]
        if rts_unit == 'unknown':
            # BUGFIX: the original format string inserted the file path
            # into the slot meant for the unit; report the file instead
            logging.warning(
                "RTS unit for '{}' unknown, assuming 'seconds'".format(
                    fpath))
        # normalize to seconds, if possible
        rts_unit_conversion = {
            'msec': 0.001,
            'micron': 0.000001}.get(rts_unit, 1.0)
        info['Parameter Value[4d spacing]'] = header.get_zooms()[3] * rts_unit_conversion
        if rts_unit in ('hz', 'ppm', 'rads'):
            # not a time unit
            info['Parameter Unit[4d spacing]'] = rts_unit
        else:
            info['Parameter Unit[4d spacing]'] = 'second'
    return info
def _get_file_matches(bids_directory, glob_pattern):
    """Glob for data files matching `glob_pattern`, with and without
    session subdirectories."""
    # non-session layout: sub-*/<datatype>/sub-<pattern>
    no_session = opj(
        bids_directory, "sub-*", "*", "sub-{}".format(glob_pattern))
    # session layout: sub-*/ses-*/<datatype>/sub-*_ses-<pattern>
    with_session = opj(
        bids_directory, "sub-*", "ses-*", "*",
        "sub-*_ses-{}".format(glob_pattern))
    return glob(no_session) + glob(with_session)
def _get_mri_assay_df(bids_directory, modality):
    """Assemble the assay table for one MRI modality (e.g. 'bold').

    Returns a (DataFrame, parameter-name-list) tuple.
    """
    # locate all NIfTI files of the requested modality
    nifti_files = _get_file_matches(
        bids_directory, '*_{}.nii.gz'.format(modality))
    return _get_assay_df(
        bids_directory,
        modality,
        "Magnetic Resonance Imaging",
        nifti_files,
        _describe_mri_file)
def _get_assay_df(bids_directory, modality, protocol_ref, files, file_descr):
    """Collect per-file descriptions into an assay table DataFrame.

    `file_descr` is a callable (fpath, bids_directory) -> dict (see
    _describe_file). Returns (DataFrame, parameter-name-list); an empty
    DataFrame and empty list when no file yielded an 'Assay Name'.
    """
    assay_dict = OrderedDict()
    assay_dict["Protocol REF"] = protocol_ref
    finfos = []
    info_keys = set()
    # describe each file and track the union of all keys seen
    for fname in files:
        finfo = file_descr(fname, bids_directory)
        info_keys = info_keys.union(finfo.keys())
        info_keys = info_keys.union(finfo.keys()) if False else info_keys
        finfos.append(finfo)
    # one list per key, padded with None for files lacking that key
    collector_dict = dict(zip(info_keys, [[] for i in range(len(info_keys))]))
    for finfo in finfos:
        for spec in info_keys:
            fspec = finfo.get(spec, None)
            collector_dict[spec].append(fspec)
    for k in collector_dict:
        if k == 'other_fields':
            # special case dealt with below
            continue
        # skip empty
        if not all([v is None for v in collector_dict[k]]):
            assay_dict[k] = collector_dict[k]
    # record order of parameters; needs to match order in above loop
    mri_par_names = ["Resolution", "Modality"]
    # determine the union of any additional fields found for any file
    new_fields = set()
    for d in collector_dict.get('other_fields', []):
        new_fields = get_keychains(d, new_fields, [])
    # create a parameter column for each of them
    for field in new_fields:
        # deal with nested structures by concatenating the field names
        field_name = ':'.join(field)
        # normalize parameter names
        field_name = parameter_name_map.get(field_name.lower(), field_name)
        # final column ID
        column_id = "Parameter Value[{}]".format(field_name)
        assay_dict[column_id] = []
        # and fill with content from files
        for d in collector_dict['other_fields']:
            assay_dict[column_id].append(get_chainvalue(field, d))
    if 'Assay Name' in assay_dict:
        df = pd.DataFrame(assay_dict)
        df = df.sort_values(['Assay Name'])
        return df, mri_par_names  # TODO investigate necessity for 2nd return value
    else:
        return pd.DataFrame(), []
def _get_investigation_template(bids_directory, mri_par_names):
    """Load the i_investigation.txt template and fill in the dataset
    title and MRI parameter names."""
    # resolve the template shipped next to this module (handle .pyc)
    module_file = __file__[:-1] if __file__.endswith('.pyc') else __file__
    module_dir = psplit(os.path.realpath(module_file))[:-1]
    template_path = opj(*(module_dir + ("i_investigation_template.txt", )))
    template = open(template_path).read()
    # default title: name of the BIDS root directory
    title = psplit(bids_directory)[-1]
    descr_fname = opj(bids_directory, "dataset_description.json")
    if exists(descr_fname):
        # prefer the dataset's own name, when declared
        with open(descr_fname, "r") as fp:
            description = json.load(fp)
        if "Name" in description:
            title = description["Name"]
    template = template.replace("[TODO: TITLE]", title)
    template = template.replace(
        "[TODO: MRI_PAR_NAMES]", ";".join(mri_par_names))
    return template
def _drop_from_df(df, drop):
if drop is None:
return df
elif drop == 'unknown':
# remove anything that isn't white-listed
drop = [k for k in df.keys() if not k in ontology_term_map]
elif isinstance(drop, (tuple, list)):
# is list of parameter names to drop
drop = ['Parameter Value[{}]'.format(d) for d in drop]
# at this point drop is some iterable
# filter assay table and take out matching parameters
for k in df.keys():
if k in drop:
print('dropping %s from output' % k)
df.drop(k, axis=1, inplace=True)
return df
def _item_sorter_key(item):
# define custom column order for tables
name = item[0]
if name in ('Sample Name', 'Source Name'):
return 0
elif name.startswith('Characteristics['):
return 1
elif name.startswith('Factor Value['):
return 2
elif name.startswith('Protocol REF'):
return 3
elif name.startswith('Parameter Value['):
return 4
elif name == 'Raw Data File':
return 5
elif name == 'Assay Name':
return 6
elif name.startswith('Comment['):
return 10
elif name.startswith('Parameter Unit['):
# put them at the very end so we discover them last when adding
# ontology terms
return 99
def _sort_df(df):
    # reorder table columns according to the ranks from _item_sorter_key
    # NOTE(review): pd.DataFrame.from_items and DataFrame.iteritems were
    # removed in modern pandas -- this assumes an old pandas version
    return pd.DataFrame.from_items(sorted(df.iteritems(), key=_item_sorter_key))
def _extend_column_list(clist, addition, after=None):
if after is None:
for a in addition:
clist.append(a)
else:
tindex = None
for i, c in enumerate(clist):
if c[0] == after:
tindex = i
if tindex is None:
raise ValueError("cannot find column '{}' in list".format(after))
for a in addition:
clist.insert(tindex + 1, a)
tindex += 1
def _df_with_ontology_info(df):
    """Expand table columns with ontology term annotations.

    Columns known to `ontology_term_map` are replaced by a column group
    carrying 'Term Source REF' / 'Term Accession Number' (plus 'Unit'
    for quantitative parameters). Returns a new DataFrame.
    """
    items = []
    # check whether we need ontology info for a task factor
    need_task_terms = False
    for col, val in df.iteritems():
        # BUGFIX: reset the insertion target on every iteration;
        # previously the 'Factor Value[task]' branch left `after` unset,
        # causing a NameError on the first column (or reuse of a stale
        # target from an earlier column)
        after = None
        # check if we know something about this column
        term_map = ontology_term_map.get(col, None)
        if term_map is None:
            new_columns = [(col, val)]
        elif isinstance(term_map, tuple):
            # this is quantitative information -> 4-column group
            new_columns = [(col, val),
                           ('Unit', term_map[2]),
                           ('Term Source REF', term_map[0]),
                           ('Term Accession Number', term_map[1])]
        elif isinstance(term_map, dict):
            # this is qualitative information -> 3-column group
            normvals = []
            refs = []
            acss = []
            for v in val:
                normval, ref, acs = term_map.get(
                    v.lower() if hasattr(v, 'lower') else v,
                    (None, None, None))
                normvals.append(normval)
                refs.append(ref)
                acss.append(acs)
                if v and normval is None:
                    logging.warning(
                        "unknown value '{}' for '{}' (known: {})".format(
                            v, col, term_map.keys()))
            new_columns = [(col, normvals),
                           ('Term Source REF', refs),
                           ('Term Accession Number', acss)]
        # merge addition with current set of columns
        if col.startswith('Parameter Unit['):
            # we have a unit column plus terms, insert after matching
            # parameter value column
            after = 'Parameter Value[{}]'.format(col[15:-1])
            new_columns[0] = ('Unit', new_columns[0][1])
        elif col == 'Factor Value[task]':
            # flag that we ought to be looking for task info
            need_task_terms = True
        elif col in ('Parameter Value[CogPOID]',
                     'Parameter Value[CogAtlasID]'):
            if not need_task_terms:
                # no preceding task column -> drop this column entirely
                new_columns = []
            else:
                after = 'Factor Value[task]'
                # TODO check with Varsha how those could be formated
                terms = [v.strip('/').split('/')[-1] if v is not None else None
                         for v in val]
                source_refs = [v[:-(len(terms[i]))] if terms[i] is not None else None
                               for i, v in enumerate(val)]
                new_columns = [('Term Source REF', source_refs),
                               ('Term Accession Number', terms)]
                # ignore a possible second term set
                need_task_terms = False
        _extend_column_list(items, new_columns, after)
    return pd.DataFrame.from_items(items)
def _store_beautiful_table(df, output_directory, fname, repository_info=None):
    """Sort, ontology-annotate, and write a table as a TSV file."""
    table = _df_with_ontology_info(_sort_df(df))
    if repository_info:
        # repository_info is a (name, accession, uri) triple
        table['Comment[Data Repository]'] = repository_info[0]
        table['Comment[Data Record Accession]'] = repository_info[1]
        table['Comment[Data Record URI]'] = repository_info[2]
    table.to_csv(
        opj(output_directory, fname),
        sep="\t",
        index=False)
def extract(
        bids_directory,
        output_directory,
        drop_parameter=None,
        repository_info=None):
    """Convert a BIDS dataset into ISA-Tab files in `output_directory`.

    Parameters
    ----------
    bids_directory : str
      Root of the BIDS dataset.
    output_directory : str
      Target directory (created if necessary).
    drop_parameter : None, 'unknown', or list
      Passed to _drop_from_df() to filter assay table columns.
    repository_info : None or (name, accession, uri)
      Optional data repository annotation for assay tables.
    """
    if not exists(output_directory):
        logging.info(
            "creating output directory at '{}'".format(output_directory))
        os.makedirs(output_directory)
    # generate: s_study.txt
    _store_beautiful_table(
        _get_study_df(bids_directory),
        output_directory,
        "s_study.txt")
    # BUGFIX: keep MRI parameter names from the last modality that
    # actually had files; previously the names from the final loop
    # iteration (usually an empty match) were fed into the template
    mri_par_names = []
    # all imaging modalities recognized in BIDS
    for modality in ('T1w', 'T2w', 'T1map', 'T2map', 'FLAIR', 'FLASH', 'PD',
                     'PDmap', 'PDT2', 'inplaneT1', 'inplaneT2', 'angio', 'dwi',
                     'sbref', 'bold', 'defacemask', 'SWImagandphase'):
        # generate: a_assay.txt
        mri_assay_df, par_names = _get_mri_assay_df(bids_directory, modality)
        if not len(mri_assay_df):
            # no files found, try next
            logging.info(
                "no files match MRI modality '{}', skipping".format(modality))
            continue
        mri_par_names = par_names
        _drop_from_df(mri_assay_df, drop_parameter)
        _store_beautiful_table(
            mri_assay_df,
            output_directory,
            "a_mri_{}.txt".format(modality.lower()),
            repository_info)
    # physio
    df, params = _get_assay_df(
        bids_directory,
        'physio',
        "Physiological recordings",
        _get_file_matches(bids_directory, '*_physio.tsv.gz'),
        _describe_file)
    if len(df):
        _store_beautiful_table(
            _drop_from_df(df, drop_parameter),
            output_directory,
            'a_physiology.txt',
            repository_info)
    # stimulus
    df, params = _get_assay_df(
        bids_directory,
        'stim',
        "Stimulation",
        _get_file_matches(bids_directory, '*_stim.tsv.gz'),
        _describe_file)
    if len(df):
        _store_beautiful_table(
            _drop_from_df(df, drop_parameter),
            output_directory,
            'a_stimulation.txt',
            repository_info)
    # generate: i_investigation.txt
    investigation_template = _get_investigation_template(
        bids_directory, mri_par_names)
    with open(opj(output_directory, "i_investigation.txt"), "w") as fp:
        fp.write(investigation_template)
def _get_cmdline_parser():
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = MyParser(
description="BIDS to ISA-Tab converter.",
fromfile_prefix_chars='@')
# TODO Specify your real parameters here.
parser.add_argument(
"bids_directory",
help="Location of the root of your BIDS compatible directory",
metavar="BIDS_DIRECTORY")
parser.add_argument(
"output_directory",
help="Directory where ISA-TAB files will be stored",
metavar="OUTPUT_DIRECTORY")
parser.add_argument(
"-v",
"--verbose",
help="increase output verbosity",
action="store_true")
parser.add_argument(
"--keep-unknown",
help="""by default only explicitely white-listed parameters and
characteristics are considered. This option will force inclusion of
any discovered information. See --drop-parameter for additional
tuning.""",
action='store_true')
parser.add_argument(
"-d",
"--drop-parameter",
help="""list of parameters to ignore when composing the assay table. See
the generated table for column IDs to ignore. For example, to remove
column 'Parameter Value[time:samples:ContentTime]', specify
`--drop-parameter time:samples:ContentTime`. Only considered together
with --keep-unknown.""")
parser.add_argument(
"--repository-info",
metavar=('NAME', 'ACCESSION#', 'URL'),
help="""data repository information to be used in assay tables.
Example: 'OpenfMRI ds000113d https://openfmri.org/dataset/ds000113d'""",
nargs=3)
return parser
def main(argv=None):
    """Command line entry point: parse arguments, configure logging,
    run the extraction."""
    args = _get_cmdline_parser().parse_args(argv)
    # Setup logging
    loglevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)
    # without --keep-unknown, restrict output to white-listed properties
    drop = args.drop_parameter if args.keep_unknown else 'unknown'
    extract(
        args.bids_directory,
        args.output_directory,
        drop,
        args.repository_info
    )
    print("Metadata extraction complete.")
if __name__ == '__main__':
main()
#
# Make it work seamlessly as a datalad export plugin
#
def _datalad_export_plugin_call(
        ds,
        argv=None,
        output=None,
        drop_parameter=None,
        repository_info=None):
    """Entry point used by datalad's export plugin mechanism."""
    if argv is not None:
        # called from the command line -> go through the std entry point
        return main(argv + [ds.path, output])
    # called via the Python API -> call extract() directly
    return extract(
        ds.path,
        output_directory=output,
        drop_parameter=drop_parameter,
        repository_info=repository_info)
def _datalad_get_cmdline_help():
    """Return (help_text, substitutions) for datalad's help display."""
    help_text = _get_cmdline_parser().format_help()
    # placeholders datalad should replace to make the help text still
    # make sense when delivered through datalad
    substitutions = (
        ('BIDS_DIRECTORY', 'SETBYDATALAD'),
        ('OUTPUT_DIRECTORY', 'SETBYDATALAD'))
    return help_text, substitutions
| apache-2.0 |
DigitalSlideArchive/HistomicsTK | setup.py | 1 | 3031 | #! /usr/bin/env python
import os
import sys
from setuptools import find_packages
try:
from skbuild import setup
except ImportError:
sys.stderr.write("""scikit-build is required to build from source or run tox.
Please run:
python -m pip install scikit-build
""")
# from setuptools import setup
sys.exit(1)
with open('README.rst') as readme_file:
readme = readme_file.read()
def prerelease_local_scheme(version):
    """
    Return local scheme version unless building on master in CircleCI.
    This function returns the local scheme version number
    (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
    pre-release in which case it ignores the hash and produces a
    PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
    """
    # imported lazily so the module can be parsed without setuptools_scm
    from setuptools_scm.version import get_local_node_and_date
    # CIRCLE_BRANCH is set by CircleCI; drop the local part on master
    if os.getenv('CIRCLE_BRANCH') in {'master'}:
        return ''
    else:
        return get_local_node_and_date(version)
setup(
name='histomicstk',
use_scm_version={'local_scheme': prerelease_local_scheme},
description='A Python toolkit for Histopathology Image Analysis',
long_description=readme,
long_description_content_type='text/x-rst',
author='Kitware, Inc.',
author_email='developers@digitalslidearchive.net',
url='https://github.com/DigitalSlideArchive/HistomicsTK',
packages=find_packages(exclude=['tests', '*_test']),
package_dir={
'histomicstk': 'histomicstk',
},
include_package_data=True,
install_requires=[
'girder_client',
# scientific packages
'nimfa>=1.3.2',
'numpy>=1.12.1',
'scipy>=0.19.0',
'Pillow>=3.2.0',
'pandas>=0.19.2',
'scikit-image>=0.14.2',
'scikit-learn>=0.18.1',
'imageio>=2.3.0',
'shapely[vectorized]',
'opencv-python-headless',
'sqlalchemy',
'matplotlib',
'pyvips',
# dask packages
'dask[dataframe]>=1.1.0',
'distributed>=1.21.6',
# large image; for non-linux systems only install the PIL tile source
# by default.
'large-image[sources];sys.platform=="linux"',
'large-image[sources];sys.platform=="linux2"',
'large-image[pil];sys.platform!="linux" and sys.platform!="linux2"',
'girder-slicer-cli-web',
# cli
'ctk-cli',
],
license='Apache Software License 2.0',
keywords='histomicstk',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
],
zip_safe=False,
python_requires='>=3.6',
)
| apache-2.0 |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/MHDgenerator/MHDmatrixSetup.py | 3 | 5074 |
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
from scipy.sparse import coo_matrix, csr_matrix, spdiags, bmat
import os, inspect
from HiptmairSetup import BoundaryEdge
import matplotlib
# from matplotlib.pylab import plt
import CheckPetsc4py as CP
import MatrixOperations as MO
import time
import PETScIO as IO
import MHDmulti
def BoundaryIndices(mesh):
    """Build diagonal 0/1 masks flagging interior DOFs for the velocity,
    magnetic (edge) and Lagrange (vertex) spaces of a dolfin mesh.

    Returns [Velocity, Magnetic, Lagrange] as scipy spdiags matrices;
    entries are 1 for interior DOFs and 0 on the boundary.
    """
    dim = mesh.geometry().dim()
    # edge DOFs on the boundary; the 3D case needs a dedicated helper
    if dim == 3:
        EdgeBoundary = BoundaryEdge(mesh)
        # NOTE(review): every other sorted entry is kept -- presumably
        # the helper returns duplicated edge indices; confirm
        EdgeBoundary = np.sort(EdgeBoundary)[::2]
    else:
        B = BoundaryMesh(mesh,"exterior",False)
        EdgeBoundary = B.entity_map(1).array()
    # 1 on interior edges, 0 on boundary edges
    MagneticBoundary = np.ones(mesh.num_edges())
    MagneticBoundary[EdgeBoundary.astype("int32")] = 0
    Magnetic = spdiags(MagneticBoundary,0,mesh.num_edges(),mesh.num_edges())
    # vertex DOFs on the boundary
    B = BoundaryMesh(mesh,"exterior",False)
    NodalBoundary = B.entity_map(0).array()#.astype("int","C")
    LagrangeBoundary = np.ones(mesh.num_vertices())
    LagrangeBoundary[NodalBoundary] = 0
    Lagrange = spdiags(LagrangeBoundary,0,mesh.num_vertices(),mesh.num_vertices())
    # velocity space repeats the vertex mask once per spatial component
    if dim == 3:
        VelocityBoundary = np.concatenate((LagrangeBoundary,LagrangeBoundary,LagrangeBoundary),axis=1)
    else:
        VelocityBoundary = np.concatenate((LagrangeBoundary,LagrangeBoundary),axis=1)
    Velocity = spdiags(VelocityBoundary,0,dim*mesh.num_vertices(),dim*mesh.num_vertices())
    return [Velocity, Magnetic, Lagrange]
def Assemble(W, NS, Maxwell, Couple, L_ns, L_m, RHSform, BC, Type, IterType):
tic()
if Type == 'NonLinear':
F = assemble(NS[0])
BC[0].apply(F)
F = F.sparray()
if IterType == 'Full':
C = assemble(Couple[0])
C = BC[4]*C.sparray()*BC[3]
else:
C = None
if RHSform == 0:
bu = assemble(L_ns)
bp = Function(W[1]).vector()
print bp.array()
bb = assemble(L_m)
br = Function(W[3]).vector()
BC[0].apply(bu)
BC[1].apply(bb)
BC[2].apply(br)
else:
bu = assemble(L_ns-RHSform[0])
bp = assemble(-RHSform[1])
bb = assemble(L_m-RHSform[2])
br = assemble(-RHSform[3])
BC[0].apply(bu)
BC[1].apply(bb)
BC[2].apply(br)
b = np.concatenate((bu.array(),bp.array(),bb.array(),br.array()),axis = 0)
MO.StrTimePrint("MHD non-linear matrix assembled, time: ",toc())
return [F, C],b
elif Type == 'Linear':
M = assemble(Maxwell[0])
D = assemble(Maxwell[2])
SS = assemble(Maxwell[3])
B = assemble(NS[2])
S = assemble(NS[3])
SS = 0*SS
BC[1].apply(M)
BC[2].apply(SS)
B = B.sparray()*BC[3]
S = S.sparray()
M = M.sparray()
D = BC[4]*D.sparray()*BC[5]
SS = SS.sparray()
MO.StrTimePrint("MHD linear matrix assembled, time: ",toc())
return [B,M,D,S,SS]
else:
bu = assemble(L_ns-RHSform[0])
bp = assemble(-RHSform[1])
bb = assemble(L_m-RHSform[2])
br = assemble(-RHSform[3])
BC[0].apply(bu)
BC[1].apply(bb)
BC[2].apply(br)
b = np.concatenate((bu.array(),bp.array(),bb.array(),br.array()),axis = 0)
return IO.arrayToVec(b)
def SystemAssemble(W,A,b,SetupType,IterType):
tic()
if SetupType == 'Matrix':
if IterType == 'Full':
A = CP.Scipy2PETSc(bmat([[A[0],A[2].T,-A[1].T,None],
[A[2],A[5],None,None],
[A[1],None,A[3],A[4]],
[None,None,A[4].T,A[6]]]))
else:
A = [CP.Scipy2PETSc(bmat([[A[0],A[2].T],
[A[2],A[5]]])),CP.Scipy2PETSc(bmat([[A[3],A[4]],
[A[4].T,A[6]]]))]
b = IO.arrayToVec(b)
MO.StrTimePrint("MHD system assemble, time: ",toc())
return A,b
else:
for i in range(len(A)):
if A[i] != None:
A[i] = CP.Scipy2PETSc(A[i])
if IterType == 'Full':
P = PETSc.Mat().createPython([W[0].dim()+W[1].dim()+W[2].dim()+W[3].dim(),W[0].dim()+W[1].dim()+W[2].dim()+W[3].dim()])
P.setType('python')
p = MHDmulti.MHDmat(W,A)
P.setPythonContext(p)
else:
MatFluid = PETSc.Mat().createPython([W[0].dim()+W[1].dim(), W[0].dim()+W[1].dim()])
MatFluid.setType('python')
pFluid = MHDmulti.MatFluid([W[0],W[1]],A)
MatFluid.setPythonContext(pFluid)
MatMag = PETSc.Mat().createPython([W[2].dim()+W[3].dim(), W[2].dim()+W[3].dim()])
MatMag.setType('python')
pMag = MHDmulti.MatMag([W[2],W[3]],A)
MatMag.setPythonContext(pMag)
P = [MatFluid,MatMag]
b = IO.arrayToVec(b)
MO.StrTimePrint("MHD mult-class setup, time: ",toc())
return P,b
| mit |
rupakc/Kaggle-Compendium | Pokemon/pokedock.py | 1 | 1692 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 24 14:04:20 2016
@author: Rupak Chakraborty
"""
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn import metrics
from sklearn import cross_validation
filepath = 'C:\\Users\\rupachak\\Desktop\\Kaggle Data\\Pokemon\\Pokemon.csv'
poke_frame = pd.read_csv(filepath)
label_encoder = LabelEncoder()
one_hot = OneHotEncoder()
poke_frame.drop(poke_frame.columns[[0,1]],axis=1,inplace=True)
poke_frame['Legendary'] = label_encoder.fit_transform(poke_frame['Legendary'].values)
poke_frame['Type 2'] = (label_encoder.fit_transform(poke_frame['Type 2'].values))
train_labels = poke_frame['Type 1'].values
del poke_frame['Type 1']
train_data = poke_frame.values
X_train,X_test,y_train,y_test = cross_validation.train_test_split(train_data,train_labels,test_size=0.2)
rf = RandomForestClassifier(n_estimators=51,max_depth=5)
ada = AdaBoostClassifier()
grad = GradientBoostingClassifier(n_estimators=51,max_depth=5)
bag = BaggingClassifier()
classifiers = [rf,ada,grad,bag]
classifier_names = ["Random Forests","Adaboost","Gradient Boost","Bagging"]
for classifier,classifier_name in zip(classifiers,classifier_names):
classifier.fit(X_train,y_train)
y_predict = classifier.predict(X_test)
print 'For Classifier ',classifier_name,'\n'
print metrics.classification_report(y_test,y_predict)
print 'Accuracy ',metrics.accuracy_score(y_test,y_predict) | mit |
wzbozon/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
    """export_graphviz must reject empty feature/class name lists."""
    model = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
    model.fit(X, y)
    # empty feature_names must be rejected
    assert_raises(IndexError, export_graphviz, model, StringIO(),
                  feature_names=[])
    # empty class_names must be rejected
    assert_raises(IndexError, export_graphviz, model, StringIO(),
                  class_names=[])
def test_friedman_mse_in_graphviz():
    """Exported trees must report the 'friedman_mse' criterion in every
    node label."""
    clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
    clf.fit(X, y)
    dot_data = StringIO()
    export_graphviz(clf, out_file=dot_data)
    clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
    clf.fit(X, y)
    for estimator in clf.estimators_:
        export_graphviz(estimator[0], out_file=dot_data)
    # raw string fixes the invalid '\[' escape sequence (a
    # DeprecationWarning/SyntaxWarning on modern Python)
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
        assert_in("friedman_mse", finding.group())
| bsd-3-clause |
ishanic/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little danse to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
    with warnings.catch_warnings(record=True):
        # Don't raise the numpy deprecation warnings that appear in
        # 1.9, but avoid Python bug due to simplefilter('ignore')
        warnings.simplefilter('always')
        sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
    # the following code is taken from the scipy 0.14 codebase

    def _minor_reduce(X, ufunc):
        """Apply ``ufunc.reduceat`` over the stored data of CSR/CSC ``X``.

        Returns the indices of the non-empty major-axis slices and the
        reduced value of each.
        """
        major_index = np.flatnonzero(np.diff(X.indptr))
        if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 don't handle empty arrays in reduceat
            value = np.zeros_like(X.data)
        else:
            value = ufunc.reduceat(X.data, X.indptr[major_index])
        return major_index, value

    def _min_or_max_axis(X, axis, min_or_max):
        """Reduce sparse ``X`` with ``min_or_max`` along ``axis`` (0 or 1)."""
        N = X.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = X.shape[1 - axis]
        mat = X.tocsc() if axis == 0 else X.tocsr()
        mat.sum_duplicates()
        major_index, value = _minor_reduce(mat, min_or_max)
        # Slices with implicit zeros must include 0 in the reduction.
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)
        from scipy.sparse import coo_matrix
        if axis == 0:
            res = coo_matrix((value, (np.zeros(len(value)), major_index)),
                             dtype=X.dtype, shape=(1, M))
        else:
            res = coo_matrix((value, (major_index, np.zeros(len(value)))),
                             dtype=X.dtype, shape=(M, 1))
        return res.A.ravel()

    def _sparse_min_or_max(X, axis, min_or_max):
        """Min/max of sparse ``X`` over all entries (axis=None) or one axis."""
        if axis is None:
            if 0 in X.shape:
                raise ValueError("zero-size array to reduction operation")
            zero = X.dtype.type(0)
            if X.nnz == 0:
                return zero
            m = min_or_max.reduce(X.data.ravel())
            # np.product was deprecated and removed in modern numpy;
            # np.prod is the supported, identical spelling.
            if X.nnz != np.prod(X.shape):
                # There are implicit zeros, so they take part in the result.
                m = min_or_max(zero, m)
            return m
        if axis < 0:
            axis += 2
        if (axis == 0) or (axis == 1):
            return _min_or_max_axis(X, axis, min_or_max)
        else:
            raise ValueError("invalid axis, use 0 for rows, or 1 for columns")

    def sparse_min_max(X, axis):
        """Return ``(min, max)`` of sparse matrix ``X`` along ``axis``."""
        return (_sparse_min_or_max(X, axis, np.minimum),
                _sparse_min_or_max(X, axis, np.maximum))

else:
    def sparse_min_max(X, axis):
        """Return ``(min, max)`` of sparse matrix ``X`` along ``axis``."""
        return (X.min(axis=axis).toarray().ravel(),
                X.max(axis=axis).toarray().ravel())
try:
    from numpy import argpartition
except ImportError:
    # numpy.argpartition was introduced in v 1.8.0
    def argpartition(a, kth, axis=-1, kind='introselect', order=None):
        # A full argsort is a valid (if slower) substitute: every element
        # ends up on the correct side of the kth position.  ``kth`` and
        # ``kind`` are accepted for signature compatibility only.
        return np.argsort(a, axis=axis, order=order)
try:
    from itertools import combinations_with_replacement
except ImportError:
    # Backport of itertools.combinations_with_replacement for Python 2.6,
    # equivalent to the recipe in the Python 3.4 documentation
    # (http://tinyurl.com/comb-w-r), copyright Python Software Foundation
    # (https://docs.python.org/3/license.html)
    def combinations_with_replacement(iterable, r):
        """Yield length-``r`` tuples of elements from ``iterable`` allowing
        repetition, in lexicographic order of element indices.

        combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
        """
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return

        def _grow(start, remaining):
            # Recursively extend a combination with indices >= ``start``.
            if remaining == 0:
                yield ()
                return
            for idx in range(start, n):
                for tail in _grow(idx, remaining - 1):
                    yield (pool[idx],) + tail

        for combo in _grow(0, r):
            yield combo
try:
    from numpy import isclose
except ImportError:
    def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
        """
        Returns a boolean array where two arrays are element-wise equal within
        a tolerance.

        This function was added to numpy v1.7.0, and the version you are
        running has been backported from numpy v1.8.1. See its documentation
        for more details.
        """
        def within_tol(x, y, atol, rtol):
            # |x - y| <= atol + rtol * |y|, ignoring invalid-value warnings
            # (NaNs are handled by the caller).
            with np.errstate(invalid='ignore'):
                result = np.less_equal(abs(x - y), atol + rtol * abs(y))
            if np.isscalar(a) and np.isscalar(b):
                result = bool(result)
            return result

        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if all(xfin) and all(yfin):
            # Fast path: no infs/NaNs anywhere.
            return within_tol(x, y, atol, rtol)
        else:
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Since we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond
if np_version < (1, 7):
    # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
    def frombuffer_empty(buf, dtype):
        """np.frombuffer that tolerates a zero-length buffer."""
        if len(buf) == 0:
            return np.empty(0, dtype=dtype)
        else:
            return np.frombuffer(buf, dtype=dtype)
else:
    frombuffer_empty = np.frombuffer
if np_version < (1, 8):
    def in1d(ar1, ar2, assume_unique=False, invert=False):
        """Test whether each element of ``ar1`` is also present in ``ar2``."""
        # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2.
        # (``np.bool`` below is safe here: this branch only runs on the old
        # numpy versions where that alias still exists.)
        # Ravel both arrays, behavior for the first array could be different
        ar1 = np.asarray(ar1).ravel()
        ar2 = np.asarray(ar2).ravel()

        # This code is significantly faster when the condition is satisfied.
        if len(ar2) < 10 * len(ar1) ** 0.145:
            # Brute-force comparison, one pass over ar1 per element of ar2.
            if invert:
                mask = np.ones(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask &= (ar1 != a)
            else:
                mask = np.zeros(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask |= (ar1 == a)
            return mask

        # Otherwise use sorting
        if not assume_unique:
            ar1, rev_idx = np.unique(ar1, return_inverse=True)
            ar2 = np.unique(ar2)

        ar = np.concatenate((ar1, ar2))
        # We need this to be a stable sort, so always use 'mergesort'
        # here. The values from the first array should always come before
        # the values from the second array.
        order = ar.argsort(kind='mergesort')
        sar = ar[order]
        # A value of ar1 present in ar2 shows up as two equal neighbours
        # in the sorted concatenation.
        if invert:
            bool_ar = (sar[1:] != sar[:-1])
        else:
            bool_ar = (sar[1:] == sar[:-1])
        flag = np.concatenate((bool_ar, [invert]))
        indx = order.argsort(kind='mergesort')[:len(ar1)]

        if assume_unique:
            return flag[indx]
        else:
            return flag[indx][rev_idx]
else:
    from numpy import in1d
if sp_version < (0, 15):
    # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142:
    # scipy's lsqr mishandled some sparse inputs before 0.15.
    from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
    from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
    # partial cannot be pickled in Python 2.6
    # http://bugs.python.org/issue1398
    class partial(object):
        """Picklable stand-in for functools.partial on Python 2.6."""

        def __init__(self, func, *args, **keywords):
            functools.update_wrapper(self, func)
            self.func = func
            self.args = args
            self.keywords = keywords

        def __call__(self, *args, **keywords):
            # Positional args extend the frozen ones; keyword args override.
            merged_kwargs = dict(self.keywords)
            merged_kwargs.update(keywords)
            return self.func(*(self.args + args), **merged_kwargs)
else:
    from functools import partial
if np_version < (1, 6, 2):
    # Allow bincount to accept empty arrays
    # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
    def bincount(x, weights=None, minlength=None):
        """np.bincount that returns ``zeros(minlength)`` when ``x`` is empty."""
        if len(x) > 0:
            return np.bincount(x, weights, minlength)
        else:
            if minlength is None:
                minlength = 0
            # np.asscalar coerces the 0-d intp array to a plain Python int
            # (this branch only runs on old numpy where asscalar exists).
            minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
            return np.zeros(minlength, dtype=np.intp)
else:
    from numpy import bincount
# 'exist_ok' was added to os.makedirs in Python 3.2.  Probe for it with
# getfullargspec where available: inspect.getargspec was removed in
# Python 3.11, so using it unconditionally breaks the import there.
_makedirs_argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
if 'exist_ok' in _makedirs_argspec(os.makedirs).args:
    makedirs = os.makedirs
else:
    def makedirs(name, mode=0o777, exist_ok=False):
        """makedirs(name [, mode=0o777][, exist_ok=False])

        Super-mkdir; create a leaf directory and all intermediate ones. Works
        like mkdir, except that any intermediate path segment (not just the
        rightmost) will be created if it does not exist. If the target
        directory already exists, raise an OSError if exist_ok is False.
        Otherwise no exception is raised. This is recursive.
        """
        try:
            os.makedirs(name, mode=mode)
        except OSError as e:
            # Re-raise unless the only failure is "already exists" and the
            # caller asked for exist_ok semantics.
            if (not exist_ok or e.errno != errno.EEXIST
                    or not os.path.isdir(name)):
                raise
| bsd-3-clause |
vberaudi/utwt | sudoku.py | 1 | 1116 | import pandas as pd
import pandas as pd
from docplex.cp.model import *

# Row/column indices of the 9x9 Sudoku grid.
GRNG = range(9)

# Read the puzzle; each cell holds 1-9, with 0 marking an empty cell.
problem_data = pd.read_csv("sudoku.csv", sep=";")
problem = []
for t in problem_data.itertuples(index=False):
    problem.append([i for i in t])

# Constraint-programming model: one integer variable (1..9) per cell.
mdl = CpoModel(name="Sudoku")
grid = [[integer_var(min=1, max=9, name="C" + str(l) + str(c)) for l in GRNG] for c in GRNG]
# All values distinct within every row and every column.
for l in GRNG:
    mdl.add(all_diff([grid[l][c] for c in GRNG]))
for c in GRNG:
    mdl.add(all_diff([grid[l][c] for l in GRNG]))
# All values distinct within each of the nine 3x3 sub-squares.
ssrng = range(0, 9, 3)
for sl in ssrng:
    for sc in ssrng:
        mdl.add(all_diff([grid[l][c] for l in range(sl, sl + 3) for c in range(sc, sc + 3)]))
# Pin the given clues by collapsing each variable's domain to one value.
for l in GRNG:
    for c in GRNG:
        v = problem[l][c]
        if v > 0:
            grid[l][c].set_domain((v, v))

print("\nSolving model....")
msol = mdl.solve(TimeLimit=10)

# Extract the solved grid and write it out as CSV.
sol = [[msol[grid[l][c]] for c in GRNG] for l in GRNG]
pd.DataFrame(sol).to_csv("sudoku_res.csv", sep=";", index=False)

# Register the result file as a worker-job output attachment.
from docplex.worker.clientapi import set_output_attachments
outputs = dict()
outputs['sudoku_res.csv'] = './sudoku_res.csv'
set_output_attachments(outputs)
| apache-2.0 |
google-code-export/nmrglue | doc/_build/html/examples/el/sample_applications/apod_viewer_1win.py | 10 | 9854 | #!/usr/bin/env python
"""
An example of using wxPython to build a GUI application using nmrglue
This application displays the NMRPipe apodization windows
"""
import numpy as np
import nmrglue as ng
import matplotlib
# uncomment the following to use wx rather than wxagg
#matplotlib.use('WX')
#from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas
# comment out the following to use wx rather than wxagg
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
apod_list = ["SP","EM","GM","GMB","TM","TRI","JMOD"]
class ParameterPanel(wx.Panel):
    """Left-hand control panel holding the apodization and data parameters.

    Collects the window type, its q1/q2/q3/c parameters, start/size limits
    and the data dimensions, then forwards them to the parent frame's
    ``ApplyApod``/``ClearFigure`` when the Draw/Clear buttons are pressed.
    """

    def __init__(self, parent):
        """Build all widgets and lay them out in three grouped boxes."""
        wx.Panel.__init__(self, parent, -1)
        self.parent = parent

        # Apodization-type selector.
        self.qName1 = wx.StaticText(self, -1, "Type:")
        self.qName2 = wx.Choice(self, -1, choices=apod_list)
        self.Bind(wx.EVT_CHOICE, self.ApodChoose, self.qName2)

        # Generic q1/q2/q3 parameters; labels/meaning depend on the
        # selected apodization type (see InitApod).
        self.q1_1 = wx.StaticText(self, -1, "q1:")
        self.q1_2 = wx.TextCtrl(self, -1, "0.0")
        self.q2_1 = wx.StaticText(self, -1, "q2:")
        self.q2_2 = wx.TextCtrl(self, -1, "1.0")
        self.q3_1 = wx.StaticText(self, -1, "q3:")
        self.q3_2 = wx.TextCtrl(self, -1, "1.0")

        self.c1 = wx.StaticText(self, -1, "c")
        self.c2 = wx.TextCtrl(self, -1, "1.0")
        self.start_1 = wx.StaticText(self, -1, "Start")
        self.start_2 = wx.TextCtrl(self, -1, "1.0")
        # Size controls start disabled; enabled by the "Custom Size" box.
        self.size_1 = wx.StaticText(self, -1, "Size")
        self.size_1.Enable(False)
        self.size_2 = wx.TextCtrl(self, -1, "1.0")
        self.size_2.Enable(False)
        self.inv = wx.CheckBox(self, -1, "Invert")
        self.use_size = wx.CheckBox(self, -1, "Custom Size")
        self.Bind(wx.EVT_CHECKBOX, self.OnLimitCheck, self.use_size)

        # Data parameters: number of points and spectral width.
        self.points_1 = wx.StaticText(self, -1, "Number of Points:")
        self.points_2 = wx.TextCtrl(self, -1, "1000")
        self.sw_1 = wx.StaticText(self, -1, "Spectral Width:")
        self.sw_2 = wx.TextCtrl(self, -1, "50000.")

        # Action buttons.
        self.b1 = wx.Button(self, 10, "Draw")
        self.Bind(wx.EVT_BUTTON, self.OnDraw, self.b1)
        self.b1.SetDefault()
        self.b2 = wx.Button(self, 20, "Clear")
        self.Bind(wx.EVT_BUTTON, self.OnClear, self.b2)
        self.b2.SetDefault()
        self.InitApod("SP")

        # layout
        apod_grid = wx.GridSizer(8, 2)
        apod_grid.AddMany([self.qName1, self.qName2,
                           self.q1_1, self.q1_2,
                           self.q2_1, self.q2_2,
                           self.q3_1, self.q3_2,
                           self.c1, self.c2,
                           self.start_1, self.start_2,
                           self.size_1, self.size_2,
                           self.inv, self.use_size])

        data_grid = wx.GridSizer(2, 2)
        data_grid.AddMany([self.points_1, self.points_2,
                           self.sw_1, self.sw_2])

        apod_box = wx.StaticBoxSizer(wx.StaticBox(self, -1,
                                     "Apodization Parameters"))
        apod_box.Add(apod_grid)

        data_box = wx.StaticBoxSizer(wx.StaticBox(self, -1,
                                     "Data Parameters"))
        data_box.Add(data_grid)

        button_box = wx.GridSizer(1, 2)
        button_box.AddMany([self.b1, self.b2])

        mainbox = wx.BoxSizer(wx.VERTICAL)
        mainbox.Add(apod_box)
        mainbox.Add(data_box)
        mainbox.Add(button_box)
        self.SetSizer(mainbox)

    def OnLimitCheck(self, event):
        """Enable/disable the custom Size controls; default size = points."""
        k = event.IsChecked()
        self.size_1.Enable(k)
        self.size_2.Enable(k)
        points = float(self.points_2.GetValue())
        self.size_2.SetValue(str(points))

    def ApodChoose(self, event):
        """Re-label the q1/q2/q3 controls for the newly chosen window type."""
        self.InitApod(apod_list[self.qName2.GetCurrentSelection()])

    def InitApod(self, qName):
        """Set the labels, enabled state and defaults of q1/q2/q3 for the
        apodization type ``qName`` (one of ``apod_list``)."""
        if qName == "SP":
            self.q1_1.Enable(True)
            self.q1_1.SetLabel("off")
            self.q1_2.Enable(True)
            self.q1_2.SetValue("0.0")

            self.q2_1.Enable(True)
            self.q2_1.SetLabel("end")
            self.q2_2.Enable(True)
            self.q2_2.SetValue("1.0")

            self.q3_1.Enable(True)
            self.q3_1.SetLabel("pow")
            self.q3_2.Enable(True)
            self.q3_2.SetValue("1.0")

        elif qName == "EM":
            self.q1_1.Enable(True)
            self.q1_1.SetLabel("lb (Hz)")
            self.q1_2.Enable(True)
            self.q1_2.SetValue("0.0")

            self.q2_1.Enable(False)
            self.q2_2.Enable(False)
            self.q3_1.Enable(False)
            self.q3_2.Enable(False)

        elif qName == "GM":
            self.q1_1.Enable(True)
            self.q1_1.SetLabel("g1 (Hz)")
            self.q1_2.Enable(True)
            self.q1_2.SetValue("0.0")

            self.q2_1.Enable(True)
            self.q2_1.SetLabel("g2 (Hz)")
            self.q2_2.Enable(True)
            self.q2_2.SetValue("0.0")

            self.q3_1.Enable(True)
            self.q3_1.SetLabel("g3")
            self.q3_2.Enable(True)
            self.q3_2.SetValue("0.0")

        elif qName == "GMB":
            self.q1_1.Enable(True)
            self.q1_1.SetLabel("lb")
            self.q1_2.Enable(True)
            self.q1_2.SetValue("0.0")

            self.q2_1.Enable(True)
            self.q2_1.SetLabel("gb")
            self.q2_2.Enable(True)
            self.q2_2.SetValue("0.0")

            self.q3_1.Enable(False)
            self.q3_2.Enable(False)

        elif qName == "TM":
            self.q1_1.Enable(True)
            self.q1_1.SetLabel("t1")
            self.q1_2.Enable(True)
            self.q1_2.SetValue("0.0")

            self.q2_1.Enable(True)
            self.q2_1.SetLabel("t2")
            self.q2_2.Enable(True)
            self.q2_2.SetValue("0.0")

            self.q3_1.Enable(False)
            self.q3_2.Enable(False)

        elif qName == "TRI":
            self.q1_1.Enable(True)
            self.q1_1.SetLabel("loc")
            self.q1_2.Enable(True)
            # (double assignment is redundant but harmless)
            points = points = float(self.points_2.GetValue())
            self.q1_2.SetValue(str(points / 2.))

            self.q2_1.Enable(True)
            self.q2_1.SetLabel("lHi")
            self.q2_2.Enable(True)
            self.q2_2.SetValue("0.0")

            self.q3_1.Enable(True)
            self.q3_1.SetLabel("rHi")
            self.q3_2.Enable(True)
            self.q3_2.SetValue("0.0")

        elif qName == "JMOD":
            self.q1_1.Enable(True)
            self.q1_1.SetLabel("off")
            self.q1_2.Enable(True)
            self.q1_2.SetValue("0.0")

            self.q2_1.Enable(True)
            self.q2_1.SetLabel("j (Hz)")
            self.q2_2.Enable(True)
            self.q2_2.SetValue("0.0")

            self.q3_1.Enable(True)
            self.q3_1.SetLabel("lb (Hz)")
            self.q3_2.Enable(True)
            self.q3_2.SetValue("0.0")

    def OnDraw(self, event):
        """Collect all field values and ask the parent frame to plot."""
        qName = apod_list[self.qName2.GetCurrentSelection()]
        q1 = float(self.q1_2.GetValue())
        q2 = float(self.q2_2.GetValue())
        q3 = float(self.q3_2.GetValue())

        c = float(self.c2.GetValue())
        start = float(self.start_2.GetValue())
        size = float(self.size_2.GetValue())

        inv = self.inv.GetValue()
        use_size = self.use_size.GetValue()

        points = float(self.points_2.GetValue())
        sw = float(self.sw_2.GetValue())

        self.parent.ApplyApod(qName, q1, q2, q3, c, start, size, inv, use_size,
                              points, sw)

    def OnClear(self, event):
        """Clear the parent frame's plot axes."""
        self.parent.ClearFigure()
class CanvasFrame(wx.Frame):
    """Main window: a matplotlib canvas plus the parameter panel."""

    def __init__(self):
        wx.Frame.__init__(self, None, -1, 'Apodization Viewer')
        # NOTE(review): wx.NamedColor is a very old wx API — presumably
        # NamedColour/Colour on modern wxPython; verify against the wx
        # version this targets.
        self.SetBackgroundColour(wx.NamedColor("WHITE"))

        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.params = ParameterPanel(self)

        self.toolbar = NavigationToolbar2Wx(self.canvas)
        self.toolbar.Realize()

        # layout: canvas over toolbar on the right, parameters on the left
        fsizer = wx.BoxSizer(wx.VERTICAL)
        fsizer.Add(self.canvas, 0, wx.EXPAND)
        fsizer.Add(self.toolbar, 0, wx.EXPAND)

        self.sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer.Add(self.params, 0, wx.EXPAND)
        self.sizer.Add(fsizer, 0, wx.EXPAND)
        self.SetSizer(self.sizer)
        self.Fit()

    def OnPaint(self, event):
        """Redraw the matplotlib canvas."""
        self.canvas.draw()

    def ClearFigure(self):
        """Clear the plot axes and repaint."""
        self.axes.cla()
        self.OnPaint(-1)

    def ApplyApod(self, qName, q1, q2, q3, c, start, size, inv, use_size,
                  points, sw):
        """Simulate a unit NMRPipe data set of ``points`` points with
        spectral width ``sw``, apply the ``qName`` apodization with the given
        parameters via nmrglue, and plot the resulting window function.
        """
        # create the dictionary
        dic = ng.fileiobase.create_blank_udic(1)
        dic[0]["sw"] = sw
        dic[0]["size"] = points

        # create the data
        data = np.ones(points, dtype="complex")

        # convert to NMRPipe format
        C = ng.convert.converter()
        C.from_universal(dic, data)
        pdic, pdata = C.to_pipe()

        if use_size == True:
            tsize = size
        else:
            tsize = 'default'

        null, apod_data = ng.pipe_proc.apod(pdic, pdata, qName=qName,
                                            q1=q1, q2=q2, q3=q3, c=c, inv=inv, size=tsize, start=start)

        # draw the window
        #self.axes.cla()
        self.axes.plot(apod_data)
        self.OnPaint(-1)
class App(wx.App):
    """wx application wrapper that shows the viewer frame."""

    def OnInit(self):
        'Create the main window and insert the custom frame'
        frame = CanvasFrame()
        frame.Show(True)
        return True

# Instantiate the application and enter the wx event loop.
app = App(0)
app.MainLoop()
| bsd-3-clause |
TheCoSMoCompany/biopredyn | Prototype/python/biopredyn/biopredynCL.py | 1 | 3886 | #!/usr/bin/env python
# coding=utf-8
## @package biopredyn
## Copyright: [2012-2019] Cosmo Tech, All Rights Reserved
## License: BSD 3-Clause
import sys
import getopt
import textwrap
import libsbml
import libsedml
import libnuml
from biopredyn import model, workflow, result, resources
import matplotlib.pyplot as plt
COMMAND_SYNTAX_MESSAGE = 'python biopredynCL.py [options]'
HELP_MESSAGE = "This program is a prototype for the BioPreDyn software suite developed within the scope of the BioPreDyn FP7 project; it applies an analysis pattern encoded as a SEDML file to a SBML model defining a biological system."
# Optional parameters.
HELP_OPTION = {
"-h, --help" : [ "Display this help message."],
"--sbml" : [ "Open the input file as an SBML model; SBML compliance " +
"will be checked."],
"--sedml" : [ "Open the input SED-ML model file, execute its tasks " +
"using the libSBMLSim library, process its graphical " +
"outputs and display them."],
"--numl" : [ "Open the input NuML result file, import it in a Result " +
"object and plot its content."],
"-o, --output" : [ "Write the result of the SEDML experiment in the input " +
".csv ot .xml file."],
"--csv" : [ "Open the input CSV result file, import it in a Result " +
"object and plot its content."]
}
HELP_KEYWORD_SIZE = 16 # Left column
HELP_WRAP_SIZE = 79 # Total. 79 is windows limit
## Display help information.
def print_help():
    """Print the general help message, the command syntax and the list of
    available options (from the module-level HELP_* constants)."""
    # General help message
    print(" ")
    lines = textwrap.wrap(HELP_MESSAGE, HELP_WRAP_SIZE)
    for line in lines:
        print(line)
    print(" ")
    # Command syntax
    print("Usage: ")
    print(COMMAND_SYNTAX_MESSAGE)
    print(" ")
    print("List of available options: ")
    # Optional arguments
    for arg in HELP_OPTION:
        print_help_argument(arg, HELP_OPTION[arg])
## Display help information for the input option.
# @param arg Name of the help option to be displayed.
# @param listhelplines Information about the help option being displayed.
def print_help_argument(arg, listhelplines):
    """Print one option's help, with the option name in a left column of
    HELP_KEYWORD_SIZE characters and the wrapped text alongside it."""
    firstLine = True
    # Go through the list of help lines
    for helpline in listhelplines:
        lines = textwrap.wrap(helpline, HELP_WRAP_SIZE - HELP_KEYWORD_SIZE)
        for line in lines:
            # First line: Print arg name.
            if firstLine:
                print((arg).ljust(HELP_KEYWORD_SIZE) + line)
                firstLine = False
            else:
                print(''.ljust(HELP_KEYWORD_SIZE) + line)
    print("")
# main
try:
    opts, args = getopt.getopt(sys.argv[1:], 'o:', [
        'help', 'sbml=', 'sedml=', 'numl=', 'output=', 'csv='])
except getopt.error as msg:
    print( COMMAND_SYNTAX_MESSAGE )
    print( "Type biopredynCL.py --help for more information" )
    print( msg )
    sys.exit(2)
output = None
# First pass over the options: handle --help and record the output file.
for o, a in opts:
    if o in ("--help", "-h"):
        print_help()
        sys.exit(0)
    elif o in ("--output", "-o"):
        output = a
# Installing resource manager
manager = resources.ResourceManager()
# Second pass: dispatch on the kind of input file provided.
for o, a in opts:
    if o == "--sbml":
        # Open the SBML model and run its compliance check.
        model = model.Model(manager, source=a, idf='model_0')
        model.check()
    elif o == "--sedml":
        # Run the SED-ML workflow and process its outputs.
        flow = workflow.WorkFlow(manager, source=a)
        flow.run_tasks()
        flow.process_outputs(test=False, filename=output)
    elif o == "--numl":
        # Import a NuML result file and plot every non-time series.
        res = result.TimeSeries()
        res.import_from_numl_file(a, manager)
        plt.figure("numl_test")
        for i in res.get_result():
            if str.lower(i) != "time":
                plt.plot(res.get_time_steps(), res.get_quantities_per_species(i))
        plt.legend()
        plt.show()
    elif o == "--csv":
        # Import a CSV result file and plot every non-time series.
        res = result.TimeSeries()
        res.import_from_csv_file(a, manager)
        values = res.get_result()
        plt.figure("csv_test")
        for i in res.get_result():
            if str.lower(i) != "time":
                plt.plot(res.get_time_steps(), res.get_quantities_per_species(i))
        plt.legend()
        plt.show()
BrianGasberg/filterpy | filterpy/kalman/tests/test_mmae.py | 1 | 5136 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy.random as random
import numpy as np
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter, MMAEFilterBank
from numpy import array
from filterpy.common import Q_discrete_white_noise
import matplotlib.pyplot as plt
from numpy.random import randn
from math import sin, cos, radians
DO_PLOT = False
class NoisySensor(object):
    """Position sensor that corrupts the true position with Gaussian noise."""

    def __init__(self, noise_factor=1):
        # Standard deviation applied to the unit Gaussian noise samples.
        self.noise_factor = noise_factor

    def sense(self, pos):
        """Return a noisy (x, y) measurement of the true position ``pos``."""
        measured_x = pos[0] + randn() * self.noise_factor
        measured_y = pos[1] + randn() * self.noise_factor
        return (measured_x, measured_y)
def angle_between(x, y):
    """Return the signed smallest rotation (degrees) taking heading x to y."""
    candidates = (y - x, y - x + 360, y - x - 360)
    return min(candidates, key=abs)
class ManeuveringTarget(object):
    """Simulated 2-D target that ramps toward commanded speed and heading."""

    def __init__(self, x0, y0, v0, heading):
        # Current kinematic state (heading in compass degrees).
        self.x = x0
        self.y = y0
        self.vel = v0
        self.hdg = heading

        # Commanded values plus per-step increments and remaining steps.
        self.cmd_vel = v0
        self.cmd_hdg = heading
        self.vel_step = 0
        self.hdg_step = 0
        self.vel_delta = 0
        self.hdg_delta = 0

    def update(self):
        """Advance one time step; return the new (x, y) position."""
        # Convert compass heading (0 = north) to math-convention angle.
        heading_rad = radians(90 - self.hdg)
        self.x += self.vel * cos(heading_rad)
        self.y += self.vel * sin(heading_rad)

        # Ramp heading and speed toward their commanded values.
        if self.hdg_step > 0:
            self.hdg_step -= 1
            self.hdg += self.hdg_delta

        if self.vel_step > 0:
            self.vel_step -= 1
            self.vel += self.vel_delta
        return (self.x, self.y)

    def set_commanded_heading(self, hdg_degrees, steps):
        """Start turning toward ``hdg_degrees`` spread over ``steps`` updates."""
        self.cmd_hdg = hdg_degrees
        self.hdg_delta = angle_between(self.cmd_hdg,
                                       self.hdg) / steps
        self.hdg_step = steps if abs(self.hdg_delta) > 0 else 0

    def set_commanded_speed(self, speed, steps):
        """Start changing speed toward ``speed`` spread over ``steps`` updates."""
        self.cmd_vel = speed
        self.vel_delta = (self.cmd_vel - self.vel) / steps
        self.vel_step = steps if abs(self.vel_delta) > 0 else 0
def make_cv_filter(dt, noise_factor):
    """Build a constant-velocity Kalman filter (state: [position, velocity]),
    with measurement noise scaled by ``noise_factor``."""
    cvfilter = KalmanFilter(dim_x = 2, dim_z=1)
    cvfilter.x = array([0., 0.])
    cvfilter.P *= 3
    cvfilter.R *= noise_factor**2
    # Newtonian state transition over one time step dt.
    cvfilter.F = array([[1, dt],
                        [0, 1]], dtype=float)
    # We only measure position.
    cvfilter.H = array([[1, 0]], dtype=float)
    cvfilter.Q = Q_discrete_white_noise(dim=2, dt=dt, var=0.02)
    return cvfilter
def make_ca_filter(dt, noise_factor):
    """Build a constant-acceleration Kalman filter (state: [pos, vel, acc]),
    with measurement noise scaled by ``noise_factor``."""
    cafilter = KalmanFilter(dim_x=3, dim_z=1)
    cafilter.x = array([0., 0., 0.])
    cafilter.P *= 3
    cafilter.R *= noise_factor**2
    cafilter.Q = Q_discrete_white_noise(dim=3, dt=dt, var=0.02)
    # Newtonian state transition with acceleration term.
    cafilter.F = array([[1, dt, 0.5*dt*dt],
                        [0, 1,         dt],
                        [0, 0,          1]], dtype=float)
    # We only measure position.
    cafilter.H = array([[1, 0, 0]], dtype=float)
    return cafilter
def generate_data(steady_count, noise_factor):
    """Simulate a target that flies straight, then turns and accelerates.

    Returns ``(pos, zs)``: the true (N, 2) positions and the corresponding
    noisy measurements from a ``NoisySensor``.
    """
    t = ManeuveringTarget(x0=0, y0=0, v0=0.3, heading=0)
    xs = []
    ys = []

    # Initial straight-line segment.
    for i in range(30):
        x, y = t.update()
        xs.append(x)
        ys.append(y)

    # Command a turn and a speed change, then run the steady segment
    # during which both ramps complete.
    t.set_commanded_heading(310, 25)
    t.set_commanded_speed(1, 15)

    for i in range(steady_count):
        x, y = t.update()
        xs.append(x)
        ys.append(y)

    ns = NoisySensor(noise_factor=noise_factor)
    pos = array(list(zip(xs, ys)))
    zs = array([ns.sense(p) for p in pos])
    return pos, zs
def test_MMAE2():
    """Run an MMAE bank of a CV and a CA filter on simulated maneuvering
    data and (optionally) plot the estimates and model probabilities."""
    dt = 0.1
    pos, zs = generate_data(120, noise_factor=0.6)
    z_xs = zs[:, 0]
    t = np.arange(0, len(z_xs) * dt, dt)

    dt = 0.1
    ca = make_ca_filter(dt, noise_factor=0.6)
    # Build the "CV" filter from the CA template so both share the same
    # 3-state layout, then zero out its acceleration dynamics.
    cv = make_ca_filter(dt, noise_factor=0.6)
    cv.F[:,2] = 0 # remove acceleration term
    cv.P[2,2] = 0
    cv.Q[2,2] = 0

    filters = [cv, ca]

    H_ca = np.array([[1., 0., 0.],
                     [0., 1., 0.],
                     [0., 0., 1.]])

    # Start with equal prior probability for each motion model.
    bank = MMAEFilterBank(filters, (0.5, 0.5), dim_x=3, H=(1., 1.))

    xs, probs = [], []
    cvxs, caxs = [], []
    for i, z in enumerate(z_xs):
        bank.predict()
        bank.update(z)
        xs.append(bank.x[0])
        cvxs.append(cv.x[0])
        caxs.append(ca.x[0])
        print(i, cv.likelihood(z), ca.likelihood(z), bank.p)

        #print('p', bank.p)
        probs.append(bank.p[0] / bank.p[1])

    if DO_PLOT:
        plt.subplot(121)
        plt.plot(xs)
        plt.plot(pos[:, 0])
        plt.subplot(122)
        plt.plot(probs)
        plt.title('probability ratio p(cv)/p(ca)')

        plt.figure()
        plt.plot(cvxs, label='CV')
        plt.plot(caxs, label='CA')
        plt.plot(pos[:, 0])
        plt.legend()

        plt.figure()
        plt.plot(xs)
        plt.plot(pos[:, 0])
if __name__ == '__main__':
    # Interactive run: enable plotting before executing the test.
    DO_PLOT = True
    test_MMAE2()
lucarebuffi/OASYS1 | oasys/widgets/error_profile/ow_abstract_dabam_height_profile.py | 1 | 53621 | import os, sys
import time
import numpy
import threading
from PyQt5.QtCore import QRect, Qt
from PyQt5.QtWidgets import QApplication, QMessageBox, QScrollArea, QTableWidget, QTableWidgetItem, QHeaderView, QAbstractItemView, QWidget, QLabel, QSizePolicy
from PyQt5.QtGui import QTextCursor,QFont, QPalette, QColor, QPainter, QBrush, QPen, QPixmap
from matplotlib import cm
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from orangewidget import gui, widget
from orangewidget.settings import Setting
from oasys.widgets.widget import OWWidget
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import EmittingStream
try:
from mpl_toolkits.mplot3d import Axes3D # necessario per caricare i plot 3D
except:
pass
from srxraylib.metrology import profiles_simulation, dabam
class OWAbstractDabamHeightProfile(OWWidget):
want_main_area = 1
want_control_area = 1
MAX_WIDTH = 1320
MAX_HEIGHT = 700
IMAGE_WIDTH = 860
IMAGE_HEIGHT = 645
CONTROL_AREA_WIDTH = 405
TABS_AREA_HEIGHT = 618
xx = None
yy = None
zz = None
entry_number = Setting(1)
shape=Setting(0)
slope_error_from = Setting(0.0)
slope_error_to = Setting(1.5)
dimension_y_from = Setting(0.0)
dimension_y_to = Setting(2.0)
use_undetrended = Setting(0)
step_x = Setting(0.01)
dimension_x = Setting(0.1)
kind_of_profile_x = Setting(3)
power_law_exponent_beta_x = Setting(3.0)
correlation_length_x = Setting(0.3)
error_type_x = Setting(profiles_simulation.FIGURE_ERROR)
rms_x = Setting(0.1)
montecarlo_seed_x = Setting(8787)
heigth_profile_1D_file_name_x= Setting("mirror_1D_x.dat")
delimiter_x = Setting(0)
conversion_factor_x_x = Setting(0.001)
conversion_factor_x_y = Setting(1e-6)
center_x = Setting(1)
modify_x = Setting(0)
new_length_x = Setting(0.201)
filler_value_x = Setting(0.0)
renormalize_x = Setting(0)
center_y = Setting(1)
modify_y = Setting(0)
new_length_y = Setting(0.2)
filler_value_y = Setting(0.0)
renormalize_y = Setting(1)
error_type_y = Setting(0)
rms_y = Setting(0.9)
dabam_profile_index = Setting(1)
heigth_profile_file_name = Setting('mirror.hdf5')
tab=[]
plotted = False
def __init__(self):
super().__init__()
self.runaction = widget.OWAction("Calculate Height Profile", self)
self.runaction.triggered.connect(self.calculate_heigth_profile_ni)
self.addAction(self.runaction)
self.runaction = widget.OWAction("Generate Height Profile File", self)
self.runaction.triggered.connect(self.generate_heigth_profile_file_ni)
self.addAction(self.runaction)
geom = QApplication.desktop().availableGeometry()
self.setGeometry(QRect(round(geom.width() * 0.05),
round(geom.height() * 0.05),
round(min(geom.width() * 0.98, self.MAX_WIDTH)),
round(min(geom.height() * 0.95, self.MAX_HEIGHT))))
self.setMaximumHeight(self.geometry().height())
self.setMaximumWidth(self.geometry().width())
# DABAM INITIALIZATION
self.server = dabam.dabam()
self.server.set_input_silent(True)
gui.separator(self.controlArea)
button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Calculate Height\nProfile", callback=self.calculate_heigth_profile)
button.setFixedHeight(45)
button = gui.button(button_box, self, "Generate Height\nProfile File", callback=self.generate_heigth_profile_file)
font = QFont(button.font())
font.setBold(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button.setFixedWidth(150)
button = gui.button(button_box, self, "Reset Fields", callback=self.call_reset_settings)
font = QFont(button.font())
font.setItalic(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
gui.separator(self.controlArea)
tabs_setting = oasysgui.tabWidget(self.controlArea)
tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
tab_input = oasysgui.createTabPage(tabs_setting, "DABAM Search Setting")
tab_gener = oasysgui.createTabPage(tabs_setting, "DABAM Generation Setting")
tab_out = oasysgui.createTabPage(tabs_setting, "Output")
tab_usa = oasysgui.createTabPage(tabs_setting, "Use of the Widget")
tab_usa.setStyleSheet("background-color: white;")
tabs_dabam = oasysgui.tabWidget(tab_gener)
tab_length = oasysgui.createTabPage(tabs_dabam, "DABAM Profile")
tab_width = oasysgui.createTabPage(tabs_dabam, "Width")
usage_box = oasysgui.widgetBox(tab_usa, "", addSpace=True, orientation="horizontal")
label = QLabel("")
label.setAlignment(Qt.AlignCenter)
label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
label.setPixmap(QPixmap(self.get_usage_path()))
usage_box.layout().addWidget(label)
manual_box = oasysgui.widgetBox(tab_input, "Manual Entry", addSpace=True, orientation="vertical")
oasysgui.lineEdit(manual_box, self, "entry_number", "Entry Number",
labelWidth=300, valueType=int, orientation="horizontal")
gui.separator(manual_box)
button_box = oasysgui.widgetBox(manual_box, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Retrieve Profile", callback=self.retrieve_profile)
button.setFixedHeight(35)
button = gui.button(button_box, self, "Send Profile", callback=self.send_profile)
button.setFixedHeight(35)
input_box = oasysgui.widgetBox(tab_input, "Search Parameters", addSpace=True, orientation="vertical")
gui.comboBox(input_box, self, "shape", label="Mirror Shape", labelWidth=300,
items=["All", "Plane", "Cylindrical", "Elliptical", "Toroidal", "Spherical"],
sendSelectedValue=False, orientation="horizontal")
gui.separator(input_box)
input_box_1 = oasysgui.widgetBox(input_box, "", addSpace=True, orientation="horizontal")
oasysgui.lineEdit(input_box_1, self, "slope_error_from", "Slope Error From (" + u"\u03BC" + "rad)",
labelWidth=150, valueType=float, orientation="horizontal")
oasysgui.lineEdit(input_box_1, self, "slope_error_to", "To (" + u"\u03BC" + "rad)",
labelWidth=60, valueType=float, orientation="horizontal")
input_box_2 = oasysgui.widgetBox(input_box, "", addSpace=True, orientation="horizontal")
self.le_dimension_y_from = oasysgui.lineEdit(input_box_2, self, "dimension_y_from", "Mirror Length From",
labelWidth=150, valueType=float, orientation="horizontal")
self.le_dimension_y_to = oasysgui.lineEdit(input_box_2, self, "dimension_y_to", "To",
labelWidth=60, valueType=float, orientation="horizontal")
table_box = oasysgui.widgetBox(tab_input, "Search Results", addSpace=True, orientation="vertical", height=250)
self.overlay_search = Overlay(table_box, self.search_profiles)
self.overlay_search.hide()
button = gui.button(input_box, self, "Search", callback=self.overlay_search.show)
button.setFixedHeight(35)
button.setFixedWidth(self.CONTROL_AREA_WIDTH-35)
gui.comboBox(table_box, self, "use_undetrended", label="Use Undetrended Profile", labelWidth=300,
items=["No", "Yes"], callback=self.table_item_clicked, sendSelectedValue=False, orientation="horizontal")
gui.separator(table_box)
self.scrollarea = QScrollArea()
self.scrollarea.setMinimumWidth(self.CONTROL_AREA_WIDTH-35)
table_box.layout().addWidget(self.scrollarea, alignment=Qt.AlignHCenter)
self.table = QTableWidget(1, 5)
self.table.setStyleSheet("background-color: #FBFBFB;")
self.table.setAlternatingRowColors(True)
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.table.verticalHeader().setVisible(False)
self.table.setColumnWidth(0, 40)
self.table.setColumnWidth(1, 70)
self.table.setColumnWidth(2, 70)
self.table.setColumnWidth(3, 85)
self.table.setColumnWidth(4, 80)
self.table.resizeRowsToContents()
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.itemClicked.connect(self.table_item_clicked)
self.scrollarea.setWidget(self.table)
self.scrollarea.setWidgetResizable(1)
##----------------------------------
output_profile_box = oasysgui.widgetBox(tab_length, "Surface Generation Parameters", addSpace=True, orientation="vertical", height=320)
gui.comboBox(output_profile_box, self, "center_y", label="Center Profile in the middle of O.E.", labelWidth=300,
items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
gui.separator(output_profile_box)
gui.comboBox(output_profile_box, self, "modify_y", label="Modify Length?", labelWidth=150,
items=["No", "Rescale to new length", "Fit to new length (fill or cut)"], callback=self.set_ModifyY, sendSelectedValue=False, orientation="horizontal")
self.modify_box_1 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.modify_box_2 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.le_new_length_1 = oasysgui.lineEdit(self.modify_box_2, self, "new_length_y", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
self.modify_box_3 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.le_new_length_2 = oasysgui.lineEdit(self.modify_box_3, self, "new_length_y", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
oasysgui.lineEdit(self.modify_box_3, self, "filler_value_y", "Filler Value (if new length > profile length) [nm]", labelWidth=300, valueType=float, orientation="horizontal")
self.set_ModifyY()
gui.comboBox(output_profile_box, self, "renormalize_y", label="Renormalize Length Profile to different RMS", labelWidth=300,
items=["No", "Yes"], callback=self.set_RenormalizeY, sendSelectedValue=False, orientation="horizontal")
self.output_profile_box_1 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.output_profile_box_2 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
gui.comboBox(self.output_profile_box_1, self, "error_type_y", label="Normalization to", labelWidth=270,
items=["Figure Error (nm)", "Slope Error (" + u"\u03BC" + "rad)"],
sendSelectedValue=False, orientation="horizontal")
oasysgui.lineEdit(self.output_profile_box_1, self, "rms_y", "Rms Value",
labelWidth=300, valueType=float, orientation="horizontal")
self.set_RenormalizeY()
##----------------------------------
input_box_w = oasysgui.widgetBox(tab_width, "Calculation Parameters", addSpace=True, orientation="vertical")
gui.comboBox(input_box_w, self, "kind_of_profile_x", label="Kind of Profile", labelWidth=260,
items=["Fractal", "Gaussian", "User File", "None"],
callback=self.set_KindOfProfileX, sendSelectedValue=False, orientation="horizontal")
gui.separator(input_box_w)
self.kind_of_profile_x_box_1 = oasysgui.widgetBox(input_box_w, "", addSpace=True, orientation="vertical", height=350)
self.le_dimension_x = oasysgui.lineEdit(self.kind_of_profile_x_box_1, self, "dimension_x", "Dimensions",
labelWidth=260, valueType=float, orientation="horizontal")
self.le_step_x = oasysgui.lineEdit(self.kind_of_profile_x_box_1, self, "step_x", "Step",
labelWidth=260, valueType=float, orientation="horizontal")
self.kind_of_profile_x_box_1_0 = oasysgui.widgetBox(self.kind_of_profile_x_box_1, "", addSpace=True, orientation="vertical")
oasysgui.lineEdit(self.kind_of_profile_x_box_1_0, self, "montecarlo_seed_x", "Monte Carlo initial seed",
labelWidth=260, valueType=int, orientation="horizontal")
self.kind_of_profile_x_box_1_1 = oasysgui.widgetBox(self.kind_of_profile_x_box_1, "", addSpace=True, orientation="vertical")
oasysgui.lineEdit(self.kind_of_profile_x_box_1_1, self, "power_law_exponent_beta_x", "Beta Value",
labelWidth=260, valueType=float, orientation="horizontal")
self.kind_of_profile_x_box_1_2 = oasysgui.widgetBox(self.kind_of_profile_x_box_1, "", addSpace=True, orientation="vertical")
self.le_correlation_length_x = oasysgui.lineEdit(self.kind_of_profile_x_box_1_2, self, "correlation_length_x", "Correlation Length",
labelWidth=260, valueType=float, orientation="horizontal")
gui.separator(self.kind_of_profile_x_box_1)
self.kind_of_profile_x_box_1_3 = oasysgui.widgetBox(self.kind_of_profile_x_box_1, "", addSpace=True, orientation="vertical")
gui.comboBox(self.kind_of_profile_x_box_1_3, self, "error_type_x", label="Normalization to", labelWidth=270,
items=["Figure Error (nm)", "Slope Error (" + "\u03BC" + "rad)"],
sendSelectedValue=False, orientation="horizontal")
oasysgui.lineEdit(self.kind_of_profile_x_box_1_3, self, "rms_x", "Rms Value",
labelWidth=260, valueType=float, orientation="horizontal")
self.kind_of_profile_x_box_2 = oasysgui.widgetBox(input_box_w, "", addSpace=True, orientation="vertical", height=390)
select_file_box_1 = oasysgui.widgetBox(self.kind_of_profile_x_box_2, "", addSpace=True, orientation="horizontal")
self.le_heigth_profile_1D_file_name_x = oasysgui.lineEdit(select_file_box_1, self, "heigth_profile_1D_file_name_x", "1D Profile File Name",
labelWidth=120, valueType=str, orientation="horizontal")
gui.button(select_file_box_1, self, "...", callback=self.selectFile1D_X)
gui.comboBox(self.kind_of_profile_x_box_2 , self, "delimiter_x", label="Fields delimiter", labelWidth=260,
items=["Spaces", "Tabs"], sendSelectedValue=False, orientation="horizontal")
self.le_conversion_factor_x_x = oasysgui.lineEdit(self.kind_of_profile_x_box_2, self, "conversion_factor_x_x", "Conversion from file to meters\n(Abscissa)",
labelWidth=260,
valueType=float, orientation="horizontal")
self.le_conversion_factor_x_y = oasysgui.lineEdit(self.kind_of_profile_x_box_2, self, "conversion_factor_x_y", "Conversion from file to meters\n(Height Profile Values)",
labelWidth=260,
valueType=float, orientation="horizontal")
gui.separator(self.kind_of_profile_x_box_2)
gui.comboBox(self.kind_of_profile_x_box_2, self, "center_x", label="Center Profile in the middle of O.E.", labelWidth=300,
items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
gui.comboBox(self.kind_of_profile_x_box_2, self, "modify_x", label="Modify Length?", labelWidth=200,
items=["No", "Rescale to new length", "Fit to new length (fill or cut)"], callback=self.set_ModifyX, sendSelectedValue=False, orientation="horizontal")
self.modify_box_1_1 = oasysgui.widgetBox(self.kind_of_profile_x_box_2, "", addSpace=False, orientation="vertical", height=70)
self.modify_box_1_2 = oasysgui.widgetBox(self.kind_of_profile_x_box_2, "", addSpace=False, orientation="vertical", height=70)
self.le_new_length_x_1 = oasysgui.lineEdit(self.modify_box_1_2, self, "new_length_x", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
self.modify_box_1_3 = oasysgui.widgetBox(self.kind_of_profile_x_box_2, "", addSpace=False, orientation="vertical", height=70)
self.le_new_length_x_2 = oasysgui.lineEdit(self.modify_box_1_3, self, "new_length_x", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
oasysgui.lineEdit(self.modify_box_1_3, self, "filler_value_x", "Filler Value (if new length > profile length) [nm]", labelWidth=300, valueType=float, orientation="horizontal")
self.set_ModifyX()
gui.comboBox(self.kind_of_profile_x_box_2, self, "renormalize_x", label="Renormalize to different RMS", labelWidth=260,
items=["No", "Yes"], callback=self.set_KindOfProfileX, sendSelectedValue=False, orientation="horizontal")
self.kind_of_profile_x_box_2_1 = oasysgui.widgetBox(self.kind_of_profile_x_box_2, "", addSpace=True, orientation="vertical")
gui.comboBox(self.kind_of_profile_x_box_2_1, self, "error_type_x", label="Normalization to", labelWidth=270,
items=["Figure Error (nm)", "Slope Error (" + "\u03BC" + "rad)"],
sendSelectedValue=False, orientation="horizontal")
oasysgui.lineEdit(self.kind_of_profile_x_box_2_1, self, "rms_x", "Rms Value",
labelWidth=260, valueType=float, orientation="horizontal")
self.set_KindOfProfileX()
##----------------------------------
output_box = oasysgui.widgetBox(tab_gener, "Outputs", addSpace=True, orientation="vertical")
select_file_box = oasysgui.widgetBox(output_box, "", addSpace=True, orientation="horizontal")
self.le_heigth_profile_file_name = oasysgui.lineEdit(select_file_box, self, "heigth_profile_file_name", "Output File Name",
labelWidth=120, valueType=str, orientation="horizontal")
gui.button(select_file_box, self, "...", callback=self.selectFile)
self.shadow_output = oasysgui.textArea(height=400)
out_box = oasysgui.widgetBox(tab_out, "System Output", addSpace=True, orientation="horizontal", height=500)
out_box.layout().addWidget(self.shadow_output)
gui.rubber(self.controlArea)
self.initializeTabs()
gui.rubber(self.mainArea)
self.overlay_search.raise_()
def resizeEvent(self, event):
    """Keep the busy-search overlay sized to the control area on every widget resize."""
    overlay_width = self.CONTROL_AREA_WIDTH - 15
    self.overlay_search.resize(overlay_width, 290)
    event.accept()
def get_usage_path(self):
    """No usage/help page is provided for this widget."""
    return None
@classmethod
def get_dabam_output(cls):
    """Describe the widget's output channel: a 2-column numpy array holding the DABAM 1D profile."""
    channel_name = "DABAM 1D Profile"
    return {"name": channel_name,
            "type": numpy.ndarray,
            "doc": channel_name,
            "id": channel_name}
def after_change_workspace_units(self):
    """Refresh every unit-dependent label after a workspace-units change.

    DABAM data is always in SI, so the conversion factor is pinned to 1.0 and
    all axis/field labels are rewritten with a "[m]" suffix.
    """
    self.si_to_user_units = 1.0
    # Column headers of the search-results table (length in m, errors in nm / urad).
    self.horHeaders = ["Entry", "Shape", "Length\n[m]", "Heights St.Dev.\n[nm]", "Slopes St.Dev.\n[" + u"\u03BC" + "rad]"]
    self.table.setHorizontalHeaderLabels(self.horHeaders)
    self.plot_canvas[0].setGraphXLabel("Y [m]")
    self.plot_canvas[1].setGraphXLabel("Y [m]")
    self.axis.set_xlabel("X [m]")
    self.axis.set_ylabel("Y [m]")
    # For each oasysgui.lineEdit, itemAt(0) of the parent layout is its QLabel;
    # append the unit suffix to the label text in place.
    label = self.le_dimension_y_from.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    label = self.le_dimension_y_to.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    label = self.le_new_length_1.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    label = self.le_new_length_2.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    label = self.le_dimension_x.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    label = self.le_step_x.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    label = self.le_correlation_length_x.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    # Conversion-factor labels are reset to fixed text (no suffix appended).
    label = self.le_conversion_factor_x_x.parent().layout().itemAt(0).widget()
    label.setText("Conversion from file to meters\n(Abscissa)")
    label = self.le_conversion_factor_x_y.parent().layout().itemAt(0).widget()
    label.setText("Conversion from file to meters\n(Height Profile Values)")
    label = self.le_new_length_x_1.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
    label = self.le_new_length_x_2.parent().layout().itemAt(0).widget()
    label.setText(label.text() + " [m]")
def initializeTabs(self):
    """Build the main-area tab pages and the plot canvases shown in them.

    Pages: Info (text), four 1D plot canvases (heights, slopes, PSD, CSD, ACF)
    and a matplotlib 3D canvas for the generated 2D profile.
    """
    self.tabs = oasysgui.tabWidget(self.mainArea)
    self.tab = [oasysgui.createTabPage(self.tabs, "Info"),
                oasysgui.createTabPage(self.tabs, "Heights Profile"),
                oasysgui.createTabPage(self.tabs, "Slopes Profile"),
                oasysgui.createTabPage(self.tabs, "PSD Heights"),
                oasysgui.createTabPage(self.tabs, "CSD Heights"),
                oasysgui.createTabPage(self.tabs, "ACF"),
                oasysgui.createTabPage(self.tabs, "Generated 2D Profile"),
                ]
    for tab in self.tab:
        tab.setFixedHeight(self.IMAGE_HEIGHT)
        tab.setFixedWidth(self.IMAGE_WIDTH)
    # Canvases 0-4 are oasys 1D plot windows; canvas 5 is a matplotlib 3D canvas.
    self.plot_canvas = [None, None, None, None, None, None]
    self.plot_canvas[0] = oasysgui.plotWindow(roi=False, control=False, position=True)
    self.plot_canvas[0].setDefaultPlotLines(True)
    self.plot_canvas[0].setActiveCurveColor(color='blue')
    self.plot_canvas[0].setGraphYLabel("Z [nm]")
    self.plot_canvas[0].setGraphTitle("Heights Profile")
    self.plot_canvas[0].setInteractiveMode(mode='zoom')
    self.plot_canvas[1] = oasysgui.plotWindow(roi=False, control=False, position=True)
    self.plot_canvas[1].setDefaultPlotLines(True)
    self.plot_canvas[1].setActiveCurveColor(color='blue')
    self.plot_canvas[1].setGraphYLabel("Zp [$\mu$rad]")
    self.plot_canvas[1].setGraphTitle("Slopes Profile")
    self.plot_canvas[1].setInteractiveMode(mode='zoom')
    # PSD plot is log-log; frequency axis in m^-1.
    self.plot_canvas[2] = oasysgui.plotWindow(roi=False, control=False, position=True)
    self.plot_canvas[2].setDefaultPlotLines(True)
    self.plot_canvas[2].setActiveCurveColor(color='blue')
    self.plot_canvas[2].setGraphXLabel("f [m^-1]")
    self.plot_canvas[2].setGraphYLabel("PSD [m^3]")
    self.plot_canvas[2].setGraphTitle("Power Spectral Density of Heights Profile")
    self.plot_canvas[2].setInteractiveMode(mode='zoom')
    self.plot_canvas[2].setXAxisLogarithmic(True)
    self.plot_canvas[2].setYAxisLogarithmic(True)
    # CSD plot is log on the frequency axis only.
    self.plot_canvas[3] = oasysgui.plotWindow(roi=False, control=False, position=True)
    self.plot_canvas[3].setDefaultPlotLines(True)
    self.plot_canvas[3].setActiveCurveColor(color='blue')
    self.plot_canvas[3].setGraphXLabel("f [m^-1]")
    self.plot_canvas[3].setGraphYLabel("CSD [m^3]")
    self.plot_canvas[3].setGraphTitle("Cumulative Spectral Density of Heights Profile")
    self.plot_canvas[3].setInteractiveMode(mode='zoom')
    self.plot_canvas[3].setXAxisLogarithmic(True)
    self.plot_canvas[4] = oasysgui.plotWindow(roi=False, control=False, position=True)
    self.plot_canvas[4].setDefaultPlotLines(True)
    self.plot_canvas[4].setActiveCurveColor(color='blue')
    self.plot_canvas[4].setGraphXLabel("Length [m]")
    self.plot_canvas[4].setGraphYLabel("ACF")
    self.plot_canvas[4].setGraphTitle("Autocovariance Function of Heights Profile")
    self.plot_canvas[4].setInteractiveMode(mode='zoom')
    self.figure = Figure(figsize=(self.IMAGE_HEIGHT, self.IMAGE_HEIGHT)) # QUADRATA!
    self.figure.patch.set_facecolor('white')
    self.axis = self.figure.add_subplot(111, projection='3d')
    self.axis.set_zlabel("Z [nm]")
    self.plot_canvas[5] = FigureCanvasQTAgg(self.figure)
    # Info page: read-only text area with the DABAM entry metadata.
    self.profileInfo = oasysgui.textArea(height=self.IMAGE_HEIGHT-5, width=400)
    profile_box = oasysgui.widgetBox(self.tab[0], "", addSpace=True, orientation="horizontal", height = self.IMAGE_HEIGHT, width=410)
    profile_box.layout().addWidget(self.profileInfo)
    # Canvas i goes on tab page i+1 (page 0 is the Info page).
    for index in range(0, 6):
        self.tab[index+1].layout().addWidget(self.plot_canvas[index])
    self.tabs.setCurrentIndex(1)
def plot_dabam_graph(self, plot_canvas_index, curve_name, x_values, y_values, xtitle, ytitle, color='blue', replace=True):
    """Draw (or add, when replace is False) a line curve on the chosen canvas and refresh it."""
    canvas = self.plot_canvas[plot_canvas_index]
    canvas.addCurve(x_values, y_values, curve_name, symbol='', color=color, replace=replace)  # '+', '^', ','
    canvas.setGraphXLabel(xtitle)
    canvas.setGraphYLabel(ytitle)
    canvas.replot()
def set_ModifyY(self):
    """Show only the length-modification sub-panel matching the current combo choice."""
    panels = (self.modify_box_1, self.modify_box_2, self.modify_box_3)
    for option, panel in enumerate(panels):
        panel.setVisible(self.modify_y == option)
def set_RenormalizeY(self):
    """Toggle the Y-renormalization parameter panels according to the combo choice."""
    panel_for_value = {1: self.output_profile_box_1, 0: self.output_profile_box_2}
    for value, panel in panel_for_value.items():
        panel.setVisible(self.renormalize_y == value)
def set_KindOfProfileX(self):
    """Show the widgets relevant to the selected width-profile mode.

    Modes: 0 = Fractal, 1 = Gaussian, 2 = User File, 3 = None.
    """
    kind = self.kind_of_profile_x
    is_stochastic = kind < 2  # fractal or gaussian
    self.kind_of_profile_x_box_1.setVisible(is_stochastic or kind == 3)
    self.kind_of_profile_x_box_1_0.setVisible(is_stochastic)
    self.kind_of_profile_x_box_1_1.setVisible(kind == 0)
    self.kind_of_profile_x_box_1_2.setVisible(kind == 1)
    self.kind_of_profile_x_box_1_3.setVisible(is_stochastic)
    self.kind_of_profile_x_box_2.setVisible(kind == 2)
    self.kind_of_profile_x_box_2_1.setVisible(kind == 2 and self.renormalize_x == 1)
def set_ModifyX(self):
    """Show only the width length-modification sub-panel for the current combo choice."""
    panels = (self.modify_box_1_1, self.modify_box_1_2, self.modify_box_1_3)
    for option, panel in enumerate(panels):
        panel.setVisible(self.modify_x == option)
def table_item_clicked(self):
    """Load the DABAM entry whose table row the user clicked.

    Ignores the click when nothing is selected, the table is empty, or the
    first cell has not been populated yet.
    """
    selection = self.table.selectionModel()
    if not selection.hasSelection():
        return
    if self.table.rowCount() == 0:
        return
    if self.table.item(0, 0) is None:
        return
    row = selection.selectedRows()[0].row()
    self.entry_number = int(self.table.item(row, 0).text())
    self.retrieve_profile()
def retrieve_profile(self):
    """Download the selected DABAM entry and refresh all diagnostic plots.

    Loads entry `self.entry_number` from the DABAM server, then updates the
    info page, heights/slopes plots (detrended or undetrended), the PSD with
    its fitted power law, the CSD and the autocovariance function. Any
    previously generated 2D surface is discarded.
    """
    try:
        if self.entry_number is None or self.entry_number <= 0:
            raise Exception("Entry number should be a strictly positive integer number")
        self.server.load(self.entry_number)
        self.profileInfo.setText(self.server.info_profiles())
        self.plot_canvas[0].setGraphTitle(
            "Heights Profile. St.Dev.=%.3f nm" % (self.server.stdev_profile_heights() * 1e9))
        self.plot_canvas[1].setGraphTitle(
            "Slopes Profile. St.Dev.=%.3f $\mu$rad" % (self.server.stdev_profile_slopes() * 1e6))
        # Heights in nm, slopes in urad; abscissas scaled to user units.
        if self.use_undetrended == 0:
            self.plot_dabam_graph(0, "heights_profile", self.si_to_user_units * self.server.y,
                                  1e9 * self.server.zHeights, "Y [" + self.workspace_units_label + "]", "Z [nm]")
            self.plot_dabam_graph(1, "slopes_profile", self.si_to_user_units * self.server.y, 1e6 * self.server.zSlopes,
                                  "Y [" + self.workspace_units_label + "]", "Zp [$\mu$rad]")
        else:
            self.plot_dabam_graph(0, "heights_profile", self.si_to_user_units * self.server.y,
                                  1e9 * self.server.zHeightsUndetrended, "Y [" + self.workspace_units_label + "]",
                                  "Z [nm]")
            self.plot_dabam_graph(1, "slopes_profile", self.si_to_user_units * self.server.y,
                                  1e6 * self.server.zSlopesUndetrended, "Y [" + self.workspace_units_label + "]",
                                  "Zp [$\mu$rad]")
        # Fitted power law of the heights PSD: f^pendent scaled by 10^shift,
        # highlighted in red over the fit interval [index_from:index_to].
        y = self.server.f ** (self.server.powerlaw["hgt_pendent"]) * 10 ** self.server.powerlaw["hgt_shift"]
        i0 = self.server.powerlaw["index_from"]
        i1 = self.server.powerlaw["index_to"]
        beta = -self.server.powerlaw["hgt_pendent"]
        self.plot_canvas[2].setGraphTitle(
            "Power Spectral Density of Heights Profile (beta=%.2f,Df=%.2f)" % (beta, (5 - beta) / 2))
        self.plot_dabam_graph(2, "psd_heights_2", self.server.f, self.server.psdHeights, "f [m^-1]", "PSD [m^3]")
        self.plot_dabam_graph(2, "psd_heights_1", self.server.f, y, "f [m^-1]", "PSD [m^3]", color='green',
                              replace=False)
        self.plot_dabam_graph(2, "psd_heights_3", self.server.f[i0:i1], y[i0:i1], "f [m^-1]", "PSD [m^3]", color='red',
                              replace=False)
        self.plot_dabam_graph(3, "csd", self.server.f, self.server.csd_heights(), "f [m^-1]", "CSD [m^3]")
        c1, c2, c3 = dabam.autocorrelationfunction(self.server.y, self.server.zHeights)
        self.plot_canvas[4].setGraphTitle(
            "Autocovariance Function of Heights Profile.\nAutocorrelation Length (ACF=0.5)=%.3f m" % (c3))
        self.plot_dabam_graph(4, "acf", c1[0:-1], c2, "Length [m]", "Heights Autocovariance")
        # surface error removal: a new DABAM entry invalidates any previously
        # generated 2D surface, so drop it and clear the 3D view.
        if not self.zz is None and not self.yy is None and not self.xx is None:
            self.xx = None
            self.yy = None
            self.zz = None
            self.axis.set_title("")
            self.axis.clear()
            self.plot_canvas[5].draw()
        # Leave the (now empty) 3D tab if it was the one being shown.
        if (self.tabs.currentIndex()==6): self.tabs.setCurrentIndex(1)
    except Exception as exception:
        QMessageBox.critical(self, "Error",
                             exception.args[0],
                             QMessageBox.Ok)
        if self.IS_DEVELOP: raise exception
def send_profile(self):
    """Emit the currently loaded DABAM heights profile as a (N, 2) array.

    Column 0 holds the abscissas (m), column 1 the detrended heights (m).
    Shows an error dialog when no profile has been loaded yet.
    """
    try:
        if self.server.y is None: raise Exception("No Profile Selected")
        abscissas = self.server.y
        profile = numpy.zeros((len(abscissas), 2))
        profile[:, 0] = abscissas
        profile[:, 1] = self.server.zHeights
        self.send("DABAM 1D Profile", profile)
    except Exception as exception:
        QMessageBox.critical(self, "Error",
                             exception.args[0],
                             QMessageBox.Ok)
        if self.IS_DEVELOP: raise exception
def search_profiles(self):
    """Query the DABAM metadata catalogue and fill the search-results table.

    Runs in a worker thread (started by the busy Overlay); the itemClicked
    handler is disconnected while the table is rebuilt so programmatic edits
    do not trigger profile loads, and reconnected when done.
    """
    reconnected = False
    try:
        self.table.itemClicked.disconnect(self.table_item_clicked)
        self.table.clear()
        row_count = self.table.rowCount()
        for n in range(0, row_count):
            self.table.removeRow(0)
        self.table.setHorizontalHeaderLabels(self.horHeaders)
        # Shape filter from the combo; slope errors converted urad -> rad,
        # lengths converted from user units to meters.
        profiles = dabam.dabam_summary_dictionary(surface=self.get_dabam_shape(),
                                                  slp_err_from=self.slope_error_from*1e-6,
                                                  slp_err_to=self.slope_error_to*1e-6,
                                                  length_from=self.dimension_y_from / self.si_to_user_units,
                                                  length_to=self.dimension_y_to / self.si_to_user_units)
        for index in range(0, len(profiles)):
            self.table.insertRow(0)
        for index in range(0, len(profiles)):
            table_item = QTableWidgetItem(str(profiles[index]["entry"]))
            table_item.setTextAlignment(Qt.AlignCenter)
            self.table.setItem(index, 0, table_item)
            table_item = QTableWidgetItem(str(profiles[index]["surface"]))
            table_item.setTextAlignment(Qt.AlignLeft)
            self.table.setItem(index, 1, table_item)
            # Length shown in user units; height error in nm; slope error in urad.
            table_item = QTableWidgetItem(str(numpy.round(profiles[index]["length"]*self.si_to_user_units, 3)))
            table_item.setTextAlignment(Qt.AlignRight)
            self.table.setItem(index, 2, table_item)
            table_item = QTableWidgetItem(str(numpy.round(profiles[index]["hgt_err"]*1e9, 3)))
            table_item.setTextAlignment(Qt.AlignRight)
            self.table.setItem(index, 3, table_item)
            table_item = QTableWidgetItem(str(numpy.round(profiles[index]["slp_err"]*1e6, 3)))
            table_item.setTextAlignment(Qt.AlignRight)
            self.table.setItem(index, 4, table_item)
        self.table.setHorizontalHeaderLabels(self.horHeaders)
        self.table.resizeRowsToContents()
        self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.table.itemClicked.connect(self.table_item_clicked)
        reconnected = True
        self.overlay_search.hide()
    except Exception as exception:
        # BUG FIX: the handler was disconnected at the top but never
        # reconnected on failure, leaving the table unresponsive to clicks
        # after any error. Reconnect here (guarded to avoid a double
        # connection if the failure happened after the success-path connect).
        if not reconnected:
            self.table.itemClicked.connect(self.table_item_clicked)
        self.overlay_search.hide()
        QMessageBox.critical(self, "Error",
                             exception.args[0],
                             QMessageBox.Ok)
def get_dabam_shape(self):
    """Map the shape combo index to the DABAM surface keyword (None = any shape)."""
    shape_names = {1: "plane",
                   2: "cylindrical",
                   3: "elliptical",
                   4: "toroidal",
                   5: "spherical"}
    return shape_names.get(self.shape)
def calculate_heigth_profile_ni(self):
    """Non-interactive entry point: compute the profile without popup dialogs."""
    self.calculate_heigth_profile(True)
def calculate_heigth_profile(self, not_interactive_mode=False):
    """Build the 2D error surface from the DABAM length profile + width model.

    The longitudinal (Y) profile comes from the loaded DABAM entry, optionally
    rescaled/cut/filled to a new length, centered and renormalized. The
    transverse (X) profile is fractal, gaussian, read from file, or absent,
    according to `kind_of_profile_x`. The result is stored in
    self.xx/self.yy/self.zz and drawn on the 3D canvas.

    :param not_interactive_mode: when True, skip tab switching, redraw and the
        confirmation popup (used by the workflow automation entry point).
    """
    try:
        if self.server.y is None: raise Exception("No Profile Selected")
        # Redirect stdout of the simulation library to the System Output tab.
        sys.stdout = EmittingStream(textWritten=self.writeStdOut)
        self.check_fields()
        # `combination` encodes profile sources for simulate_profile_2D:
        # first char = length profile, second char = width profile
        # (E = experimental/file, F = fractal, G = gaussian).
        combination = "E"
        #### LENGTH
        if self.modify_y == 2:
            # Fit to new length: pad symmetrically with the filler value, or cut.
            profile_1D_y_x_temp = self.si_to_user_units * self.server.y
            if self.use_undetrended == 0: profile_1D_y_y_temp = self.si_to_user_units * self.server.zHeights
            else: profile_1D_y_y_temp = self.si_to_user_units * self.server.zHeightsUndetrended
            first_coord = profile_1D_y_x_temp[0]
            second_coord = profile_1D_y_x_temp[1]
            last_coord = profile_1D_y_x_temp[-1]
            step = numpy.abs(second_coord - first_coord)
            length = numpy.abs(last_coord - first_coord)
            n_points_old = len(profile_1D_y_x_temp)
            if self.new_length_y > length:
                difference = self.new_length_y - length
                n_added_points = int(difference/step)
                if difference % step == 0:
                    n_added_points += 1
                # Keep the added points even so the padding is symmetric.
                if n_added_points % 2 != 0:
                    n_added_points += 1
                profile_1D_y_x = numpy.arange(n_added_points + n_points_old) * step
                # Filler value given in nm; convert to user units.
                profile_1D_y_y = numpy.ones(n_added_points + n_points_old) * self.filler_value_y * 1e-9 * self.si_to_user_units
                profile_1D_y_y[int(n_added_points/2) : n_points_old + int(n_added_points/2)] = profile_1D_y_y_temp
            elif self.new_length_y < length:
                difference = length - self.new_length_y
                n_removed_points = int(difference/step)
                if difference % step == 0:
                    n_removed_points -= 1
                if n_removed_points % 2 != 0:
                    n_removed_points -= 1
                if n_removed_points >= 2:
                    profile_1D_y_x = profile_1D_y_x_temp[0 : (n_points_old - n_removed_points)]
                    # NOTE(review): the "-1" offsets make the trim window slightly
                    # asymmetric with the abscissa slice — verify intended.
                    profile_1D_y_y = profile_1D_y_y_temp[(int(n_removed_points/2) - 1) : (n_points_old - int(n_removed_points/2) - 1)]
                else:
                    profile_1D_y_x = profile_1D_y_x_temp
                    profile_1D_y_y = profile_1D_y_y_temp
            else:
                profile_1D_y_x = profile_1D_y_x_temp
                profile_1D_y_y = profile_1D_y_y_temp
        else:
            if self.modify_y == 0:
                profile_1D_y_x = self.si_to_user_units * self.server.y
            elif self.modify_y == 1:
                # Rescale abscissas so the total span equals new_length_y.
                scale_factor_y = self.new_length_y / (self.si_to_user_units * (max(self.server.y) - min(self.server.y)))
                profile_1D_y_x = self.si_to_user_units * self.server.y * scale_factor_y
            if self.use_undetrended == 0: profile_1D_y_y = self.si_to_user_units * self.server.zHeights
            else: profile_1D_y_y = self.si_to_user_units * self.server.zHeightsUndetrended
        if self.center_y:
            # Re-grid the abscissas symmetrically about zero.
            first_coord = profile_1D_y_x[0]
            last_coord = profile_1D_y_x[-1]
            length = numpy.abs(last_coord - first_coord)
            profile_1D_y_x_temp = numpy.linspace(-length/2, length/2, len(profile_1D_y_x))
            profile_1D_y_x = profile_1D_y_x_temp
        if self.renormalize_y == 0:
            rms_y = None
        else:
            if self.error_type_y == profiles_simulation.FIGURE_ERROR:
                rms_y = self.rms_y * 1e-9 * self.si_to_user_units # from nm to m
            else:
                rms_y = self.rms_y * 1e-6 # from urad to rad
        #### WIDTH
        if self.kind_of_profile_x == 3:
            # No width profile: flat in X (rms_w = 0).
            combination += "F"
            xx, yy, zz = profiles_simulation.simulate_profile_2D(combination = combination,
                                                                 error_type_l = self.error_type_y,
                                                                 rms_l = rms_y,
                                                                 x_l = profile_1D_y_x,
                                                                 y_l = profile_1D_y_y,
                                                                 mirror_width = self.dimension_x,
                                                                 step_w = self.step_x,
                                                                 rms_w = 0.0)
        else:
            if self.kind_of_profile_x == 2:
                # Width profile read from a user-supplied two-column text file.
                combination += "E"
                if self.delimiter_x == 1:
                    profile_1D_x_x, profile_1D_x_y = numpy.loadtxt(self.heigth_profile_1D_file_name_x, delimiter='\t', unpack=True)
                else:
                    profile_1D_x_x, profile_1D_x_y = numpy.loadtxt(self.heigth_profile_1D_file_name_x, unpack=True)
                profile_1D_x_x *= self.conversion_factor_x_x
                profile_1D_x_y *= self.conversion_factor_x_y
                first_coord = profile_1D_x_x[0]
                second_coord = profile_1D_x_x[1]
                last_coord = profile_1D_x_x[-1]
                step = numpy.abs(second_coord - first_coord)
                length = numpy.abs(last_coord - first_coord)
                n_points_old = len(profile_1D_x_x)
                if self.modify_x == 2:
                    # Fit to new length: same symmetric pad/cut logic as for Y.
                    profile_1D_x_x_temp = profile_1D_x_x
                    profile_1D_x_y_temp = profile_1D_x_y
                    if self.new_length_x > length:
                        difference = self.new_length_x - length
                        n_added_points = int(difference/step)
                        if difference % step == 0:
                            n_added_points += 1
                        if n_added_points % 2 != 0:
                            n_added_points += 1
                        profile_1D_x_x = numpy.arange(n_added_points + n_points_old) * step
                        profile_1D_x_y = numpy.ones(n_added_points + n_points_old) * self.filler_value_x * 1e-9 * self.si_to_user_units
                        profile_1D_x_y[int(n_added_points/2) : n_points_old + int(n_added_points/2)] = profile_1D_x_y_temp
                    elif self.new_length_x < length:
                        difference = length - self.new_length_x
                        n_removed_points = int(difference/step)
                        if difference % step == 0:
                            n_removed_points -= 1
                        if n_removed_points % 2 != 0:
                            n_removed_points -= 1
                        if n_removed_points >= 2:
                            profile_1D_x_x = profile_1D_x_x_temp[0 : (n_points_old - n_removed_points)]
                            profile_1D_x_y = profile_1D_x_y_temp[(int(n_removed_points/2) - 1) : (n_points_old - int(n_removed_points/2) - 1)]
                        else:
                            profile_1D_x_x = profile_1D_x_x_temp
                            profile_1D_x_y = profile_1D_x_y_temp
                    else:
                        profile_1D_x_x = profile_1D_x_x_temp
                        profile_1D_x_y = profile_1D_x_y_temp
                elif self.modify_x == 1:
                    scale_factor_x = self.new_length_x/length
                    profile_1D_x_x *= scale_factor_x
                if self.center_x:
                    first_coord = profile_1D_x_x[0]
                    last_coord = profile_1D_x_x[-1]
                    length = numpy.abs(last_coord - first_coord)
                    profile_1D_x_x_temp = numpy.linspace(-length/2, length/2, len(profile_1D_x_x))
                    profile_1D_x_x = profile_1D_x_x_temp
                if self.renormalize_x == 0:
                    rms_x = None
                else:
                    if self.error_type_x == profiles_simulation.FIGURE_ERROR:
                        rms_x = self.rms_x * 1e-9 * self.si_to_user_units # from nm to m
                    else:
                        rms_x = self.rms_x * 1e-6 # from urad to rad
            else:
                # Stochastic width profile: fractal (F) or gaussian (G).
                profile_1D_x_x = None
                profile_1D_x_y = None
                if self.kind_of_profile_x == 0: combination += "F"
                else: combination += "G"
                if self.error_type_x == profiles_simulation.FIGURE_ERROR:
                    rms_x = self.rms_x * 1e-9 * self.si_to_user_units # from nm to m
                else:
                    rms_x = self.rms_x * 1e-6 # from urad to rad
            xx, yy, zz = profiles_simulation.simulate_profile_2D(combination = combination,
                                                                 error_type_l = self.error_type_y,
                                                                 rms_l = rms_y,
                                                                 x_l = profile_1D_y_x,
                                                                 y_l = profile_1D_y_y,
                                                                 mirror_width = self.dimension_x,
                                                                 step_w = self.step_x,
                                                                 random_seed_w = self.montecarlo_seed_x,
                                                                 error_type_w = self.error_type_x,
                                                                 rms_w = rms_x,
                                                                 power_law_exponent_beta_w = self.power_law_exponent_beta_x,
                                                                 correlation_length_w = self.correlation_length_x,
                                                                 x_w = profile_1D_x_x,
                                                                 y_w = profile_1D_x_y)
        self.xx = xx
        self.yy = yy
        self.zz = zz # in user units
        # 3D preview of the generated surface, heights converted to nm.
        self.axis.clear()
        x_to_plot, y_to_plot = numpy.meshgrid(xx, yy)
        z_to_plot = zz * 1e9 / self.si_to_user_units #nm
        self.axis.plot_surface(x_to_plot, y_to_plot, z_to_plot,
                               rstride=1, cstride=1, cmap=cm.autumn, linewidth=0.5, antialiased=True)
        sloperms = profiles_simulation.slopes(zz.T, xx, yy, return_only_rms=1)
        title = ' Slope error rms in X direction: %f $\mu$rad' % (sloperms[0]*1e6) + '\n' + \
                ' Slope error rms in Y direction: %f $\mu$rad' % (sloperms[1]*1e6) + '\n' + \
                ' Figure error rms in X direction: %f nm' % (round(zz[0, :].std()*1e9/self.si_to_user_units, 6)) + '\n' + \
                ' Figure error rms in Y direction: %f nm' % (round(zz[:, 0].std()*1e9/self.si_to_user_units, 6))
        self.axis.set_xlabel("X [" + self.get_axis_um() + "]")
        self.axis.set_ylabel("Y [" + self.get_axis_um() + "]")
        self.axis.set_zlabel("Z [nm]")
        self.axis.set_title(title)
        self.axis.mouse_init()
        if not not_interactive_mode:
            self.tabs.setCurrentIndex(6)
            if self.plotted: self.plot_canvas[5].draw()
            else: self.plotted = True
            QMessageBox.information(self, "QMessageBox.information()",
                                    "Height Profile calculated: if the result is satisfactory,\nclick \'Generate Height Profile File\' to complete the operation ",
                                    QMessageBox.Ok)
    except Exception as exception:
        QMessageBox.critical(self, "Error",
                             exception.args[0],
                             QMessageBox.Ok)
        if self.IS_DEVELOP: raise exception
def generate_heigth_profile_file_ni(self):
    """Non-interactive entry point: write the profile file without popup dialogs."""
    self.generate_heigth_profile_file(True)
def generate_heigth_profile_file(self, not_interactive_mode=False):
    """Write the generated surface to disk and send it downstream.

    Does nothing unless a surface (xx/yy/zz) was previously computed by
    calculate_heigth_profile. Delegates the actual file format to the
    subclass hook write_error_profile_file, then emits the mirror
    dimensions via send_data.
    """
    if not self.zz is None and not self.yy is None and not self.xx is None:
        try:
            congruence.checkDir(self.heigth_profile_file_name)
            # Redirect stdout of the writer to the System Output tab.
            sys.stdout = EmittingStream(textWritten=self.writeStdOut)
            congruence.checkFileName(self.heigth_profile_file_name)
            self.write_error_profile_file()
            if not not_interactive_mode:
                QMessageBox.information(self, "QMessageBox.information()",
                                        "Height Profile file " + self.heigth_profile_file_name + " written on disk",
                                        QMessageBox.Ok)
            dimension_x = self.dimension_x
            if self.kind_of_profile_x == 2: #user defined
                dimension_x = (self.xx[-1] - self.xx[0])
            # Length: original DABAM span when unmodified, otherwise the
            # user-requested new length.
            if self.modify_y == 0:
                dimension_y = self.si_to_user_units * (self.server.y[-1] - self.server.y[0])
            if self.modify_y == 1 or self.modify_y == 2:
                dimension_y = self.new_length_y
            self.send_data(dimension_x, dimension_y)
        except Exception as exception:
            QMessageBox.critical(self, "Error",
                                 exception.args[0],
                                 QMessageBox.Ok)
def write_error_profile_file(self):
    """Write the generated 2D error profile to disk.

    Abstract hook: concrete subclasses implement the file format expected
    by their ray-tracing backend.
    """
    raise NotImplementedError("This method is abstract")
def send_data(self, dimension_x, dimension_y):
    """Emit the written profile (and its mirror dimensions) to downstream widgets.

    Abstract hook: concrete subclasses implement the output signal.
    """
    raise NotImplementedError("This method is abstract")
def call_reset_settings(self):
    """Ask for confirmation, then restore the widget settings to their defaults.

    Reset failures are deliberately ignored (best effort).
    """
    if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
        try:
            self.resetSettings()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; keep the best-effort behavior
            # but only for ordinary exceptions.
            pass
def check_fields(self):
    """Validate and normalize all user inputs before a calculation.

    Delegates to the `congruence` helpers, which raise on invalid values;
    several checks also coerce the field to the validated value.
    """
    congruence.checkLessOrEqualThan(self.step_x, self.dimension_x/2, "Step Width", "Width/2")
    # Length (Y) options.
    if self.modify_y == 1 or self.modify_y == 2:
        self.new_length_y = congruence.checkStrictlyPositiveNumber(self.new_length_y, "New Length")
    if self.renormalize_y == 1:
        self.rms_y = congruence.checkPositiveNumber(self.rms_y, "Rms Y")
    congruence.checkDir(self.heigth_profile_file_name)
    # Width (X) options, depending on the selected profile kind
    # (0 = fractal, 1 = gaussian, 2 = user file, 3 = none).
    if self.kind_of_profile_x == 3:
        self.dimension_x = congruence.checkStrictlyPositiveNumber(self.dimension_x, "Dimension X")
        self.step_x = congruence.checkStrictlyPositiveNumber(self.step_x, "Step X")
    elif self.kind_of_profile_x < 2:
        self.dimension_x = congruence.checkStrictlyPositiveNumber(self.dimension_x, "Dimension X")
        self.step_x = congruence.checkStrictlyPositiveNumber(self.step_x, "Step X")
        if self.kind_of_profile_x == 0: self.power_law_exponent_beta_x = congruence.checkPositiveNumber(self.power_law_exponent_beta_x, "Beta Value X")
        if self.kind_of_profile_x == 1: self.correlation_length_x = congruence.checkStrictlyPositiveNumber(self.correlation_length_x, "Correlation Length X")
        self.rms_x = congruence.checkPositiveNumber(self.rms_x, "Rms X")
        self.montecarlo_seed_x = congruence.checkPositiveNumber(self.montecarlo_seed_x, "Monte Carlo initial seed X")
    elif self.kind_of_profile_x == 2:
        congruence.checkFile(self.heigth_profile_1D_file_name_x)
        self.conversion_factor_x_x = congruence.checkStrictlyPositiveNumber(self.conversion_factor_x_x, "Conversion from file to meters (Abscissa)")
        self.conversion_factor_x_y = congruence.checkStrictlyPositiveNumber(self.conversion_factor_x_y, "Conversion from file to meters (Height Profile Values)")
        if self.modify_x > 0:
            self.new_length_x = congruence.checkStrictlyPositiveNumber(self.new_length_x, "New Length")
        if self.renormalize_x == 1:
            self.rms_x = congruence.checkPositiveNumber(self.rms_x, "Rms X")
def writeStdOut(self, text):
    """Append redirected stdout text to the System Output area.

    Used as the EmittingStream sink; always scrolls to keep the newest
    output visible.
    """
    cursor = self.shadow_output.textCursor()
    cursor.movePosition(QTextCursor.End)
    cursor.insertText(text)
    self.shadow_output.setTextCursor(cursor)
    self.shadow_output.ensureCursorVisible()
def selectFile1D_X(self):
    """Open a file dialog and store the chosen 1D width-profile file path."""
    self.le_heigth_profile_1D_file_name_x.setText(oasysgui.selectFileFromDialog(self, self.heigth_profile_1D_file_name_x, "Select 1D Height Profile File", file_extension_filter="Data Files (*.dat *.txt)"))
def selectFile(self):
    """Open a file dialog and store the chosen output-file path."""
    self.le_heigth_profile_file_name.setText(oasysgui.selectFileFromDialog(self, self.heigth_profile_file_name, "Select Output File", file_extension_filter="Data Files (*.dat)"))
def get_axis_um(self):
    """Unit label used for the profile axes; always meters."""
    return "m"
class Overlay(QWidget):
    """Translucent busy indicator painted over *container_widget*.

    Showing the overlay starts *target_method* on a background thread and
    animates six dots arranged on a circle until the overlay is hidden.
    """

    def __init__(self, container_widget=None, target_method=None):
        QWidget.__init__(self, container_widget)
        self.container_widget = container_widget
        self.target_method = target_method  # callable run on a worker thread while shown
        palette = QPalette(self.palette())
        palette.setColor(palette.Background, Qt.transparent)  # let the widget underneath show through
        self.setPalette(palette)

    def paintEvent(self, event):
        # Whitewash the covered area, then draw six dots on a circle of radius
        # 30 px around the centre; the dot at position_index is highlighted orange.
        painter = QPainter()
        painter.begin(self)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.fillRect(event.rect(), QBrush(QColor(255, 255, 255, 127)))
        painter.setPen(QPen(Qt.NoPen))
        for i in range(1, 7):
            if self.position_index == i:
                painter.setBrush(QBrush(QColor(255, 165, 0)))
            else:
                painter.setBrush(QBrush(QColor(127, 127, 127)))
            painter.drawEllipse(
                self.width()/2 + 30 * numpy.cos(2 * numpy.pi * i / 6.0) - 10,
                self.height()/2 + 30 * numpy.sin(2 * numpy.pi * i / 6.0) - 10,
                20, 20)
            # NOTE(review): sleeping inside paintEvent blocks the GUI thread —
            # confirm this 5 ms throttle is intentional.
            time.sleep(0.005)
        painter.end()

    def showEvent(self, event):
        # A 0 ms timer fires on every event-loop iteration to drive the animation.
        self.timer = self.startTimer(0)
        self.counter = 0
        self.position_index = 0
        # Run the long operation off the GUI thread so the overlay can repaint.
        # NOTE(review): the thread is never joined; hiding the overlay only
        # stops the timer, not the worker.
        t = threading.Thread(target=self.target_method)
        t.start()

    def hideEvent(self, QHideEvent):  # parameter name shadows the QHideEvent class; kept as-is
        self.killTimer(self.timer)

    def timerEvent(self, event):
        self.counter += 1
        self.position_index += 1
        if self.position_index == 7: self.position_index = 1  # cycle through dots 1..6
        self.update()  # schedule a repaint
| gpl-3.0 |
yavuzovski/playground | machine learning/Udacity/ud120-projects/outliers/enron_outliers.py | 1 | 1299 | #!/usr/bin/python
import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
import numpy as np
### read in data dictionary, convert to numpy array
# Open in binary mode ("rb"): pickle payloads are binary, so text mode breaks
# on Windows and Python 3.  The context manager also guarantees the file is
# closed (the original leaked the handle returned by open(..., "r")).
with open("../final_project/final_project_dataset.pkl", "rb") as pkl_file:
    data_dict = pickle.load(pkl_file)
features = ["poi", "salary", "from_poi_to_this_person", "from_messages", "from_this_person_to_poi", "to_messages"]
data_dict.pop('TOTAL', 0)  # drop the spreadsheet aggregate row — it is a huge outlier
data = featureFormat(data_dict, features)
### your code below
def percent_calculator(percentile, full):
    """Return percentile / (full + percentile) as a float.

    Returns 0 when either count is zero (guards against division by zero and
    against NaNs that featureFormat encodes as 0).
    """
    if percentile == 0 or full == 0:
        return 0
    return float(percentile) / (full + percentile)
# Scatter salary vs. the combined POI-interaction ratio for every person;
# known POIs are drawn black ("k"), everyone else white ("w").
for point in data:
    poi = point[0]
    salary = point[1]
    from_poi = point[2]
    from_all = point[3]
    to_poi = point[4]
    to_all = point[5]
    # calculates the percents of from_poi
    # NOTE(review): from_poi_percent / to_poi_percent are computed but never
    # used below — presumably left over from exploration; confirm before removing.
    from_poi_percent = percent_calculator(from_poi, from_all)
    to_poi_percent = percent_calculator(to_poi, to_all)
    # poi percentile for both send and received messages
    full_percentile = percent_calculator(from_poi + to_poi, from_all + to_all)
    matplotlib.pyplot.scatter(salary, full_percentile, c="k" if poi == 1.0 else "w")
matplotlib.pyplot.xlabel(features[1])
matplotlib.pyplot.ylabel("full_percentile")
matplotlib.pyplot.show()
| gpl-3.0 |
OXPHOS/shogun | applications/tapkee/swissroll_embedding.py | 12 | 2600 | import numpy
numpy.random.seed(40)  # fixed seed so the embeddings are reproducible

# Colour values (tt) and 3-D swissroll coordinates (X); the data files are
# stored column-wise, hence the transpose after genfromtxt(..., unpack=True).
tt = numpy.genfromtxt('../../data/toy/swissroll_color.dat',unpack=True).T
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
N = X.shape[1]

# Build (converter, plot title) pairs — one per embedding method to compare.
converters = []

from shogun import LocallyLinearEmbedding
lle = LocallyLinearEmbedding()
lle.set_k(9)
converters.append((lle, "LLE with k=%d" % lle.get_k()))

from shogun import MultidimensionalScaling
mds = MultidimensionalScaling()
converters.append((mds, "Classic MDS"))

lmds = MultidimensionalScaling()
lmds.set_landmark(True)  # landmark variant: approximate MDS from 20 anchor points
lmds.set_landmark_number(20)
converters.append((lmds,"Landmark MDS with %d landmarks" % lmds.get_landmark_number()))

from shogun import Isomap
cisomap = Isomap()
cisomap.set_k(9)
converters.append((cisomap,"Isomap with k=%d" % cisomap.get_k()))

from shogun import DiffusionMaps
from shogun import GaussianKernel
dm = DiffusionMaps()
dm.set_t(2)
dm.set_width(1000.0)
converters.append((dm,"Diffusion Maps with t=%d, sigma=%.1f" % (dm.get_t(),dm.get_width())))

from shogun import HessianLocallyLinearEmbedding
hlle = HessianLocallyLinearEmbedding()
hlle.set_k(6)
converters.append((hlle,"Hessian LLE with k=%d" % (hlle.get_k())))

from shogun import LocalTangentSpaceAlignment
ltsa = LocalTangentSpaceAlignment()
ltsa.set_k(6)
converters.append((ltsa,"LTSA with k=%d" % (ltsa.get_k())))

from shogun import LaplacianEigenmaps
le = LaplacianEigenmaps()
le.set_k(20)
le.set_tau(100.0)
converters.append((le,"Laplacian Eigenmaps with k=%d, tau=%d" % (le.get_k(),le.get_tau())))

import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Reference plot: the original 3-D swissroll.  Newer matplotlib supports 3-D
# axes via add_subplot(projection='3d'); older versions need a separate Axes3D.
fig = plt.figure()
new_mpl = False
try:
    swiss_roll_fig = fig.add_subplot(3,3,1, projection='3d')
    new_mpl = True
except:
    figure = plt.figure()
    swiss_roll_fig = Axes3D(figure)
swiss_roll_fig.scatter(X[0], X[1], X[2], s=10, c=tt, cmap=plt.cm.Spectral)
swiss_roll_fig._axis3don = False  # private attr: hides the 3-D axes box
plt.suptitle('Swissroll embedding',fontsize=9)
plt.subplots_adjust(hspace=0.4)

from shogun import RealFeatures
# Embed the data with every converter and plot each 2-D result in its own cell.
for (i, (converter, label)) in enumerate(converters):
    # Reload X each iteration — presumably because embed() may mutate or
    # consume the feature matrix; confirm against the shogun API.
    X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
    features = RealFeatures(X)
    converter.set_target_dim(2)
    converter.parallel.set_num_threads(1)
    new_feats = converter.embed(features).get_feature_matrix()
    if not new_mpl:
        embedding_subplot = fig.add_subplot(4,2,i+1)
    else:
        embedding_subplot = fig.add_subplot(3,3,i+2)
    embedding_subplot.scatter(new_feats[0],new_feats[1], c=tt, cmap=plt.cm.Spectral)
    plt.axis('tight')
    plt.xticks([]), plt.yticks([])
    plt.title(label,fontsize=9)
    print converter.get_name(), 'done'  # Python 2 print statement: this script targets Python 2
plt.show()
| gpl-3.0 |
btabibian/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
# Shared fixture: three well-separated Gaussian blobs (shifted by +10) used
# by every test in this module.
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
                  cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
    """Affinity Propagation: functional interface and estimator must agree."""
    # Similarities are negated squared distances; a preference of 10x the
    # median similarity yields the expected number of clusters on the blobs.
    similarity = -euclidean_distances(X, squared=True)
    pref = np.median(similarity) * 10

    centers_idx, labels = affinity_propagation(similarity, preference=pref)
    assert_equal(n_clusters, len(centers_idx))

    # Precomputed affinity and euclidean affinity must label identically.
    model_precomputed = AffinityPropagation(preference=pref,
                                            affinity="precomputed")
    labels_precomputed = model_precomputed.fit(similarity).labels_

    model = AffinityPropagation(preference=pref, verbose=True)
    labels = model.fit(X).labels_
    assert_array_equal(labels, labels_precomputed)

    centers_idx = model.cluster_centers_indices_
    found_clusters = len(centers_idx)
    assert_equal(np.unique(labels).size, found_clusters)
    assert_equal(n_clusters, found_clusters)

    # The copy=False path must give the same labels.
    _, labels_no_copy = affinity_propagation(similarity, preference=pref,
                                             copy=False)
    assert_array_equal(labels, labels_no_copy)

    # Input validation: non-square S, bad damping, unknown affinity.
    assert_raises(ValueError, affinity_propagation, similarity[:, :-1])
    assert_raises(ValueError, affinity_propagation, similarity, damping=0)
    bad_model = AffinityPropagation(affinity="unknown")
    assert_raises(ValueError, bad_model.fit, X)
def test_affinity_propagation_predict():
    """fit_predict and a subsequent predict must return identical labels."""
    model = AffinityPropagation(affinity="euclidean")
    fitted_labels = model.fit_predict(X)
    predicted_labels = model.predict(X)
    assert_array_equal(fitted_labels, predicted_labels)
def test_affinity_propagation_predict_error():
    """predict must raise when unfitted or when affinity='precomputed'."""
    # Calling predict before fit is an error.
    unfitted = AffinityPropagation(affinity="euclidean")
    assert_raises(ValueError, unfitted.predict, X)

    # predict is unsupported for a precomputed affinity matrix.
    gram = np.dot(X, X.T)
    fitted = AffinityPropagation(affinity="precomputed")
    fitted.fit(gram)
    assert_raises(ValueError, fitted.predict, X)
| bsd-3-clause |
NunoEdgarGub1/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/_cm.py | 70 | 375423 | """
Color data and pre-defined cmap objects.
This is a helper for cm.py, originally part of that file.
Separating the data (this file) from cm.py makes both easier
to deal with.
Objects visible in cm.py are the individual cmap objects ('autumn',
etc.) and a dictionary, 'datad', including all of these objects.
"""
import matplotlib as mpl
import matplotlib.colors as colors
LUTSIZE = mpl.rcParams['image.lut']  # number of entries in each colormap lookup table
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
_bone_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 1.0, 1.0))}
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
# Cyan-to-magenta ramp.
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
              'green': ((0., 1., 1.), (1.0, 0., 0.)),
              'blue': ((0., 1., 1.), (1.0, 1., 1.))}
# Black-to-copper ramp; red saturates at x ~= 0.81.
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
                'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
                'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
# High-frequency red/white/blue "flag" pattern (63 repeating segments).
_flag_data = {'red': ((0., 1., 1.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 1.000000, 1.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 1.000000, 1.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 1.000000, 1.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 1.000000, 1.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 1.000000, 1.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 0.000000, 0.000000),(1.0, 0., 0.)),
'green': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 1.000000, 1.000000),(0.095238, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.190476, 0.000000, 0.000000),
(0.206349, 1.000000, 1.000000),(0.222222, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 0.000000, 0.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 1.000000, 1.000000),(0.476190, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.571429, 0.000000, 0.000000),
(0.587302, 1.000000, 1.000000),(0.603175, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 0.000000, 0.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.952381, 0.000000, 0.000000),
(0.968254, 1.000000, 1.000000),(0.984127, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 1.000000, 1.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 0.000000, 0.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 1.000000, 1.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 1.000000, 1.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 1.000000, 1.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 0.000000, 0.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 1.000000, 1.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 0.000000, 0.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 1.000000, 1.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 1.000000, 1.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 1.000000, 1.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 0.000000, 0.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 1.000000, 1.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 0.000000, 0.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 1.000000, 1.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 1.000000, 1.000000),(1.0, 0., 0.))}
# Linear grayscale ramp.
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
              'green': ((0., 0, 0), (1., 1, 1)),
              'blue': ((0., 0, 0), (1., 1, 1))}
# Black -> red -> yellow -> white "heat" ramp.
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
             'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
                       (0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
             'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
# Full hue circle at constant saturation/value.
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
# Blue -> cyan -> yellow -> red rainbow.
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
# Sepia-toned "pink" ramp (square-root-like per-channel curves, 64 samples).
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
# Repeating red/yellow/green/blue/violet "prism" bands.
_prism_data = {'red': ((0., 1., 1.),(0.031746, 1.000000, 1.000000),
(0.047619, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 0.666667, 0.666667),(0.095238, 1.000000, 1.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.666667, 0.666667),
(0.190476, 1.000000, 1.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 0.000000, 0.000000),(0.253968, 0.000000, 0.000000),
(0.269841, 0.666667, 0.666667),(0.285714, 1.000000, 1.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.666667, 0.666667),
(0.380952, 1.000000, 1.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 0.666667, 0.666667),(0.476190, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.666667, 0.666667),
(0.571429, 1.000000, 1.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 0.000000, 0.000000),(0.634921, 0.000000, 0.000000),
(0.650794, 0.666667, 0.666667),(0.666667, 1.000000, 1.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.666667, 0.666667),
(0.761905, 1.000000, 1.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 0.666667, 0.666667),(0.857143, 1.000000, 1.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.666667, 0.666667),
(0.952381, 1.000000, 1.000000),(0.984127, 1.000000, 1.000000),
(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(0.031746, 1.000000, 1.000000),
(0.047619, 1.000000, 1.000000),(0.063492, 0.000000, 0.000000),
(0.095238, 0.000000, 0.000000),(0.126984, 1.000000, 1.000000),
(0.142857, 1.000000, 1.000000),(0.158730, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 1.000000, 1.000000),(0.253968, 0.000000, 0.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 1.000000, 1.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 1.000000, 1.000000),(0.444444, 0.000000, 0.000000),
(0.476190, 0.000000, 0.000000),(0.507937, 1.000000, 1.000000),
(0.523810, 1.000000, 1.000000),(0.539683, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 1.000000, 1.000000),(0.634921, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 1.000000, 1.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 1.000000, 1.000000),(0.825397, 0.000000, 0.000000),
(0.857143, 0.000000, 0.000000),(0.888889, 1.000000, 1.000000),
(0.904762, 1.000000, 1.000000),(0.920635, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.984127, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 1.000000, 1.000000),
(0.190476, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 1.000000, 1.000000),
(0.380952, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 1.000000, 1.000000),
(0.571429, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 1.000000, 1.000000),
(0.761905, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 1.000000, 1.000000),
(0.952381, 0.000000, 0.000000),(1.0, 0.0, 0.0))}
# Magenta-to-yellow ramp.
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
# Green-to-yellow ramp.
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
# Blue-to-green ramp.
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
# Spectral (rainbow-like) map with 5% bands.
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
# Instantiate one LinearSegmentedColormap per data table above.  The name
# strings match the keys of ``datad`` below; 'bone' and 'binary' previously
# carried a stray trailing space ('bone ', 'binary '), which made
# Colormap.name inconsistent with every other entry and with datad.
autumn = colors.LinearSegmentedColormap('autumn', _autumn_data, LUTSIZE)
bone = colors.LinearSegmentedColormap('bone', _bone_data, LUTSIZE)
binary = colors.LinearSegmentedColormap('binary', _binary_data, LUTSIZE)
cool = colors.LinearSegmentedColormap('cool', _cool_data, LUTSIZE)
copper = colors.LinearSegmentedColormap('copper', _copper_data, LUTSIZE)
flag = colors.LinearSegmentedColormap('flag', _flag_data, LUTSIZE)
gray = colors.LinearSegmentedColormap('gray', _gray_data, LUTSIZE)
hot = colors.LinearSegmentedColormap('hot', _hot_data, LUTSIZE)
hsv = colors.LinearSegmentedColormap('hsv', _hsv_data, LUTSIZE)
jet = colors.LinearSegmentedColormap('jet', _jet_data, LUTSIZE)
pink = colors.LinearSegmentedColormap('pink', _pink_data, LUTSIZE)
prism = colors.LinearSegmentedColormap('prism', _prism_data, LUTSIZE)
spring = colors.LinearSegmentedColormap('spring', _spring_data, LUTSIZE)
summer = colors.LinearSegmentedColormap('summer', _summer_data, LUTSIZE)
winter = colors.LinearSegmentedColormap('winter', _winter_data, LUTSIZE)
spectral = colors.LinearSegmentedColormap('spectral', _spectral_data, LUTSIZE)

# Registry mapping colormap name -> segment data, consumed by cm.py.
datad = {
    'autumn': _autumn_data,
    'bone': _bone_data,
    'binary': _binary_data,
    'cool': _cool_data,
    'copper': _copper_data,
    'flag': _flag_data,
    'gray': _gray_data,
    'hot': _hot_data,
    'hsv': _hsv_data,
    'jet': _jet_data,
    'pink': _pink_data,
    'prism': _prism_data,
    'spring': _spring_data,
    'summer': _summer_data,
    'winter': _winter_data,
    'spectral': _spectral_data
    }
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-stype license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
# ColorBrewer qualitative palette "Accent" (8 anchor colours).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
# ColorBrewer sequential palette "Blues" (9 anchor colours, light to dark).
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
# "RdGy" palette table: per RGB channel, a list of (x, y_below, y_above) anchor
# tuples with x ascending in [0, 1]; y_below == y_above at every anchor here.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
# "RdPu" palette table: per RGB channel, (x, y_below, y_above) anchors at
# x = 0, 0.125, ..., 1.0; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
# "RdYlBu" palette table: per RGB channel, (x, y_below, y_above) anchors.
# The x positions here are float32-rounded tenths (e.g. 0.10000000149011612),
# unlike neighbouring tables which use float64 tenths -- values are as generated.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
# "RdYlGn" palette table: per RGB channel, (x, y_below, y_above) anchors at
# tenths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
# "Reds" palette table: per RGB channel, (x, y_below, y_above) anchors at
# eighths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
# "Set1" palette table: per RGB channel, (x, y_below, y_above) anchors at
# eighths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
# "Set2" palette table: per RGB channel, (x, y_below, y_above) anchors at
# sevenths of [0, 1] (8 anchors); left/right values equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
# "Set3" palette table: per RGB channel, (x, y_below, y_above) anchors at
# elevenths of [0, 1] (12 anchors); left/right values equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
# "Spectral" palette table: per RGB channel, (x, y_below, y_above) anchors at
# tenths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
# "YlGn" palette table: per RGB channel, (x, y_below, y_above) anchors at
# eighths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
# "YlGnBu" palette table: per RGB channel, (x, y_below, y_above) anchors at
# eighths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
# "YlOrBr" palette table: per RGB channel, (x, y_below, y_above) anchors at
# eighths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
# "YlOrRd" palette table: per RGB channel, (x, y_below, y_above) anchors at
# eighths of [0, 1]; left/right values are equal at every anchor.
# NOTE(review): presumably LinearSegmentedColormap segment data -- confirm.
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
_gist_earth_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.18039216101169586, 0.18039216101169586), (0.0084033617749810219,
0.22745098173618317, 0.22745098173618317), (0.012605042196810246,
0.27058824896812439, 0.27058824896812439), (0.016806723549962044,
0.31764706969261169, 0.31764706969261169), (0.021008403971791267,
0.36078432202339172, 0.36078432202339172), (0.025210084393620491,
0.40784314274787903, 0.40784314274787903), (0.029411764815449715,
0.45490196347236633, 0.45490196347236633), (0.033613447099924088,
0.45490196347236633, 0.45490196347236633), (0.037815127521753311,
0.45490196347236633, 0.45490196347236633), (0.042016807943582535,
0.45490196347236633, 0.45490196347236633), (0.046218488365411758,
0.45490196347236633, 0.45490196347236633), (0.050420168787240982,
0.45882353186607361, 0.45882353186607361), (0.054621849209070206,
0.45882353186607361, 0.45882353186607361), (0.058823529630899429,
0.45882353186607361, 0.45882353186607361), (0.063025213778018951,
0.45882353186607361, 0.45882353186607361), (0.067226894199848175,
0.45882353186607361, 0.45882353186607361), (0.071428574621677399,
0.46274510025978088, 0.46274510025978088), (0.075630255043506622,
0.46274510025978088, 0.46274510025978088), (0.079831935465335846,
0.46274510025978088, 0.46274510025978088), (0.08403361588716507,
0.46274510025978088, 0.46274510025978088), (0.088235296308994293,
0.46274510025978088, 0.46274510025978088), (0.092436976730823517,
0.46666666865348816, 0.46666666865348816), (0.09663865715265274,
0.46666666865348816, 0.46666666865348816), (0.10084033757448196,
0.46666666865348816, 0.46666666865348816), (0.10504201799631119,
0.46666666865348816, 0.46666666865348816), (0.10924369841814041,
0.46666666865348816, 0.46666666865348816), (0.11344537883996964,
0.47058823704719543, 0.47058823704719543), (0.11764705926179886,
0.47058823704719543, 0.47058823704719543), (0.12184873968362808,
0.47058823704719543, 0.47058823704719543), (0.1260504275560379,
0.47058823704719543, 0.47058823704719543), (0.13025210797786713,
0.47058823704719543, 0.47058823704719543), (0.13445378839969635,
0.47450980544090271, 0.47450980544090271), (0.13865546882152557,
0.47450980544090271, 0.47450980544090271), (0.1428571492433548,
0.47450980544090271, 0.47450980544090271), (0.14705882966518402,
0.47450980544090271, 0.47450980544090271), (0.15126051008701324,
0.47450980544090271, 0.47450980544090271), (0.15546219050884247,
0.47843137383460999, 0.47843137383460999), (0.15966387093067169,
0.47843137383460999, 0.47843137383460999), (0.16386555135250092,
0.47843137383460999, 0.47843137383460999), (0.16806723177433014,
0.47843137383460999, 0.47843137383460999), (0.17226891219615936,
0.47843137383460999, 0.47843137383460999), (0.17647059261798859,
0.48235294222831726, 0.48235294222831726), (0.18067227303981781,
0.48235294222831726, 0.48235294222831726), (0.18487395346164703,
0.48235294222831726, 0.48235294222831726), (0.18907563388347626,
0.48235294222831726, 0.48235294222831726), (0.19327731430530548,
0.48235294222831726, 0.48235294222831726), (0.1974789947271347,
0.48627451062202454, 0.48627451062202454), (0.20168067514896393,
0.48627451062202454, 0.48627451062202454), (0.20588235557079315,
0.48627451062202454, 0.48627451062202454), (0.21008403599262238,
0.48627451062202454, 0.48627451062202454), (0.2142857164144516,
0.48627451062202454, 0.48627451062202454), (0.21848739683628082,
0.49019607901573181, 0.49019607901573181), (0.22268907725811005,
0.49019607901573181, 0.49019607901573181), (0.22689075767993927,
0.49019607901573181, 0.49019607901573181), (0.23109243810176849,
0.49019607901573181, 0.49019607901573181), (0.23529411852359772,
0.49019607901573181, 0.49019607901573181), (0.23949579894542694,
0.49411764740943909, 0.49411764740943909), (0.24369747936725616,
0.49411764740943909, 0.49411764740943909), (0.24789915978908539,
0.49411764740943909, 0.49411764740943909), (0.25210085511207581,
0.49411764740943909, 0.49411764740943909), (0.25630253553390503,
0.49411764740943909, 0.49411764740943909), (0.26050421595573425,
0.49803921580314636, 0.49803921580314636), (0.26470589637756348,
0.49803921580314636, 0.49803921580314636), (0.2689075767993927,
0.49803921580314636, 0.49803921580314636), (0.27310925722122192,
0.49803921580314636, 0.49803921580314636), (0.27731093764305115,
0.49803921580314636, 0.49803921580314636), (0.28151261806488037,
0.50196081399917603, 0.50196081399917603), (0.28571429848670959,
0.49411764740943909, 0.49411764740943909), (0.28991597890853882,
0.49019607901573181, 0.49019607901573181), (0.29411765933036804,
0.48627451062202454, 0.48627451062202454), (0.29831933975219727,
0.48235294222831726, 0.48235294222831726), (0.30252102017402649,
0.47843137383460999, 0.47843137383460999), (0.30672270059585571,
0.47058823704719543, 0.47058823704719543), (0.31092438101768494,
0.46666666865348816, 0.46666666865348816), (0.31512606143951416,
0.46274510025978088, 0.46274510025978088), (0.31932774186134338,
0.45882353186607361, 0.45882353186607361), (0.32352942228317261,
0.45098039507865906, 0.45098039507865906), (0.32773110270500183,
0.44705882668495178, 0.44705882668495178), (0.33193278312683105,
0.44313725829124451, 0.44313725829124451), (0.33613446354866028,
0.43529412150382996, 0.43529412150382996), (0.3403361439704895,
0.43137255311012268, 0.43137255311012268), (0.34453782439231873,
0.42745098471641541, 0.42745098471641541), (0.34873950481414795,
0.42352941632270813, 0.42352941632270813), (0.35294118523597717,
0.41568627953529358, 0.41568627953529358), (0.3571428656578064,
0.4117647111415863, 0.4117647111415863), (0.36134454607963562,
0.40784314274787903, 0.40784314274787903), (0.36554622650146484,
0.40000000596046448, 0.40000000596046448), (0.36974790692329407,
0.3960784375667572, 0.3960784375667572), (0.37394958734512329,
0.39215686917304993, 0.39215686917304993), (0.37815126776695251,
0.38431373238563538, 0.38431373238563538), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.37647059559822083, 0.37647059559822083), (0.39075630903244019,
0.36862745881080627, 0.36862745881080627), (0.39495798945426941,
0.364705890417099, 0.364705890417099), (0.39915966987609863,
0.36078432202339172, 0.36078432202339172), (0.40336135029792786,
0.35294118523597717, 0.35294118523597717), (0.40756303071975708,
0.3490196168422699, 0.3490196168422699), (0.4117647111415863,
0.34509804844856262, 0.34509804844856262), (0.41596639156341553,
0.33725491166114807, 0.33725491166114807), (0.42016807198524475,
0.3333333432674408, 0.3333333432674408), (0.42436975240707397,
0.32941177487373352, 0.32941177487373352), (0.4285714328289032,
0.32156863808631897, 0.32156863808631897), (0.43277311325073242,
0.31764706969261169, 0.31764706969261169), (0.43697479367256165,
0.31372550129890442, 0.31372550129890442), (0.44117647409439087,
0.30588236451148987, 0.30588236451148987), (0.44537815451622009,
0.30196079611778259, 0.30196079611778259), (0.44957983493804932,
0.29803922772407532, 0.29803922772407532), (0.45378151535987854,
0.29019609093666077, 0.29019609093666077), (0.45798319578170776,
0.28627452254295349, 0.28627452254295349), (0.46218487620353699,
0.27843138575553894, 0.27843138575553894), (0.46638655662536621,
0.27450981736183167, 0.27450981736183167), (0.47058823704719543,
0.27843138575553894, 0.27843138575553894), (0.47478991746902466,
0.28235295414924622, 0.28235295414924622), (0.47899159789085388,
0.28235295414924622, 0.28235295414924622), (0.48319327831268311,
0.28627452254295349, 0.28627452254295349), (0.48739495873451233,
0.28627452254295349, 0.28627452254295349), (0.49159663915634155,
0.29019609093666077, 0.29019609093666077), (0.49579831957817078,
0.29411765933036804, 0.29411765933036804), (0.5, 0.29411765933036804,
0.29411765933036804), (0.50420171022415161, 0.29803922772407532,
0.29803922772407532), (0.50840336084365845, 0.29803922772407532,
0.29803922772407532), (0.51260507106781006, 0.30196079611778259,
0.30196079611778259), (0.51680672168731689, 0.30196079611778259,
0.30196079611778259), (0.52100843191146851, 0.30588236451148987,
0.30588236451148987), (0.52521008253097534, 0.30980393290519714,
0.30980393290519714), (0.52941179275512695, 0.30980393290519714,
0.30980393290519714), (0.53361344337463379, 0.31372550129890442,
0.31372550129890442), (0.5378151535987854, 0.31372550129890442,
0.31372550129890442), (0.54201680421829224, 0.31764706969261169,
0.31764706969261169), (0.54621851444244385, 0.32156863808631897,
0.32156863808631897), (0.55042016506195068, 0.32156863808631897,
0.32156863808631897), (0.55462187528610229, 0.32156863808631897,
0.32156863808631897), (0.55882352590560913, 0.32549020648002625,
0.32549020648002625), (0.56302523612976074, 0.32549020648002625,
0.32549020648002625), (0.56722688674926758, 0.32549020648002625,
0.32549020648002625), (0.57142859697341919, 0.32941177487373352,
0.32941177487373352), (0.57563024759292603, 0.32941177487373352,
0.32941177487373352), (0.57983195781707764, 0.32941177487373352,
0.32941177487373352), (0.58403360843658447, 0.3333333432674408,
0.3333333432674408), (0.58823531866073608, 0.3333333432674408,
0.3333333432674408), (0.59243696928024292, 0.3333333432674408,
0.3333333432674408), (0.59663867950439453, 0.33725491166114807,
0.33725491166114807), (0.60084033012390137, 0.33725491166114807,
0.33725491166114807), (0.60504204034805298, 0.33725491166114807,
0.33725491166114807), (0.60924369096755981, 0.34117648005485535,
0.34117648005485535), (0.61344540119171143, 0.34117648005485535,
0.34117648005485535), (0.61764705181121826, 0.34117648005485535,
0.34117648005485535), (0.62184876203536987, 0.34509804844856262,
0.34509804844856262), (0.62605041265487671, 0.34509804844856262,
0.34509804844856262), (0.63025212287902832, 0.34509804844856262,
0.34509804844856262), (0.63445377349853516, 0.3490196168422699,
0.3490196168422699), (0.63865548372268677, 0.3490196168422699,
0.3490196168422699), (0.6428571343421936, 0.3490196168422699,
0.3490196168422699), (0.64705884456634521, 0.35294118523597717,
0.35294118523597717), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.35294118523597717,
0.35294118523597717), (0.6596638560295105, 0.35686275362968445,
0.35686275362968445), (0.66386556625366211, 0.35686275362968445,
0.35686275362968445), (0.66806721687316895, 0.35686275362968445,
0.35686275362968445), (0.67226892709732056, 0.36078432202339172,
0.36078432202339172), (0.67647057771682739, 0.36078432202339172,
0.36078432202339172), (0.680672287940979, 0.36078432202339172,
0.36078432202339172), (0.68487393856048584, 0.364705890417099,
0.364705890417099), (0.68907564878463745, 0.364705890417099,
0.364705890417099), (0.69327729940414429, 0.364705890417099,
0.364705890417099), (0.6974790096282959, 0.36862745881080627,
0.36862745881080627), (0.70168066024780273, 0.36862745881080627,
0.36862745881080627), (0.70588237047195435, 0.36862745881080627,
0.36862745881080627), (0.71008402109146118, 0.37254902720451355,
0.37254902720451355), (0.71428573131561279, 0.37254902720451355,
0.37254902720451355), (0.71848738193511963, 0.37254902720451355,
0.37254902720451355), (0.72268909215927124, 0.37647059559822083,
0.37647059559822083), (0.72689074277877808, 0.37647059559822083,
0.37647059559822083), (0.73109245300292969, 0.3803921639919281,
0.3803921639919281), (0.73529410362243652, 0.3803921639919281,
0.3803921639919281), (0.73949581384658813, 0.3803921639919281,
0.3803921639919281), (0.74369746446609497, 0.38431373238563538,
0.38431373238563538), (0.74789917469024658, 0.38431373238563538,
0.38431373238563538), (0.75210082530975342, 0.38431373238563538,
0.38431373238563538), (0.75630253553390503, 0.38823530077934265,
0.38823530077934265), (0.76050418615341187, 0.38823530077934265,
0.38823530077934265), (0.76470589637756348, 0.38823530077934265,
0.38823530077934265), (0.76890754699707031, 0.39215686917304993,
0.39215686917304993), (0.77310925722122192, 0.39215686917304993,
0.39215686917304993), (0.77731090784072876, 0.39215686917304993,
0.39215686917304993), (0.78151261806488037, 0.3960784375667572,
0.3960784375667572), (0.78571426868438721, 0.3960784375667572,
0.3960784375667572), (0.78991597890853882, 0.40784314274787903,
0.40784314274787903), (0.79411762952804565, 0.41568627953529358,
0.41568627953529358), (0.79831933975219727, 0.42352941632270813,
0.42352941632270813), (0.8025209903717041, 0.43529412150382996,
0.43529412150382996), (0.80672270059585571, 0.44313725829124451,
0.44313725829124451), (0.81092435121536255, 0.45490196347236633,
0.45490196347236633), (0.81512606143951416, 0.46274510025978088,
0.46274510025978088), (0.819327712059021, 0.47450980544090271,
0.47450980544090271), (0.82352942228317261, 0.48235294222831726,
0.48235294222831726), (0.82773107290267944, 0.49411764740943909,
0.49411764740943909), (0.83193278312683105, 0.5058823823928833,
0.5058823823928833), (0.83613443374633789, 0.51372551918029785,
0.51372551918029785), (0.8403361439704895, 0.52549022436141968,
0.52549022436141968), (0.84453779458999634, 0.5372549295425415,
0.5372549295425415), (0.84873950481414795, 0.54509806632995605,
0.54509806632995605), (0.85294115543365479, 0.55686277151107788,
0.55686277151107788), (0.8571428656578064, 0.56862747669219971,
0.56862747669219971), (0.86134451627731323, 0.58039218187332153,
0.58039218187332153), (0.86554622650146484, 0.58823531866073608,
0.58823531866073608), (0.86974787712097168, 0.60000002384185791,
0.60000002384185791), (0.87394958734512329, 0.61176472902297974,
0.61176472902297974), (0.87815123796463013, 0.62352943420410156,
0.62352943420410156), (0.88235294818878174, 0.63529413938522339,
0.63529413938522339), (0.88655459880828857, 0.64705884456634521,
0.64705884456634521), (0.89075630903244019, 0.65882354974746704,
0.65882354974746704), (0.89495795965194702, 0.66666668653488159,
0.66666668653488159), (0.89915966987609863, 0.67843139171600342,
0.67843139171600342), (0.90336132049560547, 0.69019609689712524,
0.69019609689712524), (0.90756303071975708, 0.70196080207824707,
0.70196080207824707), (0.91176468133926392, 0.7137255072593689,
0.7137255072593689), (0.91596639156341553, 0.72549021244049072,
0.72549021244049072), (0.92016804218292236, 0.74117648601531982,
0.74117648601531982), (0.92436975240707397, 0.75294119119644165,
0.75294119119644165), (0.92857140302658081, 0.76470589637756348,
0.76470589637756348), (0.93277311325073242, 0.7764706015586853,
0.7764706015586853), (0.93697476387023926, 0.78823530673980713,
0.78823530673980713), (0.94117647409439087, 0.80000001192092896,
0.80000001192092896), (0.94537812471389771, 0.81176471710205078,
0.81176471710205078), (0.94957983493804932, 0.82745099067687988,
0.82745099067687988), (0.95378148555755615, 0.83921569585800171,
0.83921569585800171), (0.95798319578170776, 0.85098040103912354,
0.85098040103912354), (0.9621848464012146, 0.86274510622024536,
0.86274510622024536), (0.96638655662536621, 0.87843137979507446,
0.87843137979507446), (0.97058820724487305, 0.89019608497619629,
0.89019608497619629), (0.97478991746902466, 0.90196079015731812,
0.90196079015731812), (0.97899156808853149, 0.91764706373214722,
0.91764706373214722), (0.98319327831268311, 0.92941176891326904,
0.92941176891326904), (0.98739492893218994, 0.94509804248809814,
0.94509804248809814), (0.99159663915634155, 0.95686274766921997,
0.95686274766921997), (0.99579828977584839, 0.97254902124404907,
0.97254902124404907), (1.0, 0.9843137264251709, 0.9843137264251709)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.011764706112444401, 0.011764706112444401),
(0.037815127521753311, 0.023529412224888802, 0.023529412224888802),
(0.042016807943582535, 0.031372550874948502, 0.031372550874948502),
(0.046218488365411758, 0.043137256056070328, 0.043137256056070328),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.062745101749897003, 0.062745101749897003),
(0.058823529630899429, 0.070588238537311554, 0.070588238537311554),
(0.063025213778018951, 0.08235294371843338, 0.08235294371843338),
(0.067226894199848175, 0.090196080505847931, 0.090196080505847931),
(0.071428574621677399, 0.10196078568696976, 0.10196078568696976),
(0.075630255043506622, 0.10980392247438431, 0.10980392247438431),
(0.079831935465335846, 0.12156862765550613, 0.12156862765550613),
(0.08403361588716507, 0.12941177189350128, 0.12941177189350128),
(0.088235296308994293, 0.14117647707462311, 0.14117647707462311),
(0.092436976730823517, 0.14901961386203766, 0.14901961386203766),
(0.09663865715265274, 0.16078431904315948, 0.16078431904315948),
(0.10084033757448196, 0.16862745583057404, 0.16862745583057404),
(0.10504201799631119, 0.17647059261798859, 0.17647059261798859),
(0.10924369841814041, 0.18823529779911041, 0.18823529779911041),
(0.11344537883996964, 0.19607843458652496, 0.19607843458652496),
(0.11764705926179886, 0.20392157137393951, 0.20392157137393951),
(0.12184873968362808, 0.21568627655506134, 0.21568627655506134),
(0.1260504275560379, 0.22352941334247589, 0.22352941334247589),
(0.13025210797786713, 0.23137255012989044, 0.23137255012989044),
(0.13445378839969635, 0.23921568691730499, 0.23921568691730499),
(0.13865546882152557, 0.25098040699958801, 0.25098040699958801),
(0.1428571492433548, 0.25882354378700256, 0.25882354378700256),
(0.14705882966518402, 0.26666668057441711, 0.26666668057441711),
(0.15126051008701324, 0.27450981736183167, 0.27450981736183167),
(0.15546219050884247, 0.28235295414924622, 0.28235295414924622),
(0.15966387093067169, 0.29019609093666077, 0.29019609093666077),
(0.16386555135250092, 0.30196079611778259, 0.30196079611778259),
(0.16806723177433014, 0.30980393290519714, 0.30980393290519714),
(0.17226891219615936, 0.31764706969261169, 0.31764706969261169),
(0.17647059261798859, 0.32549020648002625, 0.32549020648002625),
(0.18067227303981781, 0.3333333432674408, 0.3333333432674408),
(0.18487395346164703, 0.34117648005485535, 0.34117648005485535),
(0.18907563388347626, 0.3490196168422699, 0.3490196168422699),
(0.19327731430530548, 0.35686275362968445, 0.35686275362968445),
(0.1974789947271347, 0.364705890417099, 0.364705890417099),
(0.20168067514896393, 0.37254902720451355, 0.37254902720451355),
(0.20588235557079315, 0.3803921639919281, 0.3803921639919281),
(0.21008403599262238, 0.38823530077934265, 0.38823530077934265),
(0.2142857164144516, 0.39215686917304993, 0.39215686917304993),
(0.21848739683628082, 0.40000000596046448, 0.40000000596046448),
(0.22268907725811005, 0.40784314274787903, 0.40784314274787903),
(0.22689075767993927, 0.41568627953529358, 0.41568627953529358),
(0.23109243810176849, 0.42352941632270813, 0.42352941632270813),
(0.23529411852359772, 0.42745098471641541, 0.42745098471641541),
(0.23949579894542694, 0.43529412150382996, 0.43529412150382996),
(0.24369747936725616, 0.44313725829124451, 0.44313725829124451),
(0.24789915978908539, 0.45098039507865906, 0.45098039507865906),
(0.25210085511207581, 0.45490196347236633, 0.45490196347236633),
(0.25630253553390503, 0.46274510025978088, 0.46274510025978088),
(0.26050421595573425, 0.47058823704719543, 0.47058823704719543),
(0.26470589637756348, 0.47450980544090271, 0.47450980544090271),
(0.2689075767993927, 0.48235294222831726, 0.48235294222831726),
(0.27310925722122192, 0.49019607901573181, 0.49019607901573181),
(0.27731093764305115, 0.49411764740943909, 0.49411764740943909),
(0.28151261806488037, 0.50196081399917603, 0.50196081399917603),
(0.28571429848670959, 0.50196081399917603, 0.50196081399917603),
(0.28991597890853882, 0.5058823823928833, 0.5058823823928833),
(0.29411765933036804, 0.5058823823928833, 0.5058823823928833),
(0.29831933975219727, 0.50980395078659058, 0.50980395078659058),
(0.30252102017402649, 0.51372551918029785, 0.51372551918029785),
(0.30672270059585571, 0.51372551918029785, 0.51372551918029785),
(0.31092438101768494, 0.51764708757400513, 0.51764708757400513),
(0.31512606143951416, 0.5215686559677124, 0.5215686559677124),
(0.31932774186134338, 0.5215686559677124, 0.5215686559677124),
(0.32352942228317261, 0.52549022436141968, 0.52549022436141968),
(0.32773110270500183, 0.52549022436141968, 0.52549022436141968),
(0.33193278312683105, 0.52941179275512695, 0.52941179275512695),
(0.33613446354866028, 0.53333336114883423, 0.53333336114883423),
(0.3403361439704895, 0.53333336114883423, 0.53333336114883423),
(0.34453782439231873, 0.5372549295425415, 0.5372549295425415),
(0.34873950481414795, 0.54117649793624878, 0.54117649793624878),
(0.35294118523597717, 0.54117649793624878, 0.54117649793624878),
(0.3571428656578064, 0.54509806632995605, 0.54509806632995605),
(0.36134454607963562, 0.54901963472366333, 0.54901963472366333),
(0.36554622650146484, 0.54901963472366333, 0.54901963472366333),
(0.36974790692329407, 0.55294120311737061, 0.55294120311737061),
(0.37394958734512329, 0.55294120311737061, 0.55294120311737061),
(0.37815126776695251, 0.55686277151107788, 0.55686277151107788),
(0.38235294818878174, 0.56078433990478516, 0.56078433990478516),
(0.38655462861061096, 0.56078433990478516, 0.56078433990478516),
(0.39075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.39495798945426941, 0.56862747669219971, 0.56862747669219971),
(0.39915966987609863, 0.56862747669219971, 0.56862747669219971),
(0.40336135029792786, 0.57254904508590698, 0.57254904508590698),
(0.40756303071975708, 0.57254904508590698, 0.57254904508590698),
(0.4117647111415863, 0.57647061347961426, 0.57647061347961426),
(0.41596639156341553, 0.58039218187332153, 0.58039218187332153),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.58431375026702881, 0.58431375026702881),
(0.4285714328289032, 0.58823531866073608, 0.58823531866073608),
(0.43277311325073242, 0.58823531866073608, 0.58823531866073608),
(0.43697479367256165, 0.59215688705444336, 0.59215688705444336),
(0.44117647409439087, 0.59215688705444336, 0.59215688705444336),
(0.44537815451622009, 0.59607845544815063, 0.59607845544815063),
(0.44957983493804932, 0.60000002384185791, 0.60000002384185791),
(0.45378151535987854, 0.60000002384185791, 0.60000002384185791),
(0.45798319578170776, 0.60392159223556519, 0.60392159223556519),
(0.46218487620353699, 0.60784316062927246, 0.60784316062927246),
(0.46638655662536621, 0.60784316062927246, 0.60784316062927246),
(0.47058823704719543, 0.61176472902297974, 0.61176472902297974),
(0.47478991746902466, 0.61176472902297974, 0.61176472902297974),
(0.47899159789085388, 0.61568629741668701, 0.61568629741668701),
(0.48319327831268311, 0.61960786581039429, 0.61960786581039429),
(0.48739495873451233, 0.61960786581039429, 0.61960786581039429),
(0.49159663915634155, 0.62352943420410156, 0.62352943420410156),
(0.49579831957817078, 0.62745100259780884, 0.62745100259780884), (0.5,
0.62745100259780884, 0.62745100259780884), (0.50420171022415161,
0.63137257099151611, 0.63137257099151611), (0.50840336084365845,
0.63137257099151611, 0.63137257099151611), (0.51260507106781006,
0.63529413938522339, 0.63529413938522339), (0.51680672168731689,
0.63921570777893066, 0.63921570777893066), (0.52100843191146851,
0.63921570777893066, 0.63921570777893066), (0.52521008253097534,
0.64313727617263794, 0.64313727617263794), (0.52941179275512695,
0.64705884456634521, 0.64705884456634521), (0.53361344337463379,
0.64705884456634521, 0.64705884456634521), (0.5378151535987854,
0.65098041296005249, 0.65098041296005249), (0.54201680421829224,
0.65098041296005249, 0.65098041296005249), (0.54621851444244385,
0.65490198135375977, 0.65490198135375977), (0.55042016506195068,
0.65882354974746704, 0.65882354974746704), (0.55462187528610229,
0.65882354974746704, 0.65882354974746704), (0.55882352590560913,
0.65882354974746704, 0.65882354974746704), (0.56302523612976074,
0.66274511814117432, 0.66274511814117432), (0.56722688674926758,
0.66274511814117432, 0.66274511814117432), (0.57142859697341919,
0.66666668653488159, 0.66666668653488159), (0.57563024759292603,
0.66666668653488159, 0.66666668653488159), (0.57983195781707764,
0.67058825492858887, 0.67058825492858887), (0.58403360843658447,
0.67058825492858887, 0.67058825492858887), (0.58823531866073608,
0.67450982332229614, 0.67450982332229614), (0.59243696928024292,
0.67450982332229614, 0.67450982332229614), (0.59663867950439453,
0.67450982332229614, 0.67450982332229614), (0.60084033012390137,
0.67843139171600342, 0.67843139171600342), (0.60504204034805298,
0.67843139171600342, 0.67843139171600342), (0.60924369096755981,
0.68235296010971069, 0.68235296010971069), (0.61344540119171143,
0.68235296010971069, 0.68235296010971069), (0.61764705181121826,
0.68627452850341797, 0.68627452850341797), (0.62184876203536987,
0.68627452850341797, 0.68627452850341797), (0.62605041265487671,
0.68627452850341797, 0.68627452850341797), (0.63025212287902832,
0.69019609689712524, 0.69019609689712524), (0.63445377349853516,
0.69019609689712524, 0.69019609689712524), (0.63865548372268677,
0.69411766529083252, 0.69411766529083252), (0.6428571343421936,
0.69411766529083252, 0.69411766529083252), (0.64705884456634521,
0.69803923368453979, 0.69803923368453979), (0.65126049518585205,
0.69803923368453979, 0.69803923368453979), (0.65546220541000366,
0.70196080207824707, 0.70196080207824707), (0.6596638560295105,
0.70196080207824707, 0.70196080207824707), (0.66386556625366211,
0.70196080207824707, 0.70196080207824707), (0.66806721687316895,
0.70588237047195435, 0.70588237047195435), (0.67226892709732056,
0.70588237047195435, 0.70588237047195435), (0.67647057771682739,
0.70980393886566162, 0.70980393886566162), (0.680672287940979,
0.70980393886566162, 0.70980393886566162), (0.68487393856048584,
0.7137255072593689, 0.7137255072593689), (0.68907564878463745,
0.7137255072593689, 0.7137255072593689), (0.69327729940414429,
0.71764707565307617, 0.71764707565307617), (0.6974790096282959,
0.71764707565307617, 0.71764707565307617), (0.70168066024780273,
0.7137255072593689, 0.7137255072593689), (0.70588237047195435,
0.70980393886566162, 0.70980393886566162), (0.71008402109146118,
0.70980393886566162, 0.70980393886566162), (0.71428573131561279,
0.70588237047195435, 0.70588237047195435), (0.71848738193511963,
0.70196080207824707, 0.70196080207824707), (0.72268909215927124,
0.69803923368453979, 0.69803923368453979), (0.72689074277877808,
0.69411766529083252, 0.69411766529083252), (0.73109245300292969,
0.69019609689712524, 0.69019609689712524), (0.73529410362243652,
0.68627452850341797, 0.68627452850341797), (0.73949581384658813,
0.68235296010971069, 0.68235296010971069), (0.74369746446609497,
0.67843139171600342, 0.67843139171600342), (0.74789917469024658,
0.67450982332229614, 0.67450982332229614), (0.75210082530975342,
0.67058825492858887, 0.67058825492858887), (0.75630253553390503,
0.66666668653488159, 0.66666668653488159), (0.76050418615341187,
0.66274511814117432, 0.66274511814117432), (0.76470589637756348,
0.65882354974746704, 0.65882354974746704), (0.76890754699707031,
0.65490198135375977, 0.65490198135375977), (0.77310925722122192,
0.65098041296005249, 0.65098041296005249), (0.77731090784072876,
0.64705884456634521, 0.64705884456634521), (0.78151261806488037,
0.64313727617263794, 0.64313727617263794), (0.78571426868438721,
0.63921570777893066, 0.63921570777893066), (0.78991597890853882,
0.63921570777893066, 0.63921570777893066), (0.79411762952804565,
0.64313727617263794, 0.64313727617263794), (0.79831933975219727,
0.64313727617263794, 0.64313727617263794), (0.8025209903717041,
0.64705884456634521, 0.64705884456634521), (0.80672270059585571,
0.64705884456634521, 0.64705884456634521), (0.81092435121536255,
0.65098041296005249, 0.65098041296005249), (0.81512606143951416,
0.65490198135375977, 0.65490198135375977), (0.819327712059021,
0.65490198135375977, 0.65490198135375977), (0.82352942228317261,
0.65882354974746704, 0.65882354974746704), (0.82773107290267944,
0.66274511814117432, 0.66274511814117432), (0.83193278312683105,
0.66666668653488159, 0.66666668653488159), (0.83613443374633789,
0.67058825492858887, 0.67058825492858887), (0.8403361439704895,
0.67450982332229614, 0.67450982332229614), (0.84453779458999634,
0.67843139171600342, 0.67843139171600342), (0.84873950481414795,
0.68235296010971069, 0.68235296010971069), (0.85294115543365479,
0.68627452850341797, 0.68627452850341797), (0.8571428656578064,
0.69019609689712524, 0.69019609689712524), (0.86134451627731323,
0.69411766529083252, 0.69411766529083252), (0.86554622650146484,
0.69803923368453979, 0.69803923368453979), (0.86974787712097168,
0.70196080207824707, 0.70196080207824707), (0.87394958734512329,
0.70980393886566162, 0.70980393886566162), (0.87815123796463013,
0.7137255072593689, 0.7137255072593689), (0.88235294818878174,
0.72156864404678345, 0.72156864404678345), (0.88655459880828857,
0.72549021244049072, 0.72549021244049072), (0.89075630903244019,
0.73333334922790527, 0.73333334922790527), (0.89495795965194702,
0.73725491762161255, 0.73725491762161255), (0.89915966987609863,
0.7450980544090271, 0.7450980544090271), (0.90336132049560547,
0.75294119119644165, 0.75294119119644165), (0.90756303071975708,
0.7607843279838562, 0.7607843279838562), (0.91176468133926392,
0.76862746477127075, 0.76862746477127075), (0.91596639156341553,
0.7764706015586853, 0.7764706015586853), (0.92016804218292236,
0.78431373834609985, 0.78431373834609985), (0.92436975240707397,
0.7921568751335144, 0.7921568751335144), (0.92857140302658081,
0.80000001192092896, 0.80000001192092896), (0.93277311325073242,
0.80784314870834351, 0.80784314870834351), (0.93697476387023926,
0.81568628549575806, 0.81568628549575806), (0.94117647409439087,
0.82745099067687988, 0.82745099067687988), (0.94537812471389771,
0.83529412746429443, 0.83529412746429443), (0.94957983493804932,
0.84313726425170898, 0.84313726425170898), (0.95378148555755615,
0.85490196943283081, 0.85490196943283081), (0.95798319578170776,
0.86666667461395264, 0.86666667461395264), (0.9621848464012146,
0.87450981140136719, 0.87450981140136719), (0.96638655662536621,
0.88627451658248901, 0.88627451658248901), (0.97058820724487305,
0.89803922176361084, 0.89803922176361084), (0.97478991746902466,
0.90980392694473267, 0.90980392694473267), (0.97899156808853149,
0.92156863212585449, 0.92156863212585449), (0.98319327831268311,
0.93333333730697632, 0.93333333730697632), (0.98739492893218994,
0.94509804248809814, 0.94509804248809814), (0.99159663915634155,
0.95686274766921997, 0.95686274766921997), (0.99579828977584839,
0.97254902124404907, 0.97254902124404907), (1.0, 0.9843137264251709,
0.9843137264251709)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0, 0.0), (0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0,
0.0), (0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.0, 0.0), (0.037815127521753311,
0.0039215688593685627, 0.0039215688593685627), (0.042016807943582535,
0.0078431377187371254, 0.0078431377187371254), (0.046218488365411758,
0.0078431377187371254, 0.0078431377187371254), (0.050420168787240982,
0.011764706112444401, 0.011764706112444401), (0.054621849209070206,
0.015686275437474251, 0.015686275437474251), (0.058823529630899429,
0.019607843831181526, 0.019607843831181526), (0.063025213778018951,
0.019607843831181526, 0.019607843831181526), (0.067226894199848175,
0.023529412224888802, 0.023529412224888802), (0.071428574621677399,
0.027450980618596077, 0.027450980618596077), (0.075630255043506622,
0.031372550874948502, 0.031372550874948502), (0.079831935465335846,
0.031372550874948502, 0.031372550874948502), (0.08403361588716507,
0.035294119268655777, 0.035294119268655777), (0.088235296308994293,
0.039215687662363052, 0.039215687662363052), (0.092436976730823517,
0.043137256056070328, 0.043137256056070328), (0.09663865715265274,
0.043137256056070328, 0.043137256056070328), (0.10084033757448196,
0.047058824449777603, 0.047058824449777603), (0.10504201799631119,
0.050980392843484879, 0.050980392843484879), (0.10924369841814041,
0.054901961237192154, 0.054901961237192154), (0.11344537883996964,
0.058823529630899429, 0.058823529630899429), (0.11764705926179886,
0.058823529630899429, 0.058823529630899429), (0.12184873968362808,
0.062745101749897003, 0.062745101749897003), (0.1260504275560379,
0.066666670143604279, 0.066666670143604279), (0.13025210797786713,
0.070588238537311554, 0.070588238537311554), (0.13445378839969635,
0.070588238537311554, 0.070588238537311554), (0.13865546882152557,
0.074509806931018829, 0.074509806931018829), (0.1428571492433548,
0.078431375324726105, 0.078431375324726105), (0.14705882966518402,
0.08235294371843338, 0.08235294371843338), (0.15126051008701324,
0.086274512112140656, 0.086274512112140656), (0.15546219050884247,
0.086274512112140656, 0.086274512112140656), (0.15966387093067169,
0.090196080505847931, 0.090196080505847931), (0.16386555135250092,
0.094117648899555206, 0.094117648899555206), (0.16806723177433014,
0.098039217293262482, 0.098039217293262482), (0.17226891219615936,
0.10196078568696976, 0.10196078568696976), (0.17647059261798859,
0.10196078568696976, 0.10196078568696976), (0.18067227303981781,
0.10588235408067703, 0.10588235408067703), (0.18487395346164703,
0.10980392247438431, 0.10980392247438431), (0.18907563388347626,
0.11372549086809158, 0.11372549086809158), (0.19327731430530548,
0.11764705926179886, 0.11764705926179886), (0.1974789947271347,
0.12156862765550613, 0.12156862765550613), (0.20168067514896393,
0.12156862765550613, 0.12156862765550613), (0.20588235557079315,
0.12549020349979401, 0.12549020349979401), (0.21008403599262238,
0.12941177189350128, 0.12941177189350128), (0.2142857164144516,
0.13333334028720856, 0.13333334028720856), (0.21848739683628082,
0.13725490868091583, 0.13725490868091583), (0.22268907725811005,
0.14117647707462311, 0.14117647707462311), (0.22689075767993927,
0.14117647707462311, 0.14117647707462311), (0.23109243810176849,
0.14509804546833038, 0.14509804546833038), (0.23529411852359772,
0.14901961386203766, 0.14901961386203766), (0.23949579894542694,
0.15294118225574493, 0.15294118225574493), (0.24369747936725616,
0.15686275064945221, 0.15686275064945221), (0.24789915978908539,
0.16078431904315948, 0.16078431904315948), (0.25210085511207581,
0.16078431904315948, 0.16078431904315948), (0.25630253553390503,
0.16470588743686676, 0.16470588743686676), (0.26050421595573425,
0.16862745583057404, 0.16862745583057404), (0.26470589637756348,
0.17254902422428131, 0.17254902422428131), (0.2689075767993927,
0.17647059261798859, 0.17647059261798859), (0.27310925722122192,
0.18039216101169586, 0.18039216101169586), (0.27731093764305115,
0.18431372940540314, 0.18431372940540314), (0.28151261806488037,
0.18823529779911041, 0.18823529779911041), (0.28571429848670959,
0.18823529779911041, 0.18823529779911041), (0.28991597890853882,
0.18823529779911041, 0.18823529779911041), (0.29411765933036804,
0.19215686619281769, 0.19215686619281769), (0.29831933975219727,
0.19215686619281769, 0.19215686619281769), (0.30252102017402649,
0.19607843458652496, 0.19607843458652496), (0.30672270059585571,
0.19607843458652496, 0.19607843458652496), (0.31092438101768494,
0.20000000298023224, 0.20000000298023224), (0.31512606143951416,
0.20000000298023224, 0.20000000298023224), (0.31932774186134338,
0.20392157137393951, 0.20392157137393951), (0.32352942228317261,
0.20392157137393951, 0.20392157137393951), (0.32773110270500183,
0.20784313976764679, 0.20784313976764679), (0.33193278312683105,
0.20784313976764679, 0.20784313976764679), (0.33613446354866028,
0.21176470816135406, 0.21176470816135406), (0.3403361439704895,
0.21176470816135406, 0.21176470816135406), (0.34453782439231873,
0.21568627655506134, 0.21568627655506134), (0.34873950481414795,
0.21568627655506134, 0.21568627655506134), (0.35294118523597717,
0.21960784494876862, 0.21960784494876862), (0.3571428656578064,
0.21960784494876862, 0.21960784494876862), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.22352941334247589, 0.22352941334247589), (0.36974790692329407,
0.22745098173618317, 0.22745098173618317), (0.37394958734512329,
0.22745098173618317, 0.22745098173618317), (0.37815126776695251,
0.23137255012989044, 0.23137255012989044), (0.38235294818878174,
0.23137255012989044, 0.23137255012989044), (0.38655462861061096,
0.23529411852359772, 0.23529411852359772), (0.39075630903244019,
0.23921568691730499, 0.23921568691730499), (0.39495798945426941,
0.23921568691730499, 0.23921568691730499), (0.39915966987609863,
0.24313725531101227, 0.24313725531101227), (0.40336135029792786,
0.24313725531101227, 0.24313725531101227), (0.40756303071975708,
0.24705882370471954, 0.24705882370471954), (0.4117647111415863,
0.24705882370471954, 0.24705882370471954), (0.41596639156341553,
0.25098040699958801, 0.25098040699958801), (0.42016807198524475,
0.25098040699958801, 0.25098040699958801), (0.42436975240707397,
0.25490197539329529, 0.25490197539329529), (0.4285714328289032,
0.25490197539329529, 0.25490197539329529), (0.43277311325073242,
0.25882354378700256, 0.25882354378700256), (0.43697479367256165,
0.26274511218070984, 0.26274511218070984), (0.44117647409439087,
0.26274511218070984, 0.26274511218070984), (0.44537815451622009,
0.26666668057441711, 0.26666668057441711), (0.44957983493804932,
0.26666668057441711, 0.26666668057441711), (0.45378151535987854,
0.27058824896812439, 0.27058824896812439), (0.45798319578170776,
0.27058824896812439, 0.27058824896812439), (0.46218487620353699,
0.27450981736183167, 0.27450981736183167), (0.46638655662536621,
0.27843138575553894, 0.27843138575553894), (0.47058823704719543,
0.28627452254295349, 0.28627452254295349), (0.47478991746902466,
0.29803922772407532, 0.29803922772407532), (0.47899159789085388,
0.30588236451148987, 0.30588236451148987), (0.48319327831268311,
0.31764706969261169, 0.31764706969261169), (0.48739495873451233,
0.32549020648002625, 0.32549020648002625), (0.49159663915634155,
0.33725491166114807, 0.33725491166114807), (0.49579831957817078,
0.34509804844856262, 0.34509804844856262), (0.5, 0.35686275362968445,
0.35686275362968445), (0.50420171022415161, 0.36862745881080627,
0.36862745881080627), (0.50840336084365845, 0.37647059559822083,
0.37647059559822083), (0.51260507106781006, 0.38823530077934265,
0.38823530077934265), (0.51680672168731689, 0.3960784375667572,
0.3960784375667572), (0.52100843191146851, 0.40784314274787903,
0.40784314274787903), (0.52521008253097534, 0.41568627953529358,
0.41568627953529358), (0.52941179275512695, 0.42745098471641541,
0.42745098471641541), (0.53361344337463379, 0.43529412150382996,
0.43529412150382996), (0.5378151535987854, 0.44705882668495178,
0.44705882668495178), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.46666666865348816,
0.46666666865348816), (0.55042016506195068, 0.47450980544090271,
0.47450980544090271), (0.55462187528610229, 0.47843137383460999,
0.47843137383460999), (0.55882352590560913, 0.48627451062202454,
0.48627451062202454), (0.56302523612976074, 0.49411764740943909,
0.49411764740943909), (0.56722688674926758, 0.50196081399917603,
0.50196081399917603), (0.57142859697341919, 0.5058823823928833,
0.5058823823928833), (0.57563024759292603, 0.51372551918029785,
0.51372551918029785), (0.57983195781707764, 0.5215686559677124,
0.5215686559677124), (0.58403360843658447, 0.52941179275512695,
0.52941179275512695), (0.58823531866073608, 0.53333336114883423,
0.53333336114883423), (0.59243696928024292, 0.54117649793624878,
0.54117649793624878), (0.59663867950439453, 0.54901963472366333,
0.54901963472366333), (0.60084033012390137, 0.55294120311737061,
0.55294120311737061), (0.60504204034805298, 0.56078433990478516,
0.56078433990478516), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.57647061347961426,
0.57647061347961426), (0.61764705181121826, 0.58431375026702881,
0.58431375026702881), (0.62184876203536987, 0.58823531866073608,
0.58823531866073608), (0.62605041265487671, 0.59607845544815063,
0.59607845544815063), (0.63025212287902832, 0.60392159223556519,
0.60392159223556519), (0.63445377349853516, 0.61176472902297974,
0.61176472902297974), (0.63865548372268677, 0.61568629741668701,
0.61568629741668701), (0.6428571343421936, 0.62352943420410156,
0.62352943420410156), (0.64705884456634521, 0.63137257099151611,
0.63137257099151611), (0.65126049518585205, 0.63921570777893066,
0.63921570777893066), (0.65546220541000366, 0.64705884456634521,
0.64705884456634521), (0.6596638560295105, 0.65098041296005249,
0.65098041296005249), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67450982332229614,
0.67450982332229614), (0.67647057771682739, 0.68235296010971069,
0.68235296010971069), (0.680672287940979, 0.68627452850341797,
0.68627452850341797), (0.68487393856048584, 0.69411766529083252,
0.69411766529083252), (0.68907564878463745, 0.70196080207824707,
0.70196080207824707), (0.69327729940414429, 0.70980393886566162,
0.70980393886566162), (0.6974790096282959, 0.71764707565307617,
0.71764707565307617), (0.70168066024780273, 0.71764707565307617,
0.71764707565307617), (0.70588237047195435, 0.72156864404678345,
0.72156864404678345), (0.71008402109146118, 0.72156864404678345,
0.72156864404678345), (0.71428573131561279, 0.72549021244049072,
0.72549021244049072), (0.71848738193511963, 0.72549021244049072,
0.72549021244049072), (0.72268909215927124, 0.729411780834198,
0.729411780834198), (0.72689074277877808, 0.729411780834198,
0.729411780834198), (0.73109245300292969, 0.73333334922790527,
0.73333334922790527), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.73725491762161255,
0.73725491762161255), (0.75210082530975342, 0.74117648601531982,
0.74117648601531982), (0.75630253553390503, 0.74117648601531982,
0.74117648601531982), (0.76050418615341187, 0.7450980544090271,
0.7450980544090271), (0.76470589637756348, 0.7450980544090271,
0.7450980544090271), (0.76890754699707031, 0.7450980544090271,
0.7450980544090271), (0.77310925722122192, 0.74901962280273438,
0.74901962280273438), (0.77731090784072876, 0.74901962280273438,
0.74901962280273438), (0.78151261806488037, 0.75294119119644165,
0.75294119119644165), (0.78571426868438721, 0.75294119119644165,
0.75294119119644165), (0.78991597890853882, 0.75686275959014893,
0.75686275959014893), (0.79411762952804565, 0.76470589637756348,
0.76470589637756348), (0.79831933975219727, 0.76862746477127075,
0.76862746477127075), (0.8025209903717041, 0.77254903316497803,
0.77254903316497803), (0.80672270059585571, 0.7764706015586853,
0.7764706015586853), (0.81092435121536255, 0.78039216995239258,
0.78039216995239258), (0.81512606143951416, 0.78823530673980713,
0.78823530673980713), (0.819327712059021, 0.7921568751335144,
0.7921568751335144), (0.82352942228317261, 0.79607844352722168,
0.79607844352722168), (0.82773107290267944, 0.80000001192092896,
0.80000001192092896), (0.83193278312683105, 0.80392158031463623,
0.80392158031463623), (0.83613443374633789, 0.81176471710205078,
0.81176471710205078), (0.8403361439704895, 0.81568628549575806,
0.81568628549575806), (0.84453779458999634, 0.81960785388946533,
0.81960785388946533), (0.84873950481414795, 0.82352942228317261,
0.82352942228317261), (0.85294115543365479, 0.82745099067687988,
0.82745099067687988), (0.8571428656578064, 0.83529412746429443,
0.83529412746429443), (0.86134451627731323, 0.83921569585800171,
0.83921569585800171), (0.86554622650146484, 0.84313726425170898,
0.84313726425170898), (0.86974787712097168, 0.84705883264541626,
0.84705883264541626), (0.87394958734512329, 0.85098040103912354,
0.85098040103912354), (0.87815123796463013, 0.85882353782653809,
0.85882353782653809), (0.88235294818878174, 0.86274510622024536,
0.86274510622024536), (0.88655459880828857, 0.86666667461395264,
0.86666667461395264), (0.89075630903244019, 0.87058824300765991,
0.87058824300765991), (0.89495795965194702, 0.87450981140136719,
0.87450981140136719), (0.89915966987609863, 0.88235294818878174,
0.88235294818878174), (0.90336132049560547, 0.88627451658248901,
0.88627451658248901), (0.90756303071975708, 0.89019608497619629,
0.89019608497619629), (0.91176468133926392, 0.89411765336990356,
0.89411765336990356), (0.91596639156341553, 0.89803922176361084,
0.89803922176361084), (0.92016804218292236, 0.90588235855102539,
0.90588235855102539), (0.92436975240707397, 0.90980392694473267,
0.90980392694473267), (0.92857140302658081, 0.91372549533843994,
0.91372549533843994), (0.93277311325073242, 0.91764706373214722,
0.91764706373214722), (0.93697476387023926, 0.92156863212585449,
0.92156863212585449), (0.94117647409439087, 0.92941176891326904,
0.92941176891326904), (0.94537812471389771, 0.93333333730697632,
0.93333333730697632), (0.94957983493804932, 0.93725490570068359,
0.93725490570068359), (0.95378148555755615, 0.94117647409439087,
0.94117647409439087), (0.95798319578170776, 0.94509804248809814,
0.94509804248809814), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.95686274766921997,
0.95686274766921997), (0.97058820724487305, 0.96078431606292725,
0.96078431606292725), (0.97478991746902466, 0.96470588445663452,
0.96470588445663452), (0.97899156808853149, 0.9686274528503418,
0.9686274528503418), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
_gist_gray_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0039215688593685627, 0.0039215688593685627), (0.0084033617749810219,
0.0078431377187371254, 0.0078431377187371254), (0.012605042196810246,
0.011764706112444401, 0.011764706112444401), (0.016806723549962044,
0.015686275437474251, 0.015686275437474251), (0.021008403971791267,
0.019607843831181526, 0.019607843831181526), (0.025210084393620491,
0.023529412224888802, 0.023529412224888802), (0.029411764815449715,
0.027450980618596077, 0.027450980618596077), (0.033613447099924088,
0.035294119268655777, 0.035294119268655777), (0.037815127521753311,
0.039215687662363052, 0.039215687662363052), (0.042016807943582535,
0.043137256056070328, 0.043137256056070328), (0.046218488365411758,
0.047058824449777603, 0.047058824449777603), (0.050420168787240982,
0.050980392843484879, 0.050980392843484879), (0.054621849209070206,
0.054901961237192154, 0.054901961237192154), (0.058823529630899429,
0.058823529630899429, 0.058823529630899429), (0.063025213778018951,
0.062745101749897003, 0.062745101749897003), (0.067226894199848175,
0.066666670143604279, 0.066666670143604279), (0.071428574621677399,
0.070588238537311554, 0.070588238537311554), (0.075630255043506622,
0.074509806931018829, 0.074509806931018829), (0.079831935465335846,
0.078431375324726105, 0.078431375324726105), (0.08403361588716507,
0.08235294371843338, 0.08235294371843338), (0.088235296308994293,
0.086274512112140656, 0.086274512112140656), (0.092436976730823517,
0.090196080505847931, 0.090196080505847931), (0.09663865715265274,
0.098039217293262482, 0.098039217293262482), (0.10084033757448196,
0.10196078568696976, 0.10196078568696976), (0.10504201799631119,
0.10588235408067703, 0.10588235408067703), (0.10924369841814041,
0.10980392247438431, 0.10980392247438431), (0.11344537883996964,
0.11372549086809158, 0.11372549086809158), (0.11764705926179886,
0.11764705926179886, 0.11764705926179886), (0.12184873968362808,
0.12156862765550613, 0.12156862765550613), (0.1260504275560379,
0.12549020349979401, 0.12549020349979401), (0.13025210797786713,
0.12941177189350128, 0.12941177189350128), (0.13445378839969635,
0.13333334028720856, 0.13333334028720856), (0.13865546882152557,
0.13725490868091583, 0.13725490868091583), (0.1428571492433548,
0.14117647707462311, 0.14117647707462311), (0.14705882966518402,
0.14509804546833038, 0.14509804546833038), (0.15126051008701324,
0.14901961386203766, 0.14901961386203766), (0.15546219050884247,
0.15294118225574493, 0.15294118225574493), (0.15966387093067169,
0.16078431904315948, 0.16078431904315948), (0.16386555135250092,
0.16470588743686676, 0.16470588743686676), (0.16806723177433014,
0.16862745583057404, 0.16862745583057404), (0.17226891219615936,
0.17254902422428131, 0.17254902422428131), (0.17647059261798859,
0.17647059261798859, 0.17647059261798859), (0.18067227303981781,
0.18039216101169586, 0.18039216101169586), (0.18487395346164703,
0.18431372940540314, 0.18431372940540314), (0.18907563388347626,
0.18823529779911041, 0.18823529779911041), (0.19327731430530548,
0.19215686619281769, 0.19215686619281769), (0.1974789947271347,
0.19607843458652496, 0.19607843458652496), (0.20168067514896393,
0.20000000298023224, 0.20000000298023224), (0.20588235557079315,
0.20392157137393951, 0.20392157137393951), (0.21008403599262238,
0.20784313976764679, 0.20784313976764679), (0.2142857164144516,
0.21176470816135406, 0.21176470816135406), (0.21848739683628082,
0.21568627655506134, 0.21568627655506134), (0.22268907725811005,
0.22352941334247589, 0.22352941334247589), (0.22689075767993927,
0.22745098173618317, 0.22745098173618317), (0.23109243810176849,
0.23137255012989044, 0.23137255012989044), (0.23529411852359772,
0.23529411852359772, 0.23529411852359772), (0.23949579894542694,
0.23921568691730499, 0.23921568691730499), (0.24369747936725616,
0.24313725531101227, 0.24313725531101227), (0.24789915978908539,
0.24705882370471954, 0.24705882370471954), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28627452254295349, 0.28627452254295349), (0.28991597890853882,
0.29019609093666077, 0.29019609093666077), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.3490196168422699, 0.3490196168422699), (0.35294118523597717,
0.35294118523597717, 0.35294118523597717), (0.3571428656578064,
0.35686275362968445, 0.35686275362968445), (0.36134454607963562,
0.36078432202339172, 0.36078432202339172), (0.36554622650146484,
0.364705890417099, 0.364705890417099), (0.36974790692329407,
0.36862745881080627, 0.36862745881080627), (0.37394958734512329,
0.37254902720451355, 0.37254902720451355), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.4117647111415863, 0.4117647111415863), (0.41596639156341553,
0.41568627953529358, 0.41568627953529358), (0.42016807198524475,
0.41960784792900085, 0.41960784792900085), (0.42436975240707397,
0.42352941632270813, 0.42352941632270813), (0.4285714328289032,
0.42745098471641541, 0.42745098471641541), (0.43277311325073242,
0.43137255311012268, 0.43137255311012268), (0.43697479367256165,
0.43529412150382996, 0.43529412150382996), (0.44117647409439087,
0.43921568989753723, 0.43921568989753723), (0.44537815451622009,
0.44313725829124451, 0.44313725829124451), (0.44957983493804932,
0.44705882668495178, 0.44705882668495178), (0.45378151535987854,
0.45098039507865906, 0.45098039507865906), (0.45798319578170776,
0.45490196347236633, 0.45490196347236633), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47450980544090271, 0.47450980544090271), (0.47899159789085388,
0.47843137383460999, 0.47843137383460999), (0.48319327831268311,
0.48235294222831726, 0.48235294222831726), (0.48739495873451233,
0.48627451062202454, 0.48627451062202454), (0.49159663915634155,
0.49019607901573181, 0.49019607901573181), (0.49579831957817078,
0.49411764740943909, 0.49411764740943909), (0.5, 0.49803921580314636,
0.49803921580314636), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.5372549295425415,
0.5372549295425415), (0.54201680421829224, 0.54117649793624878,
0.54117649793624878), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.60392159223556519,
0.60392159223556519), (0.60924369096755981, 0.60784316062927246,
0.60784316062927246), (0.61344540119171143, 0.61176472902297974,
0.61176472902297974), (0.61764705181121826, 0.61568629741668701,
0.61568629741668701), (0.62184876203536987, 0.61960786581039429,
0.61960786581039429), (0.62605041265487671, 0.62352943420410156,
0.62352943420410156), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.66274511814117432,
0.66274511814117432), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67058825492858887,
0.67058825492858887), (0.67647057771682739, 0.67450982332229614,
0.67450982332229614), (0.680672287940979, 0.67843139171600342,
0.67843139171600342), (0.68487393856048584, 0.68235296010971069,
0.68235296010971069), (0.68907564878463745, 0.68627452850341797,
0.68627452850341797), (0.69327729940414429, 0.69019609689712524,
0.69019609689712524), (0.6974790096282959, 0.69411766529083252,
0.69411766529083252), (0.70168066024780273, 0.69803923368453979,
0.69803923368453979), (0.70588237047195435, 0.70196080207824707,
0.70196080207824707), (0.71008402109146118, 0.70588237047195435,
0.70588237047195435), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72549021244049072,
0.72549021244049072), (0.73109245300292969, 0.729411780834198,
0.729411780834198), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73725491762161255,
0.73725491762161255), (0.74369746446609497, 0.74117648601531982,
0.74117648601531982), (0.74789917469024658, 0.7450980544090271,
0.7450980544090271), (0.75210082530975342, 0.74901962280273438,
0.74901962280273438), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78823530673980713,
0.78823530673980713), (0.79411762952804565, 0.7921568751335144,
0.7921568751335144), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.85098040103912354,
0.85098040103912354), (0.8571428656578064, 0.85490196943283081,
0.85490196943283081), (0.86134451627731323, 0.85882353782653809,
0.85882353782653809), (0.86554622650146484, 0.86274510622024536,
0.86274510622024536), (0.86974787712097168, 0.86666667461395264,
0.86666667461395264), (0.87394958734512329, 0.87058824300765991,
0.87058824300765991), (0.87815123796463013, 0.87450981140136719,
0.87450981140136719), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.91372549533843994,
0.91372549533843994), (0.92016804218292236, 0.91764706373214722,
0.91764706373214722), (0.92436975240707397, 0.92156863212585449,
0.92156863212585449), (0.92857140302658081, 0.92549020051956177,
0.92549020051956177), (0.93277311325073242, 0.92941176891326904,
0.92941176891326904), (0.93697476387023926, 0.93333333730697632,
0.93333333730697632), (0.94117647409439087, 0.93725490570068359,
0.93725490570068359), (0.94537812471389771, 0.94117647409439087,
0.94117647409439087), (0.94957983493804932, 0.94509804248809814,
0.94509804248809814), (0.95378148555755615, 0.94901961088180542,
0.94901961088180542), (0.95798319578170776, 0.9529411792755127,
0.9529411792755127), (0.9621848464012146, 0.95686274766921997,
0.95686274766921997), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97647058963775635,
0.97647058963775635), (0.98319327831268311, 0.98039215803146362,
0.98039215803146362), (0.98739492893218994, 0.9843137264251709,
0.9843137264251709), (0.99159663915634155, 0.98823529481887817,
0.98823529481887817), (0.99579828977584839, 0.99215686321258545,
0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.011764706112444401,
0.011764706112444401), (0.016806723549962044, 0.015686275437474251,
0.015686275437474251), (0.021008403971791267, 0.019607843831181526,
0.019607843831181526), (0.025210084393620491, 0.023529412224888802,
0.023529412224888802), (0.029411764815449715, 0.027450980618596077,
0.027450980618596077), (0.033613447099924088, 0.035294119268655777,
0.035294119268655777), (0.037815127521753311, 0.039215687662363052,
0.039215687662363052), (0.042016807943582535, 0.043137256056070328,
0.043137256056070328), (0.046218488365411758, 0.047058824449777603,
0.047058824449777603), (0.050420168787240982, 0.050980392843484879,
0.050980392843484879), (0.054621849209070206, 0.054901961237192154,
0.054901961237192154), (0.058823529630899429, 0.058823529630899429,
0.058823529630899429), (0.063025213778018951, 0.062745101749897003,
0.062745101749897003), (0.067226894199848175, 0.066666670143604279,
0.066666670143604279), (0.071428574621677399, 0.070588238537311554,
0.070588238537311554), (0.075630255043506622, 0.074509806931018829,
0.074509806931018829), (0.079831935465335846, 0.078431375324726105,
0.078431375324726105), (0.08403361588716507, 0.08235294371843338,
0.08235294371843338), (0.088235296308994293, 0.086274512112140656,
0.086274512112140656), (0.092436976730823517, 0.090196080505847931,
0.090196080505847931), (0.09663865715265274, 0.098039217293262482,
0.098039217293262482), (0.10084033757448196, 0.10196078568696976,
0.10196078568696976), (0.10504201799631119, 0.10588235408067703,
0.10588235408067703), (0.10924369841814041, 0.10980392247438431,
0.10980392247438431), (0.11344537883996964, 0.11372549086809158,
0.11372549086809158), (0.11764705926179886, 0.11764705926179886,
0.11764705926179886), (0.12184873968362808, 0.12156862765550613,
0.12156862765550613), (0.1260504275560379, 0.12549020349979401,
0.12549020349979401), (0.13025210797786713, 0.12941177189350128,
0.12941177189350128), (0.13445378839969635, 0.13333334028720856,
0.13333334028720856), (0.13865546882152557, 0.13725490868091583,
0.13725490868091583), (0.1428571492433548, 0.14117647707462311,
0.14117647707462311), (0.14705882966518402, 0.14509804546833038,
0.14509804546833038), (0.15126051008701324, 0.14901961386203766,
0.14901961386203766), (0.15546219050884247, 0.15294118225574493,
0.15294118225574493), (0.15966387093067169, 0.16078431904315948,
0.16078431904315948), (0.16386555135250092, 0.16470588743686676,
0.16470588743686676), (0.16806723177433014, 0.16862745583057404,
0.16862745583057404), (0.17226891219615936, 0.17254902422428131,
0.17254902422428131), (0.17647059261798859, 0.17647059261798859,
0.17647059261798859), (0.18067227303981781, 0.18039216101169586,
0.18039216101169586), (0.18487395346164703, 0.18431372940540314,
0.18431372940540314), (0.18907563388347626, 0.18823529779911041,
0.18823529779911041), (0.19327731430530548, 0.19215686619281769,
0.19215686619281769), (0.1974789947271347, 0.19607843458652496,
0.19607843458652496), (0.20168067514896393, 0.20000000298023224,
0.20000000298023224), (0.20588235557079315, 0.20392157137393951,
0.20392157137393951), (0.21008403599262238, 0.20784313976764679,
0.20784313976764679), (0.2142857164144516, 0.21176470816135406,
0.21176470816135406), (0.21848739683628082, 0.21568627655506134,
0.21568627655506134), (0.22268907725811005, 0.22352941334247589,
0.22352941334247589), (0.22689075767993927, 0.22745098173618317,
0.22745098173618317), (0.23109243810176849, 0.23137255012989044,
0.23137255012989044), (0.23529411852359772, 0.23529411852359772,
0.23529411852359772), (0.23949579894542694, 0.23921568691730499,
0.23921568691730499), (0.24369747936725616, 0.24313725531101227,
0.24313725531101227), (0.24789915978908539, 0.24705882370471954,
0.24705882370471954), (0.25210085511207581, 0.25098040699958801,
0.25098040699958801), (0.25630253553390503, 0.25490197539329529,
0.25490197539329529), (0.26050421595573425, 0.25882354378700256,
0.25882354378700256), (0.26470589637756348, 0.26274511218070984,
0.26274511218070984), (0.2689075767993927, 0.26666668057441711,
0.26666668057441711), (0.27310925722122192, 0.27058824896812439,
0.27058824896812439), (0.27731093764305115, 0.27450981736183167,
0.27450981736183167), (0.28151261806488037, 0.27843138575553894,
0.27843138575553894), (0.28571429848670959, 0.28627452254295349,
0.28627452254295349), (0.28991597890853882, 0.29019609093666077,
0.29019609093666077), (0.29411765933036804, 0.29411765933036804,
0.29411765933036804), (0.29831933975219727, 0.29803922772407532,
0.29803922772407532), (0.30252102017402649, 0.30196079611778259,
0.30196079611778259), (0.30672270059585571, 0.30588236451148987,
0.30588236451148987), (0.31092438101768494, 0.30980393290519714,
0.30980393290519714), (0.31512606143951416, 0.31372550129890442,
0.31372550129890442), (0.31932774186134338, 0.31764706969261169,
0.31764706969261169), (0.32352942228317261, 0.32156863808631897,
0.32156863808631897), (0.32773110270500183, 0.32549020648002625,
0.32549020648002625), (0.33193278312683105, 0.32941177487373352,
0.32941177487373352), (0.33613446354866028, 0.3333333432674408,
0.3333333432674408), (0.3403361439704895, 0.33725491166114807,
0.33725491166114807), (0.34453782439231873, 0.34117648005485535,
0.34117648005485535), (0.34873950481414795, 0.3490196168422699,
0.3490196168422699), (0.35294118523597717, 0.35294118523597717,
0.35294118523597717), (0.3571428656578064, 0.35686275362968445,
0.35686275362968445), (0.36134454607963562, 0.36078432202339172,
0.36078432202339172), (0.36554622650146484, 0.364705890417099,
0.364705890417099), (0.36974790692329407, 0.36862745881080627,
0.36862745881080627), (0.37394958734512329, 0.37254902720451355,
0.37254902720451355), (0.37815126776695251, 0.37647059559822083,
0.37647059559822083), (0.38235294818878174, 0.3803921639919281,
0.3803921639919281), (0.38655462861061096, 0.38431373238563538,
0.38431373238563538), (0.39075630903244019, 0.38823530077934265,
0.38823530077934265), (0.39495798945426941, 0.39215686917304993,
0.39215686917304993), (0.39915966987609863, 0.3960784375667572,
0.3960784375667572), (0.40336135029792786, 0.40000000596046448,
0.40000000596046448), (0.40756303071975708, 0.40392157435417175,
0.40392157435417175), (0.4117647111415863, 0.4117647111415863,
0.4117647111415863), (0.41596639156341553, 0.41568627953529358,
0.41568627953529358), (0.42016807198524475, 0.41960784792900085,
0.41960784792900085), (0.42436975240707397, 0.42352941632270813,
0.42352941632270813), (0.4285714328289032, 0.42745098471641541,
0.42745098471641541), (0.43277311325073242, 0.43137255311012268,
0.43137255311012268), (0.43697479367256165, 0.43529412150382996,
0.43529412150382996), (0.44117647409439087, 0.43921568989753723,
0.43921568989753723), (0.44537815451622009, 0.44313725829124451,
0.44313725829124451), (0.44957983493804932, 0.44705882668495178,
0.44705882668495178), (0.45378151535987854, 0.45098039507865906,
0.45098039507865906), (0.45798319578170776, 0.45490196347236633,
0.45490196347236633), (0.46218487620353699, 0.45882353186607361,
0.45882353186607361), (0.46638655662536621, 0.46274510025978088,
0.46274510025978088), (0.47058823704719543, 0.46666666865348816,
0.46666666865348816), (0.47478991746902466, 0.47450980544090271,
0.47450980544090271), (0.47899159789085388, 0.47843137383460999,
0.47843137383460999), (0.48319327831268311, 0.48235294222831726,
0.48235294222831726), (0.48739495873451233, 0.48627451062202454,
0.48627451062202454), (0.49159663915634155, 0.49019607901573181,
0.49019607901573181), (0.49579831957817078, 0.49411764740943909,
0.49411764740943909), (0.5, 0.49803921580314636, 0.49803921580314636),
(0.50420171022415161, 0.50196081399917603, 0.50196081399917603),
(0.50840336084365845, 0.5058823823928833, 0.5058823823928833),
(0.51260507106781006, 0.50980395078659058, 0.50980395078659058),
(0.51680672168731689, 0.51372551918029785, 0.51372551918029785),
(0.52100843191146851, 0.51764708757400513, 0.51764708757400513),
(0.52521008253097534, 0.5215686559677124, 0.5215686559677124),
(0.52941179275512695, 0.52549022436141968, 0.52549022436141968),
(0.53361344337463379, 0.52941179275512695, 0.52941179275512695),
(0.5378151535987854, 0.5372549295425415, 0.5372549295425415),
(0.54201680421829224, 0.54117649793624878, 0.54117649793624878),
(0.54621851444244385, 0.54509806632995605, 0.54509806632995605),
(0.55042016506195068, 0.54901963472366333, 0.54901963472366333),
(0.55462187528610229, 0.55294120311737061, 0.55294120311737061),
(0.55882352590560913, 0.55686277151107788, 0.55686277151107788),
(0.56302523612976074, 0.56078433990478516, 0.56078433990478516),
(0.56722688674926758, 0.56470590829849243, 0.56470590829849243),
(0.57142859697341919, 0.56862747669219971, 0.56862747669219971),
(0.57563024759292603, 0.57254904508590698, 0.57254904508590698),
(0.57983195781707764, 0.57647061347961426, 0.57647061347961426),
(0.58403360843658447, 0.58039218187332153, 0.58039218187332153),
(0.58823531866073608, 0.58431375026702881, 0.58431375026702881),
(0.59243696928024292, 0.58823531866073608, 0.58823531866073608),
(0.59663867950439453, 0.59215688705444336, 0.59215688705444336),
(0.60084033012390137, 0.60000002384185791, 0.60000002384185791),
(0.60504204034805298, 0.60392159223556519, 0.60392159223556519),
(0.60924369096755981, 0.60784316062927246, 0.60784316062927246),
(0.61344540119171143, 0.61176472902297974, 0.61176472902297974),
(0.61764705181121826, 0.61568629741668701, 0.61568629741668701),
(0.62184876203536987, 0.61960786581039429, 0.61960786581039429),
(0.62605041265487671, 0.62352943420410156, 0.62352943420410156),
(0.63025212287902832, 0.62745100259780884, 0.62745100259780884),
(0.63445377349853516, 0.63137257099151611, 0.63137257099151611),
(0.63865548372268677, 0.63529413938522339, 0.63529413938522339),
(0.6428571343421936, 0.63921570777893066, 0.63921570777893066),
(0.64705884456634521, 0.64313727617263794, 0.64313727617263794),
(0.65126049518585205, 0.64705884456634521, 0.64705884456634521),
(0.65546220541000366, 0.65098041296005249, 0.65098041296005249),
(0.6596638560295105, 0.65490198135375977, 0.65490198135375977),
(0.66386556625366211, 0.66274511814117432, 0.66274511814117432),
(0.66806721687316895, 0.66666668653488159, 0.66666668653488159),
(0.67226892709732056, 0.67058825492858887, 0.67058825492858887),
(0.67647057771682739, 0.67450982332229614, 0.67450982332229614),
(0.680672287940979, 0.67843139171600342, 0.67843139171600342),
(0.68487393856048584, 0.68235296010971069, 0.68235296010971069),
(0.68907564878463745, 0.68627452850341797, 0.68627452850341797),
(0.69327729940414429, 0.69019609689712524, 0.69019609689712524),
(0.6974790096282959, 0.69411766529083252, 0.69411766529083252),
(0.70168066024780273, 0.69803923368453979, 0.69803923368453979),
(0.70588237047195435, 0.70196080207824707, 0.70196080207824707),
(0.71008402109146118, 0.70588237047195435, 0.70588237047195435),
(0.71428573131561279, 0.70980393886566162, 0.70980393886566162),
(0.71848738193511963, 0.7137255072593689, 0.7137255072593689),
(0.72268909215927124, 0.71764707565307617, 0.71764707565307617),
(0.72689074277877808, 0.72549021244049072, 0.72549021244049072),
(0.73109245300292969, 0.729411780834198, 0.729411780834198),
(0.73529410362243652, 0.73333334922790527, 0.73333334922790527),
(0.73949581384658813, 0.73725491762161255, 0.73725491762161255),
(0.74369746446609497, 0.74117648601531982, 0.74117648601531982),
(0.74789917469024658, 0.7450980544090271, 0.7450980544090271),
(0.75210082530975342, 0.74901962280273438, 0.74901962280273438),
(0.75630253553390503, 0.75294119119644165, 0.75294119119644165),
(0.76050418615341187, 0.75686275959014893, 0.75686275959014893),
(0.76470589637756348, 0.7607843279838562, 0.7607843279838562),
(0.76890754699707031, 0.76470589637756348, 0.76470589637756348),
(0.77310925722122192, 0.76862746477127075, 0.76862746477127075),
(0.77731090784072876, 0.77254903316497803, 0.77254903316497803),
(0.78151261806488037, 0.7764706015586853, 0.7764706015586853),
(0.78571426868438721, 0.78039216995239258, 0.78039216995239258),
(0.78991597890853882, 0.78823530673980713, 0.78823530673980713),
(0.79411762952804565, 0.7921568751335144, 0.7921568751335144),
(0.79831933975219727, 0.79607844352722168, 0.79607844352722168),
(0.8025209903717041, 0.80000001192092896, 0.80000001192092896),
(0.80672270059585571, 0.80392158031463623, 0.80392158031463623),
(0.81092435121536255, 0.80784314870834351, 0.80784314870834351),
(0.81512606143951416, 0.81176471710205078, 0.81176471710205078),
(0.819327712059021, 0.81568628549575806, 0.81568628549575806),
(0.82352942228317261, 0.81960785388946533, 0.81960785388946533),
(0.82773107290267944, 0.82352942228317261, 0.82352942228317261),
(0.83193278312683105, 0.82745099067687988, 0.82745099067687988),
(0.83613443374633789, 0.83137255907058716, 0.83137255907058716),
(0.8403361439704895, 0.83529412746429443, 0.83529412746429443),
(0.84453779458999634, 0.83921569585800171, 0.83921569585800171),
(0.84873950481414795, 0.84313726425170898, 0.84313726425170898),
(0.85294115543365479, 0.85098040103912354, 0.85098040103912354),
(0.8571428656578064, 0.85490196943283081, 0.85490196943283081),
(0.86134451627731323, 0.85882353782653809, 0.85882353782653809),
(0.86554622650146484, 0.86274510622024536, 0.86274510622024536),
(0.86974787712097168, 0.86666667461395264, 0.86666667461395264),
(0.87394958734512329, 0.87058824300765991, 0.87058824300765991),
(0.87815123796463013, 0.87450981140136719, 0.87450981140136719),
(0.88235294818878174, 0.87843137979507446, 0.87843137979507446),
(0.88655459880828857, 0.88235294818878174, 0.88235294818878174),
(0.89075630903244019, 0.88627451658248901, 0.88627451658248901),
(0.89495795965194702, 0.89019608497619629, 0.89019608497619629),
(0.89915966987609863, 0.89411765336990356, 0.89411765336990356),
(0.90336132049560547, 0.89803922176361084, 0.89803922176361084),
(0.90756303071975708, 0.90196079015731812, 0.90196079015731812),
(0.91176468133926392, 0.90588235855102539, 0.90588235855102539),
(0.91596639156341553, 0.91372549533843994, 0.91372549533843994),
(0.92016804218292236, 0.91764706373214722, 0.91764706373214722),
(0.92436975240707397, 0.92156863212585449, 0.92156863212585449),
(0.92857140302658081, 0.92549020051956177, 0.92549020051956177),
(0.93277311325073242, 0.92941176891326904, 0.92941176891326904),
(0.93697476387023926, 0.93333333730697632, 0.93333333730697632),
(0.94117647409439087, 0.93725490570068359, 0.93725490570068359),
(0.94537812471389771, 0.94117647409439087, 0.94117647409439087),
(0.94957983493804932, 0.94509804248809814, 0.94509804248809814),
(0.95378148555755615, 0.94901961088180542, 0.94901961088180542),
(0.95798319578170776, 0.9529411792755127, 0.9529411792755127),
(0.9621848464012146, 0.95686274766921997, 0.95686274766921997),
(0.96638655662536621, 0.96078431606292725, 0.96078431606292725),
(0.97058820724487305, 0.96470588445663452, 0.96470588445663452),
(0.97478991746902466, 0.9686274528503418, 0.9686274528503418),
(0.97899156808853149, 0.97647058963775635, 0.97647058963775635),
(0.98319327831268311, 0.98039215803146362, 0.98039215803146362),
(0.98739492893218994, 0.9843137264251709, 0.9843137264251709),
(0.99159663915634155, 0.98823529481887817, 0.98823529481887817),
(0.99579828977584839, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)], 'red': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.035294119268655777, 0.035294119268655777),
(0.037815127521753311, 0.039215687662363052, 0.039215687662363052),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.098039217293262482, 0.098039217293262482),
(0.10084033757448196, 0.10196078568696976, 0.10196078568696976),
(0.10504201799631119, 0.10588235408067703, 0.10588235408067703),
(0.10924369841814041, 0.10980392247438431, 0.10980392247438431),
(0.11344537883996964, 0.11372549086809158, 0.11372549086809158),
(0.11764705926179886, 0.11764705926179886, 0.11764705926179886),
(0.12184873968362808, 0.12156862765550613, 0.12156862765550613),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.16078431904315948, 0.16078431904315948),
(0.16386555135250092, 0.16470588743686676, 0.16470588743686676),
(0.16806723177433014, 0.16862745583057404, 0.16862745583057404),
(0.17226891219615936, 0.17254902422428131, 0.17254902422428131),
(0.17647059261798859, 0.17647059261798859, 0.17647059261798859),
(0.18067227303981781, 0.18039216101169586, 0.18039216101169586),
(0.18487395346164703, 0.18431372940540314, 0.18431372940540314),
(0.18907563388347626, 0.18823529779911041, 0.18823529779911041),
(0.19327731430530548, 0.19215686619281769, 0.19215686619281769),
(0.1974789947271347, 0.19607843458652496, 0.19607843458652496),
(0.20168067514896393, 0.20000000298023224, 0.20000000298023224),
(0.20588235557079315, 0.20392157137393951, 0.20392157137393951),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.22352941334247589, 0.22352941334247589),
(0.22689075767993927, 0.22745098173618317, 0.22745098173618317),
(0.23109243810176849, 0.23137255012989044, 0.23137255012989044),
(0.23529411852359772, 0.23529411852359772, 0.23529411852359772),
(0.23949579894542694, 0.23921568691730499, 0.23921568691730499),
(0.24369747936725616, 0.24313725531101227, 0.24313725531101227),
(0.24789915978908539, 0.24705882370471954, 0.24705882370471954),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28627452254295349, 0.28627452254295349),
(0.28991597890853882, 0.29019609093666077, 0.29019609093666077),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.3490196168422699, 0.3490196168422699),
(0.35294118523597717, 0.35294118523597717, 0.35294118523597717),
(0.3571428656578064, 0.35686275362968445, 0.35686275362968445),
(0.36134454607963562, 0.36078432202339172, 0.36078432202339172),
(0.36554622650146484, 0.364705890417099, 0.364705890417099),
(0.36974790692329407, 0.36862745881080627, 0.36862745881080627),
(0.37394958734512329, 0.37254902720451355, 0.37254902720451355),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.4117647111415863, 0.4117647111415863),
(0.41596639156341553, 0.41568627953529358, 0.41568627953529358),
(0.42016807198524475, 0.41960784792900085, 0.41960784792900085),
(0.42436975240707397, 0.42352941632270813, 0.42352941632270813),
(0.4285714328289032, 0.42745098471641541, 0.42745098471641541),
(0.43277311325073242, 0.43137255311012268, 0.43137255311012268),
(0.43697479367256165, 0.43529412150382996, 0.43529412150382996),
(0.44117647409439087, 0.43921568989753723, 0.43921568989753723),
(0.44537815451622009, 0.44313725829124451, 0.44313725829124451),
(0.44957983493804932, 0.44705882668495178, 0.44705882668495178),
(0.45378151535987854, 0.45098039507865906, 0.45098039507865906),
(0.45798319578170776, 0.45490196347236633, 0.45490196347236633),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47450980544090271, 0.47450980544090271),
(0.47899159789085388, 0.47843137383460999, 0.47843137383460999),
(0.48319327831268311, 0.48235294222831726, 0.48235294222831726),
(0.48739495873451233, 0.48627451062202454, 0.48627451062202454),
(0.49159663915634155, 0.49019607901573181, 0.49019607901573181),
(0.49579831957817078, 0.49411764740943909, 0.49411764740943909), (0.5,
0.49803921580314636, 0.49803921580314636), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.5372549295425415, 0.5372549295425415), (0.54201680421829224,
0.54117649793624878, 0.54117649793624878), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.60000002384185791, 0.60000002384185791), (0.60504204034805298,
0.60392159223556519, 0.60392159223556519), (0.60924369096755981,
0.60784316062927246, 0.60784316062927246), (0.61344540119171143,
0.61176472902297974, 0.61176472902297974), (0.61764705181121826,
0.61568629741668701, 0.61568629741668701), (0.62184876203536987,
0.61960786581039429, 0.61960786581039429), (0.62605041265487671,
0.62352943420410156, 0.62352943420410156), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.66274511814117432, 0.66274511814117432), (0.66806721687316895,
0.66666668653488159, 0.66666668653488159), (0.67226892709732056,
0.67058825492858887, 0.67058825492858887), (0.67647057771682739,
0.67450982332229614, 0.67450982332229614), (0.680672287940979,
0.67843139171600342, 0.67843139171600342), (0.68487393856048584,
0.68235296010971069, 0.68235296010971069), (0.68907564878463745,
0.68627452850341797, 0.68627452850341797), (0.69327729940414429,
0.69019609689712524, 0.69019609689712524), (0.6974790096282959,
0.69411766529083252, 0.69411766529083252), (0.70168066024780273,
0.69803923368453979, 0.69803923368453979), (0.70588237047195435,
0.70196080207824707, 0.70196080207824707), (0.71008402109146118,
0.70588237047195435, 0.70588237047195435), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72549021244049072, 0.72549021244049072), (0.73109245300292969,
0.729411780834198, 0.729411780834198), (0.73529410362243652,
0.73333334922790527, 0.73333334922790527), (0.73949581384658813,
0.73725491762161255, 0.73725491762161255), (0.74369746446609497,
0.74117648601531982, 0.74117648601531982), (0.74789917469024658,
0.7450980544090271, 0.7450980544090271), (0.75210082530975342,
0.74901962280273438, 0.74901962280273438), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78823530673980713, 0.78823530673980713), (0.79411762952804565,
0.7921568751335144, 0.7921568751335144), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.85098040103912354, 0.85098040103912354), (0.8571428656578064,
0.85490196943283081, 0.85490196943283081), (0.86134451627731323,
0.85882353782653809, 0.85882353782653809), (0.86554622650146484,
0.86274510622024536, 0.86274510622024536), (0.86974787712097168,
0.86666667461395264, 0.86666667461395264), (0.87394958734512329,
0.87058824300765991, 0.87058824300765991), (0.87815123796463013,
0.87450981140136719, 0.87450981140136719), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.91372549533843994, 0.91372549533843994), (0.92016804218292236,
0.91764706373214722, 0.91764706373214722), (0.92436975240707397,
0.92156863212585449, 0.92156863212585449), (0.92857140302658081,
0.92549020051956177, 0.92549020051956177), (0.93277311325073242,
0.92941176891326904, 0.92941176891326904), (0.93697476387023926,
0.93333333730697632, 0.93333333730697632), (0.94117647409439087,
0.93725490570068359, 0.93725490570068359), (0.94537812471389771,
0.94117647409439087, 0.94117647409439087), (0.94957983493804932,
0.94509804248809814, 0.94509804248809814), (0.95378148555755615,
0.94901961088180542, 0.94901961088180542), (0.95798319578170776,
0.9529411792755127, 0.9529411792755127), (0.9621848464012146,
0.95686274766921997, 0.95686274766921997), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97647058963775635, 0.97647058963775635), (0.98319327831268311,
0.98039215803146362, 0.98039215803146362), (0.98739492893218994,
0.9843137264251709, 0.9843137264251709), (0.99159663915634155,
0.98823529481887817, 0.98823529481887817), (0.99579828977584839,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)]}
_gist_heat_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388, 0.0, 0.0),
(0.48319327831268311, 0.0, 0.0), (0.48739495873451233, 0.0, 0.0),
(0.49159663915634155, 0.0, 0.0), (0.49579831957817078, 0.0, 0.0), (0.5,
0.0, 0.0), (0.50420171022415161, 0.0, 0.0), (0.50840336084365845, 0.0,
0.0), (0.51260507106781006, 0.0, 0.0), (0.51680672168731689, 0.0, 0.0),
(0.52100843191146851, 0.0, 0.0), (0.52521008253097534, 0.0, 0.0),
(0.52941179275512695, 0.0, 0.0), (0.53361344337463379, 0.0, 0.0),
(0.5378151535987854, 0.0, 0.0), (0.54201680421829224, 0.0, 0.0),
(0.54621851444244385, 0.0, 0.0), (0.55042016506195068, 0.0, 0.0),
(0.55462187528610229, 0.0, 0.0), (0.55882352590560913, 0.0, 0.0),
(0.56302523612976074, 0.0, 0.0), (0.56722688674926758, 0.0, 0.0),
(0.57142859697341919, 0.0, 0.0), (0.57563024759292603, 0.0, 0.0),
(0.57983195781707764, 0.0, 0.0), (0.58403360843658447, 0.0, 0.0),
(0.58823531866073608, 0.0, 0.0), (0.59243696928024292, 0.0, 0.0),
(0.59663867950439453, 0.0, 0.0), (0.60084033012390137, 0.0, 0.0),
(0.60504204034805298, 0.0, 0.0), (0.60924369096755981, 0.0, 0.0),
(0.61344540119171143, 0.0, 0.0), (0.61764705181121826, 0.0, 0.0),
(0.62184876203536987, 0.0, 0.0), (0.62605041265487671, 0.0, 0.0),
(0.63025212287902832, 0.0, 0.0), (0.63445377349853516, 0.0, 0.0),
(0.63865548372268677, 0.0, 0.0), (0.6428571343421936, 0.0, 0.0),
(0.64705884456634521, 0.0, 0.0), (0.65126049518585205, 0.0, 0.0),
(0.65546220541000366, 0.0, 0.0), (0.6596638560295105, 0.0, 0.0),
(0.66386556625366211, 0.0, 0.0), (0.66806721687316895, 0.0, 0.0),
(0.67226892709732056, 0.0, 0.0), (0.67647057771682739, 0.0, 0.0),
(0.680672287940979, 0.0, 0.0), (0.68487393856048584, 0.0, 0.0),
(0.68907564878463745, 0.0, 0.0), (0.69327729940414429, 0.0, 0.0),
(0.6974790096282959, 0.0, 0.0), (0.70168066024780273, 0.0, 0.0),
(0.70588237047195435, 0.0, 0.0), (0.71008402109146118, 0.0, 0.0),
(0.71428573131561279, 0.0, 0.0), (0.71848738193511963, 0.0, 0.0),
(0.72268909215927124, 0.0, 0.0), (0.72689074277877808, 0.0, 0.0),
(0.73109245300292969, 0.0, 0.0), (0.73529410362243652, 0.0, 0.0),
(0.73949581384658813, 0.0, 0.0), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.0, 0.0), (0.75210082530975342, 0.0, 0.0),
(0.75630253553390503, 0.027450980618596077, 0.027450980618596077),
(0.76050418615341187, 0.043137256056070328, 0.043137256056070328),
(0.76470589637756348, 0.058823529630899429, 0.058823529630899429),
(0.76890754699707031, 0.074509806931018829, 0.074509806931018829),
(0.77310925722122192, 0.090196080505847931, 0.090196080505847931),
(0.77731090784072876, 0.10588235408067703, 0.10588235408067703),
(0.78151261806488037, 0.12156862765550613, 0.12156862765550613),
(0.78571426868438721, 0.13725490868091583, 0.13725490868091583),
(0.78991597890853882, 0.15294118225574493, 0.15294118225574493),
(0.79411762952804565, 0.16862745583057404, 0.16862745583057404),
(0.79831933975219727, 0.20000000298023224, 0.20000000298023224),
(0.8025209903717041, 0.21176470816135406, 0.21176470816135406),
(0.80672270059585571, 0.22745098173618317, 0.22745098173618317),
(0.81092435121536255, 0.24313725531101227, 0.24313725531101227),
(0.81512606143951416, 0.25882354378700256, 0.25882354378700256),
(0.819327712059021, 0.27450981736183167, 0.27450981736183167),
(0.82352942228317261, 0.29019609093666077, 0.29019609093666077),
(0.82773107290267944, 0.30588236451148987, 0.30588236451148987),
(0.83193278312683105, 0.32156863808631897, 0.32156863808631897),
(0.83613443374633789, 0.33725491166114807, 0.33725491166114807),
(0.8403361439704895, 0.35294118523597717, 0.35294118523597717),
(0.84453779458999634, 0.36862745881080627, 0.36862745881080627),
(0.84873950481414795, 0.38431373238563538, 0.38431373238563538),
(0.85294115543365479, 0.40000000596046448, 0.40000000596046448),
(0.8571428656578064, 0.4117647111415863, 0.4117647111415863),
(0.86134451627731323, 0.42745098471641541, 0.42745098471641541),
(0.86554622650146484, 0.44313725829124451, 0.44313725829124451),
(0.86974787712097168, 0.45882353186607361, 0.45882353186607361),
(0.87394958734512329, 0.47450980544090271, 0.47450980544090271),
(0.87815123796463013, 0.49019607901573181, 0.49019607901573181),
(0.88235294818878174, 0.5215686559677124, 0.5215686559677124),
(0.88655459880828857, 0.5372549295425415, 0.5372549295425415),
(0.89075630903244019, 0.55294120311737061, 0.55294120311737061),
(0.89495795965194702, 0.56862747669219971, 0.56862747669219971),
(0.89915966987609863, 0.58431375026702881, 0.58431375026702881),
(0.90336132049560547, 0.60000002384185791, 0.60000002384185791),
(0.90756303071975708, 0.61176472902297974, 0.61176472902297974),
(0.91176468133926392, 0.62745100259780884, 0.62745100259780884),
(0.91596639156341553, 0.64313727617263794, 0.64313727617263794),
(0.92016804218292236, 0.65882354974746704, 0.65882354974746704),
(0.92436975240707397, 0.67450982332229614, 0.67450982332229614),
(0.92857140302658081, 0.69019609689712524, 0.69019609689712524),
(0.93277311325073242, 0.70588237047195435, 0.70588237047195435),
(0.93697476387023926, 0.72156864404678345, 0.72156864404678345),
(0.94117647409439087, 0.73725491762161255, 0.73725491762161255),
(0.94537812471389771, 0.75294119119644165, 0.75294119119644165),
(0.94957983493804932, 0.76862746477127075, 0.76862746477127075),
(0.95378148555755615, 0.78431373834609985, 0.78431373834609985),
(0.95798319578170776, 0.80000001192092896, 0.80000001192092896),
(0.9621848464012146, 0.81176471710205078, 0.81176471710205078),
(0.96638655662536621, 0.84313726425170898, 0.84313726425170898),
(0.97058820724487305, 0.85882353782653809, 0.85882353782653809),
(0.97478991746902466, 0.87450981140136719, 0.87450981140136719),
(0.97899156808853149, 0.89019608497619629, 0.89019608497619629),
(0.98319327831268311, 0.90588235855102539, 0.90588235855102539),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388,
0.0039215688593685627, 0.0039215688593685627), (0.48319327831268311,
0.011764706112444401, 0.011764706112444401), (0.48739495873451233,
0.019607843831181526, 0.019607843831181526), (0.49159663915634155,
0.027450980618596077, 0.027450980618596077), (0.49579831957817078,
0.035294119268655777, 0.035294119268655777), (0.5, 0.043137256056070328,
0.043137256056070328), (0.50420171022415161, 0.058823529630899429,
0.058823529630899429), (0.50840336084365845, 0.066666670143604279,
0.066666670143604279), (0.51260507106781006, 0.070588238537311554,
0.070588238537311554), (0.51680672168731689, 0.078431375324726105,
0.078431375324726105), (0.52100843191146851, 0.086274512112140656,
0.086274512112140656), (0.52521008253097534, 0.094117648899555206,
0.094117648899555206), (0.52941179275512695, 0.10196078568696976,
0.10196078568696976), (0.53361344337463379, 0.10980392247438431,
0.10980392247438431), (0.5378151535987854, 0.11764705926179886,
0.11764705926179886), (0.54201680421829224, 0.12549020349979401,
0.12549020349979401), (0.54621851444244385, 0.13725490868091583,
0.13725490868091583), (0.55042016506195068, 0.14509804546833038,
0.14509804546833038), (0.55462187528610229, 0.15294118225574493,
0.15294118225574493), (0.55882352590560913, 0.16078431904315948,
0.16078431904315948), (0.56302523612976074, 0.16862745583057404,
0.16862745583057404), (0.56722688674926758, 0.17647059261798859,
0.17647059261798859), (0.57142859697341919, 0.18431372940540314,
0.18431372940540314), (0.57563024759292603, 0.19215686619281769,
0.19215686619281769), (0.57983195781707764, 0.20000000298023224,
0.20000000298023224), (0.58403360843658447, 0.20392157137393951,
0.20392157137393951), (0.58823531866073608, 0.21176470816135406,
0.21176470816135406), (0.59243696928024292, 0.21960784494876862,
0.21960784494876862), (0.59663867950439453, 0.22745098173618317,
0.22745098173618317), (0.60084033012390137, 0.23529411852359772,
0.23529411852359772), (0.60504204034805298, 0.24313725531101227,
0.24313725531101227), (0.60924369096755981, 0.25098040699958801,
0.25098040699958801), (0.61344540119171143, 0.25882354378700256,
0.25882354378700256), (0.61764705181121826, 0.26666668057441711,
0.26666668057441711), (0.62184876203536987, 0.27058824896812439,
0.27058824896812439), (0.62605041265487671, 0.27843138575553894,
0.27843138575553894), (0.63025212287902832, 0.29411765933036804,
0.29411765933036804), (0.63445377349853516, 0.30196079611778259,
0.30196079611778259), (0.63865548372268677, 0.30980393290519714,
0.30980393290519714), (0.6428571343421936, 0.31764706969261169,
0.31764706969261169), (0.64705884456634521, 0.32549020648002625,
0.32549020648002625), (0.65126049518585205, 0.3333333432674408,
0.3333333432674408), (0.65546220541000366, 0.33725491166114807,
0.33725491166114807), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.35294118523597717,
0.35294118523597717), (0.66806721687316895, 0.36078432202339172,
0.36078432202339172), (0.67226892709732056, 0.36862745881080627,
0.36862745881080627), (0.67647057771682739, 0.37647059559822083,
0.37647059559822083), (0.680672287940979, 0.38431373238563538,
0.38431373238563538), (0.68487393856048584, 0.39215686917304993,
0.39215686917304993), (0.68907564878463745, 0.40000000596046448,
0.40000000596046448), (0.69327729940414429, 0.40392157435417175,
0.40392157435417175), (0.6974790096282959, 0.4117647111415863,
0.4117647111415863), (0.70168066024780273, 0.41960784792900085,
0.41960784792900085), (0.70588237047195435, 0.42745098471641541,
0.42745098471641541), (0.71008402109146118, 0.43529412150382996,
0.43529412150382996), (0.71428573131561279, 0.45098039507865906,
0.45098039507865906), (0.71848738193511963, 0.45882353186607361,
0.45882353186607361), (0.72268909215927124, 0.46666666865348816,
0.46666666865348816), (0.72689074277877808, 0.47058823704719543,
0.47058823704719543), (0.73109245300292969, 0.47843137383460999,
0.47843137383460999), (0.73529410362243652, 0.48627451062202454,
0.48627451062202454), (0.73949581384658813, 0.49411764740943909,
0.49411764740943909), (0.74369746446609497, 0.50196081399917603,
0.50196081399917603), (0.74789917469024658, 0.50980395078659058,
0.50980395078659058), (0.75210082530975342, 0.51764708757400513,
0.51764708757400513), (0.75630253553390503, 0.53333336114883423,
0.53333336114883423), (0.76050418615341187, 0.5372549295425415,
0.5372549295425415), (0.76470589637756348, 0.54509806632995605,
0.54509806632995605), (0.76890754699707031, 0.55294120311737061,
0.55294120311737061), (0.77310925722122192, 0.56078433990478516,
0.56078433990478516), (0.77731090784072876, 0.56862747669219971,
0.56862747669219971), (0.78151261806488037, 0.57647061347961426,
0.57647061347961426), (0.78571426868438721, 0.58431375026702881,
0.58431375026702881), (0.78991597890853882, 0.59215688705444336,
0.59215688705444336), (0.79411762952804565, 0.60000002384185791,
0.60000002384185791), (0.79831933975219727, 0.61176472902297974,
0.61176472902297974), (0.8025209903717041, 0.61960786581039429,
0.61960786581039429), (0.80672270059585571, 0.62745100259780884,
0.62745100259780884), (0.81092435121536255, 0.63529413938522339,
0.63529413938522339), (0.81512606143951416, 0.64313727617263794,
0.64313727617263794), (0.819327712059021, 0.65098041296005249,
0.65098041296005249), (0.82352942228317261, 0.65882354974746704,
0.65882354974746704), (0.82773107290267944, 0.66666668653488159,
0.66666668653488159), (0.83193278312683105, 0.67058825492858887,
0.67058825492858887), (0.83613443374633789, 0.67843139171600342,
0.67843139171600342), (0.8403361439704895, 0.68627452850341797,
0.68627452850341797), (0.84453779458999634, 0.69411766529083252,
0.69411766529083252), (0.84873950481414795, 0.70196080207824707,
0.70196080207824707), (0.85294115543365479, 0.70980393886566162,
0.70980393886566162), (0.8571428656578064, 0.71764707565307617,
0.71764707565307617), (0.86134451627731323, 0.72549021244049072,
0.72549021244049072), (0.86554622650146484, 0.73333334922790527,
0.73333334922790527), (0.86974787712097168, 0.73725491762161255,
0.73725491762161255), (0.87394958734512329, 0.7450980544090271,
0.7450980544090271), (0.87815123796463013, 0.75294119119644165,
0.75294119119644165), (0.88235294818878174, 0.76862746477127075,
0.76862746477127075), (0.88655459880828857, 0.7764706015586853,
0.7764706015586853), (0.89075630903244019, 0.78431373834609985,
0.78431373834609985), (0.89495795965194702, 0.7921568751335144,
0.7921568751335144), (0.89915966987609863, 0.80000001192092896,
0.80000001192092896), (0.90336132049560547, 0.80392158031463623,
0.80392158031463623), (0.90756303071975708, 0.81176471710205078,
0.81176471710205078), (0.91176468133926392, 0.81960785388946533,
0.81960785388946533), (0.91596639156341553, 0.82745099067687988,
0.82745099067687988), (0.92016804218292236, 0.83529412746429443,
0.83529412746429443), (0.92436975240707397, 0.84313726425170898,
0.84313726425170898), (0.92857140302658081, 0.85098040103912354,
0.85098040103912354), (0.93277311325073242, 0.85882353782653809,
0.85882353782653809), (0.93697476387023926, 0.86666667461395264,
0.86666667461395264), (0.94117647409439087, 0.87058824300765991,
0.87058824300765991), (0.94537812471389771, 0.87843137979507446,
0.87843137979507446), (0.94957983493804932, 0.88627451658248901,
0.88627451658248901), (0.95378148555755615, 0.89411765336990356,
0.89411765336990356), (0.95798319578170776, 0.90196079015731812,
0.90196079015731812), (0.9621848464012146, 0.90980392694473267,
0.90980392694473267), (0.96638655662536621, 0.92549020051956177,
0.92549020051956177), (0.97058820724487305, 0.93333333730697632,
0.93333333730697632), (0.97478991746902466, 0.93725490570068359,
0.93725490570068359), (0.97899156808853149, 0.94509804248809814,
0.94509804248809814), (0.98319327831268311, 0.9529411792755127,
0.9529411792755127), (0.98739492893218994, 0.96078431606292725,
0.96078431606292725), (0.99159663915634155, 0.9686274528503418,
0.9686274528503418), (0.99579828977584839, 0.97647058963775635,
0.97647058963775635), (1.0, 0.9843137264251709, 0.9843137264251709)],
'red': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.015686275437474251,
0.015686275437474251), (0.016806723549962044, 0.019607843831181526,
0.019607843831181526), (0.021008403971791267, 0.027450980618596077,
0.027450980618596077), (0.025210084393620491, 0.031372550874948502,
0.031372550874948502), (0.029411764815449715, 0.039215687662363052,
0.039215687662363052), (0.033613447099924088, 0.043137256056070328,
0.043137256056070328), (0.037815127521753311, 0.050980392843484879,
0.050980392843484879), (0.042016807943582535, 0.058823529630899429,
0.058823529630899429), (0.046218488365411758, 0.066666670143604279,
0.066666670143604279), (0.050420168787240982, 0.070588238537311554,
0.070588238537311554), (0.054621849209070206, 0.078431375324726105,
0.078431375324726105), (0.058823529630899429, 0.08235294371843338,
0.08235294371843338), (0.063025213778018951, 0.090196080505847931,
0.090196080505847931), (0.067226894199848175, 0.094117648899555206,
0.094117648899555206), (0.071428574621677399, 0.10196078568696976,
0.10196078568696976), (0.075630255043506622, 0.10588235408067703,
0.10588235408067703), (0.079831935465335846, 0.10980392247438431,
0.10980392247438431), (0.08403361588716507, 0.11764705926179886,
0.11764705926179886), (0.088235296308994293, 0.12156862765550613,
0.12156862765550613), (0.092436976730823517, 0.12941177189350128,
0.12941177189350128), (0.09663865715265274, 0.13333334028720856,
0.13333334028720856), (0.10084033757448196, 0.14117647707462311,
0.14117647707462311), (0.10504201799631119, 0.14509804546833038,
0.14509804546833038), (0.10924369841814041, 0.15294118225574493,
0.15294118225574493), (0.11344537883996964, 0.15686275064945221,
0.15686275064945221), (0.11764705926179886, 0.16470588743686676,
0.16470588743686676), (0.12184873968362808, 0.16862745583057404,
0.16862745583057404), (0.1260504275560379, 0.18039216101169586,
0.18039216101169586), (0.13025210797786713, 0.18431372940540314,
0.18431372940540314), (0.13445378839969635, 0.19215686619281769,
0.19215686619281769), (0.13865546882152557, 0.19607843458652496,
0.19607843458652496), (0.1428571492433548, 0.20392157137393951,
0.20392157137393951), (0.14705882966518402, 0.20784313976764679,
0.20784313976764679), (0.15126051008701324, 0.21568627655506134,
0.21568627655506134), (0.15546219050884247, 0.21960784494876862,
0.21960784494876862), (0.15966387093067169, 0.22352941334247589,
0.22352941334247589), (0.16386555135250092, 0.23137255012989044,
0.23137255012989044), (0.16806723177433014, 0.23529411852359772,
0.23529411852359772), (0.17226891219615936, 0.24313725531101227,
0.24313725531101227), (0.17647059261798859, 0.24705882370471954,
0.24705882370471954), (0.18067227303981781, 0.25490197539329529,
0.25490197539329529), (0.18487395346164703, 0.25882354378700256,
0.25882354378700256), (0.18907563388347626, 0.26666668057441711,
0.26666668057441711), (0.19327731430530548, 0.27058824896812439,
0.27058824896812439), (0.1974789947271347, 0.27450981736183167,
0.27450981736183167), (0.20168067514896393, 0.28235295414924622,
0.28235295414924622), (0.20588235557079315, 0.28627452254295349,
0.28627452254295349), (0.21008403599262238, 0.29803922772407532,
0.29803922772407532), (0.2142857164144516, 0.30588236451148987,
0.30588236451148987), (0.21848739683628082, 0.30980393290519714,
0.30980393290519714), (0.22268907725811005, 0.31764706969261169,
0.31764706969261169), (0.22689075767993927, 0.32156863808631897,
0.32156863808631897), (0.23109243810176849, 0.32941177487373352,
0.32941177487373352), (0.23529411852359772, 0.3333333432674408,
0.3333333432674408), (0.23949579894542694, 0.33725491166114807,
0.33725491166114807), (0.24369747936725616, 0.34509804844856262,
0.34509804844856262), (0.24789915978908539, 0.3490196168422699,
0.3490196168422699), (0.25210085511207581, 0.36078432202339172,
0.36078432202339172), (0.25630253553390503, 0.36862745881080627,
0.36862745881080627), (0.26050421595573425, 0.37254902720451355,
0.37254902720451355), (0.26470589637756348, 0.3803921639919281,
0.3803921639919281), (0.2689075767993927, 0.38431373238563538,
0.38431373238563538), (0.27310925722122192, 0.38823530077934265,
0.38823530077934265), (0.27731093764305115, 0.3960784375667572,
0.3960784375667572), (0.28151261806488037, 0.40000000596046448,
0.40000000596046448), (0.28571429848670959, 0.40784314274787903,
0.40784314274787903), (0.28991597890853882, 0.4117647111415863,
0.4117647111415863), (0.29411765933036804, 0.42352941632270813,
0.42352941632270813), (0.29831933975219727, 0.43137255311012268,
0.43137255311012268), (0.30252102017402649, 0.43529412150382996,
0.43529412150382996), (0.30672270059585571, 0.44313725829124451,
0.44313725829124451), (0.31092438101768494, 0.44705882668495178,
0.44705882668495178), (0.31512606143951416, 0.45098039507865906,
0.45098039507865906), (0.31932774186134338, 0.45882353186607361,
0.45882353186607361), (0.32352942228317261, 0.46274510025978088,
0.46274510025978088), (0.32773110270500183, 0.47058823704719543,
0.47058823704719543), (0.33193278312683105, 0.47450980544090271,
0.47450980544090271), (0.33613446354866028, 0.48235294222831726,
0.48235294222831726), (0.3403361439704895, 0.48627451062202454,
0.48627451062202454), (0.34453782439231873, 0.49411764740943909,
0.49411764740943909), (0.34873950481414795, 0.49803921580314636,
0.49803921580314636), (0.35294118523597717, 0.50196081399917603,
0.50196081399917603), (0.3571428656578064, 0.50980395078659058,
0.50980395078659058), (0.36134454607963562, 0.51372551918029785,
0.51372551918029785), (0.36554622650146484, 0.5215686559677124,
0.5215686559677124), (0.36974790692329407, 0.52549022436141968,
0.52549022436141968), (0.37394958734512329, 0.53333336114883423,
0.53333336114883423), (0.37815126776695251, 0.54509806632995605,
0.54509806632995605), (0.38235294818878174, 0.54901963472366333,
0.54901963472366333), (0.38655462861061096, 0.55294120311737061,
0.55294120311737061), (0.39075630903244019, 0.56078433990478516,
0.56078433990478516), (0.39495798945426941, 0.56470590829849243,
0.56470590829849243), (0.39915966987609863, 0.57254904508590698,
0.57254904508590698), (0.40336135029792786, 0.57647061347961426,
0.57647061347961426), (0.40756303071975708, 0.58431375026702881,
0.58431375026702881), (0.4117647111415863, 0.58823531866073608,
0.58823531866073608), (0.41596639156341553, 0.59607845544815063,
0.59607845544815063), (0.42016807198524475, 0.60000002384185791,
0.60000002384185791), (0.42436975240707397, 0.60784316062927246,
0.60784316062927246), (0.4285714328289032, 0.61176472902297974,
0.61176472902297974), (0.43277311325073242, 0.61568629741668701,
0.61568629741668701), (0.43697479367256165, 0.62352943420410156,
0.62352943420410156), (0.44117647409439087, 0.62745100259780884,
0.62745100259780884), (0.44537815451622009, 0.63529413938522339,
0.63529413938522339), (0.44957983493804932, 0.63921570777893066,
0.63921570777893066), (0.45378151535987854, 0.64705884456634521,
0.64705884456634521), (0.45798319578170776, 0.65098041296005249,
0.65098041296005249), (0.46218487620353699, 0.66274511814117432,
0.66274511814117432), (0.46638655662536621, 0.66666668653488159,
0.66666668653488159), (0.47058823704719543, 0.67450982332229614,
0.67450982332229614), (0.47478991746902466, 0.67843139171600342,
0.67843139171600342), (0.47899159789085388, 0.68627452850341797,
0.68627452850341797), (0.48319327831268311, 0.69019609689712524,
0.69019609689712524), (0.48739495873451233, 0.69803923368453979,
0.69803923368453979), (0.49159663915634155, 0.70196080207824707,
0.70196080207824707), (0.49579831957817078, 0.70980393886566162,
0.70980393886566162), (0.5, 0.7137255072593689, 0.7137255072593689),
(0.50420171022415161, 0.72549021244049072, 0.72549021244049072),
(0.50840336084365845, 0.729411780834198, 0.729411780834198),
(0.51260507106781006, 0.73725491762161255, 0.73725491762161255),
(0.51680672168731689, 0.74117648601531982, 0.74117648601531982),
(0.52100843191146851, 0.74901962280273438, 0.74901962280273438),
(0.52521008253097534, 0.75294119119644165, 0.75294119119644165),
(0.52941179275512695, 0.7607843279838562, 0.7607843279838562),
(0.53361344337463379, 0.76470589637756348, 0.76470589637756348),
(0.5378151535987854, 0.77254903316497803, 0.77254903316497803),
(0.54201680421829224, 0.7764706015586853, 0.7764706015586853),
(0.54621851444244385, 0.78823530673980713, 0.78823530673980713),
(0.55042016506195068, 0.7921568751335144, 0.7921568751335144),
(0.55462187528610229, 0.80000001192092896, 0.80000001192092896),
(0.55882352590560913, 0.80392158031463623, 0.80392158031463623),
(0.56302523612976074, 0.81176471710205078, 0.81176471710205078),
(0.56722688674926758, 0.81568628549575806, 0.81568628549575806),
(0.57142859697341919, 0.82352942228317261, 0.82352942228317261),
(0.57563024759292603, 0.82745099067687988, 0.82745099067687988),
(0.57983195781707764, 0.83137255907058716, 0.83137255907058716),
(0.58403360843658447, 0.83921569585800171, 0.83921569585800171),
(0.58823531866073608, 0.84313726425170898, 0.84313726425170898),
(0.59243696928024292, 0.85098040103912354, 0.85098040103912354),
(0.59663867950439453, 0.85490196943283081, 0.85490196943283081),
(0.60084033012390137, 0.86274510622024536, 0.86274510622024536),
(0.60504204034805298, 0.86666667461395264, 0.86666667461395264),
(0.60924369096755981, 0.87450981140136719, 0.87450981140136719),
(0.61344540119171143, 0.87843137979507446, 0.87843137979507446),
(0.61764705181121826, 0.88627451658248901, 0.88627451658248901),
(0.62184876203536987, 0.89019608497619629, 0.89019608497619629),
(0.62605041265487671, 0.89411765336990356, 0.89411765336990356),
(0.63025212287902832, 0.90588235855102539, 0.90588235855102539),
(0.63445377349853516, 0.91372549533843994, 0.91372549533843994),
(0.63865548372268677, 0.91764706373214722, 0.91764706373214722),
(0.6428571343421936, 0.92549020051956177, 0.92549020051956177),
(0.64705884456634521, 0.92941176891326904, 0.92941176891326904),
(0.65126049518585205, 0.93725490570068359, 0.93725490570068359),
(0.65546220541000366, 0.94117647409439087, 0.94117647409439087),
(0.6596638560295105, 0.94509804248809814, 0.94509804248809814),
(0.66386556625366211, 0.9529411792755127, 0.9529411792755127),
(0.66806721687316895, 0.95686274766921997, 0.95686274766921997),
(0.67226892709732056, 0.96470588445663452, 0.96470588445663452),
(0.67647057771682739, 0.9686274528503418, 0.9686274528503418),
(0.680672287940979, 0.97647058963775635, 0.97647058963775635),
(0.68487393856048584, 0.98039215803146362, 0.98039215803146362),
(0.68907564878463745, 0.98823529481887817, 0.98823529481887817),
(0.69327729940414429, 0.99215686321258545, 0.99215686321258545),
(0.6974790096282959, 1.0, 1.0), (0.70168066024780273, 1.0, 1.0),
(0.70588237047195435, 1.0, 1.0), (0.71008402109146118, 1.0, 1.0),
(0.71428573131561279, 1.0, 1.0), (0.71848738193511963, 1.0, 1.0),
(0.72268909215927124, 1.0, 1.0), (0.72689074277877808, 1.0, 1.0),
(0.73109245300292969, 1.0, 1.0), (0.73529410362243652, 1.0, 1.0),
(0.73949581384658813, 1.0, 1.0), (0.74369746446609497, 1.0, 1.0),
(0.74789917469024658, 1.0, 1.0), (0.75210082530975342, 1.0, 1.0),
(0.75630253553390503, 1.0, 1.0), (0.76050418615341187, 1.0, 1.0),
(0.76470589637756348, 1.0, 1.0), (0.76890754699707031, 1.0, 1.0),
(0.77310925722122192, 1.0, 1.0), (0.77731090784072876, 1.0, 1.0),
(0.78151261806488037, 1.0, 1.0), (0.78571426868438721, 1.0, 1.0),
(0.78991597890853882, 1.0, 1.0), (0.79411762952804565, 1.0, 1.0),
(0.79831933975219727, 1.0, 1.0), (0.8025209903717041, 1.0, 1.0),
(0.80672270059585571, 1.0, 1.0), (0.81092435121536255, 1.0, 1.0),
(0.81512606143951416, 1.0, 1.0), (0.819327712059021, 1.0, 1.0),
(0.82352942228317261, 1.0, 1.0), (0.82773107290267944, 1.0, 1.0),
(0.83193278312683105, 1.0, 1.0), (0.83613443374633789, 1.0, 1.0),
(0.8403361439704895, 1.0, 1.0), (0.84453779458999634, 1.0, 1.0),
(0.84873950481414795, 1.0, 1.0), (0.85294115543365479, 1.0, 1.0),
(0.8571428656578064, 1.0, 1.0), (0.86134451627731323, 1.0, 1.0),
(0.86554622650146484, 1.0, 1.0), (0.86974787712097168, 1.0, 1.0),
(0.87394958734512329, 1.0, 1.0), (0.87815123796463013, 1.0, 1.0),
(0.88235294818878174, 1.0, 1.0), (0.88655459880828857, 1.0, 1.0),
(0.89075630903244019, 1.0, 1.0), (0.89495795965194702, 1.0, 1.0),
(0.89915966987609863, 1.0, 1.0), (0.90336132049560547, 1.0, 1.0),
(0.90756303071975708, 1.0, 1.0), (0.91176468133926392, 1.0, 1.0),
(0.91596639156341553, 1.0, 1.0), (0.92016804218292236, 1.0, 1.0),
(0.92436975240707397, 1.0, 1.0), (0.92857140302658081, 1.0, 1.0),
(0.93277311325073242, 1.0, 1.0), (0.93697476387023926, 1.0, 1.0),
(0.94117647409439087, 1.0, 1.0), (0.94537812471389771, 1.0, 1.0),
(0.94957983493804932, 1.0, 1.0), (0.95378148555755615, 1.0, 1.0),
(0.95798319578170776, 1.0, 1.0), (0.9621848464012146, 1.0, 1.0),
(0.96638655662536621, 1.0, 1.0), (0.97058820724487305, 1.0, 1.0),
(0.97478991746902466, 1.0, 1.0), (0.97899156808853149, 1.0, 1.0),
(0.98319327831268311, 1.0, 1.0), (0.98739492893218994, 1.0, 1.0),
(0.99159663915634155, 1.0, 1.0), (0.99579828977584839, 1.0, 1.0), (1.0,
1.0, 1.0)]}
_gist_ncar_data = {'blue': [(0.0, 0.50196081399917603,
0.50196081399917603), (0.0050505050458014011, 0.45098039507865906,
0.45098039507865906), (0.010101010091602802, 0.40392157435417175,
0.40392157435417175), (0.015151515603065491, 0.35686275362968445,
0.35686275362968445), (0.020202020183205605, 0.30980393290519714,
0.30980393290519714), (0.025252524763345718, 0.25882354378700256,
0.25882354378700256), (0.030303031206130981, 0.21176470816135406,
0.21176470816135406), (0.035353533923625946, 0.16470588743686676,
0.16470588743686676), (0.040404040366411209, 0.11764705926179886,
0.11764705926179886), (0.045454546809196472, 0.070588238537311554,
0.070588238537311554), (0.050505049526691437, 0.019607843831181526,
0.019607843831181526), (0.0555555559694767, 0.047058824449777603,
0.047058824449777603), (0.060606062412261963, 0.14509804546833038,
0.14509804546833038), (0.065656565129756927, 0.23921568691730499,
0.23921568691730499), (0.070707067847251892, 0.3333333432674408,
0.3333333432674408), (0.075757578015327454, 0.43137255311012268,
0.43137255311012268), (0.080808080732822418, 0.52549022436141968,
0.52549022436141968), (0.085858583450317383, 0.61960786581039429,
0.61960786581039429), (0.090909093618392944, 0.71764707565307617,
0.71764707565307617), (0.095959596335887909, 0.81176471710205078,
0.81176471710205078), (0.10101009905338287, 0.90588235855102539,
0.90588235855102539), (0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0), (0.42424243688583374,
0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691,
0.027450980618596077, 0.027450980618596077), (0.43434342741966248,
0.050980392843484879, 0.050980392843484879), (0.43939393758773804,
0.074509806931018829, 0.074509806931018829), (0.4444444477558136,
0.094117648899555206, 0.094117648899555206), (0.44949495792388916,
0.11764705926179886, 0.11764705926179886), (0.45454546809196472,
0.14117647707462311, 0.14117647707462311), (0.4595959484577179,
0.16470588743686676, 0.16470588743686676), (0.46464645862579346,
0.18823529779911041, 0.18823529779911041), (0.46969696879386902,
0.21176470816135406, 0.21176470816135406), (0.47474747896194458,
0.23529411852359772, 0.23529411852359772), (0.47979798913002014,
0.22352941334247589, 0.22352941334247589), (0.4848484992980957,
0.20000000298023224, 0.20000000298023224), (0.48989897966384888,
0.17647059261798859, 0.17647059261798859), (0.49494948983192444,
0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128,
0.12941177189350128), (0.50505048036575317, 0.10980392247438431,
0.10980392247438431), (0.51010102033615112, 0.086274512112140656,
0.086274512112140656), (0.5151515007019043, 0.062745101749897003,
0.062745101749897003), (0.52020204067230225, 0.039215687662363052,
0.039215687662363052), (0.52525252103805542, 0.015686275437474251,
0.015686275437474251), (0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0), (0.74242424964904785,
0.031372550874948502, 0.031372550874948502), (0.74747473001480103,
0.12941177189350128, 0.12941177189350128), (0.75252526998519897,
0.22352941334247589, 0.22352941334247589), (0.75757575035095215,
0.32156863808631897, 0.32156863808631897), (0.7626262903213501,
0.41568627953529358, 0.41568627953529358), (0.76767677068710327,
0.50980395078659058, 0.50980395078659058), (0.77272725105285645,
0.60784316062927246, 0.60784316062927246), (0.77777779102325439,
0.70196080207824707, 0.70196080207824707), (0.78282827138900757,
0.79607844352722168, 0.79607844352722168), (0.78787881135940552,
0.89411765336990356, 0.89411765336990356), (0.79292929172515869,
0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0,
1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0), (0.84848487377166748,
0.99607843160629272, 0.99607843160629272), (0.85353535413742065,
0.98823529481887817, 0.98823529481887817), (0.85858583450317383,
0.9843137264251709, 0.9843137264251709), (0.86363637447357178,
0.97647058963775635, 0.97647058963775635), (0.86868685483932495,
0.9686274528503418, 0.9686274528503418), (0.8737373948097229,
0.96470588445663452, 0.96470588445663452), (0.87878787517547607,
0.95686274766921997, 0.95686274766921997), (0.88383835554122925,
0.94901961088180542, 0.94901961088180542), (0.8888888955116272,
0.94509804248809814, 0.94509804248809814), (0.89393937587738037,
0.93725490570068359, 0.93725490570068359), (0.89898991584777832,
0.93333333730697632, 0.93333333730697632), (0.90404039621353149,
0.93333333730697632, 0.93333333730697632), (0.90909093618392944,
0.93725490570068359, 0.93725490570068359), (0.91414141654968262,
0.93725490570068359, 0.93725490570068359), (0.91919189691543579,
0.94117647409439087, 0.94117647409439087), (0.92424243688583374,
0.94509804248809814, 0.94509804248809814), (0.92929291725158691,
0.94509804248809814, 0.94509804248809814), (0.93434345722198486,
0.94901961088180542, 0.94901961088180542), (0.93939393758773804,
0.9529411792755127, 0.9529411792755127), (0.94444441795349121,
0.9529411792755127, 0.9529411792755127), (0.94949495792388916,
0.95686274766921997, 0.95686274766921997), (0.95454543828964233,
0.96078431606292725, 0.96078431606292725), (0.95959597826004028,
0.96470588445663452, 0.96470588445663452), (0.96464645862579346,
0.9686274528503418, 0.9686274528503418), (0.96969699859619141,
0.97254902124404907, 0.97254902124404907), (0.97474747896194458,
0.97647058963775635, 0.97647058963775635), (0.97979795932769775,
0.98039215803146362, 0.98039215803146362), (0.9848484992980957,
0.9843137264251709, 0.9843137264251709), (0.98989897966384888,
0.98823529481887817, 0.98823529481887817), (0.99494951963424683,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)], 'green': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.035294119268655777, 0.035294119268655777), (0.010101010091602802,
0.074509806931018829, 0.074509806931018829), (0.015151515603065491,
0.10980392247438431, 0.10980392247438431), (0.020202020183205605,
0.14901961386203766, 0.14901961386203766), (0.025252524763345718,
0.18431372940540314, 0.18431372940540314), (0.030303031206130981,
0.22352941334247589, 0.22352941334247589), (0.035353533923625946,
0.25882354378700256, 0.25882354378700256), (0.040404040366411209,
0.29803922772407532, 0.29803922772407532), (0.045454546809196472,
0.3333333432674408, 0.3333333432674408), (0.050505049526691437,
0.37254902720451355, 0.37254902720451355), (0.0555555559694767,
0.36862745881080627, 0.36862745881080627), (0.060606062412261963,
0.3333333432674408, 0.3333333432674408), (0.065656565129756927,
0.29411765933036804, 0.29411765933036804), (0.070707067847251892,
0.25882354378700256, 0.25882354378700256), (0.075757578015327454,
0.21960784494876862, 0.21960784494876862), (0.080808080732822418,
0.18431372940540314, 0.18431372940540314), (0.085858583450317383,
0.14509804546833038, 0.14509804546833038), (0.090909093618392944,
0.10980392247438431, 0.10980392247438431), (0.095959596335887909,
0.070588238537311554, 0.070588238537311554), (0.10101009905338287,
0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0,
0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317,
1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0,
1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0), (0.79797977209091187,
0.015686275437474251, 0.015686275437474251), (0.80303031206130981,
0.031372550874948502, 0.031372550874948502), (0.80808079242706299,
0.050980392843484879, 0.050980392843484879), (0.81313133239746094,
0.066666670143604279, 0.066666670143604279), (0.81818181276321411,
0.086274512112140656, 0.086274512112140656), (0.82323235273361206,
0.10588235408067703, 0.10588235408067703), (0.82828283309936523,
0.12156862765550613, 0.12156862765550613), (0.83333331346511841,
0.14117647707462311, 0.14117647707462311), (0.83838385343551636,
0.15686275064945221, 0.15686275064945221), (0.84343433380126953,
0.17647059261798859, 0.17647059261798859), (0.84848487377166748,
0.20000000298023224, 0.20000000298023224), (0.85353535413742065,
0.23137255012989044, 0.23137255012989044), (0.85858583450317383,
0.25882354378700256, 0.25882354378700256), (0.86363637447357178,
0.29019609093666077, 0.29019609093666077), (0.86868685483932495,
0.32156863808631897, 0.32156863808631897), (0.8737373948097229,
0.35294118523597717, 0.35294118523597717), (0.87878787517547607,
0.38431373238563538, 0.38431373238563538), (0.88383835554122925,
0.41568627953529358, 0.41568627953529358), (0.8888888955116272,
0.44313725829124451, 0.44313725829124451), (0.89393937587738037,
0.47450980544090271, 0.47450980544090271), (0.89898991584777832,
0.5058823823928833, 0.5058823823928833), (0.90404039621353149,
0.52941179275512695, 0.52941179275512695), (0.90909093618392944,
0.55294120311737061, 0.55294120311737061), (0.91414141654968262,
0.57254904508590698, 0.57254904508590698), (0.91919189691543579,
0.59607845544815063, 0.59607845544815063), (0.92424243688583374,
0.61960786581039429, 0.61960786581039429), (0.92929291725158691,
0.64313727617263794, 0.64313727617263794), (0.93434345722198486,
0.66274511814117432, 0.66274511814117432), (0.93939393758773804,
0.68627452850341797, 0.68627452850341797), (0.94444441795349121,
0.70980393886566162, 0.70980393886566162), (0.94949495792388916,
0.729411780834198, 0.729411780834198), (0.95454543828964233,
0.75294119119644165, 0.75294119119644165), (0.95959597826004028,
0.78039216995239258, 0.78039216995239258), (0.96464645862579346,
0.80392158031463623, 0.80392158031463623), (0.96969699859619141,
0.82745099067687988, 0.82745099067687988), (0.97474747896194458,
0.85098040103912354, 0.85098040103912354), (0.97979795932769775,
0.87450981140136719, 0.87450981140136719), (0.9848484992980957,
0.90196079015731812, 0.90196079015731812), (0.98989897966384888,
0.92549020051956177, 0.92549020051956177), (0.99494951963424683,
0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907,
0.97254902124404907)], 'red': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0,
0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0), (0.31818181276321411,
0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967,
0.043137256056070328, 0.043137256056070328), (0.32828283309936523,
0.08235294371843338, 0.08235294371843338), (0.3333333432674408,
0.11764705926179886, 0.11764705926179886), (0.33838382363319397,
0.15686275064945221, 0.15686275064945221), (0.34343433380126953,
0.19607843458652496, 0.19607843458652496), (0.34848484396934509,
0.23137255012989044, 0.23137255012989044), (0.35353535413742065,
0.27058824896812439, 0.27058824896812439), (0.35858586430549622,
0.30980393290519714, 0.30980393290519714), (0.36363637447357178,
0.3490196168422699, 0.3490196168422699), (0.36868685483932495,
0.38431373238563538, 0.38431373238563538), (0.37373736500740051,
0.40392157435417175, 0.40392157435417175), (0.37878787517547607,
0.41568627953529358, 0.41568627953529358), (0.38383838534355164,
0.42352941632270813, 0.42352941632270813), (0.3888888955116272,
0.43137255311012268, 0.43137255311012268), (0.39393940567970276,
0.44313725829124451, 0.44313725829124451), (0.39898988604545593,
0.45098039507865906, 0.45098039507865906), (0.40404039621353149,
0.45882353186607361, 0.45882353186607361), (0.40909090638160706,
0.47058823704719543, 0.47058823704719543), (0.41414141654968262,
0.47843137383460999, 0.47843137383460999), (0.41919192671775818,
0.49019607901573181, 0.49019607901573181), (0.42424243688583374,
0.50196081399917603, 0.50196081399917603), (0.42929291725158691,
0.52549022436141968, 0.52549022436141968), (0.43434342741966248,
0.54901963472366333, 0.54901963472366333), (0.43939393758773804,
0.57254904508590698, 0.57254904508590698), (0.4444444477558136,
0.60000002384185791, 0.60000002384185791), (0.44949495792388916,
0.62352943420410156, 0.62352943420410156), (0.45454546809196472,
0.64705884456634521, 0.64705884456634521), (0.4595959484577179,
0.67058825492858887, 0.67058825492858887), (0.46464645862579346,
0.69411766529083252, 0.69411766529083252), (0.46969696879386902,
0.72156864404678345, 0.72156864404678345), (0.47474747896194458,
0.7450980544090271, 0.7450980544090271), (0.47979798913002014,
0.76862746477127075, 0.76862746477127075), (0.4848484992980957,
0.7921568751335144, 0.7921568751335144), (0.48989897966384888,
0.81568628549575806, 0.81568628549575806), (0.49494948983192444,
0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536,
0.86274510622024536), (0.50505048036575317, 0.88627451658248901,
0.88627451658248901), (0.51010102033615112, 0.90980392694473267,
0.90980392694473267), (0.5151515007019043, 0.93333333730697632,
0.93333333730697632), (0.52020204067230225, 0.95686274766921997,
0.95686274766921997), (0.52525252103805542, 0.98039215803146362,
0.98039215803146362), (0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)]}
_gist_rainbow_data = {'blue':
[(0.0, 0.16470588743686676, 0.16470588743686676), (0.0042016808874905109,
0.14117647707462311, 0.14117647707462311), (0.0084033617749810219,
0.12156862765550613, 0.12156862765550613), (0.012605042196810246,
0.10196078568696976, 0.10196078568696976), (0.016806723549962044,
0.078431375324726105, 0.078431375324726105), (0.021008403971791267,
0.058823529630899429, 0.058823529630899429), (0.025210084393620491,
0.039215687662363052, 0.039215687662363052), (0.029411764815449715,
0.015686275437474251, 0.015686275437474251), (0.033613447099924088, 0.0,
0.0), (0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0039215688593685627, 0.0039215688593685627),
(0.4117647111415863, 0.047058824449777603, 0.047058824449777603),
(0.41596639156341553, 0.066666670143604279, 0.066666670143604279),
(0.42016807198524475, 0.090196080505847931, 0.090196080505847931),
(0.42436975240707397, 0.10980392247438431, 0.10980392247438431),
(0.4285714328289032, 0.12941177189350128, 0.12941177189350128),
(0.43277311325073242, 0.15294118225574493, 0.15294118225574493),
(0.43697479367256165, 0.17254902422428131, 0.17254902422428131),
(0.44117647409439087, 0.19215686619281769, 0.19215686619281769),
(0.44537815451622009, 0.21568627655506134, 0.21568627655506134),
(0.44957983493804932, 0.23529411852359772, 0.23529411852359772),
(0.45378151535987854, 0.25882354378700256, 0.25882354378700256),
(0.45798319578170776, 0.27843138575553894, 0.27843138575553894),
(0.46218487620353699, 0.29803922772407532, 0.29803922772407532),
(0.46638655662536621, 0.32156863808631897, 0.32156863808631897),
(0.47058823704719543, 0.34117648005485535, 0.34117648005485535),
(0.47478991746902466, 0.38431373238563538, 0.38431373238563538),
(0.47899159789085388, 0.40392157435417175, 0.40392157435417175),
(0.48319327831268311, 0.42745098471641541, 0.42745098471641541),
(0.48739495873451233, 0.44705882668495178, 0.44705882668495178),
(0.49159663915634155, 0.46666666865348816, 0.46666666865348816),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.50980395078659058, 0.50980395078659058), (0.50420171022415161,
0.52941179275512695, 0.52941179275512695), (0.50840336084365845,
0.55294120311737061, 0.55294120311737061), (0.51260507106781006,
0.57254904508590698, 0.57254904508590698), (0.51680672168731689,
0.59607845544815063, 0.59607845544815063), (0.52100843191146851,
0.61568629741668701, 0.61568629741668701), (0.52521008253097534,
0.63529413938522339, 0.63529413938522339), (0.52941179275512695,
0.65882354974746704, 0.65882354974746704), (0.53361344337463379,
0.67843139171600342, 0.67843139171600342), (0.5378151535987854,
0.72156864404678345, 0.72156864404678345), (0.54201680421829224,
0.74117648601531982, 0.74117648601531982), (0.54621851444244385,
0.76470589637756348, 0.76470589637756348), (0.55042016506195068,
0.78431373834609985, 0.78431373834609985), (0.55462187528610229,
0.80392158031463623, 0.80392158031463623), (0.55882352590560913,
0.82745099067687988, 0.82745099067687988), (0.56302523612976074,
0.84705883264541626, 0.84705883264541626), (0.56722688674926758,
0.87058824300765991, 0.87058824300765991), (0.57142859697341919,
0.89019608497619629, 0.89019608497619629), (0.57563024759292603,
0.90980392694473267, 0.90980392694473267), (0.57983195781707764,
0.93333333730697632, 0.93333333730697632), (0.58403360843658447,
0.9529411792755127, 0.9529411792755127), (0.58823531866073608,
0.97254902124404907, 0.97254902124404907), (0.59243696928024292,
0.99607843160629272, 0.99607843160629272), (0.59663867950439453, 1.0,
1.0), (0.60084033012390137, 1.0, 1.0), (0.60504204034805298, 1.0, 1.0),
(0.60924369096755981, 1.0, 1.0), (0.61344540119171143, 1.0, 1.0),
(0.61764705181121826, 1.0, 1.0), (0.62184876203536987, 1.0, 1.0),
(0.62605041265487671, 1.0, 1.0), (0.63025212287902832, 1.0, 1.0),
(0.63445377349853516, 1.0, 1.0), (0.63865548372268677, 1.0, 1.0),
(0.6428571343421936, 1.0, 1.0), (0.64705884456634521, 1.0, 1.0),
(0.65126049518585205, 1.0, 1.0), (0.65546220541000366, 1.0, 1.0),
(0.6596638560295105, 1.0, 1.0), (0.66386556625366211, 1.0, 1.0),
(0.66806721687316895, 1.0, 1.0), (0.67226892709732056, 1.0, 1.0),
(0.67647057771682739, 1.0, 1.0), (0.680672287940979, 1.0, 1.0),
(0.68487393856048584, 1.0, 1.0), (0.68907564878463745, 1.0, 1.0),
(0.69327729940414429, 1.0, 1.0), (0.6974790096282959, 1.0, 1.0),
(0.70168066024780273, 1.0, 1.0), (0.70588237047195435, 1.0, 1.0),
(0.71008402109146118, 1.0, 1.0), (0.71428573131561279, 1.0, 1.0),
(0.71848738193511963, 1.0, 1.0), (0.72268909215927124, 1.0, 1.0),
(0.72689074277877808, 1.0, 1.0), (0.73109245300292969, 1.0, 1.0),
(0.73529410362243652, 1.0, 1.0), (0.73949581384658813, 1.0, 1.0),
(0.74369746446609497, 1.0, 1.0), (0.74789917469024658, 1.0, 1.0),
(0.75210082530975342, 1.0, 1.0), (0.75630253553390503, 1.0, 1.0),
(0.76050418615341187, 1.0, 1.0), (0.76470589637756348, 1.0, 1.0),
(0.76890754699707031, 1.0, 1.0), (0.77310925722122192, 1.0, 1.0),
(0.77731090784072876, 1.0, 1.0), (0.78151261806488037, 1.0, 1.0),
(0.78571426868438721, 1.0, 1.0), (0.78991597890853882, 1.0, 1.0),
(0.79411762952804565, 1.0, 1.0), (0.79831933975219727, 1.0, 1.0),
(0.8025209903717041, 1.0, 1.0), (0.80672270059585571, 1.0, 1.0),
(0.81092435121536255, 1.0, 1.0), (0.81512606143951416, 1.0, 1.0),
(0.819327712059021, 1.0, 1.0), (0.82352942228317261, 1.0, 1.0),
(0.82773107290267944, 1.0, 1.0), (0.83193278312683105, 1.0, 1.0),
(0.83613443374633789, 1.0, 1.0), (0.8403361439704895, 1.0, 1.0),
(0.84453779458999634, 1.0, 1.0), (0.84873950481414795, 1.0, 1.0),
(0.85294115543365479, 1.0, 1.0), (0.8571428656578064, 1.0, 1.0),
(0.86134451627731323, 1.0, 1.0), (0.86554622650146484, 1.0, 1.0),
(0.86974787712097168, 1.0, 1.0), (0.87394958734512329, 1.0, 1.0),
(0.87815123796463013, 1.0, 1.0), (0.88235294818878174, 1.0, 1.0),
(0.88655459880828857, 1.0, 1.0), (0.89075630903244019, 1.0, 1.0),
(0.89495795965194702, 1.0, 1.0), (0.89915966987609863, 1.0, 1.0),
(0.90336132049560547, 1.0, 1.0), (0.90756303071975708, 1.0, 1.0),
(0.91176468133926392, 1.0, 1.0), (0.91596639156341553, 1.0, 1.0),
(0.92016804218292236, 1.0, 1.0), (0.92436975240707397, 1.0, 1.0),
(0.92857140302658081, 1.0, 1.0), (0.93277311325073242, 1.0, 1.0),
(0.93697476387023926, 1.0, 1.0), (0.94117647409439087, 1.0, 1.0),
(0.94537812471389771, 1.0, 1.0), (0.94957983493804932, 1.0, 1.0),
(0.95378148555755615, 1.0, 1.0), (0.95798319578170776, 1.0, 1.0),
(0.9621848464012146, 1.0, 1.0), (0.96638655662536621, 0.99607843160629272,
0.99607843160629272), (0.97058820724487305, 0.97647058963775635,
0.97647058963775635), (0.97478991746902466, 0.9529411792755127,
0.9529411792755127), (0.97899156808853149, 0.91372549533843994,
0.91372549533843994), (0.98319327831268311, 0.89019608497619629,
0.89019608497619629), (0.98739492893218994, 0.87058824300765991,
0.87058824300765991), (0.99159663915634155, 0.85098040103912354,
0.85098040103912354), (0.99579828977584839, 0.82745099067687988,
0.82745099067687988), (1.0, 0.80784314870834351, 0.80784314870834351)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.019607843831181526, 0.019607843831181526),
(0.037815127521753311, 0.043137256056070328, 0.043137256056070328),
(0.042016807943582535, 0.062745101749897003, 0.062745101749897003),
(0.046218488365411758, 0.086274512112140656, 0.086274512112140656),
(0.050420168787240982, 0.10588235408067703, 0.10588235408067703),
(0.054621849209070206, 0.12549020349979401, 0.12549020349979401),
(0.058823529630899429, 0.14901961386203766, 0.14901961386203766),
(0.063025213778018951, 0.16862745583057404, 0.16862745583057404),
(0.067226894199848175, 0.18823529779911041, 0.18823529779911041),
(0.071428574621677399, 0.21176470816135406, 0.21176470816135406),
(0.075630255043506622, 0.23137255012989044, 0.23137255012989044),
(0.079831935465335846, 0.25490197539329529, 0.25490197539329529),
(0.08403361588716507, 0.27450981736183167, 0.27450981736183167),
(0.088235296308994293, 0.29411765933036804, 0.29411765933036804),
(0.092436976730823517, 0.31764706969261169, 0.31764706969261169),
(0.09663865715265274, 0.35686275362968445, 0.35686275362968445),
(0.10084033757448196, 0.3803921639919281, 0.3803921639919281),
(0.10504201799631119, 0.40000000596046448, 0.40000000596046448),
(0.10924369841814041, 0.42352941632270813, 0.42352941632270813),
(0.11344537883996964, 0.44313725829124451, 0.44313725829124451),
(0.11764705926179886, 0.46274510025978088, 0.46274510025978088),
(0.12184873968362808, 0.48627451062202454, 0.48627451062202454),
(0.1260504275560379, 0.5058823823928833, 0.5058823823928833),
(0.13025210797786713, 0.52941179275512695, 0.52941179275512695),
(0.13445378839969635, 0.54901963472366333, 0.54901963472366333),
(0.13865546882152557, 0.56862747669219971, 0.56862747669219971),
(0.1428571492433548, 0.59215688705444336, 0.59215688705444336),
(0.14705882966518402, 0.61176472902297974, 0.61176472902297974),
(0.15126051008701324, 0.63137257099151611, 0.63137257099151611),
(0.15546219050884247, 0.65490198135375977, 0.65490198135375977),
(0.15966387093067169, 0.69803923368453979, 0.69803923368453979),
(0.16386555135250092, 0.71764707565307617, 0.71764707565307617),
(0.16806723177433014, 0.73725491762161255, 0.73725491762161255),
(0.17226891219615936, 0.7607843279838562, 0.7607843279838562),
(0.17647059261798859, 0.78039216995239258, 0.78039216995239258),
(0.18067227303981781, 0.80000001192092896, 0.80000001192092896),
(0.18487395346164703, 0.82352942228317261, 0.82352942228317261),
(0.18907563388347626, 0.84313726425170898, 0.84313726425170898),
(0.19327731430530548, 0.86666667461395264, 0.86666667461395264),
(0.1974789947271347, 0.88627451658248901, 0.88627451658248901),
(0.20168067514896393, 0.90588235855102539, 0.90588235855102539),
(0.20588235557079315, 0.92941176891326904, 0.92941176891326904),
(0.21008403599262238, 0.94901961088180542, 0.94901961088180542),
(0.2142857164144516, 0.9686274528503418, 0.9686274528503418),
(0.21848739683628082, 0.99215686321258545, 0.99215686321258545),
(0.22268907725811005, 1.0, 1.0), (0.22689075767993927, 1.0, 1.0),
(0.23109243810176849, 1.0, 1.0), (0.23529411852359772, 1.0, 1.0),
(0.23949579894542694, 1.0, 1.0), (0.24369747936725616, 1.0, 1.0),
(0.24789915978908539, 1.0, 1.0), (0.25210085511207581, 1.0, 1.0),
(0.25630253553390503, 1.0, 1.0), (0.26050421595573425, 1.0, 1.0),
(0.26470589637756348, 1.0, 1.0), (0.2689075767993927, 1.0, 1.0),
(0.27310925722122192, 1.0, 1.0), (0.27731093764305115, 1.0, 1.0),
(0.28151261806488037, 1.0, 1.0), (0.28571429848670959, 1.0, 1.0),
(0.28991597890853882, 1.0, 1.0), (0.29411765933036804, 1.0, 1.0),
(0.29831933975219727, 1.0, 1.0), (0.30252102017402649, 1.0, 1.0),
(0.30672270059585571, 1.0, 1.0), (0.31092438101768494, 1.0, 1.0),
(0.31512606143951416, 1.0, 1.0), (0.31932774186134338, 1.0, 1.0),
(0.32352942228317261, 1.0, 1.0), (0.32773110270500183, 1.0, 1.0),
(0.33193278312683105, 1.0, 1.0), (0.33613446354866028, 1.0, 1.0),
(0.3403361439704895, 1.0, 1.0), (0.34453782439231873, 1.0, 1.0),
(0.34873950481414795, 1.0, 1.0), (0.35294118523597717, 1.0, 1.0),
(0.3571428656578064, 1.0, 1.0), (0.36134454607963562, 1.0, 1.0),
(0.36554622650146484, 1.0, 1.0), (0.36974790692329407, 1.0, 1.0),
(0.37394958734512329, 1.0, 1.0), (0.37815126776695251, 1.0, 1.0),
(0.38235294818878174, 1.0, 1.0), (0.38655462861061096, 1.0, 1.0),
(0.39075630903244019, 1.0, 1.0), (0.39495798945426941, 1.0, 1.0),
(0.39915966987609863, 1.0, 1.0), (0.40336135029792786, 1.0, 1.0),
(0.40756303071975708, 1.0, 1.0), (0.4117647111415863, 1.0, 1.0),
(0.41596639156341553, 1.0, 1.0), (0.42016807198524475, 1.0, 1.0),
(0.42436975240707397, 1.0, 1.0), (0.4285714328289032, 1.0, 1.0),
(0.43277311325073242, 1.0, 1.0), (0.43697479367256165, 1.0, 1.0),
(0.44117647409439087, 1.0, 1.0), (0.44537815451622009, 1.0, 1.0),
(0.44957983493804932, 1.0, 1.0), (0.45378151535987854, 1.0, 1.0),
(0.45798319578170776, 1.0, 1.0), (0.46218487620353699, 1.0, 1.0),
(0.46638655662536621, 1.0, 1.0), (0.47058823704719543, 1.0, 1.0),
(0.47478991746902466, 1.0, 1.0), (0.47899159789085388, 1.0, 1.0),
(0.48319327831268311, 1.0, 1.0), (0.48739495873451233, 1.0, 1.0),
(0.49159663915634155, 1.0, 1.0), (0.49579831957817078, 1.0, 1.0), (0.5,
1.0, 1.0), (0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 1.0,
1.0), (0.51260507106781006, 1.0, 1.0), (0.51680672168731689, 1.0, 1.0),
(0.52100843191146851, 1.0, 1.0), (0.52521008253097534, 1.0, 1.0),
(0.52941179275512695, 1.0, 1.0), (0.53361344337463379, 1.0, 1.0),
(0.5378151535987854, 1.0, 1.0), (0.54201680421829224, 1.0, 1.0),
(0.54621851444244385, 1.0, 1.0), (0.55042016506195068, 1.0, 1.0),
(0.55462187528610229, 1.0, 1.0), (0.55882352590560913, 1.0, 1.0),
(0.56302523612976074, 1.0, 1.0), (0.56722688674926758, 1.0, 1.0),
(0.57142859697341919, 1.0, 1.0), (0.57563024759292603, 1.0, 1.0),
(0.57983195781707764, 1.0, 1.0), (0.58403360843658447, 1.0, 1.0),
(0.58823531866073608, 1.0, 1.0), (0.59243696928024292, 1.0, 1.0),
(0.59663867950439453, 0.98039215803146362, 0.98039215803146362),
(0.60084033012390137, 0.93725490570068359, 0.93725490570068359),
(0.60504204034805298, 0.91764706373214722, 0.91764706373214722),
(0.60924369096755981, 0.89411765336990356, 0.89411765336990356),
(0.61344540119171143, 0.87450981140136719, 0.87450981140136719),
(0.61764705181121826, 0.85490196943283081, 0.85490196943283081),
(0.62184876203536987, 0.83137255907058716, 0.83137255907058716),
(0.62605041265487671, 0.81176471710205078, 0.81176471710205078),
(0.63025212287902832, 0.78823530673980713, 0.78823530673980713),
(0.63445377349853516, 0.76862746477127075, 0.76862746477127075),
(0.63865548372268677, 0.74901962280273438, 0.74901962280273438),
(0.6428571343421936, 0.72549021244049072, 0.72549021244049072),
(0.64705884456634521, 0.70588237047195435, 0.70588237047195435),
(0.65126049518585205, 0.68235296010971069, 0.68235296010971069),
(0.65546220541000366, 0.66274511814117432, 0.66274511814117432),
(0.6596638560295105, 0.64313727617263794, 0.64313727617263794),
(0.66386556625366211, 0.60000002384185791, 0.60000002384185791),
(0.66806721687316895, 0.58039218187332153, 0.58039218187332153),
(0.67226892709732056, 0.55686277151107788, 0.55686277151107788),
(0.67647057771682739, 0.5372549295425415, 0.5372549295425415),
(0.680672287940979, 0.51372551918029785, 0.51372551918029785),
(0.68487393856048584, 0.49411764740943909, 0.49411764740943909),
(0.68907564878463745, 0.47450980544090271, 0.47450980544090271),
(0.69327729940414429, 0.45098039507865906, 0.45098039507865906),
(0.6974790096282959, 0.43137255311012268, 0.43137255311012268),
(0.70168066024780273, 0.4117647111415863, 0.4117647111415863),
(0.70588237047195435, 0.38823530077934265, 0.38823530077934265),
(0.71008402109146118, 0.36862745881080627, 0.36862745881080627),
(0.71428573131561279, 0.34509804844856262, 0.34509804844856262),
(0.71848738193511963, 0.32549020648002625, 0.32549020648002625),
(0.72268909215927124, 0.30588236451148987, 0.30588236451148987),
(0.72689074277877808, 0.26274511218070984, 0.26274511218070984),
(0.73109245300292969, 0.24313725531101227, 0.24313725531101227),
(0.73529410362243652, 0.21960784494876862, 0.21960784494876862),
(0.73949581384658813, 0.20000000298023224, 0.20000000298023224),
(0.74369746446609497, 0.17647059261798859, 0.17647059261798859),
(0.74789917469024658, 0.15686275064945221, 0.15686275064945221),
(0.75210082530975342, 0.13725490868091583, 0.13725490868091583),
(0.75630253553390503, 0.11372549086809158, 0.11372549086809158),
(0.76050418615341187, 0.094117648899555206, 0.094117648899555206),
(0.76470589637756348, 0.070588238537311554, 0.070588238537311554),
(0.76890754699707031, 0.050980392843484879, 0.050980392843484879),
(0.77310925722122192, 0.031372550874948502, 0.031372550874948502),
(0.77731090784072876, 0.0078431377187371254, 0.0078431377187371254),
(0.78151261806488037, 0.0, 0.0), (0.78571426868438721, 0.0, 0.0),
(0.78991597890853882, 0.0, 0.0), (0.79411762952804565, 0.0, 0.0),
(0.79831933975219727, 0.0, 0.0), (0.8025209903717041, 0.0, 0.0),
(0.80672270059585571, 0.0, 0.0), (0.81092435121536255, 0.0, 0.0),
(0.81512606143951416, 0.0, 0.0), (0.819327712059021, 0.0, 0.0),
(0.82352942228317261, 0.0, 0.0), (0.82773107290267944, 0.0, 0.0),
(0.83193278312683105, 0.0, 0.0), (0.83613443374633789, 0.0, 0.0),
(0.8403361439704895, 0.0, 0.0), (0.84453779458999634, 0.0, 0.0),
(0.84873950481414795, 0.0, 0.0), (0.85294115543365479, 0.0, 0.0),
(0.8571428656578064, 0.0, 0.0), (0.86134451627731323, 0.0, 0.0),
(0.86554622650146484, 0.0, 0.0), (0.86974787712097168, 0.0, 0.0),
(0.87394958734512329, 0.0, 0.0), (0.87815123796463013, 0.0, 0.0),
(0.88235294818878174, 0.0, 0.0), (0.88655459880828857, 0.0, 0.0),
(0.89075630903244019, 0.0, 0.0), (0.89495795965194702, 0.0, 0.0),
(0.89915966987609863, 0.0, 0.0), (0.90336132049560547, 0.0, 0.0),
(0.90756303071975708, 0.0, 0.0), (0.91176468133926392, 0.0, 0.0),
(0.91596639156341553, 0.0, 0.0), (0.92016804218292236, 0.0, 0.0),
(0.92436975240707397, 0.0, 0.0), (0.92857140302658081, 0.0, 0.0),
(0.93277311325073242, 0.0, 0.0), (0.93697476387023926, 0.0, 0.0),
(0.94117647409439087, 0.0, 0.0), (0.94537812471389771, 0.0, 0.0),
(0.94957983493804932, 0.0, 0.0), (0.95378148555755615, 0.0, 0.0),
(0.95798319578170776, 0.0, 0.0), (0.9621848464012146, 0.0, 0.0),
(0.96638655662536621, 0.0, 0.0), (0.97058820724487305, 0.0, 0.0),
(0.97478991746902466, 0.0, 0.0), (0.97899156808853149, 0.0, 0.0),
(0.98319327831268311, 0.0, 0.0), (0.98739492893218994, 0.0, 0.0),
(0.99159663915634155, 0.0, 0.0), (0.99579828977584839, 0.0, 0.0), (1.0,
0.0, 0.0)], 'red': [(0.0, 1.0, 1.0), (0.0042016808874905109, 1.0, 1.0),
(0.0084033617749810219, 1.0, 1.0), (0.012605042196810246, 1.0, 1.0),
(0.016806723549962044, 1.0, 1.0), (0.021008403971791267, 1.0, 1.0),
(0.025210084393620491, 1.0, 1.0), (0.029411764815449715, 1.0, 1.0),
(0.033613447099924088, 1.0, 1.0), (0.037815127521753311, 1.0, 1.0),
(0.042016807943582535, 1.0, 1.0), (0.046218488365411758, 1.0, 1.0),
(0.050420168787240982, 1.0, 1.0), (0.054621849209070206, 1.0, 1.0),
(0.058823529630899429, 1.0, 1.0), (0.063025213778018951, 1.0, 1.0),
(0.067226894199848175, 1.0, 1.0), (0.071428574621677399, 1.0, 1.0),
(0.075630255043506622, 1.0, 1.0), (0.079831935465335846, 1.0, 1.0),
(0.08403361588716507, 1.0, 1.0), (0.088235296308994293, 1.0, 1.0),
(0.092436976730823517, 1.0, 1.0), (0.09663865715265274, 1.0, 1.0),
(0.10084033757448196, 1.0, 1.0), (0.10504201799631119, 1.0, 1.0),
(0.10924369841814041, 1.0, 1.0), (0.11344537883996964, 1.0, 1.0),
(0.11764705926179886, 1.0, 1.0), (0.12184873968362808, 1.0, 1.0),
(0.1260504275560379, 1.0, 1.0), (0.13025210797786713, 1.0, 1.0),
(0.13445378839969635, 1.0, 1.0), (0.13865546882152557, 1.0, 1.0),
(0.1428571492433548, 1.0, 1.0), (0.14705882966518402, 1.0, 1.0),
(0.15126051008701324, 1.0, 1.0), (0.15546219050884247, 1.0, 1.0),
(0.15966387093067169, 1.0, 1.0), (0.16386555135250092, 1.0, 1.0),
(0.16806723177433014, 1.0, 1.0), (0.17226891219615936, 1.0, 1.0),
(0.17647059261798859, 1.0, 1.0), (0.18067227303981781, 1.0, 1.0),
(0.18487395346164703, 1.0, 1.0), (0.18907563388347626, 1.0, 1.0),
(0.19327731430530548, 1.0, 1.0), (0.1974789947271347, 1.0, 1.0),
(0.20168067514896393, 1.0, 1.0), (0.20588235557079315, 1.0, 1.0),
(0.21008403599262238, 1.0, 1.0), (0.2142857164144516, 1.0, 1.0),
(0.21848739683628082, 1.0, 1.0), (0.22268907725811005,
0.96078431606292725, 0.96078431606292725), (0.22689075767993927,
0.94117647409439087, 0.94117647409439087), (0.23109243810176849,
0.92156863212585449, 0.92156863212585449), (0.23529411852359772,
0.89803922176361084, 0.89803922176361084), (0.23949579894542694,
0.87843137979507446, 0.87843137979507446), (0.24369747936725616,
0.85882353782653809, 0.85882353782653809), (0.24789915978908539,
0.83529412746429443, 0.83529412746429443), (0.25210085511207581,
0.81568628549575806, 0.81568628549575806), (0.25630253553390503,
0.7921568751335144, 0.7921568751335144), (0.26050421595573425,
0.77254903316497803, 0.77254903316497803), (0.26470589637756348,
0.75294119119644165, 0.75294119119644165), (0.2689075767993927,
0.729411780834198, 0.729411780834198), (0.27310925722122192,
0.70980393886566162, 0.70980393886566162), (0.27731093764305115,
0.68627452850341797, 0.68627452850341797), (0.28151261806488037,
0.66666668653488159, 0.66666668653488159), (0.28571429848670959,
0.62352943420410156, 0.62352943420410156), (0.28991597890853882,
0.60392159223556519, 0.60392159223556519), (0.29411765933036804,
0.58431375026702881, 0.58431375026702881), (0.29831933975219727,
0.56078433990478516, 0.56078433990478516), (0.30252102017402649,
0.54117649793624878, 0.54117649793624878), (0.30672270059585571,
0.51764708757400513, 0.51764708757400513), (0.31092438101768494,
0.49803921580314636, 0.49803921580314636), (0.31512606143951416,
0.47843137383460999, 0.47843137383460999), (0.31932774186134338,
0.45490196347236633, 0.45490196347236633), (0.32352942228317261,
0.43529412150382996, 0.43529412150382996), (0.32773110270500183,
0.41568627953529358, 0.41568627953529358), (0.33193278312683105,
0.39215686917304993, 0.39215686917304993), (0.33613446354866028,
0.37254902720451355, 0.37254902720451355), (0.3403361439704895,
0.3490196168422699, 0.3490196168422699), (0.34453782439231873,
0.32941177487373352, 0.32941177487373352), (0.34873950481414795,
0.28627452254295349, 0.28627452254295349), (0.35294118523597717,
0.26666668057441711, 0.26666668057441711), (0.3571428656578064,
0.24705882370471954, 0.24705882370471954), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.20392157137393951, 0.20392157137393951), (0.36974790692329407,
0.18039216101169586, 0.18039216101169586), (0.37394958734512329,
0.16078431904315948, 0.16078431904315948), (0.37815126776695251,
0.14117647707462311, 0.14117647707462311), (0.38235294818878174,
0.11764705926179886, 0.11764705926179886), (0.38655462861061096,
0.098039217293262482, 0.098039217293262482), (0.39075630903244019,
0.074509806931018829, 0.074509806931018829), (0.39495798945426941,
0.054901961237192154, 0.054901961237192154), (0.39915966987609863,
0.035294119268655777, 0.035294119268655777), (0.40336135029792786,
0.011764706112444401, 0.011764706112444401), (0.40756303071975708, 0.0,
0.0), (0.4117647111415863, 0.0, 0.0), (0.41596639156341553, 0.0, 0.0),
(0.42016807198524475, 0.0, 0.0), (0.42436975240707397, 0.0, 0.0),
(0.4285714328289032, 0.0, 0.0), (0.43277311325073242, 0.0, 0.0),
(0.43697479367256165, 0.0, 0.0), (0.44117647409439087, 0.0, 0.0),
(0.44537815451622009, 0.0, 0.0), (0.44957983493804932, 0.0, 0.0),
(0.45378151535987854, 0.0, 0.0), (0.45798319578170776, 0.0, 0.0),
(0.46218487620353699, 0.0, 0.0), (0.46638655662536621, 0.0, 0.0),
(0.47058823704719543, 0.0, 0.0), (0.47478991746902466, 0.0, 0.0),
(0.47899159789085388, 0.0, 0.0), (0.48319327831268311, 0.0, 0.0),
(0.48739495873451233, 0.0, 0.0), (0.49159663915634155, 0.0, 0.0),
(0.49579831957817078, 0.0, 0.0), (0.5, 0.0, 0.0), (0.50420171022415161,
0.0, 0.0), (0.50840336084365845, 0.0, 0.0), (0.51260507106781006, 0.0,
0.0), (0.51680672168731689, 0.0, 0.0), (0.52100843191146851, 0.0, 0.0),
(0.52521008253097534, 0.0, 0.0), (0.52941179275512695, 0.0, 0.0),
(0.53361344337463379, 0.0, 0.0), (0.5378151535987854, 0.0, 0.0),
(0.54201680421829224, 0.0, 0.0), (0.54621851444244385, 0.0, 0.0),
(0.55042016506195068, 0.0, 0.0), (0.55462187528610229, 0.0, 0.0),
(0.55882352590560913, 0.0, 0.0), (0.56302523612976074, 0.0, 0.0),
(0.56722688674926758, 0.0, 0.0), (0.57142859697341919, 0.0, 0.0),
(0.57563024759292603, 0.0, 0.0), (0.57983195781707764, 0.0, 0.0),
(0.58403360843658447, 0.0, 0.0), (0.58823531866073608, 0.0, 0.0),
(0.59243696928024292, 0.0, 0.0), (0.59663867950439453, 0.0, 0.0),
(0.60084033012390137, 0.0, 0.0), (0.60504204034805298, 0.0, 0.0),
(0.60924369096755981, 0.0, 0.0), (0.61344540119171143, 0.0, 0.0),
(0.61764705181121826, 0.0, 0.0), (0.62184876203536987, 0.0, 0.0),
(0.62605041265487671, 0.0, 0.0), (0.63025212287902832, 0.0, 0.0),
(0.63445377349853516, 0.0, 0.0), (0.63865548372268677, 0.0, 0.0),
(0.6428571343421936, 0.0, 0.0), (0.64705884456634521, 0.0, 0.0),
(0.65126049518585205, 0.0, 0.0), (0.65546220541000366, 0.0, 0.0),
(0.6596638560295105, 0.0, 0.0), (0.66386556625366211, 0.0, 0.0),
(0.66806721687316895, 0.0, 0.0), (0.67226892709732056, 0.0, 0.0),
(0.67647057771682739, 0.0, 0.0), (0.680672287940979, 0.0, 0.0),
(0.68487393856048584, 0.0, 0.0), (0.68907564878463745, 0.0, 0.0),
(0.69327729940414429, 0.0, 0.0), (0.6974790096282959, 0.0, 0.0),
(0.70168066024780273, 0.0, 0.0), (0.70588237047195435, 0.0, 0.0),
(0.71008402109146118, 0.0, 0.0), (0.71428573131561279, 0.0, 0.0),
(0.71848738193511963, 0.0, 0.0), (0.72268909215927124, 0.0, 0.0),
(0.72689074277877808, 0.0, 0.0), (0.73109245300292969, 0.0, 0.0),
(0.73529410362243652, 0.0, 0.0), (0.73949581384658813, 0.0, 0.0),
(0.74369746446609497, 0.0, 0.0), (0.74789917469024658, 0.0, 0.0),
(0.75210082530975342, 0.0, 0.0), (0.75630253553390503, 0.0, 0.0),
(0.76050418615341187, 0.0, 0.0), (0.76470589637756348, 0.0, 0.0),
(0.76890754699707031, 0.0, 0.0), (0.77310925722122192, 0.0, 0.0),
(0.77731090784072876, 0.0, 0.0), (0.78151261806488037,
0.0078431377187371254, 0.0078431377187371254), (0.78571426868438721,
0.027450980618596077, 0.027450980618596077), (0.78991597890853882,
0.070588238537311554, 0.070588238537311554), (0.79411762952804565,
0.094117648899555206, 0.094117648899555206), (0.79831933975219727,
0.11372549086809158, 0.11372549086809158), (0.8025209903717041,
0.13333334028720856, 0.13333334028720856), (0.80672270059585571,
0.15686275064945221, 0.15686275064945221), (0.81092435121536255,
0.17647059261798859, 0.17647059261798859), (0.81512606143951416,
0.19607843458652496, 0.19607843458652496), (0.819327712059021,
0.21960784494876862, 0.21960784494876862), (0.82352942228317261,
0.23921568691730499, 0.23921568691730499), (0.82773107290267944,
0.26274511218070984, 0.26274511218070984), (0.83193278312683105,
0.28235295414924622, 0.28235295414924622), (0.83613443374633789,
0.30196079611778259, 0.30196079611778259), (0.8403361439704895,
0.32549020648002625, 0.32549020648002625), (0.84453779458999634,
0.34509804844856262, 0.34509804844856262), (0.84873950481414795,
0.364705890417099, 0.364705890417099), (0.85294115543365479,
0.40784314274787903, 0.40784314274787903), (0.8571428656578064,
0.43137255311012268, 0.43137255311012268), (0.86134451627731323,
0.45098039507865906, 0.45098039507865906), (0.86554622650146484,
0.47058823704719543, 0.47058823704719543), (0.86974787712097168,
0.49411764740943909, 0.49411764740943909), (0.87394958734512329,
0.51372551918029785, 0.51372551918029785), (0.87815123796463013,
0.53333336114883423, 0.53333336114883423), (0.88235294818878174,
0.55686277151107788, 0.55686277151107788), (0.88655459880828857,
0.57647061347961426, 0.57647061347961426), (0.89075630903244019,
0.60000002384185791, 0.60000002384185791), (0.89495795965194702,
0.61960786581039429, 0.61960786581039429), (0.89915966987609863,
0.63921570777893066, 0.63921570777893066), (0.90336132049560547,
0.66274511814117432, 0.66274511814117432), (0.90756303071975708,
0.68235296010971069, 0.68235296010971069), (0.91176468133926392,
0.70588237047195435, 0.70588237047195435), (0.91596639156341553,
0.7450980544090271, 0.7450980544090271), (0.92016804218292236,
0.76862746477127075, 0.76862746477127075), (0.92436975240707397,
0.78823530673980713, 0.78823530673980713), (0.92857140302658081,
0.80784314870834351, 0.80784314870834351), (0.93277311325073242,
0.83137255907058716, 0.83137255907058716), (0.93697476387023926,
0.85098040103912354, 0.85098040103912354), (0.94117647409439087,
0.87450981140136719, 0.87450981140136719), (0.94537812471389771,
0.89411765336990356, 0.89411765336990356), (0.94957983493804932,
0.91372549533843994, 0.91372549533843994), (0.95378148555755615,
0.93725490570068359, 0.93725490570068359), (0.95798319578170776,
0.95686274766921997, 0.95686274766921997), (0.9621848464012146,
0.97647058963775635, 0.97647058963775635), (0.96638655662536621, 1.0,
1.0), (0.97058820724487305, 1.0, 1.0), (0.97478991746902466, 1.0, 1.0),
(0.97899156808853149, 1.0, 1.0), (0.98319327831268311, 1.0, 1.0),
(0.98739492893218994, 1.0, 1.0), (0.99159663915634155, 1.0, 1.0),
(0.99579828977584839, 1.0, 1.0), (1.0, 1.0, 1.0)]}
_gist_stern_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.011764706112444401,
0.011764706112444401), (0.012605042196810246, 0.019607843831181526,
0.019607843831181526), (0.016806723549962044, 0.027450980618596077,
0.027450980618596077), (0.021008403971791267, 0.035294119268655777,
0.035294119268655777), (0.025210084393620491, 0.043137256056070328,
0.043137256056070328), (0.029411764815449715, 0.050980392843484879,
0.050980392843484879), (0.033613447099924088, 0.058823529630899429,
0.058823529630899429), (0.037815127521753311, 0.066666670143604279,
0.066666670143604279), (0.042016807943582535, 0.08235294371843338,
0.08235294371843338), (0.046218488365411758, 0.090196080505847931,
0.090196080505847931), (0.050420168787240982, 0.098039217293262482,
0.098039217293262482), (0.054621849209070206, 0.10588235408067703,
0.10588235408067703), (0.058823529630899429, 0.11372549086809158,
0.11372549086809158), (0.063025213778018951, 0.12156862765550613,
0.12156862765550613), (0.067226894199848175, 0.12941177189350128,
0.12941177189350128), (0.071428574621677399, 0.13725490868091583,
0.13725490868091583), (0.075630255043506622, 0.14509804546833038,
0.14509804546833038), (0.079831935465335846, 0.15294118225574493,
0.15294118225574493), (0.08403361588716507, 0.16078431904315948,
0.16078431904315948), (0.088235296308994293, 0.16862745583057404,
0.16862745583057404), (0.092436976730823517, 0.17647059261798859,
0.17647059261798859), (0.09663865715265274, 0.18431372940540314,
0.18431372940540314), (0.10084033757448196, 0.19215686619281769,
0.19215686619281769), (0.10504201799631119, 0.20000000298023224,
0.20000000298023224), (0.10924369841814041, 0.20784313976764679,
0.20784313976764679), (0.11344537883996964, 0.21568627655506134,
0.21568627655506134), (0.11764705926179886, 0.22352941334247589,
0.22352941334247589), (0.12184873968362808, 0.23137255012989044,
0.23137255012989044), (0.1260504275560379, 0.24705882370471954,
0.24705882370471954), (0.13025210797786713, 0.25490197539329529,
0.25490197539329529), (0.13445378839969635, 0.26274511218070984,
0.26274511218070984), (0.13865546882152557, 0.27058824896812439,
0.27058824896812439), (0.1428571492433548, 0.27843138575553894,
0.27843138575553894), (0.14705882966518402, 0.28627452254295349,
0.28627452254295349), (0.15126051008701324, 0.29411765933036804,
0.29411765933036804), (0.15546219050884247, 0.30196079611778259,
0.30196079611778259), (0.15966387093067169, 0.30980393290519714,
0.30980393290519714), (0.16386555135250092, 0.31764706969261169,
0.31764706969261169), (0.16806723177433014, 0.32549020648002625,
0.32549020648002625), (0.17226891219615936, 0.3333333432674408,
0.3333333432674408), (0.17647059261798859, 0.34117648005485535,
0.34117648005485535), (0.18067227303981781, 0.3490196168422699,
0.3490196168422699), (0.18487395346164703, 0.35686275362968445,
0.35686275362968445), (0.18907563388347626, 0.364705890417099,
0.364705890417099), (0.19327731430530548, 0.37254902720451355,
0.37254902720451355), (0.1974789947271347, 0.3803921639919281,
0.3803921639919281), (0.20168067514896393, 0.38823530077934265,
0.38823530077934265), (0.20588235557079315, 0.3960784375667572,
0.3960784375667572), (0.21008403599262238, 0.4117647111415863,
0.4117647111415863), (0.2142857164144516, 0.41960784792900085,
0.41960784792900085), (0.21848739683628082, 0.42745098471641541,
0.42745098471641541), (0.22268907725811005, 0.43529412150382996,
0.43529412150382996), (0.22689075767993927, 0.44313725829124451,
0.44313725829124451), (0.23109243810176849, 0.45098039507865906,
0.45098039507865906), (0.23529411852359772, 0.45882353186607361,
0.45882353186607361), (0.23949579894542694, 0.46666666865348816,
0.46666666865348816), (0.24369747936725616, 0.47450980544090271,
0.47450980544090271), (0.24789915978908539, 0.48235294222831726,
0.48235294222831726), (0.25210085511207581, 0.49803921580314636,
0.49803921580314636), (0.25630253553390503, 0.5058823823928833,
0.5058823823928833), (0.26050421595573425, 0.51372551918029785,
0.51372551918029785), (0.26470589637756348, 0.5215686559677124,
0.5215686559677124), (0.2689075767993927, 0.52941179275512695,
0.52941179275512695), (0.27310925722122192, 0.5372549295425415,
0.5372549295425415), (0.27731093764305115, 0.54509806632995605,
0.54509806632995605), (0.28151261806488037, 0.55294120311737061,
0.55294120311737061), (0.28571429848670959, 0.56078433990478516,
0.56078433990478516), (0.28991597890853882, 0.56862747669219971,
0.56862747669219971), (0.29411765933036804, 0.58431375026702881,
0.58431375026702881), (0.29831933975219727, 0.59215688705444336,
0.59215688705444336), (0.30252102017402649, 0.60000002384185791,
0.60000002384185791), (0.30672270059585571, 0.60784316062927246,
0.60784316062927246), (0.31092438101768494, 0.61568629741668701,
0.61568629741668701), (0.31512606143951416, 0.62352943420410156,
0.62352943420410156), (0.31932774186134338, 0.63137257099151611,
0.63137257099151611), (0.32352942228317261, 0.63921570777893066,
0.63921570777893066), (0.32773110270500183, 0.64705884456634521,
0.64705884456634521), (0.33193278312683105, 0.65490198135375977,
0.65490198135375977), (0.33613446354866028, 0.66274511814117432,
0.66274511814117432), (0.3403361439704895, 0.67058825492858887,
0.67058825492858887), (0.34453782439231873, 0.67843139171600342,
0.67843139171600342), (0.34873950481414795, 0.68627452850341797,
0.68627452850341797), (0.35294118523597717, 0.69411766529083252,
0.69411766529083252), (0.3571428656578064, 0.70196080207824707,
0.70196080207824707), (0.36134454607963562, 0.70980393886566162,
0.70980393886566162), (0.36554622650146484, 0.71764707565307617,
0.71764707565307617), (0.36974790692329407, 0.72549021244049072,
0.72549021244049072), (0.37394958734512329, 0.73333334922790527,
0.73333334922790527), (0.37815126776695251, 0.74901962280273438,
0.74901962280273438), (0.38235294818878174, 0.75686275959014893,
0.75686275959014893), (0.38655462861061096, 0.76470589637756348,
0.76470589637756348), (0.39075630903244019, 0.77254903316497803,
0.77254903316497803), (0.39495798945426941, 0.78039216995239258,
0.78039216995239258), (0.39915966987609863, 0.78823530673980713,
0.78823530673980713), (0.40336135029792786, 0.79607844352722168,
0.79607844352722168), (0.40756303071975708, 0.80392158031463623,
0.80392158031463623), (0.4117647111415863, 0.81176471710205078,
0.81176471710205078), (0.41596639156341553, 0.81960785388946533,
0.81960785388946533), (0.42016807198524475, 0.82745099067687988,
0.82745099067687988), (0.42436975240707397, 0.83529412746429443,
0.83529412746429443), (0.4285714328289032, 0.84313726425170898,
0.84313726425170898), (0.43277311325073242, 0.85098040103912354,
0.85098040103912354), (0.43697479367256165, 0.85882353782653809,
0.85882353782653809), (0.44117647409439087, 0.86666667461395264,
0.86666667461395264), (0.44537815451622009, 0.87450981140136719,
0.87450981140136719), (0.44957983493804932, 0.88235294818878174,
0.88235294818878174), (0.45378151535987854, 0.89019608497619629,
0.89019608497619629), (0.45798319578170776, 0.89803922176361084,
0.89803922176361084), (0.46218487620353699, 0.91372549533843994,
0.91372549533843994), (0.46638655662536621, 0.92156863212585449,
0.92156863212585449), (0.47058823704719543, 0.92941176891326904,
0.92941176891326904), (0.47478991746902466, 0.93725490570068359,
0.93725490570068359), (0.47899159789085388, 0.94509804248809814,
0.94509804248809814), (0.48319327831268311, 0.9529411792755127,
0.9529411792755127), (0.48739495873451233, 0.96078431606292725,
0.96078431606292725), (0.49159663915634155, 0.9686274528503418,
0.9686274528503418), (0.49579831957817078, 0.97647058963775635,
0.97647058963775635), (0.5, 0.9843137264251709, 0.9843137264251709),
(0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 0.9843137264251709,
0.9843137264251709), (0.51260507106781006, 0.9686274528503418,
0.9686274528503418), (0.51680672168731689, 0.9529411792755127,
0.9529411792755127), (0.52100843191146851, 0.93333333730697632,
0.93333333730697632), (0.52521008253097534, 0.91764706373214722,
0.91764706373214722), (0.52941179275512695, 0.90196079015731812,
0.90196079015731812), (0.53361344337463379, 0.88627451658248901,
0.88627451658248901), (0.5378151535987854, 0.86666667461395264,
0.86666667461395264), (0.54201680421829224, 0.85098040103912354,
0.85098040103912354), (0.54621851444244385, 0.81960785388946533,
0.81960785388946533), (0.55042016506195068, 0.80000001192092896,
0.80000001192092896), (0.55462187528610229, 0.78431373834609985,
0.78431373834609985), (0.55882352590560913, 0.76862746477127075,
0.76862746477127075), (0.56302523612976074, 0.75294119119644165,
0.75294119119644165), (0.56722688674926758, 0.73333334922790527,
0.73333334922790527), (0.57142859697341919, 0.71764707565307617,
0.71764707565307617), (0.57563024759292603, 0.70196080207824707,
0.70196080207824707), (0.57983195781707764, 0.68627452850341797,
0.68627452850341797), (0.58403360843658447, 0.66666668653488159,
0.66666668653488159), (0.58823531866073608, 0.65098041296005249,
0.65098041296005249), (0.59243696928024292, 0.63529413938522339,
0.63529413938522339), (0.59663867950439453, 0.61960786581039429,
0.61960786581039429), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.58431375026702881,
0.58431375026702881), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.55294120311737061,
0.55294120311737061), (0.61764705181121826, 0.53333336114883423,
0.53333336114883423), (0.62184876203536987, 0.51764708757400513,
0.51764708757400513), (0.62605041265487671, 0.50196081399917603,
0.50196081399917603), (0.63025212287902832, 0.46666666865348816,
0.46666666865348816), (0.63445377349853516, 0.45098039507865906,
0.45098039507865906), (0.63865548372268677, 0.43529412150382996,
0.43529412150382996), (0.6428571343421936, 0.41960784792900085,
0.41960784792900085), (0.64705884456634521, 0.40000000596046448,
0.40000000596046448), (0.65126049518585205, 0.38431373238563538,
0.38431373238563538), (0.65546220541000366, 0.36862745881080627,
0.36862745881080627), (0.6596638560295105, 0.35294118523597717,
0.35294118523597717), (0.66386556625366211, 0.3333333432674408,
0.3333333432674408), (0.66806721687316895, 0.31764706969261169,
0.31764706969261169), (0.67226892709732056, 0.30196079611778259,
0.30196079611778259), (0.67647057771682739, 0.28627452254295349,
0.28627452254295349), (0.680672287940979, 0.26666668057441711,
0.26666668057441711), (0.68487393856048584, 0.25098040699958801,
0.25098040699958801), (0.68907564878463745, 0.23529411852359772,
0.23529411852359772), (0.69327729940414429, 0.21960784494876862,
0.21960784494876862), (0.6974790096282959, 0.20000000298023224,
0.20000000298023224), (0.70168066024780273, 0.18431372940540314,
0.18431372940540314), (0.70588237047195435, 0.16862745583057404,
0.16862745583057404), (0.71008402109146118, 0.15294118225574493,
0.15294118225574493), (0.71428573131561279, 0.11764705926179886,
0.11764705926179886), (0.71848738193511963, 0.10196078568696976,
0.10196078568696976), (0.72268909215927124, 0.086274512112140656,
0.086274512112140656), (0.72689074277877808, 0.066666670143604279,
0.066666670143604279), (0.73109245300292969, 0.050980392843484879,
0.050980392843484879), (0.73529410362243652, 0.035294119268655777,
0.035294119268655777), (0.73949581384658813, 0.019607843831181526,
0.019607843831181526), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.011764706112444401, 0.011764706112444401),
(0.75210082530975342, 0.027450980618596077, 0.027450980618596077),
(0.75630253553390503, 0.058823529630899429, 0.058823529630899429),
(0.76050418615341187, 0.074509806931018829, 0.074509806931018829),
(0.76470589637756348, 0.086274512112140656, 0.086274512112140656),
(0.76890754699707031, 0.10196078568696976, 0.10196078568696976),
(0.77310925722122192, 0.11764705926179886, 0.11764705926179886),
(0.77731090784072876, 0.13333334028720856, 0.13333334028720856),
(0.78151261806488037, 0.14901961386203766, 0.14901961386203766),
(0.78571426868438721, 0.16078431904315948, 0.16078431904315948),
(0.78991597890853882, 0.17647059261798859, 0.17647059261798859),
(0.79411762952804565, 0.19215686619281769, 0.19215686619281769),
(0.79831933975219727, 0.22352941334247589, 0.22352941334247589),
(0.8025209903717041, 0.23529411852359772, 0.23529411852359772),
(0.80672270059585571, 0.25098040699958801, 0.25098040699958801),
(0.81092435121536255, 0.26666668057441711, 0.26666668057441711),
(0.81512606143951416, 0.28235295414924622, 0.28235295414924622),
(0.819327712059021, 0.29803922772407532, 0.29803922772407532),
(0.82352942228317261, 0.30980393290519714, 0.30980393290519714),
(0.82773107290267944, 0.32549020648002625, 0.32549020648002625),
(0.83193278312683105, 0.34117648005485535, 0.34117648005485535),
(0.83613443374633789, 0.35686275362968445, 0.35686275362968445),
(0.8403361439704895, 0.37254902720451355, 0.37254902720451355),
(0.84453779458999634, 0.38431373238563538, 0.38431373238563538),
(0.84873950481414795, 0.40000000596046448, 0.40000000596046448),
(0.85294115543365479, 0.41568627953529358, 0.41568627953529358),
(0.8571428656578064, 0.43137255311012268, 0.43137255311012268),
(0.86134451627731323, 0.44705882668495178, 0.44705882668495178),
(0.86554622650146484, 0.45882353186607361, 0.45882353186607361),
(0.86974787712097168, 0.47450980544090271, 0.47450980544090271),
(0.87394958734512329, 0.49019607901573181, 0.49019607901573181),
(0.87815123796463013, 0.5058823823928833, 0.5058823823928833),
(0.88235294818878174, 0.5372549295425415, 0.5372549295425415),
(0.88655459880828857, 0.54901963472366333, 0.54901963472366333),
(0.89075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.89495795965194702, 0.58039218187332153, 0.58039218187332153),
(0.89915966987609863, 0.59607845544815063, 0.59607845544815063),
(0.90336132049560547, 0.61176472902297974, 0.61176472902297974),
(0.90756303071975708, 0.62352943420410156, 0.62352943420410156),
(0.91176468133926392, 0.63921570777893066, 0.63921570777893066),
(0.91596639156341553, 0.65490198135375977, 0.65490198135375977),
(0.92016804218292236, 0.67058825492858887, 0.67058825492858887),
(0.92436975240707397, 0.68627452850341797, 0.68627452850341797),
(0.92857140302658081, 0.69803923368453979, 0.69803923368453979),
(0.93277311325073242, 0.7137255072593689, 0.7137255072593689),
(0.93697476387023926, 0.729411780834198, 0.729411780834198),
(0.94117647409439087, 0.7450980544090271, 0.7450980544090271),
(0.94537812471389771, 0.7607843279838562, 0.7607843279838562),
(0.94957983493804932, 0.77254903316497803, 0.77254903316497803),
(0.95378148555755615, 0.78823530673980713, 0.78823530673980713),
(0.95798319578170776, 0.80392158031463623, 0.80392158031463623),
(0.9621848464012146, 0.81960785388946533, 0.81960785388946533),
(0.96638655662536621, 0.84705883264541626, 0.84705883264541626),
(0.97058820724487305, 0.86274510622024536, 0.86274510622024536),
(0.97478991746902466, 0.87843137979507446, 0.87843137979507446),
(0.97899156808853149, 0.89411765336990356, 0.89411765336990356),
(0.98319327831268311, 0.90980392694473267, 0.90980392694473267),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.031372550874948502, 0.031372550874948502),
(0.037815127521753311, 0.035294119268655777, 0.035294119268655777),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.094117648899555206, 0.094117648899555206),
(0.10084033757448196, 0.098039217293262482, 0.098039217293262482),
(0.10504201799631119, 0.10196078568696976, 0.10196078568696976),
(0.10924369841814041, 0.10588235408067703, 0.10588235408067703),
(0.11344537883996964, 0.10980392247438431, 0.10980392247438431),
(0.11764705926179886, 0.11372549086809158, 0.11372549086809158),
(0.12184873968362808, 0.11764705926179886, 0.11764705926179886),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.15686275064945221, 0.15686275064945221),
(0.16386555135250092, 0.16078431904315948, 0.16078431904315948),
(0.16806723177433014, 0.16470588743686676, 0.16470588743686676),
(0.17226891219615936, 0.16862745583057404, 0.16862745583057404),
(0.17647059261798859, 0.17254902422428131, 0.17254902422428131),
(0.18067227303981781, 0.17647059261798859, 0.17647059261798859),
(0.18487395346164703, 0.18039216101169586, 0.18039216101169586),
(0.18907563388347626, 0.18431372940540314, 0.18431372940540314),
(0.19327731430530548, 0.18823529779911041, 0.18823529779911041),
(0.1974789947271347, 0.19215686619281769, 0.19215686619281769),
(0.20168067514896393, 0.19607843458652496, 0.19607843458652496),
(0.20588235557079315, 0.20000000298023224, 0.20000000298023224),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.21960784494876862, 0.21960784494876862),
(0.22689075767993927, 0.22352941334247589, 0.22352941334247589),
(0.23109243810176849, 0.22745098173618317, 0.22745098173618317),
(0.23529411852359772, 0.23137255012989044, 0.23137255012989044),
(0.23949579894542694, 0.23529411852359772, 0.23529411852359772),
(0.24369747936725616, 0.23921568691730499, 0.23921568691730499),
(0.24789915978908539, 0.24313725531101227, 0.24313725531101227),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28235295414924622, 0.28235295414924622),
(0.28991597890853882, 0.28627452254295349, 0.28627452254295349),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.34509804844856262, 0.34509804844856262),
(0.35294118523597717, 0.3490196168422699, 0.3490196168422699),
(0.3571428656578064, 0.35294118523597717, 0.35294118523597717),
(0.36134454607963562, 0.35686275362968445, 0.35686275362968445),
(0.36554622650146484, 0.36078432202339172, 0.36078432202339172),
(0.36974790692329407, 0.364705890417099, 0.364705890417099),
(0.37394958734512329, 0.36862745881080627, 0.36862745881080627),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.40784314274787903, 0.40784314274787903),
(0.41596639156341553, 0.4117647111415863, 0.4117647111415863),
(0.42016807198524475, 0.41568627953529358, 0.41568627953529358),
(0.42436975240707397, 0.41960784792900085, 0.41960784792900085),
(0.4285714328289032, 0.42352941632270813, 0.42352941632270813),
(0.43277311325073242, 0.42745098471641541, 0.42745098471641541),
(0.43697479367256165, 0.43137255311012268, 0.43137255311012268),
(0.44117647409439087, 0.43529412150382996, 0.43529412150382996),
(0.44537815451622009, 0.43921568989753723, 0.43921568989753723),
(0.44957983493804932, 0.44313725829124451, 0.44313725829124451),
(0.45378151535987854, 0.44705882668495178, 0.44705882668495178),
(0.45798319578170776, 0.45098039507865906, 0.45098039507865906),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47058823704719543, 0.47058823704719543),
(0.47899159789085388, 0.47450980544090271, 0.47450980544090271),
(0.48319327831268311, 0.47843137383460999, 0.47843137383460999),
(0.48739495873451233, 0.48235294222831726, 0.48235294222831726),
(0.49159663915634155, 0.48627451062202454, 0.48627451062202454),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.49411764740943909, 0.49411764740943909), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.53333336114883423, 0.53333336114883423), (0.54201680421829224,
0.5372549295425415, 0.5372549295425415), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.59607845544815063, 0.59607845544815063), (0.60504204034805298,
0.60000002384185791, 0.60000002384185791), (0.60924369096755981,
0.60392159223556519, 0.60392159223556519), (0.61344540119171143,
0.60784316062927246, 0.60784316062927246), (0.61764705181121826,
0.61176472902297974, 0.61176472902297974), (0.62184876203536987,
0.61568629741668701, 0.61568629741668701), (0.62605041265487671,
0.61960786581039429, 0.61960786581039429), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.65882354974746704, 0.65882354974746704), (0.66806721687316895,
0.66274511814117432, 0.66274511814117432), (0.67226892709732056,
0.66666668653488159, 0.66666668653488159), (0.67647057771682739,
0.67058825492858887, 0.67058825492858887), (0.680672287940979,
0.67450982332229614, 0.67450982332229614), (0.68487393856048584,
0.67843139171600342, 0.67843139171600342), (0.68907564878463745,
0.68235296010971069, 0.68235296010971069), (0.69327729940414429,
0.68627452850341797, 0.68627452850341797), (0.6974790096282959,
0.69019609689712524, 0.69019609689712524), (0.70168066024780273,
0.69411766529083252, 0.69411766529083252), (0.70588237047195435,
0.69803923368453979, 0.69803923368453979), (0.71008402109146118,
0.70196080207824707, 0.70196080207824707), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72156864404678345, 0.72156864404678345), (0.73109245300292969,
0.72549021244049072, 0.72549021244049072), (0.73529410362243652,
0.729411780834198, 0.729411780834198), (0.73949581384658813,
0.73333334922790527, 0.73333334922790527), (0.74369746446609497,
0.73725491762161255, 0.73725491762161255), (0.74789917469024658,
0.74117648601531982, 0.74117648601531982), (0.75210082530975342,
0.7450980544090271, 0.7450980544090271), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78431373834609985, 0.78431373834609985), (0.79411762952804565,
0.78823530673980713, 0.78823530673980713), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.84705883264541626, 0.84705883264541626), (0.8571428656578064,
0.85098040103912354, 0.85098040103912354), (0.86134451627731323,
0.85490196943283081, 0.85490196943283081), (0.86554622650146484,
0.85882353782653809, 0.85882353782653809), (0.86974787712097168,
0.86274510622024536, 0.86274510622024536), (0.87394958734512329,
0.86666667461395264, 0.86666667461395264), (0.87815123796463013,
0.87058824300765991, 0.87058824300765991), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.90980392694473267, 0.90980392694473267), (0.92016804218292236,
0.91372549533843994, 0.91372549533843994), (0.92436975240707397,
0.91764706373214722, 0.91764706373214722), (0.92857140302658081,
0.92156863212585449, 0.92156863212585449), (0.93277311325073242,
0.92549020051956177, 0.92549020051956177), (0.93697476387023926,
0.92941176891326904, 0.92941176891326904), (0.94117647409439087,
0.93333333730697632, 0.93333333730697632), (0.94537812471389771,
0.93725490570068359, 0.93725490570068359), (0.94957983493804932,
0.94117647409439087, 0.94117647409439087), (0.95378148555755615,
0.94509804248809814, 0.94509804248809814), (0.95798319578170776,
0.94901961088180542, 0.94901961088180542), (0.9621848464012146,
0.9529411792755127, 0.9529411792755127), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97254902124404907, 0.97254902124404907), (0.98319327831268311,
0.97647058963775635, 0.97647058963775635), (0.98739492893218994,
0.98039215803146362, 0.98039215803146362), (0.99159663915634155,
0.9843137264251709, 0.9843137264251709), (0.99579828977584839,
0.98823529481887817, 0.98823529481887817), (1.0, 0.99215686321258545,
0.99215686321258545)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.070588238537311554, 0.070588238537311554), (0.0084033617749810219,
0.14117647707462311, 0.14117647707462311), (0.012605042196810246,
0.21176470816135406, 0.21176470816135406), (0.016806723549962044,
0.28235295414924622, 0.28235295414924622), (0.021008403971791267,
0.35294118523597717, 0.35294118523597717), (0.025210084393620491,
0.42352941632270813, 0.42352941632270813), (0.029411764815449715,
0.49803921580314636, 0.49803921580314636), (0.033613447099924088,
0.56862747669219971, 0.56862747669219971), (0.037815127521753311,
0.63921570777893066, 0.63921570777893066), (0.042016807943582535,
0.78039216995239258, 0.78039216995239258), (0.046218488365411758,
0.85098040103912354, 0.85098040103912354), (0.050420168787240982,
0.92156863212585449, 0.92156863212585449), (0.054621849209070206,
0.99607843160629272, 0.99607843160629272), (0.058823529630899429,
0.97647058963775635, 0.97647058963775635), (0.063025213778018951,
0.95686274766921997, 0.95686274766921997), (0.067226894199848175,
0.93725490570068359, 0.93725490570068359), (0.071428574621677399,
0.91764706373214722, 0.91764706373214722), (0.075630255043506622,
0.89803922176361084, 0.89803922176361084), (0.079831935465335846,
0.87450981140136719, 0.87450981140136719), (0.08403361588716507,
0.85490196943283081, 0.85490196943283081), (0.088235296308994293,
0.83529412746429443, 0.83529412746429443), (0.092436976730823517,
0.81568628549575806, 0.81568628549575806), (0.09663865715265274,
0.79607844352722168, 0.79607844352722168), (0.10084033757448196,
0.77254903316497803, 0.77254903316497803), (0.10504201799631119,
0.75294119119644165, 0.75294119119644165), (0.10924369841814041,
0.73333334922790527, 0.73333334922790527), (0.11344537883996964,
0.7137255072593689, 0.7137255072593689), (0.11764705926179886,
0.69411766529083252, 0.69411766529083252), (0.12184873968362808,
0.67450982332229614, 0.67450982332229614), (0.1260504275560379,
0.63137257099151611, 0.63137257099151611), (0.13025210797786713,
0.61176472902297974, 0.61176472902297974), (0.13445378839969635,
0.59215688705444336, 0.59215688705444336), (0.13865546882152557,
0.57254904508590698, 0.57254904508590698), (0.1428571492433548,
0.54901963472366333, 0.54901963472366333), (0.14705882966518402,
0.52941179275512695, 0.52941179275512695), (0.15126051008701324,
0.50980395078659058, 0.50980395078659058), (0.15546219050884247,
0.49019607901573181, 0.49019607901573181), (0.15966387093067169,
0.47058823704719543, 0.47058823704719543), (0.16386555135250092,
0.45098039507865906, 0.45098039507865906), (0.16806723177433014,
0.42745098471641541, 0.42745098471641541), (0.17226891219615936,
0.40784314274787903, 0.40784314274787903), (0.17647059261798859,
0.38823530077934265, 0.38823530077934265), (0.18067227303981781,
0.36862745881080627, 0.36862745881080627), (0.18487395346164703,
0.3490196168422699, 0.3490196168422699), (0.18907563388347626,
0.32549020648002625, 0.32549020648002625), (0.19327731430530548,
0.30588236451148987, 0.30588236451148987), (0.1974789947271347,
0.28627452254295349, 0.28627452254295349), (0.20168067514896393,
0.26666668057441711, 0.26666668057441711), (0.20588235557079315,
0.24705882370471954, 0.24705882370471954), (0.21008403599262238,
0.20392157137393951, 0.20392157137393951), (0.2142857164144516,
0.18431372940540314, 0.18431372940540314), (0.21848739683628082,
0.16470588743686676, 0.16470588743686676), (0.22268907725811005,
0.14509804546833038, 0.14509804546833038), (0.22689075767993927,
0.12549020349979401, 0.12549020349979401), (0.23109243810176849,
0.10196078568696976, 0.10196078568696976), (0.23529411852359772,
0.08235294371843338, 0.08235294371843338), (0.23949579894542694,
0.062745101749897003, 0.062745101749897003), (0.24369747936725616,
0.043137256056070328, 0.043137256056070328), (0.24789915978908539,
0.023529412224888802, 0.023529412224888802), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28235295414924622, 0.28235295414924622), (0.28991597890853882,
0.28627452254295349, 0.28627452254295349), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.34509804844856262, 0.34509804844856262), (0.35294118523597717,
0.3490196168422699, 0.3490196168422699), (0.3571428656578064,
0.35294118523597717, 0.35294118523597717), (0.36134454607963562,
0.35686275362968445, 0.35686275362968445), (0.36554622650146484,
0.36078432202339172, 0.36078432202339172), (0.36974790692329407,
0.364705890417099, 0.364705890417099), (0.37394958734512329,
0.36862745881080627, 0.36862745881080627), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.40784314274787903, 0.40784314274787903), (0.41596639156341553,
0.4117647111415863, 0.4117647111415863), (0.42016807198524475,
0.41568627953529358, 0.41568627953529358), (0.42436975240707397,
0.41960784792900085, 0.41960784792900085), (0.4285714328289032,
0.42352941632270813, 0.42352941632270813), (0.43277311325073242,
0.42745098471641541, 0.42745098471641541), (0.43697479367256165,
0.43137255311012268, 0.43137255311012268), (0.44117647409439087,
0.43529412150382996, 0.43529412150382996), (0.44537815451622009,
0.43921568989753723, 0.43921568989753723), (0.44957983493804932,
0.44313725829124451, 0.44313725829124451), (0.45378151535987854,
0.44705882668495178, 0.44705882668495178), (0.45798319578170776,
0.45098039507865906, 0.45098039507865906), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47058823704719543, 0.47058823704719543), (0.47899159789085388,
0.47450980544090271, 0.47450980544090271), (0.48319327831268311,
0.47843137383460999, 0.47843137383460999), (0.48739495873451233,
0.48235294222831726, 0.48235294222831726), (0.49159663915634155,
0.48627451062202454, 0.48627451062202454), (0.49579831957817078,
0.49019607901573181, 0.49019607901573181), (0.5, 0.49411764740943909,
0.49411764740943909), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.53333336114883423,
0.53333336114883423), (0.54201680421829224, 0.5372549295425415,
0.5372549295425415), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.59607845544815063,
0.59607845544815063), (0.60504204034805298, 0.60000002384185791,
0.60000002384185791), (0.60924369096755981, 0.60392159223556519,
0.60392159223556519), (0.61344540119171143, 0.60784316062927246,
0.60784316062927246), (0.61764705181121826, 0.61176472902297974,
0.61176472902297974), (0.62184876203536987, 0.61568629741668701,
0.61568629741668701), (0.62605041265487671, 0.61960786581039429,
0.61960786581039429), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66274511814117432,
0.66274511814117432), (0.67226892709732056, 0.66666668653488159,
0.66666668653488159), (0.67647057771682739, 0.67058825492858887,
0.67058825492858887), (0.680672287940979, 0.67450982332229614,
0.67450982332229614), (0.68487393856048584, 0.67843139171600342,
0.67843139171600342), (0.68907564878463745, 0.68235296010971069,
0.68235296010971069), (0.69327729940414429, 0.68627452850341797,
0.68627452850341797), (0.6974790096282959, 0.69019609689712524,
0.69019609689712524), (0.70168066024780273, 0.69411766529083252,
0.69411766529083252), (0.70588237047195435, 0.69803923368453979,
0.69803923368453979), (0.71008402109146118, 0.70196080207824707,
0.70196080207824707), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72156864404678345,
0.72156864404678345), (0.73109245300292969, 0.72549021244049072,
0.72549021244049072), (0.73529410362243652, 0.729411780834198,
0.729411780834198), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.74117648601531982,
0.74117648601531982), (0.75210082530975342, 0.7450980544090271,
0.7450980544090271), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78431373834609985,
0.78431373834609985), (0.79411762952804565, 0.78823530673980713,
0.78823530673980713), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.84705883264541626,
0.84705883264541626), (0.8571428656578064, 0.85098040103912354,
0.85098040103912354), (0.86134451627731323, 0.85490196943283081,
0.85490196943283081), (0.86554622650146484, 0.85882353782653809,
0.85882353782653809), (0.86974787712097168, 0.86274510622024536,
0.86274510622024536), (0.87394958734512329, 0.86666667461395264,
0.86666667461395264), (0.87815123796463013, 0.87058824300765991,
0.87058824300765991), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.90980392694473267,
0.90980392694473267), (0.92016804218292236, 0.91372549533843994,
0.91372549533843994), (0.92436975240707397, 0.91764706373214722,
0.91764706373214722), (0.92857140302658081, 0.92156863212585449,
0.92156863212585449), (0.93277311325073242, 0.92549020051956177,
0.92549020051956177), (0.93697476387023926, 0.92941176891326904,
0.92941176891326904), (0.94117647409439087, 0.93333333730697632,
0.93333333730697632), (0.94537812471389771, 0.93725490570068359,
0.93725490570068359), (0.94957983493804932, 0.94117647409439087,
0.94117647409439087), (0.95378148555755615, 0.94509804248809814,
0.94509804248809814), (0.95798319578170776, 0.94901961088180542,
0.94901961088180542), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97254902124404907,
0.97254902124404907), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
_gist_yarg_data = {'blue': [(0.0, 1.0, 1.0), (0.0042016808874905109,
0.99607843160629272, 0.99607843160629272), (0.0084033617749810219,
0.99215686321258545, 0.99215686321258545), (0.012605042196810246,
0.98823529481887817, 0.98823529481887817), (0.016806723549962044,
0.9843137264251709, 0.9843137264251709), (0.021008403971791267,
0.98039215803146362, 0.98039215803146362), (0.025210084393620491,
0.97647058963775635, 0.97647058963775635), (0.029411764815449715,
0.97254902124404907, 0.97254902124404907), (0.033613447099924088,
0.96470588445663452, 0.96470588445663452), (0.037815127521753311,
0.96078431606292725, 0.96078431606292725), (0.042016807943582535,
0.95686274766921997, 0.95686274766921997), (0.046218488365411758,
0.9529411792755127, 0.9529411792755127), (0.050420168787240982,
0.94901961088180542, 0.94901961088180542), (0.054621849209070206,
0.94509804248809814, 0.94509804248809814), (0.058823529630899429,
0.94117647409439087, 0.94117647409439087), (0.063025213778018951,
0.93725490570068359, 0.93725490570068359), (0.067226894199848175,
0.93333333730697632, 0.93333333730697632), (0.071428574621677399,
0.92941176891326904, 0.92941176891326904), (0.075630255043506622,
0.92549020051956177, 0.92549020051956177), (0.079831935465335846,
0.92156863212585449, 0.92156863212585449), (0.08403361588716507,
0.91764706373214722, 0.91764706373214722), (0.088235296308994293,
0.91372549533843994, 0.91372549533843994), (0.092436976730823517,
0.90980392694473267, 0.90980392694473267), (0.09663865715265274,
0.90196079015731812, 0.90196079015731812), (0.10084033757448196,
0.89803922176361084, 0.89803922176361084), (0.10504201799631119,
0.89411765336990356, 0.89411765336990356), (0.10924369841814041,
0.89019608497619629, 0.89019608497619629), (0.11344537883996964,
0.88627451658248901, 0.88627451658248901), (0.11764705926179886,
0.88235294818878174, 0.88235294818878174), (0.12184873968362808,
0.87843137979507446, 0.87843137979507446), (0.1260504275560379,
0.87450981140136719, 0.87450981140136719), (0.13025210797786713,
0.87058824300765991, 0.87058824300765991), (0.13445378839969635,
0.86666667461395264, 0.86666667461395264), (0.13865546882152557,
0.86274510622024536, 0.86274510622024536), (0.1428571492433548,
0.85882353782653809, 0.85882353782653809), (0.14705882966518402,
0.85490196943283081, 0.85490196943283081), (0.15126051008701324,
0.85098040103912354, 0.85098040103912354), (0.15546219050884247,
0.84705883264541626, 0.84705883264541626), (0.15966387093067169,
0.83921569585800171, 0.83921569585800171), (0.16386555135250092,
0.83529412746429443, 0.83529412746429443), (0.16806723177433014,
0.83137255907058716, 0.83137255907058716), (0.17226891219615936,
0.82745099067687988, 0.82745099067687988), (0.17647059261798859,
0.82352942228317261, 0.82352942228317261), (0.18067227303981781,
0.81960785388946533, 0.81960785388946533), (0.18487395346164703,
0.81568628549575806, 0.81568628549575806), (0.18907563388347626,
0.81176471710205078, 0.81176471710205078), (0.19327731430530548,
0.80784314870834351, 0.80784314870834351), (0.1974789947271347,
0.80392158031463623, 0.80392158031463623), (0.20168067514896393,
0.80000001192092896, 0.80000001192092896), (0.20588235557079315,
0.79607844352722168, 0.79607844352722168), (0.21008403599262238,
0.7921568751335144, 0.7921568751335144), (0.2142857164144516,
0.78823530673980713, 0.78823530673980713), (0.21848739683628082,
0.78431373834609985, 0.78431373834609985), (0.22268907725811005,
0.7764706015586853, 0.7764706015586853), (0.22689075767993927,
0.77254903316497803, 0.77254903316497803), (0.23109243810176849,
0.76862746477127075, 0.76862746477127075), (0.23529411852359772,
0.76470589637756348, 0.76470589637756348), (0.23949579894542694,
0.7607843279838562, 0.7607843279838562), (0.24369747936725616,
0.75686275959014893, 0.75686275959014893), (0.24789915978908539,
0.75294119119644165, 0.75294119119644165), (0.25210085511207581,
0.74901962280273438, 0.74901962280273438), (0.25630253553390503,
0.7450980544090271, 0.7450980544090271), (0.26050421595573425,
0.74117648601531982, 0.74117648601531982), (0.26470589637756348,
0.73725491762161255, 0.73725491762161255), (0.2689075767993927,
0.73333334922790527, 0.73333334922790527), (0.27310925722122192,
0.729411780834198, 0.729411780834198), (0.27731093764305115,
0.72549021244049072, 0.72549021244049072), (0.28151261806488037,
0.72156864404678345, 0.72156864404678345), (0.28571429848670959,
0.7137255072593689, 0.7137255072593689), (0.28991597890853882,
0.70980393886566162, 0.70980393886566162), (0.29411765933036804,
0.70588237047195435, 0.70588237047195435), (0.29831933975219727,
0.70196080207824707, 0.70196080207824707), (0.30252102017402649,
0.69803923368453979, 0.69803923368453979), (0.30672270059585571,
0.69411766529083252, 0.69411766529083252), (0.31092438101768494,
0.69019609689712524, 0.69019609689712524), (0.31512606143951416,
0.68627452850341797, 0.68627452850341797), (0.31932774186134338,
0.68235296010971069, 0.68235296010971069), (0.32352942228317261,
0.67843139171600342, 0.67843139171600342), (0.32773110270500183,
0.67450982332229614, 0.67450982332229614), (0.33193278312683105,
0.67058825492858887, 0.67058825492858887), (0.33613446354866028,
0.66666668653488159, 0.66666668653488159), (0.3403361439704895,
0.66274511814117432, 0.66274511814117432), (0.34453782439231873,
0.65882354974746704, 0.65882354974746704), (0.34873950481414795,
0.65098041296005249, 0.65098041296005249), (0.35294118523597717,
0.64705884456634521, 0.64705884456634521), (0.3571428656578064,
0.64313727617263794, 0.64313727617263794), (0.36134454607963562,
0.63921570777893066, 0.63921570777893066), (0.36554622650146484,
0.63529413938522339, 0.63529413938522339), (0.36974790692329407,
0.63137257099151611, 0.63137257099151611), (0.37394958734512329,
0.62745100259780884, 0.62745100259780884), (0.37815126776695251,
0.62352943420410156, 0.62352943420410156), (0.38235294818878174,
0.61960786581039429, 0.61960786581039429), (0.38655462861061096,
0.61568629741668701, 0.61568629741668701), (0.39075630903244019,
0.61176472902297974, 0.61176472902297974), (0.39495798945426941,
0.60784316062927246, 0.60784316062927246), (0.39915966987609863,
0.60392159223556519, 0.60392159223556519), (0.40336135029792786,
0.60000002384185791, 0.60000002384185791), (0.40756303071975708,
0.59607845544815063, 0.59607845544815063), (0.4117647111415863,
0.58823531866073608, 0.58823531866073608), (0.41596639156341553,
0.58431375026702881, 0.58431375026702881), (0.42016807198524475,
0.58039218187332153, 0.58039218187332153), (0.42436975240707397,
0.57647061347961426, 0.57647061347961426), (0.4285714328289032,
0.57254904508590698, 0.57254904508590698), (0.43277311325073242,
0.56862747669219971, 0.56862747669219971), (0.43697479367256165,
0.56470590829849243, 0.56470590829849243), (0.44117647409439087,
0.56078433990478516, 0.56078433990478516), (0.44537815451622009,
0.55686277151107788, 0.55686277151107788), (0.44957983493804932,
0.55294120311737061, 0.55294120311737061), (0.45378151535987854,
0.54901963472366333, 0.54901963472366333), (0.45798319578170776,
0.54509806632995605, 0.54509806632995605), (0.46218487620353699,
0.54117649793624878, 0.54117649793624878), (0.46638655662536621,
0.5372549295425415, 0.5372549295425415), (0.47058823704719543,
0.53333336114883423, 0.53333336114883423), (0.47478991746902466,
0.52549022436141968, 0.52549022436141968), (0.47899159789085388,
0.5215686559677124, 0.5215686559677124), (0.48319327831268311,
0.51764708757400513, 0.51764708757400513), (0.48739495873451233,
0.51372551918029785, 0.51372551918029785), (0.49159663915634155,
0.50980395078659058, 0.50980395078659058), (0.49579831957817078,
0.5058823823928833, 0.5058823823928833), (0.5, 0.50196081399917603,
0.50196081399917603), (0.50420171022415161, 0.49803921580314636,
0.49803921580314636), (0.50840336084365845, 0.49411764740943909,
0.49411764740943909), (0.51260507106781006, 0.49019607901573181,
0.49019607901573181), (0.51680672168731689, 0.48627451062202454,
0.48627451062202454), (0.52100843191146851, 0.48235294222831726,
0.48235294222831726), (0.52521008253097534, 0.47843137383460999,
0.47843137383460999), (0.52941179275512695, 0.47450980544090271,
0.47450980544090271), (0.53361344337463379, 0.47058823704719543,
0.47058823704719543), (0.5378151535987854, 0.46274510025978088,
0.46274510025978088), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.45490196347236633,
0.45490196347236633), (0.55042016506195068, 0.45098039507865906,
0.45098039507865906), (0.55462187528610229, 0.44705882668495178,
0.44705882668495178), (0.55882352590560913, 0.44313725829124451,
0.44313725829124451), (0.56302523612976074, 0.43921568989753723,
0.43921568989753723), (0.56722688674926758, 0.43529412150382996,
0.43529412150382996), (0.57142859697341919, 0.43137255311012268,
0.43137255311012268), (0.57563024759292603, 0.42745098471641541,
0.42745098471641541), (0.57983195781707764, 0.42352941632270813,
0.42352941632270813), (0.58403360843658447, 0.41960784792900085,
0.41960784792900085), (0.58823531866073608, 0.41568627953529358,
0.41568627953529358), (0.59243696928024292, 0.4117647111415863,
0.4117647111415863), (0.59663867950439453, 0.40784314274787903,
0.40784314274787903), (0.60084033012390137, 0.40000000596046448,
0.40000000596046448), (0.60504204034805298, 0.3960784375667572,
0.3960784375667572), (0.60924369096755981, 0.39215686917304993,
0.39215686917304993), (0.61344540119171143, 0.38823530077934265,
0.38823530077934265), (0.61764705181121826, 0.38431373238563538,
0.38431373238563538), (0.62184876203536987, 0.3803921639919281,
0.3803921639919281), (0.62605041265487671, 0.37647059559822083,
0.37647059559822083), (0.63025212287902832, 0.37254902720451355,
0.37254902720451355), (0.63445377349853516, 0.36862745881080627,
0.36862745881080627), (0.63865548372268677, 0.364705890417099,
0.364705890417099), (0.6428571343421936, 0.36078432202339172,
0.36078432202339172), (0.64705884456634521, 0.35686275362968445,
0.35686275362968445), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.3490196168422699,
0.3490196168422699), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.33725491166114807,
0.33725491166114807), (0.66806721687316895, 0.3333333432674408,
0.3333333432674408), (0.67226892709732056, 0.32941177487373352,
0.32941177487373352), (0.67647057771682739, 0.32549020648002625,
0.32549020648002625), (0.680672287940979, 0.32156863808631897,
0.32156863808631897), (0.68487393856048584, 0.31764706969261169,
0.31764706969261169), (0.68907564878463745, 0.31372550129890442,
0.31372550129890442), (0.69327729940414429, 0.30980393290519714,
0.30980393290519714), (0.6974790096282959, 0.30588236451148987,
0.30588236451148987), (0.70168066024780273, 0.30196079611778259,
0.30196079611778259), (0.70588237047195435, 0.29803922772407532,
0.29803922772407532), (0.71008402109146118, 0.29411765933036804,
0.29411765933036804), (0.71428573131561279, 0.29019609093666077,
0.29019609093666077), (0.71848738193511963, 0.28627452254295349,
0.28627452254295349), (0.72268909215927124, 0.28235295414924622,
0.28235295414924622), (0.72689074277877808, 0.27450981736183167,
0.27450981736183167), (0.73109245300292969, 0.27058824896812439,
0.27058824896812439), (0.73529410362243652, 0.26666668057441711,
0.26666668057441711), (0.73949581384658813, 0.26274511218070984,
0.26274511218070984), (0.74369746446609497, 0.25882354378700256,
0.25882354378700256), (0.74789917469024658, 0.25490197539329529,
0.25490197539329529), (0.75210082530975342, 0.25098040699958801,
0.25098040699958801), (0.75630253553390503, 0.24705882370471954,
0.24705882370471954), (0.76050418615341187, 0.24313725531101227,
0.24313725531101227), (0.76470589637756348, 0.23921568691730499,
0.23921568691730499), (0.76890754699707031, 0.23529411852359772,
0.23529411852359772), (0.77310925722122192, 0.23137255012989044,
0.23137255012989044), (0.77731090784072876, 0.22745098173618317,
0.22745098173618317), (0.78151261806488037, 0.22352941334247589,
0.22352941334247589), (0.78571426868438721, 0.21960784494876862,
0.21960784494876862), (0.78991597890853882, 0.21176470816135406,
0.21176470816135406), (0.79411762952804565, 0.20784313976764679,
0.20784313976764679), (0.79831933975219727, 0.20392157137393951,
0.20392157137393951), (0.8025209903717041, 0.20000000298023224,
0.20000000298023224), (0.80672270059585571, 0.19607843458652496,
0.19607843458652496), (0.81092435121536255, 0.19215686619281769,
0.19215686619281769), (0.81512606143951416, 0.18823529779911041,
0.18823529779911041), (0.819327712059021, 0.18431372940540314,
0.18431372940540314), (0.82352942228317261, 0.18039216101169586,
0.18039216101169586), (0.82773107290267944, 0.17647059261798859,
0.17647059261798859), (0.83193278312683105, 0.17254902422428131,
0.17254902422428131), (0.83613443374633789, 0.16862745583057404,
0.16862745583057404), (0.8403361439704895, 0.16470588743686676,
0.16470588743686676), (0.84453779458999634, 0.16078431904315948,
0.16078431904315948), (0.84873950481414795, 0.15686275064945221,
0.15686275064945221), (0.85294115543365479, 0.14901961386203766,
0.14901961386203766), (0.8571428656578064, 0.14509804546833038,
0.14509804546833038), (0.86134451627731323, 0.14117647707462311,
0.14117647707462311), (0.86554622650146484, 0.13725490868091583,
0.13725490868091583), (0.86974787712097168, 0.13333334028720856,
0.13333334028720856), (0.87394958734512329, 0.12941177189350128,
0.12941177189350128), (0.87815123796463013, 0.12549020349979401,
0.12549020349979401), (0.88235294818878174, 0.12156862765550613,
0.12156862765550613), (0.88655459880828857, 0.11764705926179886,
0.11764705926179886), (0.89075630903244019, 0.11372549086809158,
0.11372549086809158), (0.89495795965194702, 0.10980392247438431,
0.10980392247438431), (0.89915966987609863, 0.10588235408067703,
0.10588235408067703), (0.90336132049560547, 0.10196078568696976,
0.10196078568696976), (0.90756303071975708, 0.098039217293262482,
0.098039217293262482), (0.91176468133926392, 0.094117648899555206,
0.094117648899555206), (0.91596639156341553, 0.086274512112140656,
0.086274512112140656), (0.92016804218292236, 0.08235294371843338,
0.08235294371843338), (0.92436975240707397, 0.078431375324726105,
0.078431375324726105), (0.92857140302658081, 0.074509806931018829,
0.074509806931018829), (0.93277311325073242, 0.070588238537311554,
0.070588238537311554), (0.93697476387023926, 0.066666670143604279,
0.066666670143604279), (0.94117647409439087, 0.062745101749897003,
0.062745101749897003), (0.94537812471389771, 0.058823529630899429,
0.058823529630899429), (0.94957983493804932, 0.054901961237192154,
0.054901961237192154), (0.95378148555755615, 0.050980392843484879,
0.050980392843484879), (0.95798319578170776, 0.047058824449777603,
0.047058824449777603), (0.9621848464012146, 0.043137256056070328,
0.043137256056070328), (0.96638655662536621, 0.039215687662363052,
0.039215687662363052), (0.97058820724487305, 0.035294119268655777,
0.035294119268655777), (0.97478991746902466, 0.031372550874948502,
0.031372550874948502), (0.97899156808853149, 0.023529412224888802,
0.023529412224888802), (0.98319327831268311, 0.019607843831181526,
0.019607843831181526), (0.98739492893218994, 0.015686275437474251,
0.015686275437474251), (0.99159663915634155, 0.011764706112444401,
0.011764706112444401), (0.99579828977584839, 0.0078431377187371254,
0.0078431377187371254), (1.0, 0.0039215688593685627,
0.0039215688593685627)], 'green': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)], 'red': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)]}
# Instantiate a LinearSegmentedColormap (with a LUTSIZE-entry lookup table)
# for every ColorBrewer / gist segment-data table defined above.
Accent = colors.LinearSegmentedColormap('Accent', _Accent_data, LUTSIZE)
Blues = colors.LinearSegmentedColormap('Blues', _Blues_data, LUTSIZE)
BrBG = colors.LinearSegmentedColormap('BrBG', _BrBG_data, LUTSIZE)
BuGn = colors.LinearSegmentedColormap('BuGn', _BuGn_data, LUTSIZE)
BuPu = colors.LinearSegmentedColormap('BuPu', _BuPu_data, LUTSIZE)
Dark2 = colors.LinearSegmentedColormap('Dark2', _Dark2_data, LUTSIZE)
GnBu = colors.LinearSegmentedColormap('GnBu', _GnBu_data, LUTSIZE)
Greens = colors.LinearSegmentedColormap('Greens', _Greens_data, LUTSIZE)
Greys = colors.LinearSegmentedColormap('Greys', _Greys_data, LUTSIZE)
Oranges = colors.LinearSegmentedColormap('Oranges', _Oranges_data, LUTSIZE)
OrRd = colors.LinearSegmentedColormap('OrRd', _OrRd_data, LUTSIZE)
Paired = colors.LinearSegmentedColormap('Paired', _Paired_data, LUTSIZE)
Pastel1 = colors.LinearSegmentedColormap('Pastel1', _Pastel1_data, LUTSIZE)
Pastel2 = colors.LinearSegmentedColormap('Pastel2', _Pastel2_data, LUTSIZE)
PiYG = colors.LinearSegmentedColormap('PiYG', _PiYG_data, LUTSIZE)
PRGn = colors.LinearSegmentedColormap('PRGn', _PRGn_data, LUTSIZE)
PuBu = colors.LinearSegmentedColormap('PuBu', _PuBu_data, LUTSIZE)
PuBuGn = colors.LinearSegmentedColormap('PuBuGn', _PuBuGn_data, LUTSIZE)
PuOr = colors.LinearSegmentedColormap('PuOr', _PuOr_data, LUTSIZE)
PuRd = colors.LinearSegmentedColormap('PuRd', _PuRd_data, LUTSIZE)
Purples = colors.LinearSegmentedColormap('Purples', _Purples_data, LUTSIZE)
RdBu = colors.LinearSegmentedColormap('RdBu', _RdBu_data, LUTSIZE)
RdGy = colors.LinearSegmentedColormap('RdGy', _RdGy_data, LUTSIZE)
RdPu = colors.LinearSegmentedColormap('RdPu', _RdPu_data, LUTSIZE)
RdYlBu = colors.LinearSegmentedColormap('RdYlBu', _RdYlBu_data, LUTSIZE)
RdYlGn = colors.LinearSegmentedColormap('RdYlGn', _RdYlGn_data, LUTSIZE)
Reds = colors.LinearSegmentedColormap('Reds', _Reds_data, LUTSIZE)
Set1 = colors.LinearSegmentedColormap('Set1', _Set1_data, LUTSIZE)
Set2 = colors.LinearSegmentedColormap('Set2', _Set2_data, LUTSIZE)
Set3 = colors.LinearSegmentedColormap('Set3', _Set3_data, LUTSIZE)
Spectral = colors.LinearSegmentedColormap('Spectral', _Spectral_data, LUTSIZE)
YlGn = colors.LinearSegmentedColormap('YlGn', _YlGn_data, LUTSIZE)
YlGnBu = colors.LinearSegmentedColormap('YlGnBu', _YlGnBu_data, LUTSIZE)
YlOrBr = colors.LinearSegmentedColormap('YlOrBr', _YlOrBr_data, LUTSIZE)
YlOrRd = colors.LinearSegmentedColormap('YlOrRd', _YlOrRd_data, LUTSIZE)
gist_earth = colors.LinearSegmentedColormap('gist_earth', _gist_earth_data, LUTSIZE)
gist_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data, LUTSIZE)
gist_heat = colors.LinearSegmentedColormap('gist_heat', _gist_heat_data, LUTSIZE)
gist_ncar = colors.LinearSegmentedColormap('gist_ncar', _gist_ncar_data, LUTSIZE)
gist_rainbow = colors.LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, LUTSIZE)
gist_stern = colors.LinearSegmentedColormap('gist_stern', _gist_stern_data, LUTSIZE)
gist_yarg = colors.LinearSegmentedColormap('gist_yarg', _gist_yarg_data, LUTSIZE)
# Register each raw data table under its colormap name in ``datad`` so it
# can be looked up by string name later in this module.
datad['Accent']=_Accent_data
datad['Blues']=_Blues_data
datad['BrBG']=_BrBG_data
datad['BuGn']=_BuGn_data
datad['BuPu']=_BuPu_data
datad['Dark2']=_Dark2_data
datad['GnBu']=_GnBu_data
datad['Greens']=_Greens_data
datad['Greys']=_Greys_data
datad['Oranges']=_Oranges_data
datad['OrRd']=_OrRd_data
datad['Paired']=_Paired_data
datad['Pastel1']=_Pastel1_data
datad['Pastel2']=_Pastel2_data
datad['PiYG']=_PiYG_data
datad['PRGn']=_PRGn_data
datad['PuBu']=_PuBu_data
datad['PuBuGn']=_PuBuGn_data
datad['PuOr']=_PuOr_data
datad['PuRd']=_PuRd_data
datad['Purples']=_Purples_data
datad['RdBu']=_RdBu_data
datad['RdGy']=_RdGy_data
datad['RdPu']=_RdPu_data
datad['RdYlBu']=_RdYlBu_data
datad['RdYlGn']=_RdYlGn_data
datad['Reds']=_Reds_data
datad['Set1']=_Set1_data
datad['Set2']=_Set2_data
datad['Set3']=_Set3_data
datad['Spectral']=_Spectral_data
datad['YlGn']=_YlGn_data
datad['YlGnBu']=_YlGnBu_data
datad['YlOrBr']=_YlOrBr_data
datad['YlOrRd']=_YlOrRd_data
datad['gist_earth']=_gist_earth_data
datad['gist_gray']=_gist_gray_data
datad['gist_heat']=_gist_heat_data
datad['gist_ncar']=_gist_ncar_data
datad['gist_rainbow']=_gist_rainbow_data
datad['gist_stern']=_gist_stern_data
datad['gist_yarg']=_gist_yarg_data
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def revcmap(data):
    """Return the segment data of the reversed version of a colormap.

    Parameters
    ----------
    data : dict
        Maps a channel name (e.g. 'red', 'green', 'blue') to a list of
        ``(x, y0, y1)`` anchor tuples with ``x`` increasing from 0 to 1.

    Returns
    -------
    dict
        The same structure with every anchor mirrored about ``x = 0.5``,
        so the resulting colormap runs in the opposite direction.
    """
    data_r = {}
    # Use .items() rather than the Python-2-only .iteritems(), which
    # raises AttributeError on Python 3.
    for key, val in data.items():
        valnew = [(1. - a, b, c) for a, b, c in reversed(val)]
        data_r[key] = valnew
    return data_r
# Build the reversed ('_r'-suffixed) variant of every registered colormap.
# Snapshot the key list before looping: new '<name>_r' entries are inserted
# into ``datad`` inside the loop, and on Python 3 ``datad.keys()`` is a live
# view, so iterating it while inserting raises
# "RuntimeError: dictionary changed size during iteration".
cmapnames = list(datad)
for cmapname in cmapnames:
    cmapname_r = cmapname + '_r'
    cmapdat_r = revcmap(datad[cmapname])
    datad[cmapname_r] = cmapdat_r
    # At module scope locals() is the module namespace, so this publishes
    # e.g. ``Blues_r`` as an importable module attribute.
    locals()[cmapname_r] = colors.LinearSegmentedColormap(cmapname_r, cmapdat_r, LUTSIZE)
| gpl-3.0 |
gfyoung/pandas | pandas/tests/scalar/timestamp/test_arithmetic.py | 4 | 9023 | from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import (
OutOfBoundsDatetime,
Timedelta,
Timestamp,
offsets,
to_offset,
)
import pandas._testing as tm
class TestTimestampArithmetic:
    """Arithmetic (+/-) behavior of ``Timestamp`` against offsets,
    timedeltas, datetimes, integers, and timedelta64 scalars/arrays."""

    def test_overflow_offset(self):
        # no overflow expected
        stamp = Timestamp("2000/1/1")
        offset_no_overflow = to_offset("D") * 100

        expected = Timestamp("2000/04/10")
        assert stamp + offset_no_overflow == expected
        assert offset_no_overflow + stamp == expected

        expected = Timestamp("1999/09/23")
        assert stamp - offset_no_overflow == expected

    def test_overflow_offset_raises(self):
        # xref https://github.com/statsmodels/statsmodels/issues/3374
        # ends up multiplying really large numbers which overflow
        stamp = Timestamp("2017-01-13 00:00:00", freq="D")
        offset_overflow = 20169940 * offsets.Day(1)
        msg = (
            "the add operation between "
            r"\<-?\d+ \* Days\> and \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} "
            "will overflow"
        )
        # The low-level conversion error message differs between platforms
        # and Python versions, hence the alternation.
        lmsg = "|".join(
            ["Python int too large to convert to C long", "int too big to convert"]
        )

        with pytest.raises(OverflowError, match=lmsg):
            stamp + offset_overflow

        with pytest.raises(OverflowError, match=msg):
            offset_overflow + stamp

        with pytest.raises(OverflowError, match=lmsg):
            stamp - offset_overflow

        # xref https://github.com/pandas-dev/pandas/issues/14080
        # used to crash, so check for proper overflow exception
        stamp = Timestamp("2000/1/1")
        offset_overflow = to_offset("D") * 100 ** 5

        with pytest.raises(OverflowError, match=lmsg):
            stamp + offset_overflow

        with pytest.raises(OverflowError, match=msg):
            offset_overflow + stamp

        with pytest.raises(OverflowError, match=lmsg):
            stamp - offset_overflow

    def test_overflow_timestamp_raises(self):
        # https://github.com/pandas-dev/pandas/issues/31774
        msg = "Result is too large"
        a = Timestamp("2101-01-01 00:00:00")
        b = Timestamp("1688-01-01 00:00:00")

        with pytest.raises(OutOfBoundsDatetime, match=msg):
            a - b

        # but we're OK for timestamp and datetime.datetime
        assert (a - b.to_pydatetime()) == (a.to_pydatetime() - b)

    def test_delta_preserve_nanos(self):
        # Adding a day-granularity timedelta must not clobber the
        # sub-microsecond (nanosecond) component.
        val = Timestamp(1337299200000000123)
        result = val + timedelta(1)
        assert result.nanosecond == val.nanosecond

    def test_rsub_dtscalars(self, tz_naive_fixture):
        # In particular, check that datetime64 - Timestamp works GH#28286
        td = Timedelta(1235345642000)
        ts = Timestamp.now(tz_naive_fixture)
        other = ts + td

        assert other - ts == td
        assert other.to_pydatetime() - ts == td
        if tz_naive_fixture is None:
            assert other.to_datetime64() - ts == td
        else:
            # datetime64 is tz-naive, so subtracting a tz-aware Timestamp
            # must raise.
            msg = "subtraction must have"
            with pytest.raises(TypeError, match=msg):
                other.to_datetime64() - ts

    def test_timestamp_sub_datetime(self):
        dt = datetime(2013, 10, 12)
        ts = Timestamp(datetime(2013, 10, 13))
        assert (ts - dt).days == 1
        assert (dt - ts).days == -1

    def test_addition_subtraction_types(self):
        # Assert on the types resulting from Timestamp +/- various date/time
        # objects
        dt = datetime(2014, 3, 4)
        td = timedelta(seconds=1)
        # build a timestamp with a frequency, since then it supports
        # addition/subtraction of integers
        ts = Timestamp(dt, freq="D")

        msg = "Addition/subtraction of integers"
        with pytest.raises(TypeError, match=msg):
            # GH#22535 add/sub with integers is deprecated
            ts + 1
        with pytest.raises(TypeError, match=msg):
            ts - 1

        # Timestamp + datetime not supported, though subtraction is supported
        # and yields timedelta more tests in tseries/base/tests/test_base.py
        assert type(ts - dt) == Timedelta
        assert type(ts + td) == Timestamp
        assert type(ts - td) == Timestamp

        # Timestamp +/- datetime64 not supported, so not tested (could possibly
        # assert error raised?)
        td64 = np.timedelta64(1, "D")
        assert type(ts + td64) == Timestamp
        assert type(ts - td64) == Timestamp

    @pytest.mark.parametrize(
        "freq, td, td64",
        [
            ("S", timedelta(seconds=1), np.timedelta64(1, "s")),
            ("min", timedelta(minutes=1), np.timedelta64(1, "m")),
            ("H", timedelta(hours=1), np.timedelta64(1, "h")),
            ("D", timedelta(days=1), np.timedelta64(1, "D")),
            ("W", timedelta(weeks=1), np.timedelta64(1, "W")),
            ("M", None, np.timedelta64(1, "M")),
        ],
    )
    def test_addition_subtraction_preserve_frequency(self, freq, td, td64):
        ts = Timestamp("2014-03-05 00:00:00", freq=freq)
        original_freq = ts.freq

        assert (ts + 1 * original_freq).freq == original_freq
        assert (ts - 1 * original_freq).freq == original_freq

        if td is not None:
            # timedelta does not support months as unit
            assert (ts + td).freq == original_freq
            assert (ts - td).freq == original_freq

        assert (ts + td64).freq == original_freq
        assert (ts - td64).freq == original_freq

    @pytest.mark.parametrize(
        "td", [Timedelta(hours=3), np.timedelta64(3, "h"), timedelta(hours=3)]
    )
    def test_radd_tdscalar(self, td):
        # GH#24775 timedelta64+Timestamp should not raise
        ts = Timestamp.now()
        assert td + ts == ts + td

    @pytest.mark.parametrize(
        "other,expected_difference",
        [
            (np.timedelta64(-123, "ns"), -123),
            (np.timedelta64(1234567898, "ns"), 1234567898),
            (np.timedelta64(-123, "us"), -123000),
            (np.timedelta64(-123, "ms"), -123000000),
        ],
    )
    def test_timestamp_add_timedelta64_unit(self, other, expected_difference):
        # The added timedelta64 must be converted to the Timestamp's
        # nanosecond resolution regardless of its original unit.
        ts = Timestamp(datetime.utcnow())
        result = ts + other
        valdiff = result.value - ts.value
        assert valdiff == expected_difference

    @pytest.mark.parametrize(
        "ts",
        [
            Timestamp("1776-07-04", freq="D"),
            Timestamp("1776-07-04", tz="UTC", freq="D"),
        ],
    )
    @pytest.mark.parametrize(
        "other",
        [
            1,
            np.int64(1),
            np.array([1, 2], dtype=np.int32),
            np.array([3, 4], dtype=np.uint64),
        ],
    )
    def test_add_int_with_freq(self, ts, other):
        msg = "Addition/subtraction of integers and integer-arrays"
        with pytest.raises(TypeError, match=msg):
            ts + other
        with pytest.raises(TypeError, match=msg):
            other + ts

        with pytest.raises(TypeError, match=msg):
            ts - other

        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            other - ts

    @pytest.mark.parametrize("shape", [(6,), (2, 3)])
    def test_addsub_m8ndarray(self, shape):
        # GH#33296
        ts = Timestamp("2020-04-04 15:45")
        other = np.arange(6).astype("m8[h]").reshape(shape)

        result = ts + other
        ex_stamps = [ts + Timedelta(hours=n) for n in range(6)]
        expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape)
        tm.assert_numpy_array_equal(result, expected)

        result = other + ts
        tm.assert_numpy_array_equal(result, expected)

        result = ts - other
        ex_stamps = [ts - Timedelta(hours=n) for n in range(6)]
        expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape)
        tm.assert_numpy_array_equal(result, expected)

        msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'"
        with pytest.raises(TypeError, match=msg):
            other - ts

    @pytest.mark.parametrize("shape", [(6,), (2, 3)])
    def test_addsub_m8ndarray_tzaware(self, shape):
        # GH#33296
        ts = Timestamp("2020-04-04 15:45", tz="US/Pacific")

        other = np.arange(6).astype("m8[h]").reshape(shape)

        result = ts + other
        ex_stamps = [ts + Timedelta(hours=n) for n in range(6)]
        expected = np.array(ex_stamps).reshape(shape)
        tm.assert_numpy_array_equal(result, expected)

        result = other + ts
        tm.assert_numpy_array_equal(result, expected)

        result = ts - other
        ex_stamps = [ts - Timedelta(hours=n) for n in range(6)]
        expected = np.array(ex_stamps).reshape(shape)
        tm.assert_numpy_array_equal(result, expected)

        msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'"
        with pytest.raises(TypeError, match=msg):
            other - ts
| bsd-3-clause |
wvconnors/pysleeg | eegclassy.py | 1 | 1976 | # -*- coding: utf-8 -*-
"""
2/8/17, Will Connors
eegclassy.py - A program to take EDF polysomnography data and phase annotations, format, and create a classifier
"""
import numpy as np
#import pandas
#import io
#import tensorflow as tf
#import matplotlib as plot
class Eeg:
    """Loader for an EDF (European Data Format) polysomnography recording.

    Parses the EDF header to discover the record count, record duration and
    per-signal sample counts, then reads every sample into ``self.table``, a
    ``(nsignals, nrecords * max_samples_per_record)`` integer matrix (rows of
    signals with fewer samples per record keep trailing zeros).
    """

    def __init__(self, f_location):
        # Header-derived metadata; filled in while parsing below.
        self.nrecords = 0;           # number of data records in the file
        self.duration = 0.0;         # duration of one data record (seconds)
        self.nsignals = 0;           # number of signals/channels
        self.names = [];             # per-signal label strings
        self.samplesperrecord = [];  # per-signal samples per data record
        self.table = None;           # sample matrix, allocated below
        # NOTE(review): 'mbcs' is a Windows-only codec, and EDF sample data
        # is binary -- opening in text mode looks fragile; verify against
        # the target files.
        with open(f_location, encoding='mbcs') as f:
            # Offsets follow the EDF header layout (presumably the
            # 'number of data records' field begins at byte 236) -- confirm
            # against the EDF specification.
            f.seek(236);
            self.nrecords = int(f.read(8));
            self.duration = float(f.read(8));
            self.nsignals = int(f.read(4));
            # 16-byte label per signal.
            for x in range(self.nsignals):
                self.names.append(f.read(16).strip());
            # Skip transducer/dimension/min-max/prefilter header fields
            # (200 bytes per signal).
            f.read(200 * self.nsignals);
            # 8-byte samples-per-record count per signal.
            for x in range(self.nsignals):
                self.samplesperrecord.append(int(f.read(8)));
            # Skip the reserved header area (32 bytes per signal).
            f.read(32 * self.nsignals);
            # NOTE(review): np.int was removed in NumPy 1.24; modern NumPy
            # requires plain ``int`` (or np.int_) here.
            self.table = np.zeros((self.nsignals, self.nrecords * max(self.samplesperrecord)), dtype=np.int);
            for record in range(self.nrecords):
                for signal in range(self.nsignals):
                    for datum in range(self.samplesperrecord[signal]):
                        # NOTE(review): EDF stores each sample as a 16-bit
                        # little-endian two's-complement *binary* value, but
                        # this parses two characters as hex digits and
                        # combines them with a decimal 10000 factor -- this
                        # looks wrong; verify against a known-good EDF
                        # reader before trusting the decoded samples.
                        self.table[signal,(datum+record*self.samplesperrecord[signal])] = hex2compl(int(f.read(1), 16) * 10000 + int(f.read(1), 16), 16);
            print('done! whew');
            #pandas.read_table(f_location, delim_whitespace = True, )

    def process(self):
        # Placeholder analysis hook; currently just echoes the record count.
        return self.nrecords

    # dadada;
    #
    # def crossval(self):
    #     dadada;
    #
    # def export(self):
    #     dadada;
    #
def hex2compl(x, n):
    """Reinterpret the unsigned integer *x* as an *n*-bit two's-complement
    signed value.

    Parameters
    ----------
    x : int
        Raw unsigned value in ``[0, 2**n)``.
    n : int
        Bit width of the representation.

    Returns
    -------
    int
        The signed value in ``[-2**(n-1), 2**(n-1))``.
    """
    # Fold values with the sign bit set into the negative range.  Use >=
    # (not >): in two's complement the value 2**(n-1) itself -- e.g. 0x8000
    # for n=16 -- is the most negative number, -2**(n-1); the original
    # strict comparison left it positive.  Shifts also avoid the float
    # produced by pow(2, n) / 2.
    if x >= 1 << (n - 1):
        x -= 1 << n
    return x
if __name__ == '__main__':
    ## run class object sequence
    # Usage: python eegclassy.py <path-to-edf-file>
    import sys
    engine = Eeg(sys.argv[1]);
    print('Exit');
| gpl-3.0 |
benjaminy/ManyHands | Client/Source/Crypto/beau/test/plotter.py | 1 | 3473 | import matplotlib.pyplot as plt
import json

# Scaffolding names kept for compatibility (nothing in the plotting code
# below reads them, but they were part of the module's public namespace).
ns = []
random_times = []
best_times = []
worst_times = []
timesr = []
min_i = 2
max_i = 27
n_base = 1.3


def _load_json(path):
    """Parse and return the JSON document stored at *path*.

    Replaces the repeated open/read/loads/close sequence: the context
    manager guarantees the file handle is closed even when parsing raises.
    """
    with open(path, 'r') as f:
        return json.load(f)


# Benchmark results measured WITH crypto enabled.
nano_array_sorting = _load_json('./results_with_crypto/nano_array_sorting')
nano_array_no_sorting = _load_json('./results_with_crypto/nano_array_no_sorting')
nano_mem_sorting = _load_json('./results_with_crypto/nano_mem_sorting')
nano_mem_no_sorting = _load_json('./results_with_crypto/nano_mem_no_sorting')
time_array_sorting = _load_json('./results_with_crypto/time_array_sorting')
time_array_no_sorting = _load_json('./results_with_crypto/time_array_no_sorting')
time_mem_sorting = _load_json('./results_with_crypto/time_mem_sorting')
time_mem_no_sorting = _load_json('./results_with_crypto/time_mem_no_sorting')

# Benchmark results measured WITHOUT crypto.
nano_array_sorting_no_crypto = _load_json('./results_without_crypto/nano_array_sorting')
nano_array_no_sorting_no_crypto = _load_json('./results_without_crypto/nano_array_no_sorting')
nano_mem_sorting_no_crypto = _load_json('./results_without_crypto/nano_mem_sorting')
nano_mem_no_sorting_no_crypto = _load_json('./results_without_crypto/nano_mem_no_sorting')
time_array_sorting_no_crypto = _load_json('./results_without_crypto/time_array_sorting')
time_array_no_sorting_no_crypto = _load_json('./results_without_crypto/time_array_no_sorting')
time_mem_sorting_no_crypto = _load_json('./results_without_crypto/time_mem_sorting')
time_mem_no_sorting_no_crypto = _load_json('./results_without_crypto/time_mem_no_sorting')

# Only the in-memory sorting series are plotted; the remaining series are
# kept commented out for quick comparison runs.
# plt.plot(nano_array_sorting, time_array_sorting, "red")
# plt.plot(nano_array_no_sorting, time_array_no_sorting, "blue")
plt.plot(nano_mem_sorting, time_mem_sorting, "green")
# plt.plot(nano_mem_no_sorting, time_mem_no_sorting, "black")
# plt.plot(nano_array_sorting_no_crypto, time_array_sorting_no_crypto, "red")
# plt.plot(nano_array_no_sorting_no_crypto, time_array_no_sorting_no_crypto, "blue")
plt.plot(nano_mem_sorting_no_crypto, time_mem_sorting_no_crypto, "black")
# plt.plot(nano_mem_no_sorting_no_crypto, time_mem_no_sorting_no_crypto, "black")

plt.xlabel('number of messages')
plt.ylabel('time (s)')
plt.title('testing message sending')
plt.grid(True)
# plt.savefig("test.png")
plt.show()
| mit |
saiwing-yeung/scikit-learn | examples/model_selection/grid_search_digits.py | 8 | 2760 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts: half for model selection,
# half for the final held-out evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
# Run one full grid search per target metric.
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()
    clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                       scoring='%s_macro' % score)
    clf.fit(X_train, y_train)
    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    # NOTE(review): `results_` with 'test_mean_score'/'test_std_score' keys
    # matches a development snapshot of GridSearchCV; released scikit-learn
    # exposes `cv_results_` with 'mean_test_score'/'std_test_score' —
    # confirm against the installed version.
    means = clf.results_['test_mean_score']
    stds = clf.results_['test_std_score']
    for i in range(len(clf.results_['params'])):
        print("%0.3f (+/-%0.03f) for %r"
              % (means[i], stds[i] * 2, clf.results_['params'][i]))
    print()
    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred))
    print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
jreback/pandas | pandas/tests/extension/test_period.py | 2 | 4619 | import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas.core.arrays import PeriodArray
from pandas.tests.extension import base
@pytest.fixture
def dtype():
    # Daily-frequency Period dtype shared by all fixtures below.
    return PeriodDtype(freq="D")
@pytest.fixture
def data(dtype):
    # 100 consecutive periods (ordinals 1970..2069).
    return PeriodArray(np.arange(1970, 2070), freq=dtype.freq)
@pytest.fixture
def data_for_twos(dtype):
    # Every element has ordinal 2 (required by the arithmetic base tests).
    return PeriodArray(np.ones(100) * 2, freq=dtype.freq)
@pytest.fixture
def data_for_sorting(dtype):
    # Three distinct ordinals in a known unsorted order: mid, high, low.
    return PeriodArray([2018, 2019, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing(dtype):
    # First element missing (iNaT), second valid.
    return PeriodArray([iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing_for_sorting(dtype):
    # valid, missing, valid — exercises NA handling while sorting.
    return PeriodArray([2018, iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_for_grouping(dtype):
    # Layout expected by the groupby base tests: B B NA NA A A B C.
    B = 2018
    NA = iNaT
    A = 2017
    C = 2019
    return PeriodArray([B, B, NA, NA, A, A, B, C], freq=dtype.freq)
@pytest.fixture
def na_value():
    # The scalar missing value for Period data.
    return pd.NaT
class BasePeriodTests:
    # Shared mixin hook for all Period extension tests; currently carries
    # no common logic but keeps the MRO uniform across the test classes.
    pass
class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests):
    # Dtype-level extension tests inherited unchanged.
    pass
class TestConstructors(BasePeriodTests, base.BaseConstructorsTests):
    # Constructor tests inherited unchanged.
    pass
class TestGetitem(BasePeriodTests, base.BaseGetitemTests):
    # __getitem__/indexing tests inherited unchanged.
    pass
class TestMethods(BasePeriodTests, base.BaseMethodsTests):
    def test_combine_add(self, data_repeated):
        # Period + Period is not defined.
        pass
class TestInterface(BasePeriodTests, base.BaseInterfaceTests):
    # Extension-array interface tests inherited unchanged.
    pass
class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests):
    """Arithmetic base tests: PeriodArray only implements subtraction."""
    implements = {"__sub__", "__rsub__"}
    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
        # frame & scalar
        if all_arithmetic_operators in self.implements:
            df = pd.DataFrame({"A": data})
            self.check_opname(df, all_arithmetic_operators, data[0], exc=None)
        else:
            # ... but not the rest.
            super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # we implement substitution...
        if all_arithmetic_operators in self.implements:
            s = pd.Series(data)
            self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
        else:
            # ... but not the rest.
            super().test_arith_series_with_scalar(data, all_arithmetic_operators)
    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        if all_arithmetic_operators in self.implements:
            s = pd.Series(data)
            self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
        else:
            # BUG FIX: this previously delegated to the *scalar* base test
            # (copy-paste), so the unimplemented ops were never exercised
            # against an array operand.
            super().test_arith_series_with_array(data, all_arithmetic_operators)
    def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
        # divmod involving Periods always raises TypeError.
        super()._check_divmod_op(s, op, other, exc=TypeError)
    def test_add_series_with_extension_array(self, data):
        # we don't implement + for Period
        s = pd.Series(data)
        msg = (
            r"unsupported operand type\(s\) for \+: "
            r"\'PeriodArray\' and \'PeriodArray\'"
        )
        with pytest.raises(TypeError, match=msg):
            s + data
    def test_error(self):
        # The generic error test does not apply to Period; skip it.
        pass
    @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
    def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
        # Override to use __sub__ instead of __add__
        other = pd.Series(data)
        if box is pd.DataFrame:
            other = other.to_frame()
        result = data.__sub__(other)
        assert result is NotImplemented
class TestCasting(BasePeriodTests, base.BaseCastingTests):
    # astype/casting tests inherited unchanged.
    pass
class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests):
    def _compare_other(self, s, data, op_name, other):
        # the base test is not appropriate for us. We raise on comparison
        # with (some) integers, depending on the value.
        pass
class TestMissing(BasePeriodTests, base.BaseMissingTests):
    # isna/fillna tests inherited unchanged.
    pass
class TestReshaping(BasePeriodTests, base.BaseReshapingTests):
    # concat/merge/unstack tests inherited unchanged.
    pass
class TestSetitem(BasePeriodTests, base.BaseSetitemTests):
    # __setitem__ tests inherited unchanged.
    pass
class TestGroupby(BasePeriodTests, base.BaseGroupbyTests):
    # groupby tests inherited unchanged.
    pass
class TestPrinting(BasePeriodTests, base.BasePrintingTests):
    # repr/printing tests inherited unchanged.
    pass
class TestParsing(BasePeriodTests, base.BaseParsingTests):
    @pytest.mark.parametrize("engine", ["c", "python"])
    def test_EA_types(self, engine, data):
        # Run the CSV round-trip test with both parser engines.
        super().test_EA_types(engine, data)
| bsd-3-clause |
hitszxp/scikit-learn | examples/svm/plot_svm_scale_c.py | 26 | 5353 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `L1` penalty, as well as the `L2` penalty.
L1-penalty case
-----------------
In the `L1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `L1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
L2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `L1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `L2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `L1` case works better on sparse data, while `L2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)

# set up dataset
n_samples = 100
n_features = 300

# L1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
                                        n_features=n_features, n_informative=5,
                                        random_state=1)

# L2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
# BUG FIX: use floor division — `n_features / 5` is a float under
# Python 3 and array shape arguments must be integers (unchanged
# semantics under Python 2 for these ints).
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)

# One (classifier, C-grid, X, y) tuple per penalty case.
clf_sets = [(LinearSVC(penalty='L1', loss='L2', dual=False,
                       tol=1e-3),
             np.logspace(-2.3, -1.3, 10), X_1, y_1),
            (LinearSVC(penalty='L2', loss='L2', dual=True,
                       tol=1e-4),
             np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
# For each penalty case, cross-validate over the C grid at several
# training-set fractions and plot unscaled vs. 1/n_samples-scaled C.
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each regressor
    plt.figure(fignum, figsize=(9, 10))
    for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
        param_grid = dict(C=cs)
        # To get nice curve, we need a large number of iterations to
        # reduce the variance
        grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
                            cv=ShuffleSplit(n=n_samples, train_size=train_size,
                                            n_iter=250, random_state=1))
        grid.fit(X, y)
        scores = [x[1] for x in grid.grid_scores_]
        scales = [(1, 'No scaling'),
                  ((n_samples * train_size), '1/n_samples'),
                  ]
        # One subplot per scaling rule; same scores, rescaled x-axis.
        for subplotnum, (scaler, name) in enumerate(scales):
            plt.subplot(2, 1, subplotnum + 1)
            plt.xlabel('C')
            plt.ylabel('CV Score')
            grid_cs = cs * float(scaler)  # scale the C's
            plt.semilogx(grid_cs, scores, label="fraction %.2f" %
                         train_size)
            plt.title('scaling=%s, penalty=%s, loss=%s' %
                      (name, clf.penalty, clf.loss))
    plt.legend(loc="best")
plt.show()
| bsd-3-clause |
andaag/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
    """Check classification by majority label on dataset iris."""
    estimators = [('lr', LogisticRegression(random_state=123)),
                  ('rf', RandomForestClassifier(random_state=123)),
                  ('gnb', GaussianNB())]
    eclf = VotingClassifier(estimators=estimators, voting='hard')
    # 5-fold CV accuracy of the hard-voting ensemble on iris.
    scores = cross_validation.cross_val_score(
        eclf, X, y, cv=5, scoring='accuracy')
    assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
    """Check voting classifier selects smaller class label in tie situation."""
    lr = LogisticRegression(random_state=123)
    rf = RandomForestClassifier(random_state=123)
    eclf = VotingClassifier(estimators=[('lr', lr), ('rf', rf)],
                            voting='hard')
    # The two members disagree on sample 73 ...
    assert_equal(lr.fit(X, y).predict(X)[73], 2)
    assert_equal(rf.fit(X, y).predict(X)[73], 1)
    # ... and the ensemble breaks the tie in favour of the lower label.
    assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
    """Check classification by average probabilities on dataset iris."""
    estimators = [('lr', LogisticRegression(random_state=123)),
                  ('rf', RandomForestClassifier(random_state=123)),
                  ('gnb', GaussianNB())]
    # Soft voting with unequal member weights.
    eclf = VotingClassifier(estimators=estimators,
                            voting='soft',
                            weights=[1, 2, 10])
    scores = cross_validation.cross_val_score(
        eclf, X, y, cv=5, scoring='accuracy')
    assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
    """Manually check predicted class labels for toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5],
                  [-1.2, -1.4],
                  [-3.4, -2.2],
                  [1.1, 1.2],
                  [2.1, 1.4],
                  [3.1, 2.3]])
    y = np.array([1, 1, 1, 2, 2, 2])
    expected = [1, 1, 1, 2, 2, 2]
    # BUG FIX: the old assertions compared all(pred) with all(expected);
    # since every label is nonzero both sides are always True, so the
    # test could never fail. Compare the predictions element-wise.
    assert_equal(list(clf1.fit(X, y).predict(X)), expected)
    assert_equal(list(clf2.fit(X, y).predict(X)), expected)
    assert_equal(list(clf3.fit(X, y).predict(X)), expected)
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='hard',
        weights=[1, 1, 1])
    assert_equal(list(eclf.fit(X, y).predict(X)), expected)
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[1, 1, 1])
    assert_equal(list(eclf.fit(X, y).predict(X)), expected)
def test_predict_proba_on_toy_problem():
    """Calculate predicted probabilities on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])
    # Reference per-classifier probabilities (precomputed for this data).
    clf1_res = np.array([[0.59790391, 0.40209609],
                         [0.57622162, 0.42377838],
                         [0.50728456, 0.49271544],
                         [0.40241774, 0.59758226]])
    clf2_res = np.array([[0.8, 0.2],
                         [0.8, 0.2],
                         [0.2, 0.8],
                         [0.3, 0.7]])
    clf3_res = np.array([[0.9985082, 0.0014918],
                         [0.99845843, 0.00154157],
                         [0., 1.],
                         [0., 1.]])
    # Expected soft-vote probabilities: weighted mean with weights 2/1/1
    # (total weight 4).
    t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
    t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
    t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
    t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[2, 1, 1])
    eclf_res = eclf.fit(X, y).predict_proba(X)
    assert_almost_equal(t00, eclf_res[0][0], decimal=1)
    assert_almost_equal(t11, eclf_res[1][1], decimal=1)
    assert_almost_equal(t21, eclf_res[2][1], decimal=1)
    assert_almost_equal(t31, eclf_res[3][1], decimal=1)
    # Hard voting must refuse predict_proba.
    try:
        eclf = VotingClassifier(estimators=[
            ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
            voting='hard')
        eclf.fit(X, y).predict_proba(X)
    except AttributeError:
        pass
    else:
        raise AssertionError('AttributeError for voting == "hard"'
                             ' and with predict_proba not raised')
def test_multilabel():
    """Check if error is raised for multilabel classification."""
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          random_state=123)
    clf = OneVsRestClassifier(SVC(kernel='linear'))
    eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
    try:
        eclf.fit(X, y)
    except NotImplementedError:
        return
    # BUG FIX: the test previously passed silently when fit() did NOT
    # raise; fail explicitly, since multilabel input must be rejected.
    raise AssertionError('NotImplementedError not raised for '
                         'multilabel input')
def test_gridsearch():
    """Check GridSearch support."""
    voters = [('lr', LogisticRegression(random_state=1)),
              ('rf', RandomForestClassifier(random_state=1)),
              ('gnb', GaussianNB())]
    eclf = VotingClassifier(estimators=voters, voting='soft')
    # Tune a member hyper-parameter alongside ensemble-level parameters.
    params = {'lr__C': [1.0, 100.0],
              'voting': ['soft', 'hard'],
              'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
    grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
    grid.fit(iris.data, iris.target)
| bsd-3-clause |
vanceeasleaf/aces | aces/runners/shengbte.py | 1 | 28228 | # -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-13 00:44:48
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-23 18:53:35
import aces.config as config
from ase import io
from aces.graph import plot, series
import numpy as np
from aces.runners.phonopy import runner as Runner
import pandas as pd
from aces.graph import fig, pl
from aces.tools import passthru, toString, cd,\
to_txt, shell_exec, mkdir, cp, ls
from aces.algorithm.kpoints import filter_along_direction
from aces.io.shengbte import get_w_final, get_qpoints, get_omega, get_tau, get_v
class runner(Runner):
    def fc3(self):
        """Short alias for :meth:`force_constant3`."""
        self.force_constant3()
    def force_constant3(self):
        """Collect 3rd-order force constants: pipe the sorted vasprun.xml
        list from dirs/dir_3RD.* into thirdorder's 'reap' step."""
        cmd = 'find dirs/dir_3RD.* -name vasprun.xml |sort -n|' + \
            config.thirdorder + " reap" + self.getcut()
        passthru(cmd)
def getcut(self):
m = self.m
cut = str(m.shengcut / 10.0)
if m.shengcut < 0:
cut = str(m.shengcut)
return " %s %s " % (toString(m.supercell3), cut)
    def generate_supercells3(self):
        """Generate displaced supercells for the 3rd-order force
        constants via thirdorder's 'sow' step."""
        # generate supercells
        cmd = config.thirdorder + "sow" + self.getcut()
        print(cmd)
        passthru(cmd)
    def getControl(self):
        """Write the ShengBTE CONTROL file (&allocations, &crystal,
        &parameters and &flags namelists) from ../POSCAR and self.m."""
        m = self.m
        f = open('CONTROL', 'w')
        atoms = io.read('../POSCAR')  # m.atoms
        elements = m.elements
        # shengbte needs nelements <=natoms
        if len(elements) > len(atoms):
            elements = elements[:len(atoms)]
        allocations = """&allocations
\tnelements=%d
\tnatoms=%d
\tngrid(:)=%s
&end
""" % (len(elements), len(atoms), toString(m.kpoints))
        cell = atoms.cell
        # 1-based element-type index per atom, in POSCAR order.
        types = toString(
            [m.elements.index(x) + 1 for x in atoms.get_chemical_symbols()])
        pos = ""
        for i, atom in enumerate(atoms):
            tu = (i + 1, toString(atoms.get_scaled_positions()[i]))
            pos += " positions(:,%d)=%s\n" % tu
        crystal = """&crystal
lfactor=0.1,
lattvec(:,1)=%s
lattvec(:,2)=%s
lattvec(:,3)=%s
elements=%s
types=%s
%s
scell(:)=%s
&end
""" % (toString(cell[0]), toString(cell[1]), toString(cell[2]),
       ' '.join(map(lambda x: '"' + x + '"', elements)), types, pos,
       m.dim)
        parameters = """&parameters
        T=%f
        scalebroad=1.0
        &end
        """ % (m.T)
        flags = """
        &flags
        nonanalytic=.TRUE.
        nanowires=.FALSE.
        &end
        """
        f.write(allocations)
        f.write(crystal)
        f.write(parameters)
        f.write(flags)
        f.close()
    def sca(self, th=0.0):
        """Scatter-plot relaxation time vs. frequency for q-points lying
        along in-plane direction *th* (first three branches only)."""
        qpoints_full = np.loadtxt('BTE.qpoints_full')
        ks = qpoints_full[:, 2:4]
        # Select full-grid points along the requested direction.
        f = filter_along_direction(ks, th, eps=0.5)
        ids = qpoints_full[:, 1].astype(np.int)[f]
        qpoints = np.loadtxt('BTE.qpoints')
        idx = qpoints[:, 0].astype(np.int)
        # Map each selected full-grid point to its irreducible q index.
        u = [list(idx).index(i) for i in ids]
        w = np.loadtxt('BTE.w_final')
        omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
        # w[omega<omega.flatten().max()*0.005]=float('nan')
        tao = 1.0 / w + 1e-6
        rt = tao[u, :3]
        rom = omega[u, :3]
        data = []
        n, m = rom.shape
        for i in range(m):
            data.append([rom[:, i], rt[:, i], 'b'])
        series(
            xlabel='Frequency (THz)',
            ylabel='Relaxation Time (ps)',
            datas=data,
            filename='scaling-%f.png' % th,
            scatter=True,
            legend=False,
            logx=True,
            logy=True)
    def sca1(self):
        """Like :meth:`sca` but selecting q-points on a |k| ring (radius
        2.3) via :meth:`norm` instead of a direction."""
        qpoints_full = np.loadtxt('BTE.qpoints_full')
        ks = qpoints_full[:, 2:4]
        f = self.norm(ks, 2.3)
        ids = qpoints_full[:, 1].astype(np.int)[f]
        qpoints = np.loadtxt('BTE.qpoints')
        idx = qpoints[:, 0].astype(np.int)
        u = [list(idx).index(i) for i in ids]
        w = np.loadtxt('BTE.w_final')
        omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
        # Mask numerically-zero modes near Gamma before inverting.
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        rt = tao[u, :3]
        rom = omega[u, :3]
        data = []
        n, m = rom.shape
        for i in range(m):
            data.append([rom[:, i], rt[:, i], 'b'])
        series(
            xlabel='Frequency (THz)',
            ylabel='Relaxation Time (ps)',
            datas=data,
            filename='norm.png',
            scatter=True,
            legend=False,
            logx=True,
            logy=True)
def norm(self, ks, r):
filter = np.abs(np.linalg.norm(ks, axis=1) - r) < 1
return filter
    def sca3(self):
        """Like :meth:`sca` but selecting q-points with kx near 2.3 via
        :meth:`kx`."""
        qpoints_full = np.loadtxt('BTE.qpoints_full')
        ks = qpoints_full[:, 2:4]
        f = self.kx(ks, 2.3)
        ids = qpoints_full[:, 1].astype(np.int)[f]
        qpoints = np.loadtxt('BTE.qpoints')
        idx = qpoints[:, 0].astype(np.int)
        u = [list(idx).index(i) for i in ids]
        w = np.loadtxt('BTE.w_final')
        omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
        # Mask numerically-zero modes near Gamma before inverting.
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        rt = tao[u, :3]
        rom = omega[u, :3]
        data = []
        n, m = rom.shape
        for i in range(m):
            data.append([rom[:, i], rt[:, i], 'b'])
        series(
            xlabel='Frequency (THz)',
            ylabel='Relaxation Time (ps)',
            datas=data,
            filename='kx.png',
            scatter=True,
            legend=False,
            logx=True,
            logy=True)
def kx(self, ks, r):
filter = np.abs(ks[:, 0] - r) < 0.25
return filter
    def sca2(self):
        """Relaxation time vs. frequency for a fixed slice of q-points
        (rows 50..54), one curve per q-point."""
        w = np.loadtxt('BTE.w_final')
        omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
        # Mask numerically-zero modes near Gamma before inverting.
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        rt = tao[50:55, :]
        rom = omega[50:55, :]
        data = []
        n, m = rom.shape
        for i in range(n):
            data.append([rom[i, :], rt[i, :], 'b'])
        series(
            xlabel='Frequency (THz)',
            ylabel='Relaxation Time (ps)',
            datas=data,
            filename='k.png',
            scatter=True,
            legend=False,
            logx=True,
            logy=True)
    def postT(self):
        """Plot the diagonal kappa components vs. temperature (<= 800 K)
        from BTE.KappaTensorVsT_CONV (columns 1/5/9 = xx/yy/zz)."""
        a = np.loadtxt("BTE.KappaTensorVsT_CONV")
        with fig('T_kappa.png', legend=True):
            ts = a[:, 0]
            fil = ts <= 800
            k1 = a[fil, 1]
            k2 = a[fil, 5]
            k3 = a[fil, 9]
            ts = a[fil, 0]
            pl.plot(ts, k1, lw=2, label="${\kappa_{xx}}$")
            pl.plot(ts, k2, lw=2, label="${\kappa_{yy}}$")
            pl.plot(ts, k3, lw=2, label="${\kappa_{zz}}$")
            pl.xlabel("Tempeature (K)")
            pl.ylabel('Thermal Conductivity (W/mK)')
    def grtao(self):
        """Scatter-plot the Gruneisen coefficient against the phonon
        relaxation time using the T300K results."""
        cd('T300K')
        # plot the Gruneisen coefficient vs. relaxation time
        w = np.loadtxt('BTE.w_final')[:, 1]
        w = np.abs(w)
        q = np.loadtxt(open('../BTE.qpoints'))
        n = len(q)
        # Reshape the flat rate list to (q-points, branches).
        w = w.T.reshape([-1, n])
        w = np.einsum('jk->kj', w)
        w.flags.writeable = True
        omega = np.loadtxt('../BTE.omega') / (2.0 * np.pi)
        # Mask numerically-zero modes near Gamma before inverting.
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        g = np.loadtxt('../BTE.gruneisen')
        with fig("gruneisen_tao.png"):
            pl.semilogy(
                g.flatten(),
                tao.flatten(),
                ls='.',
                marker='.',
                color='r',
                markersize=10)
            pl.ylabel('Relaxation Time (ps)')
            pl.xlabel('Gruneisen Coeffecient')
            pl.xlim([-10, 5])
            pl.ylim([0, 1e4])
def post(self):
cd('T300K')
try:
df = pd.read_csv(
"BTE.kappa_scalar",
sep=r"[ \t]+",
header=None,
names=['step', 'kappa'],
engine='python')
ks = np.array(df['kappa'])
plot(
(np.array(df['step']), 'Iteration Step'),
(ks, 'Thermal Conductivity (W/mK)'),
'kappa_scalar.png',
grid=True,
linewidth=2)
except Exception as e:
print(e)
try:
df = pd.read_csv(
"BTE.cumulative_kappa_scalar",
sep=r"[ \t]+",
header=None,
names=['l', 'kappa'],
engine='python')
ks = np.array(df['kappa'])
plot(
(np.array(df['l']),
'Cutoff Mean Free Path for Phonons (Angstrom)'),
(ks, 'Thermal Conductivity (W/mK)'),
'cumulative_kappa_scalar.png',
grid=True,
linewidth=2,
logx=True)
except Exception as e:
print(e)
try:
omega = np.loadtxt('../BTE.omega') / (2.0 * np.pi)
kappa = np.loadtxt('BTE.kappa')[-1, 1:]
kappa = np.einsum('jji', kappa.reshape([3, 3, -1])) / 3.0
plot(
(np.arange(len(omega[0])), 'Band'),
(kappa, 'Thermal Conductivity (W/mK)'),
'kappa_band.png',
grid=True,
linewidth=2)
plot(
(np.arange(len(omega[0])), 'Band'),
(kappa.cumsum(), 'Thermal Conductivity (W/mK)'),
'cumulative_kappa_band.png',
grid=True,
linewidth=2)
except Exception as e:
print(e)
try:
kappa = np.loadtxt('BTE.cumulative_kappaVsOmega_tensor')
with fig("atc_freq.png"):
pl.plot(kappa[:, 0], kappa[:, 1], label="${\kappa_{xx}}$")
pl.plot(kappa[:, 0], kappa[:, 5], label="${\kappa_{xx}}$")
pl.plot(kappa[:, 0], kappa[:, 9], label="${\kappa_{xx}}$")
pl.xlabel("Frequency (THz)")
pl.ylabel("Cumulative Thermal Conductivity(W/mK)")
with fig("tc_freq.png"):
pl.plot(
kappa[:, 0],
np.gradient(kappa[:, 1]),
label="${\kappa_{xx}}$")
pl.plot(
kappa[:, 0],
np.gradient(kappa[:, 5]),
label="${\kappa_{xx}}$")
pl.plot(
kappa[:, 0],
np.gradient(kappa[:, 9]),
label="${\kappa_{xx}}$")
pl.xlabel("Frequency (THz)")
pl.ylabel("Cumulative Thermal Conductivity(W/mK)")
except Exception as e:
print(e)
try:
g = np.loadtxt('../BTE.gruneisen')
y = (g.flatten(), 'Gruneisen')
plot(
(omega.flatten(), 'Frequency (THz)'),
y,
'gruneisen_freq.png',
grid=True,
scatter=True)
with fig('gruneisen_freq.png'):
pl.scatter(
omega.flatten(), g.flatten(), marker='.', color='r', s=50)
pl.xlabel('Frequency (THz)')
pl.ylabel('Gruneisen Coeffecient')
# pl.grid(True)
pl.xlim([0, omega.max()])
pl.ylim([-10, 5])
# pl.tick_params(axis='both', which='major', labelsize=14)
to_txt(['freq', 'gruneisen'],
np.c_[omega.flatten(), g.flatten()], 'gruneisen_freq.txt')
g = np.loadtxt('../BTE.P3')
with fig('p3_freq.png'):
pl.scatter(
omega.flatten(),
g.flatten() * 1e6,
marker='.',
color='r',
s=50)
pl.xlabel('Frequency (THz)')
pl.ylabel('P3 $(\\times 10^{-6})$')
# pl.grid(True)
pl.xlim([0, omega.max()])
pl.ylim([0, g.max() * 1e6])
to_txt(['freq', 'p3'],
np.c_[omega.flatten(), g.flatten()], 'p3_freq.txt')
except Exception as e:
print(e)
self.draw_gv()
self.draw_branch_scatter()
self.draw_tau()
cd('..')
    def draw_gv(self):
        """Plot group velocity and mean free path (v*tau) vs. frequency
        and dump both as text files."""
        try:
            omega = get_omega('..')
            tau = get_tau('..')
            v = get_v('..')
            v = np.linalg.norm(v, axis=-1)
            y = (v.flatten(), 'Group Velocity (nm/ps)')
            plot(
                (omega.flatten(), 'Frequency (THz)'),
                y,
                'v_freq.png',
                grid=True,
                scatter=True)
            to_txt(['freq', 'vg'],
                   np.c_[omega.flatten(), v.flatten()], 'v_freq.txt')
            l = v * tau
            y = (l.flatten(), 'Mean Free Path (nm)')
            plot(
                (omega.flatten(), 'Frequency (THz)'),
                y,
                'lamda_freq.png',
                grid=True,
                scatter=True)
            to_txt(['freq', 'mfp'],
                   np.c_[omega.flatten(), l.flatten()], 'lamda_freq.txt')
        except Exception as e:
            print(e)
    def draw_branch_scatter(self):
        """Plot scatter rate vs. |q|, one data series per phonon branch."""
        try:
            w = get_w_final('..')
            q = get_qpoints('..')
            qnorm = np.linalg.norm(q, axis=1)
            data = []
            n, m = w.shape
            for i in range(m):
                data.append([qnorm, w[:, i], 'b'])
            series(
                xlabel='|q| (1/nm)',
                ylabel='Scatter Rate (THz)',
                datas=data,
                filename='branchscatter.png',
                scatter=True,
                legend=False,
                logx=True,
                logy=True)
        except Exception as e:
            # Best-effort plotting: silently skip when output files are
            # missing or malformed.
            pass
    def draw_tau(self):
        """Plot scatter rate and relaxation time vs. frequency and dump
        per-q-point relaxation times to q_tao.txt."""
        try:
            w = get_w_final('..')
            q = get_qpoints('..')
            omega = get_omega('..')
            tau = get_tau('..')
            plot(
                (omega.flatten(), 'Frequency (THz)'), (w.flatten(),
                                                       'Scatter Rate (THz)'),
                'scatter_freq.png',
                grid=True,
                scatter=True,
                logy=True)
            plot(
                (omega.flatten(), 'Frequency (THz)'), (tau.flatten(),
                                                       'Relaxation Time (ps)'),
                'tau_freq.png',
                grid=True,
                scatter=True,
                logy=True)
            to_txt(['freq', 'tau'],
                   np.c_[omega.flatten(), tau.flatten()], 'tao_freq.txt')
            # One row per (q-point, branch): q, frequency, relaxation time.
            r = []
            for i, qq in enumerate(q):
                c = tau[i]
                d = omega[i]
                for j, cc in enumerate(c):
                    r.append([qq[0], qq[1], qq[2], d[j], c[j]])
            to_txt(['q1', 'q2', 'q3', 'f(THz)', 'tao(ps)'], r, 'q_tao.txt')
        except Exception as e:
            # Best-effort: missing ShengBTE output files are skipped.
            pass
def vtao(self):
# group velocity vs. tao using old version of shengbte
w = np.loadtxt('BTE.w_final')
w = np.abs(w)
omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
w[omega < omega.flatten().max() * 0.005] = float('nan')
tao = 1.0 / w + 1e-6
v = np.loadtxt(open('BTE.v'))
n, m = v.shape
v = v.reshape([n, 3, m / 3])
v = np.linalg.norm(v, axis=1)
l = v * tao
l[l < 1e-6] = None
with fig('tao_v.png'):
pl.semilogy(
v.flatten(),
tao.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Group Velocity (nm/ps)')
pl.ylabel('Relaxation Time (ps)')
pl.grid(True)
with fig('tao_l.png'):
pl.loglog(
l.flatten(),
tao.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Mean Free Path (nm)')
pl.ylabel('Relaxation Time (ps)')
pl.grid(True)
with fig('v_l.png'):
pl.semilogy(
v.flatten(),
l.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Group Velocity (nm/ps)')
pl.ylabel('Mean Free Path (nm)')
pl.grid(True)
    def getGrid(self):
        """Parse the q-mesh dimensions from the ngrid line of CONTROL."""
        s = shell_exec("grep ngrid CONTROL")
        from scanf import sscanf
        grids = sscanf(s, "ngrid(:)=%d %d %d")
        return grids
    def getQ(self):
        """Build the full fractional q-point grid, each coordinate shifted
        into [-0.5, 0.5)."""
        # atoms = io.read('../POSCAR')
        # rcell = atoms.get_reciprocal_cell()
        grid = self.getGrid()
        q0 = []
        for ii in range(grid[0]):
            for jj in range(grid[1]):
                for kk in range(grid[2]):
                    k = [
                        float(ii) / grid[0] - .5,
                        float(jj) / grid[1] - .5,
                        float(kk) / grid[2] - .5
                    ]
                    # q0.append(np.einsum('ij,i',rcell,k))
                    q0.append(k)
        return np.array(q0)
    def getQFmap(self):
        """Map each full-grid q-point onto the row index of its
        irreducible representative in BTE.qpoints."""
        qpoints_full = np.loadtxt('BTE.qpoints_full')
        qpoints = np.loadtxt('BTE.qpoints')
        ids = qpoints_full[:, 1].astype(np.int)
        idx = qpoints[:, 0].astype(np.int)
        # Irreducible q-point id -> row index lookup.
        a = {}
        for i, id in enumerate(idx):
            a[id] = i
        u = np.array([a[i] for i in ids])
        return u
    def taoth(self):
        """Average relaxation time binned by in-plane propagation angle."""
        # tao vs. direction in xy plane using old version of shengbte
        w = np.loadtxt('BTE.w_final')
        w = np.abs(w)
        omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        # Drop divergent lifetimes and NaNs before averaging.
        tao[tao > 10000] = 0
        tao = np.nan_to_num(tao)
        u = self.getQFmap()
        tao = tao[u]
        # to restrict the q-points to the BZ we must rebuild them ourselves
        # qpoints_full=np.loadtxt('BTE.qpoints_full')
        # q=qpoints_full[:,-3:]
        q = self.getQ()
        with fig('tao_th.png'):
            # ax = pl.subplot(111, projection='polar')
            N = 100
            th = np.linspace(0, 1, N) * np.pi * 2.0 - np.pi
            r = np.zeros_like(th)
            r1 = np.zeros_like(th)
            theta = np.arctan2(q[:, 1], q[:, 0])
            for i in np.arange(1):
                for j, tt in enumerate(th):
                    if j == len(th) - 1:
                        fil = (theta >= tt)
                    else:
                        fil = (theta >= tt) * (theta < th[j + 1])
                    r[j] = np.nan_to_num(tao[fil].mean())
                    r1[j] = np.nan_to_num(fil.sum())
                # c = pl.plot(th, r, lw=2)
                # pl.plot(th, r1,lw=2)
                # c.set_alpha(0.75)
                # pl.semilogy(q[:,0].flatten(),tao[:,i].flatten()
                # ,linestyle='.',marker='.',color='r',markersize =5)
            pl.grid(True)
def postold(self):
try:
df = pd.read_csv(
"BTE.kappa_scalar",
sep=r"[ \t]+",
header=None,
names=['step', 'kappa'],
engine='python')
ks = np.array(df['kappa'])
plot(
(np.array(df['step']), 'Iteration Step'),
(ks, 'Thermal Conductivity (W/mK)'),
'kappa_scalar.png',
grid=True,
linewidth=2)
except Exception as e:
print(e)
try:
df = pd.read_csv(
"BTE.cumulative_kappa_scalar",
sep=r"[ \t]+",
header=None,
names=['l', 'kappa'],
engine='python')
ks = np.array(df['kappa'])
plot(
(np.array(df['l']),
'Cutoff Mean Free Path for Phonons (Angstrom)'),
(ks, 'Thermal Conductivity (W/mK)'),
'cumulative_kappa_scalar.png',
grid=True,
linewidth=2,
logx=True)
except Exception as e:
print(e)
try:
omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
kappa = np.loadtxt('BTE.kappa')[-1, 1:]
kappa = np.einsum('jji', kappa.reshape([3, 3, -1])) / 3.0
plot(
(np.arange(len(omega[0])), 'Band'),
(kappa, 'Thermal Conductivity (W/mK)'),
'kappa_band.png',
grid=True,
linewidth=2)
plot(
(np.arange(len(omega[0])), 'Band'),
(kappa.cumsum(), 'Thermal Conductivity (W/mK)'),
'cumulative_kappa_band.png',
grid=True,
linewidth=2)
except Exception as e:
print(e)
try:
w = np.loadtxt('BTE.w_final')
w = np.abs(w)
w[omega < omega.flatten().max() * 0.005] = float('nan')
plot(
(omega.flatten(), 'Frequency (THz)'), (w.flatten(),
'Scatter Rate (THz)'),
'scatter_freq.png',
grid=True,
scatter=True,
logy=True)
tao = 1.0 / w + 1e-6
with fig('tao_freq.png'):
pl.semilogy(
omega.flatten(),
tao.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Frequency (THz)')
pl.ylabel('Relaxation Time (ps)')
pl.grid(True)
pl.xlim([0, omega.max()])
# pl.ylim([0,tao.flatten().max()])
to_txt(['freq', 'tao'],
np.c_[omega.flatten(), tao.flatten()], 'tao_freq.txt')
except Exception as e:
print(e)
"""
if not exists('relaxtime'):mkdir('relaxtime')
cd('relaxtime')
for i,om in enumerate(omega[:6]):
print "q : ",i
plot((om,'Frequency (THz)'),(tao[i],'Relaxation Time (ps)'),
'tao_freq_q%d.png'%i,grid=True,scatter=True,logx=True,logy=True)
cd('..')
"""
try:
v = np.loadtxt(open('BTE.v'))
n, m = v.shape
v = v.reshape([n, 3, m / 3])
v = np.linalg.norm(v, axis=1)
y = (v.flatten(), 'Group Velocity (nm/ps)')
plot(
(omega.flatten(), 'Frequency (THz)'),
y,
'v_freq.png',
grid=True,
scatter=True)
to_txt(['freq', 'vg'],
np.c_[omega.flatten(), v.flatten()], 'v_freq.txt')
except Exception as e:
print(e)
try:
l = v * tao
l[l < 1e-6] = None
plot(
(omega.flatten(), 'Frequency (THz)'), (l.flatten(),
'Mean Free Path (nm)'),
'lamda_freq.png',
grid=True,
scatter=True,
logy=True,
logx=True,
xmin=0)
to_txt(['freq', 'mfp'],
np.c_[omega.flatten(), l.flatten()], 'lamda_freq.txt')
except Exception as e:
print(e)
try:
q = np.loadtxt(open('BTE.qpoints'))
qnorm = np.linalg.norm(q[:, -3:], axis=1)
data = []
n, m = w.shape
for i in range(m):
data.append([qnorm, w[:, i], 'b'])
series(
xlabel='|q| (1/nm)',
ylabel='Scatter Rate (THz)',
datas=data,
filename='branchscatter.png',
scatter=True,
legend=False,
logx=True,
logy=True)
except Exception as e:
print(e)
    def third(self):
        """Prepare the third-order (anharmonic) force-constant workspace.

        Creates a ``thirdorder`` directory, copies in the relaxed POSCAR
        and generates the displaced supercells for the 3rd-order IFCs.
        Note: leaves the working directory inside ``thirdorder`` for the
        follow-up ``vasprun3`` call (see ``generate``).
        """
        mkdir('thirdorder')
        cd('thirdorder')
        cp('../POSCAR', '.')
        self.generate_supercells3()
def vasprun3(self):
files = shell_exec("ls 3RD.*.*|sort -n").split('\n')
assert len(files) > 0
self.getvasprun(files)
def pSecond(self):
cp('../POSCAR', '.')
self.generate_supercells()
files = shell_exec("ls *-*").split('\n')
assert len(files) > 0
self.getvasprun(files)
    def generate(self):
        """Drive the full ShengBTE workflow.

        Steps: relax the structure, compute 2nd-order force constants in
        ``secondorder``, compute 3rd-order force constants in ``thirdorder``
        (``third`` leaves the cwd inside that directory, which is why only a
        single ``cd('..')`` follows ``force_constant3``), assemble the
        ``SHENG`` input directory, then launch ShengBTE.
        """
        # m = self.m
        self.minimizePOSCAR()
        # cp('minimize/POSCAR','.')
        mkdir('secondorder')
        cd('secondorder')
        self.pSecond()
        self.fc2()
        cd('..')
        self.third()
        self.vasprun3()
        self.force_constant3()
        cd('..')
        self.pSheng()
        self.runsheng()
    def pSheng(self):
        """Assemble the ShengBTE input directory.

        Creates ``SHENG`` and populates it with the harmonic
        (``FORCE_CONSTANTS_2ND``) and anharmonic (``FORCE_CONSTANTS_3RD``)
        force constants, then writes the CONTROL file via ``getControl``.
        Leaves the working directory inside ``SHENG``.
        """
        mkdir('SHENG')
        cd('SHENG')
        cp('../secondorder/FORCE_CONSTANTS', 'FORCE_CONSTANTS_2ND')
        cp('../thirdorder/FORCE_CONSTANTS_3RD', '.')
        self.getControl()
    def runold(self):
        """Launch the legacy ShengBTE binary (``config.sheng``) under MPI
        with ``nodes * procs`` total ranks."""
        # Thermal conductivity calculation
        m = self.m
        print("START SHENGBTE...")
        passthru(config.mpirun + " %s " % (m.nodes * m.procs) + config.sheng)
def runsheng(self):
# Thermal conductivity calculation
m = self.m
print("START SHENGBTE...")
passthru(config.mpirun + " %s " % (m.nodes * m.procs) +
config.shengbte)
    def kmfp(self):
        """Plot cumulative thermal conductivity versus phonon mean free path.

        Scans ``shengold*`` run directories, keeps those whose q-grid has
        ngrid[1] == 4, and plots their ``BTE.cumulative_kappa_scalar``
        curves.  A second figure plots 1/kappa against 1/L and fits a line
        to extrapolate the bulk conductivity (Matthiessen-style analysis).
        """
        def ff(p, x):
            # Saturating model for cumulative kappa vs cutoff MFP
            # (alternative form kept for reference):
            # return p[0]*(1.0-np.exp(-x**p[2]/p[1]))
            return 1.0 / (p[1] / x + 1 / p[0]) - p[2]
        # return p[0]*p[1]**x

        def fit(x, z, p0, tt):
            # Least-squares fit of model `tt` with initial guess p0.
            def errorfunc(p, x, z):
                return tt(p, x) - z
            from scipy.optimize import leastsq
            solp, ier = leastsq(
                errorfunc,
                p0,
                args=(x, z),
                Dfun=None,
                full_output=False,
                ftol=1e-9,
                xtol=1e-9,
                maxfev=100000,
                epsfcn=1e-10,
                factor=0.1)
            return solp
        dirs = ls('shengold*')
        from aces.scanf import sscanf
        from aces.graph import fig, pl
        us = []
        for d in dirs:
            # Parse the q-grid from each run's CONTROL file.
            f = shell_exec('grep ngrid %s/CONTROL' % d)
            ks = sscanf(f, " ngrid(:)=%d %d %d")
            if (ks[1] != 4):
                continue
            f = np.loadtxt('%s/BTE.cumulative_kappa_scalar' % d)
            us.append([ks, f])
        with fig('reduce_mfp.png', legend=True, ncol=1):
            for i, u in enumerate(us):
                # Skip the first three (coarsest) grids for readability.
                if i < 3:
                    continue
                ks, f = u
                x, y = f[:, 0], f[:, 1]
                pl.semilogx(x, y, label="Nx= %d " % ks[0], linewidth=2)
            ks, f = us[-1]
            x, y = f[:, 0], f[:, 1]
            # fil=(x>0)
            # p=fit(x[fil],y[fil],[1,1,1],ff)
            # y1=ff(p,x)
            # pl.semilogx(x,y1,label="fit of Nx= %d "%ks[0],linewidth=2)
            pl.xlabel('Cutoff Mean Free Path for Phonons (Angstrom)')
            pl.ylabel('Thermal Conductivity (W/mK)')
            pl.grid(True)
        with fig('kappa_inv_mpf_inv.png', legend=True, ncol=1):
            ks, f = us[-1]
            # NOTE(review): `fil` is computed from the `x` left over from the
            # previous figure (which happens to be f[:, 0] of us[-1] as
            # well), so this works but is fragile -- verify before reuse.
            fil = x > .5
            x, y = f[fil, 0], f[fil, 1]
            xx = 1 / x
            yy = 1 / y
            pl.plot(xx, yy, linewidth=3, c='red', label="Nx=1024")

            def ll(p, x):
                # Straight line for the 1/kappa vs 1/L extrapolation.
                return p[0] * x + p[1]
            # Fit only the large-1/L (short cutoff) region.
            fil = xx > xx.max() / 4
            p = fit(xx[fil], yy[fil], [1, 1, 1], ll)
            pl.plot(xx, ll(p, xx), lw=3, ls='dashed', label="Fitted")
            pl.xlabel('1/L (1/Angstrom)')
            pl.ylabel('$1/\\kappa_L$ (mK/W)')
            pl.grid(True)
| gpl-2.0 |
vicky2135/lucious | oscar/lib/python2.7/site-packages/IPython/sphinxext/ipython_directive.py | 6 | 42602 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
Pseudo-Decorators
=================
Note: Only one decorator is supported per input. If more than one decorator
is specified, then only the last one is used.
In addition to the Pseudo-Decorators/options described at the above link,
several enhancements have been made. The directive will emit a message to the
console at build-time if code-execution resulted in an exception or warning.
You can suppress these on a per-block basis by specifying the :okexcept:
or :okwarning: options:
.. code-block:: rst
.. ipython::
:okexcept:
:okwarning:
In [1]: 1/0
In [2]: # raise warning.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import atexit
import errno
import os
import re
import sys
import tempfile
import ast
import warnings
import shutil
# Third-party
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
# Our own
from traitlets.config import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
    """
    part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines.  The block parser
    parses the text into a list of::

      blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]

    where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
    data is, depending on the type of token::

      COMMENT : the comment string

      INPUT: the (DECORATOR, INPUT_LINE, REST) where
        DECORATOR: the input decorator (or None)
        INPUT_LINE: the input as string (possibly multi-line)
        REST : any stdout generated by the input line (not OUTPUT)

      OUTPUT: the output string, possibly multi-line

    NOTE(review): fmtin and fmtout are accepted but never referenced in
    this function body -- the prompt formats are applied by the caller.
    """
    block = []
    lines = part.split('\n')
    N = len(lines)
    i = 0
    decorator = None
    while 1:

        if i==N:
            # nothing left to parse -- the last line
            break

        line = lines[i]
        i += 1
        line_stripped = line.strip()
        if line_stripped.startswith('#'):
            block.append((COMMENT, line))
            continue

        if line_stripped.startswith('@'):
            # Here is where we assume there is, at most, one decorator.
            # Might need to rethink this.
            decorator = line_stripped
            continue

        # does this look like an input line?
        matchin = rgxin.match(line)
        if matchin:
            lineno, inputline = int(matchin.group(1)), matchin.group(2)

            # the ....: continuation string
            continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
            Nc = len(continuation)
            # input lines can continue on for more than one line, if
            # we have a '\' line continuation char or a function call
            # echo line 'print'.  The input line can only be
            # terminated by the end of the block or an output line, so
            # we parse out the rest of the input line if it is
            # multiline as well as any echo text
            rest = []
            while i<N:

                # look ahead; if the next line is blank, or a comment, or
                # an output line, we're done
                nextline = lines[i]
                matchout = rgxout.match(nextline)
                #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
                if matchout or nextline.startswith('#'):
                    break
                elif nextline.startswith(continuation):
                    # The default ipython_rgx* treat the space following the colon as optional.
                    # However, If the space is there we must consume it or code
                    # employing the cython_magic extension will fail to execute.
                    #
                    # This works with the default ipython_rgx* patterns,
                    # If you modify them, YMMV.
                    nextline = nextline[Nc:]
                    if nextline and nextline[0] == ' ':
                        nextline = nextline[1:]

                    inputline += '\n' + nextline
                else:
                    rest.append(nextline)
                i+= 1

            block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
            continue

        # if it looks like an output line grab all the text to the end
        # of the block
        matchout = rgxout.match(line)
        if matchout:
            lineno, output = int(matchout.group(1)), matchout.group(2)
            if i<N-1:
                output = '\n'.join([output] + lines[i:])

            block.append((OUTPUT, output))
            break

    return block
class EmbeddedSphinxShell(object):
    """An embedded IPython instance to run inside Sphinx"""

    def __init__(self, exec_lines=None):
        # exec_lines: optional list of source lines executed at startup to
        # prepopulate the namespace (e.g. 'import numpy as np').

        self.cout = StringIO()

        if exec_lines is None:
            exec_lines = []

        # Create config object for IPython
        config = Config()
        config.HistoryManager.hist_file = ':memory:'
        config.InteractiveShell.autocall = False
        config.InteractiveShell.autoindent = False
        config.InteractiveShell.colors = 'NoColor'

        # create a profile so instance history isn't saved
        tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
        profname = 'auto_profile_sphinx_build'
        pdir = os.path.join(tmp_profile_dir,profname)
        profile = ProfileDir.create_profile_dir(pdir)

        # Create and initialize global ipython, but don't start its mainloop.
        # This will persist across different EmbededSphinxShell instances.
        IP = InteractiveShell.instance(config=config, profile_dir=profile)
        atexit.register(self.cleanup)

        # Redirect this process's stdout/stderr into the capture buffer so
        # output produced by executed cells can be harvested later.
        sys.stdout = self.cout
        sys.stderr = self.cout

        # For debugging, so we can see normal output, use this:
        #from IPython.utils.io import Tee
        #sys.stdout = Tee(self.cout, channel='stdout') # dbg
        #sys.stderr = Tee(self.cout, channel='stderr') # dbg

        # Store a few parts of IPython we'll need.
        self.IP = IP
        self.user_ns = self.IP.user_ns
        self.user_global_ns = self.IP.user_global_ns

        self.input = ''
        self.output = ''
        self.tmp_profile_dir = tmp_profile_dir

        self.is_verbatim = False
        self.is_doctest = False
        self.is_suppress = False

        # Optionally, provide more detailed information to shell.
        # this is assigned by the SetUp method of IPythonDirective
        # to point at itself.
        #
        # So, you can access handy things at self.directive.state
        self.directive = None

        # on the first call to the savefig decorator, we'll import
        # pyplot as plt so we can make a call to the plt.gcf().savefig
        self._pyplot_imported = False

        # Prepopulate the namespace.
        for line in exec_lines:
            self.process_input_line(line, store_history=False)

    def cleanup(self):
        """Remove the temporary IPython profile directory (atexit hook)."""
        shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)

    def clear_cout(self):
        """Reset the captured-output buffer to empty."""
        self.cout.seek(0)
        self.cout.truncate(0)

    def process_input_line(self, line, store_history=True):
        """process the input, capturing stdout"""
        stdout = sys.stdout
        splitter = self.IP.input_splitter
        try:
            sys.stdout = self.cout
            splitter.push(line)
            more = splitter.push_accepts_more()
            if not more:
                # Only run the cell once the splitter says the input is
                # complete (handles multi-line statements).
                source_raw = splitter.raw_reset()
                self.IP.run_cell(source_raw, store_history=store_history)
        finally:
            sys.stdout = stdout

    def process_image(self, decorator):
        """
        # build out an image directive like
        # .. image:: somefile.png
        #    :width 4in
        #
        # from an input like
        # savefig somefile.png width=4in
        """
        savefig_dir = self.savefig_dir
        source_dir = self.source_dir
        saveargs = decorator.split(' ')
        filename = saveargs[1]
        # insert relative path to image file in source (as absolute path for Sphinx)
        outfile = '/' + os.path.relpath(os.path.join(savefig_dir,filename),
                    source_dir)

        imagerows = ['.. image:: %s'%outfile]

        # Any remaining 'key=value' arguments become image options.
        for kwarg in saveargs[2:]:
            arg, val = kwarg.split('=')
            arg = arg.strip()
            val = val.strip()
            imagerows.append('   :%s: %s'%(arg, val))

        image_file = os.path.basename(outfile) # only return file name
        image_directive = '\n'.join(imagerows)
        return image_file, image_directive

    # Callbacks for each type of token
    def process_input(self, data, input_prompt, lineno):
        """
        Process data block for INPUT token.
        """
        decorator, input, rest = data
        image_file = None
        image_directive = None

        is_verbatim = decorator=='@verbatim' or self.is_verbatim
        is_doctest = (decorator is not None and \
                     decorator.startswith('@doctest')) or self.is_doctest
        is_suppress = decorator=='@suppress' or self.is_suppress
        is_okexcept = decorator=='@okexcept' or self.is_okexcept
        is_okwarning = decorator=='@okwarning' or self.is_okwarning
        is_savefig = decorator is not None and \
                     decorator.startswith('@savefig')

        input_lines = input.split('\n')
        if len(input_lines) > 1:
            if input_lines[-1] != "":
                input_lines.append('') # make sure there's a blank line
                                       # so splitter buffer gets reset

        continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))

        if is_savefig:
            image_file, image_directive = self.process_image(decorator)

        ret = []
        is_semicolon = False

        # Hold the execution count, if requested to do so.
        if is_suppress and self.hold_count:
            store_history = False
        else:
            store_history = True

        # Note: catch_warnings is not thread safe
        with warnings.catch_warnings(record=True) as ws:
            for i, line in enumerate(input_lines):
                if line.endswith(';'):
                    is_semicolon = True

                if i == 0:
                    # process the first input line
                    if is_verbatim:
                        self.process_input_line('')
                        self.IP.execution_count += 1 # increment it anyway
                    else:
                        # only submit the line in non-verbatim mode
                        self.process_input_line(line, store_history=store_history)
                    formatted_line = '%s %s'%(input_prompt, line)
                else:
                    # process a continuation line
                    if not is_verbatim:
                        self.process_input_line(line, store_history=store_history)

                    formatted_line = '%s %s'%(continuation, line)

                if not is_suppress:
                    ret.append(formatted_line)

        if not is_suppress and len(rest.strip()) and is_verbatim:
            # The "rest" is the standard output of the input. This needs to be
            # added when in verbatim mode. If there is no "rest", then we don't
            # add it, as the new line will be added by the processed output.
            ret.append(rest)

        # Fetch the processed output. (This is not the submitted output.)
        self.cout.seek(0)
        processed_output = self.cout.read()
        if not is_suppress and not is_semicolon:
            #
            # In IPythonDirective.run, the elements of `ret` are eventually
            # combined such that '' entries correspond to newlines. So if
            # `processed_output` is equal to '', then the adding it to `ret`
            # ensures that there is a blank line between consecutive inputs
            # that have no outputs, as in:
            #
            #    In [1]: x = 4
            #
            #    In [2]: x = 5
            #
            # When there is processed output, it has a '\n' at the tail end. So
            # adding the output to `ret` will provide the necessary spacing
            # between consecutive input/output blocks, as in:
            #
            #   In [1]: x
            #   Out[1]: 5
            #
            #   In [2]: x
            #   Out[2]: 5
            #
            # When there is stdout from the input, it also has a '\n' at the
            # tail end, and so this ensures proper spacing as well. E.g.:
            #
            #   In [1]: print x
            #   5
            #
            #   In [2]: x = 5
            #
            # When in verbatim mode, `processed_output` is empty (because
            # nothing was passed to IP. Sometimes the submitted code block has
            # an Out[] portion and sometimes it does not. When it does not, we
            # need to ensure proper spacing, so we have to add '' to `ret`.
            # However, if there is an Out[] in the submitted code, then we do
            # not want to add a newline as `process_output` has stuff to add.
            # The difficulty is that `process_input` doesn't know if
            # `process_output` will be called---so it doesn't know if there is
            # Out[] in the code block. The requires that we include a hack in
            # `process_block`. See the comments there.
            #
            ret.append(processed_output)
        elif is_semicolon:
            # Make sure there is a newline after the semicolon.
            ret.append('')

        # context information
        filename = "Unknown"
        lineno = 0
        if self.directive.state:
            filename = self.directive.state.document.current_source
            lineno = self.directive.state.document.current_line

        # output any exceptions raised during execution to stdout
        # unless :okexcept: has been specified.
        if not is_okexcept and "Traceback" in processed_output:
            s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
            s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
            sys.stdout.write('\n\n>>>' + ('-' * 73))
            sys.stdout.write(s)
            sys.stdout.write(processed_output)
            sys.stdout.write('<<<' + ('-' * 73) + '\n\n')

        # output any warning raised during execution to stdout
        # unless :okwarning: has been specified.
        if not is_okwarning:
            for w in ws:
                s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
                s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
                sys.stdout.write('\n\n>>>' + ('-' * 73))
                sys.stdout.write(s)
                sys.stdout.write(('-' * 76) + '\n')
                s=warnings.formatwarning(w.message, w.category,
                                         w.filename, w.lineno, w.line)
                sys.stdout.write(s)
                sys.stdout.write('<<<' + ('-' * 73) + '\n')

        self.cout.truncate(0)

        return (ret, input_lines, processed_output,
                is_doctest, decorator, image_file, image_directive)

    def process_output(self, data, output_prompt, input_lines, output,
                       is_doctest, decorator, image_file):
        """
        Process data block for OUTPUT token.
        """
        # Recall: `data` is the submitted output, and `output` is the processed
        # output from `input_lines`.

        TAB = ' ' * 4

        if is_doctest and output is not None:

            found = output # This is the processed output
            found = found.strip()
            submitted = data.strip()

            if self.directive is None:
                source = 'Unavailable'
                content = 'Unavailable'
            else:
                source = self.directive.state.document.current_source
                content = self.directive.content
                # Add tabs and join into a single string.
                content = '\n'.join([TAB + line for line in content])

            # Make sure the output contains the output prompt.
            ind = found.find(output_prompt)
            if ind < 0:
                e = ('output does not contain output prompt\n\n'
                     'Document source: {0}\n\n'
                     'Raw content: \n{1}\n\n'
                     'Input line(s):\n{TAB}{2}\n\n'
                     'Output line(s):\n{TAB}{3}\n\n')
                e = e.format(source, content, '\n'.join(input_lines),
                             repr(found), TAB=TAB)
                raise RuntimeError(e)
            found = found[len(output_prompt):].strip()

            # Handle the actual doctest comparison.
            if decorator.strip() == '@doctest':
                # Standard doctest
                if found != submitted:
                    e = ('doctest failure\n\n'
                         'Document source: {0}\n\n'
                         'Raw content: \n{1}\n\n'
                         'On input line(s):\n{TAB}{2}\n\n'
                         'we found output:\n{TAB}{3}\n\n'
                         'instead of the expected:\n{TAB}{4}\n\n')
                    e = e.format(source, content, '\n'.join(input_lines),
                                 repr(found), repr(submitted), TAB=TAB)
                    raise RuntimeError(e)
            else:
                self.custom_doctest(decorator, input_lines, found, submitted)

        # When in verbatim mode, this holds additional submitted output
        # to be written in the final Sphinx output.
        # https://github.com/ipython/ipython/issues/5776
        out_data = []

        is_verbatim = decorator=='@verbatim' or self.is_verbatim
        if is_verbatim and data.strip():
            # Note that `ret` in `process_block` has '' as its last element if
            # the code block was in verbatim mode. So if there is no submitted
            # output, then we will have proper spacing only if we do not add
            # an additional '' to `out_data`. This is why we condition on
            # `and data.strip()`.

            # The submitted output has no output prompt. If we want the
            # prompt and the code to appear, we need to join them now
            # instead of adding them separately---as this would create an
            # undesired newline. How we do this ultimately depends on the
            # format of the output regex. I'll do what works for the default
            # prompt for now, and we might have to adjust if it doesn't work
            # in other cases. Finally, the submitted output does not have
            # a trailing newline, so we must add it manually.
            out_data.append("{0} {1}\n".format(output_prompt, data))

        return out_data

    def process_comment(self, data):
        """Process data block for COMMENT token."""
        if not self.is_suppress:
            return [data]

    def save_image(self, image_file):
        """
        Saves the image file to disk.
        """
        self.ensure_pyplot()
        command = 'plt.gcf().savefig("%s")'%image_file
        #print 'SAVEFIG', command # dbg
        # Bookmark-hop into the savefig directory so the relative filename
        # lands in the right place, then hop back.
        self.process_input_line('bookmark ipy_thisdir', store_history=False)
        self.process_input_line('cd -b ipy_savedir', store_history=False)
        self.process_input_line(command, store_history=False)
        self.process_input_line('cd -b ipy_thisdir', store_history=False)
        self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
        self.clear_cout()

    def process_block(self, block):
        """
        process block from the block_parser and return a list of processed lines
        """
        ret = []
        output = None
        input_lines = None
        lineno = self.IP.execution_count

        input_prompt = self.promptin % lineno
        output_prompt = self.promptout % lineno
        image_file = None
        image_directive = None

        found_input = False
        for token, data in block:
            if token == COMMENT:
                out_data = self.process_comment(data)
            elif token == INPUT:
                found_input = True
                (out_data, input_lines, output, is_doctest,
                 decorator, image_file, image_directive) = \
                          self.process_input(data, input_prompt, lineno)
            elif token == OUTPUT:
                if not found_input:
                    # An OUTPUT token without a preceding INPUT token is a
                    # malformed block; report context and abort the build.

                    TAB = ' ' * 4
                    linenumber = 0
                    source = 'Unavailable'
                    content = 'Unavailable'
                    if self.directive:
                        linenumber = self.directive.state.document.current_line
                        source = self.directive.state.document.current_source
                        content = self.directive.content
                        # Add tabs and join into a single string.
                        content = '\n'.join([TAB + line for line in content])

                    e = ('\n\nInvalid block: Block contains an output prompt '
                         'without an input prompt.\n\n'
                         'Document source: {0}\n\n'
                         'Content begins at line {1}: \n\n{2}\n\n'
                         'Problematic block within content: \n\n{TAB}{3}\n\n')
                    e = e.format(source, linenumber, content, block, TAB=TAB)

                    # Write, rather than include in exception, since Sphinx
                    # will truncate tracebacks.
                    sys.stdout.write(e)
                    raise RuntimeError('An invalid block was detected.')

                out_data = \
                    self.process_output(data, output_prompt, input_lines,
                                        output, is_doctest, decorator,
                                        image_file)
                if out_data:
                    # Then there was user submitted output in verbatim mode.
                    # We need to remove the last element of `ret` that was
                    # added in `process_input`, as it is '' and would introduce
                    # an undesirable newline.
                    assert(ret[-1] == '')
                    del ret[-1]

            if out_data:
                ret.extend(out_data)

        # save the image files
        if image_file is not None:
            self.save_image(image_file)

        return ret, image_directive

    def ensure_pyplot(self):
        """
        Ensures that pyplot has been imported into the embedded IPython shell.

        Also, makes sure to set the backend appropriately if not set already.

        """
        # We are here if the @figure pseudo decorator was used. Thus, it's
        # possible that we could be here even if python_mplbackend were set to
        # `None`. That's also strange and perhaps worthy of raising an
        # exception, but for now, we just set the backend to 'agg'.

        if not self._pyplot_imported:
            if 'matplotlib.backends' not in sys.modules:
                # Then ipython_matplotlib was set to None but there was a
                # call to the @figure decorator (and ipython_execlines did
                # not set a backend).
                #raise Exception("No backend was set, but @figure was used!")
                import matplotlib
                matplotlib.use('agg')

            # Always import pyplot into embedded shell.
            self.process_input_line('import matplotlib.pyplot as plt',
                                    store_history=False)
            self._pyplot_imported = True

    def process_pure_python(self, content):
        """
        content is a list of strings. it is unedited directive content

        This runs it line by line in the InteractiveShell, prepends
        prompts as needed capturing stderr and stdout, then returns
        the content as a list as if it were ipython code
        """
        output = []
        savefig = False # keep up with this to clear figure
        multiline = False # to handle line continuation
        multiline_start = None
        fmtin = self.promptin

        # ct tracks the synthetic execution count used in the prompts.
        ct = 0

        for lineno, line in enumerate(content):

            line_stripped = line.strip()
            if not len(line):
                output.append(line)
                continue

            # handle decorators
            if line_stripped.startswith('@'):
                output.extend([line])
                if 'savefig' in line:
                    savefig = True # and need to clear figure
                continue

            # handle comments
            if line_stripped.startswith('#'):
                output.extend([line])
                continue

            # deal with lines checking for multiline
            continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
            if not multiline:
                modified = u"%s %s" % (fmtin % ct, line_stripped)
                output.append(modified)
                ct += 1
                try:
                    # If the line parses on its own it is a complete
                    # statement; otherwise we enter multiline mode.
                    ast.parse(line_stripped)
                    output.append(u'')
                except Exception: # on a multiline
                    multiline = True
                    multiline_start = lineno
            else: # still on a multiline
                modified = u'%s %s' % (continuation, line)
                output.append(modified)

                # if the next line is indented, it should be part of multiline
                if len(content) > lineno + 1:
                    nextline = content[lineno + 1]
                    if len(nextline) - len(nextline.lstrip()) > 3:
                        continue
                try:
                    mod = ast.parse(
                            '\n'.join(content[multiline_start:lineno+1]))
                    if isinstance(mod.body[0], ast.FunctionDef):
                        # check to see if we have the whole function
                        for element in mod.body[0].body:
                            if isinstance(element, ast.Return):
                                multiline = False
                    else:
                        output.append(u'')
                        multiline = False
                except Exception:
                    pass

            if savefig: # clear figure if plotted
                self.ensure_pyplot()
                self.process_input_line('plt.clf()', store_history=False)
                self.clear_cout()
                savefig = False

        return output

    def custom_doctest(self, decorator, input_lines, found, submitted):
        """
        Perform a specialized doctest.

        """
        from .custom_doctests import doctests

        args = decorator.split()
        doctest_type = args[1]
        if doctest_type in doctests:
            doctests[doctest_type](self, args, input_lines, found, submitted)
        else:
            e = "Invalid option to @doctest: {0}".format(doctest_type)
            raise Exception(e)
class IPythonDirective(Directive):
    """Sphinx directive (``.. ipython::``) that executes embedded IPython
    sessions at documentation build time and emits the (re-run, renumbered)
    session as an ``ipython`` code-block.
    """

    has_content = True
    required_arguments = 0
    optional_arguments = 4 # python, suppress, verbatim, doctest
    # BUGFIX: this attribute was misspelled `final_argumuent_whitespace`,
    # which docutils ignores entirely -- the intended whitespace handling of
    # the final argument was never enabled.
    final_argument_whitespace = True
    option_spec = { 'python': directives.unchanged,
                    'suppress' : directives.flag,
                    'verbatim' : directives.flag,
                    'doctest' : directives.flag,
                    'okexcept': directives.flag,
                    'okwarning': directives.flag
                  }

    # Class-level: one embedded shell shared across all directive instances.
    shell = None

    # Documents already processed in this build (used to reset the counter
    # once per document).
    seen_docs = set()

    def get_config_options(self):
        """Read the ipython_* settings from the Sphinx build configuration."""
        # contains sphinx configuration variables
        config = self.state.document.settings.env.config

        # get config variables to set figure output directory
        savefig_dir = config.ipython_savefig_dir
        source_dir = self.state.document.settings.env.srcdir
        savefig_dir = os.path.join(source_dir, savefig_dir)

        # get regex and prompt stuff
        rgxin = config.ipython_rgxin
        rgxout = config.ipython_rgxout
        promptin = config.ipython_promptin
        promptout = config.ipython_promptout
        mplbackend = config.ipython_mplbackend
        exec_lines = config.ipython_execlines
        hold_count = config.ipython_holdcount

        return (savefig_dir, source_dir, rgxin, rgxout,
                promptin, promptout, mplbackend, exec_lines, hold_count)

    def setup(self):
        """Create/configure the shared embedded shell for this document.

        Returns the (rgxin, rgxout, promptin, promptout) tuple used by
        ``run`` to parse the directive content.
        """
        # Get configuration values.
        (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
         mplbackend, exec_lines, hold_count) = self.get_config_options()

        try:
            os.makedirs(savefig_dir)
        except OSError as e:
            # The directory already existing is fine; anything else is real.
            if e.errno != errno.EEXIST:
                raise

        if self.shell is None:
            # We will be here many times.  However, when the
            # EmbeddedSphinxShell is created, its interactive shell member
            # is the same for each instance.

            if mplbackend and 'matplotlib.backends' not in sys.modules:
                import matplotlib
                matplotlib.use(mplbackend)

            # Must be called after (potentially) importing matplotlib and
            # setting its backend since exec_lines might import pylab.
            self.shell = EmbeddedSphinxShell(exec_lines)

            # Store IPython directive to enable better error messages
            self.shell.directive = self

        # reset the execution count if we haven't processed this doc
        #NOTE: this may be borked if there are multiple seen_doc tmp files
        #check time stamp?
        if self.state.document.current_source not in self.seen_docs:
            self.shell.IP.history_manager.reset()
            self.shell.IP.execution_count = 1
            self.seen_docs.add(self.state.document.current_source)

        # and attach to shell so we don't have to pass them around
        self.shell.rgxin = rgxin
        self.shell.rgxout = rgxout
        self.shell.promptin = promptin
        self.shell.promptout = promptout
        self.shell.savefig_dir = savefig_dir
        self.shell.source_dir = source_dir
        self.shell.hold_count = hold_count

        # setup bookmark for saving figures directory
        self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
                                      store_history=False)
        self.shell.clear_cout()

        return rgxin, rgxout, promptin, promptout

    def teardown(self):
        """Undo per-run shell state (the savefig bookmark)."""
        # delete last bookmark
        self.shell.process_input_line('bookmark -d ipy_savedir',
                                      store_history=False)
        self.shell.clear_cout()

    def run(self):
        """Execute the directive content and return the rendered lines."""
        debug = False

        #TODO, any reason block_parser can't be a method of embeddable shell
        # then we wouldn't have to carry these around
        rgxin, rgxout, promptin, promptout = self.setup()

        options = self.options
        self.shell.is_suppress = 'suppress' in options
        self.shell.is_doctest = 'doctest' in options
        self.shell.is_verbatim = 'verbatim' in options
        self.shell.is_okexcept = 'okexcept' in options
        self.shell.is_okwarning = 'okwarning' in options

        # handle pure python code
        if 'python' in self.arguments:
            content = self.content
            self.content = self.shell.process_pure_python(content)

        # parts consists of all text within the ipython-block.
        # Each part is an input/output block.
        parts = '\n'.join(self.content).split('\n\n')

        lines = ['.. code-block:: ipython', '']
        figures = []

        for part in parts:
            block = block_parser(part, rgxin, rgxout, promptin, promptout)
            if len(block):
                rows, figure = self.shell.process_block(block)
                for row in rows:
                    lines.extend(['   {0}'.format(line)
                                  for line in row.split('\n')])

                if figure is not None:
                    figures.append(figure)

        for figure in figures:
            lines.append('')
            lines.extend(figure.split('\n'))
            lines.append('')

        if len(lines) > 2:
            if debug:
                print('\n'.join(lines))
            else:
                # This has to do with input, not output. But if we comment
                # these lines out, then no IPython code will appear in the
                # final output.
                self.state_machine.insert_input(
                    lines, self.state_machine.input_lines.source(0))

        # cleanup
        self.teardown()

        return []
# Enable as a proper Sphinx directive
def setup(app):
    """Sphinx extension entry point: register the ``ipython`` directive and
    all of its configuration values.

    Returns the extension metadata dict (declares parallel-build safety).
    """
    setup.app = app

    app.add_directive('ipython', IPythonDirective)
    app.add_config_value('ipython_savefig_dir', 'savefig', 'env')
    # Regexes used to recognize In[n]/Out[n] prompt lines in directive bodies.
    app.add_config_value('ipython_rgxin',
                         re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
    app.add_config_value('ipython_rgxout',
                         re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
    app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
    app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')

    # We could just let matplotlib pick whatever is specified as the default
    # backend in the matplotlibrc file, but this would cause issues if the
    # backend didn't work in headless environments. For this reason, 'agg'
    # is a good default backend choice.
    app.add_config_value('ipython_mplbackend', 'agg', 'env')

    # If the user sets this config value to `None`, then EmbeddedSphinxShell's
    # __init__ method will treat it as [].
    execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
    app.add_config_value('ipython_execlines', execlines, 'env')

    app.add_config_value('ipython_holdcount', True, 'env')

    metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
    return metadata
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
    """Feed a set of canned directive bodies through IPythonDirective.

    NOTE(review): only verifies that processing does not raise; the rendered
    output is never checked, so this is a smoke test, not a real test.
    """
    # Each entry is one directive body exercising a different feature:
    # cwd-dependent commands, @doctest, @verbatim, @suppress, @savefig, ...
    examples = [
        r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
        r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
        r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
        r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
        r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
        r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
        r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
        r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
    ]
    # skip local-file depending first example:
    examples = examples[1:]

    #ipython_directive.DEBUG = True # dbg
    #options = dict(suppress=True) # dbg
    options = dict()
    for example in examples:
        content = example.split('\n')
        # Instantiating the directive runs it; docutils machinery is stubbed
        # with None, so only the shell-processing path is exercised.
        IPythonDirective('debug', arguments=None, options=options,
                         content=content, lineno=0,
                         content_offset=None, block_text=None,
                         state=None, state_machine=None,
                         )
# Run test suite as a script
if __name__=='__main__':
    # The @savefig examples write into _static/, so make sure it exists.
    if not os.path.isdir('_static'):
        os.mkdir('_static')
    test()
    print('All OK? Check figures in _static/')
| bsd-3-clause |
JanNash/sms-tools | software/transformations_interface/sineTransformations_function.py | 25 | 5018 | # function call to the transformation functions of relevance for the sineModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import sineModel as SM
import sineTransformations as ST
import utilFunctions as UF
def analysis(inputFile='../../sounds/mridangam.wav', window='hamming', M=801, N=2048, t=-90,
             minSineDur=0.01, maxnSines=150, freqDevOffset=20, freqDevSlope=0.02):
    """
    Analyze a sound with the sine model
    inputFile: input sound file (monophonic with sampling rate of 44100)
    window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
    M: analysis window size; N: fft size (power of two, bigger or equal than M)
    t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
    maxnSines: maximum number of parallel sinusoids
    freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
    freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
    returns inputFile: input file name; fs: sampling rate of input file,
            tfreq, tmag: sinusoidal frequencies and magnitudes
    """

    # size of fft used in synthesis
    Ns = 512

    # hop size (has to be 1/4 of Ns)
    H = 128

    # read input sound
    (fs, x) = UF.wavread(inputFile)

    # compute analysis window
    w = get_window(window, M)

    # compute the sine model of the whole sound
    # NOTE(review): tphase is discarded; synthesis below regenerates phases.
    tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)

    # synthesize the sines without original phases
    y = SM.sineModelSynth(tfreq, tmag, np.array([]), Ns, H, fs)

    # output sound file (monophonic with sampling rate of 44100)
    outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sineModel.wav'

    # write the sound resulting from the inverse stft
    UF.wavwrite(y, fs, outputFile)

    # create figure to show plots
    plt.figure(figsize=(12, 9))

    # frequency range to plot
    maxplotfreq = 5000.0

    # plot the input sound
    plt.subplot(3,1,1)
    plt.plot(np.arange(x.size)/float(fs), x)
    plt.axis([0, x.size/float(fs), min(x), max(x)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('input sound: x')

    # plot the sinusoidal frequencies
    if (tfreq.shape[1] > 0):
        plt.subplot(3,1,2)
        tracks = np.copy(tfreq)
        # zero out frequencies above the plot range, then hide zeros as NaN
        tracks = tracks*np.less(tracks, maxplotfreq)
        tracks[tracks<=0] = np.nan
        numFrames = int(tracks[:,0].size)
        frmTime = H*np.arange(numFrames)/float(fs)
        plt.plot(frmTime, tracks)
        plt.axis([0, x.size/float(fs), 0, maxplotfreq])
        plt.title('frequencies of sinusoidal tracks')

    # plot the output sound
    plt.subplot(3,1,3)
    plt.plot(np.arange(y.size)/float(fs), y)
    plt.axis([0, y.size/float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')

    plt.tight_layout()
    plt.show(block=False)

    return inputFile, fs, tfreq, tmag
def transformation_synthesis(inputFile, fs, tfreq, tmag, freqScaling = np.array([0, 2.0, 1, .3]),
                             timeScaling = np.array([0, .0, .671, .671, 1.978, 1.978+1.0])):
    """
    Transform the analysis values returned by the analysis function and synthesize the sound
    inputFile: name of input file; fs: sampling rate of input file
    tfreq, tmag: sinusoidal frequencies and magnitudes
    freqScaling: frequency scaling factors, in time-value pairs
    timeScaling: time scaling factors, in time-value pairs
    """

    # size of fft used in synthesis
    Ns = 512

    # hop size (has to be 1/4 of Ns)
    H = 128

    # frequency scaling of the sinusoidal tracks
    ytfreq = ST.sineFreqScaling(tfreq, freqScaling)

    # time scale the sinusoidal tracks
    ytfreq, ytmag = ST.sineTimeScaling(ytfreq, tmag, timeScaling)

    # synthesis
    y = SM.sineModelSynth(ytfreq, ytmag, np.array([]), Ns, H, fs)

    # write output sound
    outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sineModelTransformation.wav'
    UF.wavwrite(y,fs, outputFile)

    # create figure to plot
    plt.figure(figsize=(12, 6))

    # frequency range to plot
    maxplotfreq = 15000.0

    # plot the transformed sinusoidal frequencies
    if (ytfreq.shape[1] > 0):
        plt.subplot(2,1,1)
        tracks = np.copy(ytfreq)
        # keep only frequencies inside the plot range; NaN hides the rest
        tracks = tracks*np.less(tracks, maxplotfreq)
        tracks[tracks<=0] = np.nan
        numFrames = int(tracks[:,0].size)
        frmTime = H*np.arange(numFrames)/float(fs)
        plt.plot(frmTime, tracks)
        plt.title('transformed sinusoidal tracks')
        plt.autoscale(tight=True)

    # plot the output sound
    plt.subplot(2,1,2)
    plt.plot(np.arange(y.size)/float(fs), y)
    plt.axis([0, y.size/float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')

    plt.tight_layout()
    plt.show()
# Demo entry point: analyze the default sound, then transform and resynthesize.
if __name__ == "__main__":

    # analysis
    inputFile, fs, tfreq, tmag = analysis()

    # transformation and synthesis
    transformation_synthesis (inputFile, fs, tfreq, tmag)

    plt.show()
| agpl-3.0 |
subodhchhabra/pandashells | pandashells/test/p_regplot_test.py | 10 | 1072 | #! /usr/bin/env python
from mock import patch
from unittest import TestCase
import numpy as np
import pandas as pd
from pandashells.bin.p_regplot import main, make_label
class MakeLabelTests(TestCase):
    """Unit tests for the polynomial label formatter make_label()."""

    def test_make_label_html(self):
        # HTML targets get a plain-text equation.
        produced = make_label(coeffs=[1, 2, 3], savefig=['test.html'])
        self.assertEqual(produced, 'y = (3) + (2) x + (1) x ^ 2')

    def test_make_label_tex(self):
        # Image targets get a TeX-delimited equation.
        produced = make_label(coeffs=[1, 2], savefig=['test.png'])
        self.assertEqual(produced, '$y = (2) + (1) x$')
class MainTests(TestCase):
    """Smoke test for the p.regplot command-line entry point."""

    @patch(
        'pandashells.bin.p_regplot.sys.argv',
        'p.regplot -x x -y y'.split())
    @patch('pandashells.bin.p_regplot.io_lib.df_from_input')
    @patch('pandashells.bin.p_regplot.plot_lib.show')
    def test_cli_non_plain(self, show_mock, df_from_input_mock):
        # Feed the CLI a small noisy-linear frame instead of reading stdin.
        xs = np.arange(1, 101)
        frame = pd.DataFrame({'x': xs, 'y': xs + np.random.randn(100)})
        df_from_input_mock.return_value = frame
        main()
        self.assertTrue(show_mock.called)
| bsd-2-clause |
gorakhargosh/ThinkStats2 | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
    """Get sequences of height and weight.

    df: DataFrame with htm3 and wtkg2
    hjitter: float magnitude of random noise added to heights
    wjitter: float magnitude of random noise added to weights

    returns: tuple of sequences (heights, weights)
    """
    # Jitter is only applied when a non-zero magnitude is requested.
    heights = thinkstats2.Jitter(df.htm3, hjitter) if hjitter else df.htm3
    weights = thinkstats2.Jitter(df.wtkg2, wjitter) if wjitter else df.wtkg2
    return heights, weights
def ScatterPlot(heights, weights, alpha=1.0):
    """Make a scatter plot of weight vs height.

    heights: sequence of float
    weights: sequence of float
    alpha: float transparency of the markers
    """
    thinkplot.Scatter(heights, weights, alpha=alpha)
    # fixed axis limits so all figures in this module are comparable
    thinkplot.Config(xlabel='height (cm)',
                     ylabel='weight (kg)',
                     axis=[140, 210, 20, 200],
                     legend=False)
def HexBin(heights, weights, bins=None):
    """Make a hexbin plot of weight vs height.

    heights: sequence of float
    weights: sequence of float
    bins: 'log' or None for linear color scaling
    """
    thinkplot.HexBin(heights, weights, bins=bins)
    # same axis limits as ScatterPlot for side-by-side comparison
    thinkplot.Config(xlabel='height (cm)',
                     ylabel='weight (kg)',
                     axis=[140, 210, 20, 200],
                     legend=False)
def MakeFigures(df):
    """Make scatterplots: raw vs jittered (scatter1), and jittered-with-alpha
    vs hexbin (scatter2).

    df: DataFrame with htm3 and wtkg2
    """
    # a 5000-row sample keeps the scatter plots readable
    sample = thinkstats2.SampleRows(df, 5000)

    # simple scatter plot
    thinkplot.PrePlot(cols=2)
    heights, weights = GetHeightWeight(sample)
    ScatterPlot(heights, weights)

    # scatter plot with jitter
    thinkplot.SubPlot(2)
    heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
    ScatterPlot(heights, weights)

    thinkplot.Save(root='scatter1')

    # with jitter and transparency
    thinkplot.PrePlot(cols=2)
    ScatterPlot(heights, weights, alpha=0.1)

    # hexbin plot (uses the full df, not the sample)
    thinkplot.SubPlot(2)
    heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
    HexBin(heights, weights)
    thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
    """Bin the data by height and plot percentiles of weight for each bin.

    df: DataFrame with htm3 and wtkg2
    """
    cdf = thinkstats2.Cdf(df.htm3)
    print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])

    # 5 cm height bins; digitize maps each row to its bin index
    bins = np.arange(135, 210, 5)
    indices = np.digitize(df.htm3, bins)
    groups = df.groupby(indices)

    # drop the first and last (open-ended) bins
    heights = [group.htm3.mean() for i, group in groups][1:-1]
    cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]

    thinkplot.PrePlot(3)
    for percent in [75, 50, 25]:
        weights = [cdf.Percentile(percent) for cdf in cdfs]
        label = '%dth' % percent
        thinkplot.Plot(heights, weights, label=label)

    thinkplot.Save(root='scatter3',
                   xlabel='height (cm)',
                   ylabel='weight (kg)')
def Correlations(df):
    """Print covariance and correlation of height (htm3) vs weight (wtkg2),
    comparing the pandas and thinkstats2 implementations.

    df: DataFrame with htm3 and wtkg2 columns (NaNs already dropped)
    """
    print('pandas cov', df.htm3.cov(df.wtkg2))
    #print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
    print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
    print()

    print('pandas corr', df.htm3.corr(df.wtkg2))
    #print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
    print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
    print()

    print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
    print('thinkstats2 SpearmanCorr',
          thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
    # BUG FIX: labels previously said "wtkg3" but the computation uses
    # log(df.wtkg2); the labels now match the data actually used.
    print('thinkstats2 SpearmanCorr log wtkg2',
          thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
    print()

    print('thinkstats2 Corr log wtkg2',
          thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
    print()
def main(script):
    """Load the BRFSS data, print correlation stats, and exit.

    NOTE(review): the bare `return` below makes MakeFigures and
    BinnedPercentiles unreachable — presumably a deliberate toggle left in
    during development; remove it to regenerate the figures.
    """
    thinkstats2.RandomSeed(17)

    df = brfss.ReadBrfss(nrows=None)
    df = df.dropna(subset=['htm3', 'wtkg2'])
    Correlations(df)
    return

    MakeFigures(df)
    BinnedPercentiles(df)
# Script entry point; sys.argv supplies the script name expected by main().
if __name__ == '__main__':
    main(*sys.argv)
| gpl-3.0 |
xuzetan/gemini | gemini/tool_burden_tests.py | 5 | 9907 | import math
from collections import Counter, defaultdict
import numpy as np
from scipy.stats import binom, norm
from pandas import DataFrame
import sys
import random
from itertools import islice
from scipy.misc import comb
import GeminiQuery
def burden_by_gene(args):
    """
    calculates per sample the total genetic burden for each gene

    Burden counts coding variants that are HIGH impact or predicted
    probably-damaging by PolyPhen.
    """
    query = ("SELECT gene from variants WHERE "
             "is_coding=1 and (impact_severity = 'HIGH' or "
             "polyphen_pred = 'probably_damaging')")
    _summarize_by_gene_and_sample(args, query)
def nonsynonymous_by_gene(args):
    """
    calculates per sample the total genetic burden for each gene,
    counting every variant with a codon change (nonsynonymous).
    """
    query = ("SELECT variant_id, gene from variants WHERE "
             "codon_change != 'None'")
    _summarize_by_gene_and_sample(args, query)
def get_calpha(args):
    """
    Calculate the C-alpha statistic for each gene based on the observed
    counts of variants in cases and controls.

    From Neale et al, PLoS Genetics, 2011.
    http://www.plosgenetics.org/article/info%3Adoi%2F10.1371%2Fjournal.pgen.1001322

    Writes one tab-separated row per gene (gene, T, c, Z, p_value) to stdout.
    P-values come from the normal approximation, or from label permutation
    when args.permutations > 0.
    """
    db = args.db
    # prefer phenotypes stored in the database; fall back to CLI lists
    if not (args.controls and args.cases):
        case, control = _get_case_and_control_samples(args)
    else:
        case = args.cases
        control = args.controls
    assert (case and control), ("Phenotypes not found in the database and "
                                "--cases and --controls are not set.")
    samples = control + case
    # p_0 = the fraction of samples that are cases (used for weighting)
    p_0 = float(len(case)) / float(len(samples))
    if args.nonsynonymous:
        ns = _nonsynonymous_variants(args)
    else:
        ns = _medium_or_high_impact_variants(args)
    variants_in_gene, variants = _calculate_counts(ns, samples)
    header = ["gene", "T", "c", "Z", "p_value"]
    print "\t".join(header)
    if args.permutations > 0:
        perms = permute_cases(samples, args.permutations, case)
    for gene in variants_in_gene:
        vig = variants_in_gene[gene]
        # m = the number of variants observed for this gene
        m = len(vig.keys())
        # m_n is the number of variants with n copies (i.e., samples with the variant)
        #m_n = Counter([len(x) for x in vig.values()])
        # n_i is a list reflecting the total number of samples
        # having each variant
        n_i = [len(x) for x in vig.values()]
        # y_i is a list reflecting the total number of __cases__
        # having each variant
        y_i = [len(filter(lambda a: a in case, x)) for x in vig.values()]
        # "The C-alpha test statistic T contrasts the variance of each observed
        # count with the expected variance, assuming the binomial distribution."
        # In other words, given that we have n total samples and p_0 * n of them
        # are cases, we _expect_ the variant copies to be distributed among the
        # samples following a binomal distribution. The T statistic contrasts
        # the observed count distributions with the expected:
        #
        # T = SUM{i=(1,m)} [(y_i - n_i*p_0)^2 - n_i*p_0(1 - p_0)]
        #
        T = _calculate_T(m, p_0, n_i, y_i)
        # Calculate the variance of T in order to normalize it
        c = _calculate_c(n_i, p_0)
        # The final test statistic, Z, id just the original test statistic divided
        # by its standard deviation. "We reject the null when Z is larger than expected
        # using a one-tailed standard normal distribution for reference.
        if c == 0:
            # zero variance: Z undefined, report NaNs for this gene
            Z = np.NaN
            p_value = np.NaN
            print "\t".join([gene, str(T), str(c), str(Z), str(p_value)])
            continue
        else:
            Z = T / math.sqrt(c)
        if args.permutations == 0:
            # sf is the survival function ... same as 1 - CDF.
            p_value = norm.sf(Z)
        else:
            # this permutes the cases without replacement, important for
            # calculating an exact p-value
            T_scores = []
            for perm_case in perms:
                y_i = [len(filter(lambda a: a in perm_case, x)) for x in vig.values()]
                T_permuted = _calculate_T(m, p_0, n_i, y_i)
                T_scores.append(T_permuted)
            if args.save_tscores:
                with open("permutated_t_scores.txt", "a") as out_handle:
                    out_handle.write("\t".join([gene] + map(str, T_scores)) + "\n")
            false_hits = sum([x >= T for x in T_scores])
            # the + 1 to make it an unbiased estimator
            # Permutation P-values Should Never Be Zero: Calculating Exact
            # P-values When Permutations Are Randomly Drawn
            # http://www.degruyter.com/view/j/sagmb.2010.9.1/sagmb.2010.9.1.1585/sagmb.2010.9.1.1585.xml
            p_value = (float(false_hits) + 1) / (float(args.permutations + 1))
        print "\t".join([gene, str(T), str(c), str(Z), str(p_value)])
def permute_cases(samples, permutations, case):
    """Draw unique random case-label assignments for permutation testing.

    Caps *permutations* at C(len(samples), len(case)) — the number of
    distinct case subsets — warning on stderr when the cap is applied.
    Returns a list of case-sample lists.
    """
    max_permutations = comb(len(samples), len(case))
    if permutations > max_permutations:
        sys.stderr.write("Permutations set to greater than the maximum number of "
                         "unique permutations of cases labels. Setting it to "
                         "%d\n." % (max_permutations))
        permutations = max_permutations
    perms = take(permutations, unique_permutations(samples, len(case)))
    return perms
def unique_permutations(iterable, length):
    """
    Yield random never-repeating sorted samples of *length* items from
    *iterable*.

    take(unique_permutations([1,2,3,4,5], 2), 3) => [3,4], [1,6], [3,5]
    """
    already_yielded = set()
    while True:
        candidate = tuple(sorted(random.sample(iterable, length)))
        if candidate in already_yielded:
            continue
        already_yielded.add(candidate)
        yield list(candidate)
def take(n, iterable):
    "Return first n items of the iterable as a list"
    # zip with range(n) stops after n items, or earlier if iterable is short
    return [item for _, item in zip(range(n), iterable)]
def _get_case_and_control_samples(args):
    """Read case/control sample names from the samples table.

    Phenotype coding used here: 1 = control, 2 = case.
    Returns (cases, controls) as lists of sample names.
    """
    query = ("SELECT * from samples")
    gq = GeminiQuery.GeminiQuery(args.db)
    gq.run(query)
    cases = []
    controls = []
    for row in gq:
        if int(row["phenotype"]) == 1:
            controls.append(row["name"])
        elif int(row["phenotype"]) == 2:
            cases.append(row["name"])
    return cases, controls
def _calculate_c(n_i, p_0):
    """Variance of the C-alpha statistic under the binomial null.

    n_i: per-variant carrier counts; p_0: fraction of samples that are cases.
    Variants carried by fewer than two samples (singletons) are pooled into
    one pseudo-variant, mirroring the pooling in _calculate_T.
    """
    c = 0.0
    singleton_n = 0
    for n in n_i:
        if n < 2:
            singleton_n += n
            continue
        # sum the expected squared deviation over all possible case counts u
        for u in xrange(n + 1):
            c += _C_term(u, n, p_0)
    if singleton_n >= 2:
        for u in xrange(singleton_n + 1):
            c += _C_term(u, singleton_n, p_0)
    return c
def _C_term(u, n, p_0):
p_obs_u = binom(n, p_0).pmf(u)
return ((u - n * p_0)**2 - n * p_0 * (1 - p_0))**2 * p_obs_u
def _calculate_T(m, p_0, n_i, y_i):
    """C-alpha test statistic T: sum of per-variant observed-vs-expected
    variance terms, with singleton variants pooled into one pseudo-variant.

    m is unused; kept for interface compatibility with callers.
    """
    T = 0.0
    pooled_n = 0
    pooled_y = 0
    for n, y in zip(n_i, y_i):
        if n < 2:
            # pool singletons and handle them together below
            pooled_n += n
            pooled_y += y
        else:
            T += _variant_T_term(p_0, n, y)
    if pooled_n >= 2:
        T += _variant_T_term(p_0, pooled_n, pooled_y)
    return T
def _variant_T_term(p_0, n_i, y_i):
return (y_i - n_i * p_0)**2 - n_i * p_0 * (1 - p_0)
def _nonsynonymous_variants(args):
    """Query all variants with a codon change (nonsynonymous) from the db,
    returning the GeminiQuery iterator with per-variant sample lists."""
    query = ("SELECT variant_id, gene from variants WHERE "
             "codon_change != 'None'")
    gq = GeminiQuery.GeminiQuery(args.db)
    gq.run(query, show_variant_samples=True)
    return gq
def _medium_or_high_impact_variants(args):
    """Query non-LOW-impact variants within [args.min_aaf, args.max_aaf],
    returning the GeminiQuery iterator with per-variant sample lists."""
    query = ("SELECT variant_id, gene from variants"
             " WHERE impact_severity != 'LOW'"
             " AND aaf >= %s"
             " AND aaf <= %s" % (str(args.min_aaf), str(args.max_aaf)))
    gq = GeminiQuery.GeminiQuery(args.db)
    gq.run(query, show_variant_samples=True)
    return gq
def _calculate_counts(gq, samples):
    """Tally variants per gene, restricted to the given *samples*.

    Returns:
      variants_in_gene: gene -> {variant_id: [sample names carrying it]}
      variants: gene -> Counter mapping sample name -> variant count
    """
    variants = defaultdict(Counter)
    variants_in_gene = defaultdict(defaultdict)
    for row in gq:
        gene_name = row['gene']
        samples_with_variant = [x for x in row["variant_samples"] if
                                x in samples]
        # skip rows with no gene annotation or no carriers in our cohort
        if not gene_name or not samples_with_variant:
            continue
        variants_in_gene[gene_name].update({row['variant_id']:
                                            samples_with_variant})
        new_counts = Counter(samples_with_variant)
        # drop the empty-string key that blank sample fields produce
        del new_counts['']
        variants[gene_name] += new_counts
    return variants_in_gene, variants
def _summarize_by_gene_and_sample(args, query):
    """Run *query*, accumulate per-(gene, sample) variant burden, and write
    a tab-separated gene x sample matrix to stdout.

    Het genotypes count once; hom-alt genotypes count twice (the hom_alt
    Counter is added twice below).
    """
    gq = GeminiQuery.GeminiQuery(args.db)
    gq.run(query, show_variant_samples=True)
    burden = defaultdict(Counter)
    for row in gq:
        gene_name = row['gene']
        if not gene_name:
            continue
        new_counts = Counter(row["het_samples"])
        # Counter can't do scalar multiplication
        new_counts = new_counts + Counter(row["hom_alt_samples"])
        new_counts = new_counts + Counter(row["hom_alt_samples"])
        # drop the empty-string key that blank sample fields produce
        del new_counts['']
        burden[gene_name] += new_counts
    df = DataFrame({})
    for gene_name, counts in burden.items():
        df = df.append(DataFrame(counts, columns=counts.keys(),
                                 index=[gene_name]))
    # genes where a sample had no variants show up as NaN -> 0
    df = df.replace(np.NaN, 0)
    df.to_csv(sys.stdout, float_format="%d", sep="\t", index_label='gene')
def burden(parser, args):
    """Entry point for the burden tool: route to the requested analysis.

    --calpha wins over --nonsynonymous; with neither flag, run the default
    high-impact burden summary.
    """
    if args.calpha:
        get_calpha(args)
    elif args.nonsynonymous:
        nonsynonymous_by_gene(args)
    else:
        burden_by_gene(args)
# unit tests of the underlying calculations
def _test_calculate_C():
    """Regression check for _calculate_c at p_0 = 0.5."""
    nn = [4, 10, 5]
    yy = [2, 8, 0]  # NOTE(review): unused here; kept to mirror _test_calculate_T
    correct = 15.250000000000007
    calc = _calculate_c(nn, 0.5)
    assert correct == calc
def _test_calculate_T():
    """Regression check for the per-variant T terms at p_0 = 0.5.

    NOTE(review): sums _variant_T_term directly, so singleton pooling
    inside _calculate_T is not exercised.
    """
    nn = [4, 10, 5]
    yy = [2, 8, 0]
    correct = 10.5
    calc = sum([_variant_T_term(0.5, n, y) for n, y in zip(nn, yy)])
    assert correct == calc
| mit |
crackmech/fly-walk | functions.py | 1 | 2324 | import matplotlib.pyplot as plt
#from skimage.io import imread
from keras import backend as K
import numpy as np
def resize_crop_image(image,scale,cutoff_percent):
    """Scale *image* by *scale*, then crop *cutoff_percent* percent off each
    border, returning the cropped array.

    NOTE(review): this uses cv2.resize but `cv2` is never imported in this
    module — calling this raises NameError as written; add `import cv2` at
    the top of the file.
    """
    image = cv2.resize(image,None,fx=scale, fy=scale, interpolation = cv2.INTER_AREA)
    # number of pixels to trim from each border, per axis
    cut_off_vals = [image.shape[0]*cutoff_percent/100, image.shape[1]*cutoff_percent/100]
    end_vals = [image.shape[0]-int(cut_off_vals[0]),image.shape[1]-int(cut_off_vals[1])]
    image =image[int(cut_off_vals[0]):int(end_vals[0]),int(cut_off_vals[1]):int(end_vals[1]) ]
    #plt.imshow(image)
    #plt.show()
    return(image)
def rotate_thrice(square):
    """Return [square, square rotated 90, 180, 270 degrees] (CCW)."""
    return [square] + [np.rot90(square, quarter_turns)
                       for quarter_turns in (1, 2, 3)]
def transforms(square):
    """All eight dihedral transforms of *square*: the four rotations of the
    image followed by the four rotations of its left-right mirror."""
    mirrored = np.fliplr(square)
    return rotate_thrice(square) + rotate_thrice(mirrored)
def your_loss(y_true, y_pred):
    """Class-weighted categorical cross-entropy (Keras backend ops).

    The 4-entry weight vector presumably matches the label channels produced
    by raw_to_labels ([body, antennae, legs, background]) — confirm; the
    active weights strongly up-weight the second and third classes.
    The commented-out vectors below are earlier weight experiments.
    """
    #weights = np.ones(4)
    #weights = np.array([ 1 , 1, 1, 1])
    weights = np.array([ 0.32 , 10, 1.3, 0.06])
    #weights = np.array([0.99524712791495196, 0.98911715534979427, 0.015705375514403319])
    #weights = np.array([ 0.91640706, 0.5022308, 0.1])
    #weights = np.array([ 0.05 , 1.3, 0.55, 4.2])
    #weights = np.array([0.00713773, 0.20517703, 0.15813273, 0.62955252])
    #weights = np.array([1,,0.1,0.001])
    # scale preds so that the class probas of each sample sum to 1
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # clip to avoid log(0)
    y_pred = K.clip(y_pred, K.epsilon(), 1)
    # calc weighted cross-entropy, summed over the class axis
    loss = y_true*K.log(y_pred)*weights
    loss =-K.sum(loss,-1)
    return loss
def raw_to_labels(image, count):
    """Convert a color-coded annotation image into one-hot class labels.

    image: H x W x 3 array using exact RGB codes per class:
           body (79, 255, 130), legs (255, 0, 0), antennae (255, 225, 10);
           any other color is background.
    count: unused (kept for interface compatibility).
    returns: H x W x 4 float array, channels = [body, antennae, legs, background].
    """
    body = np.all(image == [79, 255, 130], axis=-1)
    legs = np.all(image == [255, 0, 0], axis=-1)
    antennae = np.all(image == [255, 225, 10], axis=-1)
    background = ~(body | legs | antennae)

    labels = np.zeros((image.shape[0], image.shape[1], 4))
    labels[body] = [1, 0, 0, 0]
    labels[antennae] = [0, 1, 0, 0]
    labels[legs] = [0, 0, 1, 0]
    labels[background] = [0, 0, 0, 1]
    return labels
| mit |
toobaz/pandas | pandas/plotting/_matplotlib/misc.py | 2 | 12271 | import random
import matplotlib.lines as mlines
import matplotlib.patches as patches
import numpy as np
from pandas.core.dtypes.missing import notna
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.style import _get_standard_colors
from pandas.plotting._matplotlib.tools import _set_ticks_props, _subplots
def scatter_matrix(
    frame,
    alpha=0.5,
    figsize=None,
    ax=None,
    grid=False,
    diagonal="hist",
    marker=".",
    density_kwds=None,
    hist_kwds=None,
    range_padding=0.05,
    **kwds
):
    """Draw a matrix of scatter plots for the numeric columns of *frame*,
    with histograms or KDEs on the diagonal; returns the axes array."""
    df = frame._get_numeric_data()
    n = df.columns.size
    naxes = n * n
    fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False)

    # no gaps between subplots
    fig.subplots_adjust(wspace=0, hspace=0)

    mask = notna(df)
    marker = _get_marker_compat(marker)

    hist_kwds = hist_kwds or {}
    density_kwds = density_kwds or {}

    # GH 14855
    kwds.setdefault("edgecolors", "none")

    # per-column axis limits: data range padded by range_padding on each side
    boundaries_list = []
    for a in df.columns:
        values = df[a].values[mask[a].values]
        rmin_, rmax_ = np.min(values), np.max(values)
        rdelta_ext = (rmax_ - rmin_) * range_padding / 2.0
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))

    for i, a in enumerate(df.columns):
        for j, b in enumerate(df.columns):
            ax = axes[i, j]

            if i == j:
                values = df[a].values[mask[a].values]

                # Deal with the diagonal by drawing a histogram there.
                if diagonal == "hist":
                    ax.hist(values, **hist_kwds)

                elif diagonal in ("kde", "density"):
                    from scipy.stats import gaussian_kde

                    y = values
                    gkde = gaussian_kde(y)
                    ind = np.linspace(y.min(), y.max(), 1000)
                    ax.plot(ind, gkde.evaluate(ind), **density_kwds)

                ax.set_xlim(boundaries_list[i])

            else:
                # off-diagonal: scatter of column b (x) vs column a (y),
                # restricted to rows where both are non-null
                common = (mask[a] & mask[b]).values

                ax.scatter(
                    df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds
                )

                ax.set_xlim(boundaries_list[j])
                ax.set_ylim(boundaries_list[i])

            ax.set_xlabel(b)
            ax.set_ylabel(a)

            # only leftmost column and bottom row keep tick labels
            if j != 0:
                ax.yaxis.set_visible(False)
            if i != n - 1:
                ax.xaxis.set_visible(False)

    if len(df.columns) > 1:
        # the top-left diagonal axis shows a histogram, so map a neighbor's
        # y tick positions into its own y range for consistent labeling
        lim1 = boundaries_list[0]
        locs = axes[0][1].yaxis.get_majorticklocs()
        locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
        adj = (locs - lim1[0]) / (lim1[1] - lim1[0])

        lim0 = axes[0][0].get_ylim()
        adj = adj * (lim0[1] - lim0[0]) + lim0[0]
        axes[0][0].yaxis.set_ticks(adj)

        if np.all(locs == locs.astype(int)):
            # if all ticks are int
            locs = locs.astype(int)
        axes[0][0].yaxis.set_ticklabels(locs)

    _set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)

    return axes
def _get_marker_compat(marker):
    """Return *marker* if matplotlib recognizes it, else fall back to 'o'."""
    return marker if marker in mlines.lineMarkers else "o"
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
    """RadViz plot: project each row of *frame* onto a unit circle whose
    perimeter anchors are the feature columns; points are colored by
    *class_column*. Returns the matplotlib axes."""
    import matplotlib.pyplot as plt

    def normalize(series):
        # min-max scale each column into [0, 1]
        a = min(series)
        b = max(series)
        return (series - a) / (b - a)

    n = len(frame)
    classes = frame[class_column].drop_duplicates()
    class_col = frame[class_column]
    df = frame.drop(class_column, axis=1).apply(normalize)

    if ax is None:
        ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])

    to_plot = {}
    colors = _get_standard_colors(
        num_colors=len(classes), colormap=colormap, color_type="random", color=color
    )

    for kls in classes:
        to_plot[kls] = [[], []]

    # anchor points: one per feature, evenly spaced on the unit circle
    m = len(frame.columns) - 1
    s = np.array(
        [
            (np.cos(t), np.sin(t))
            for t in [2.0 * np.pi * (i / float(m)) for i in range(m)]
        ]
    )

    for i in range(n):
        # each row maps to the feature-weighted average of the anchors
        row = df.iloc[i].values
        row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
        y = (s * row_).sum(axis=0) / row.sum()
        kls = class_col.iat[i]
        to_plot[kls][0].append(y[0])
        to_plot[kls][1].append(y[1])

    for i, kls in enumerate(classes):
        ax.scatter(
            to_plot[kls][0],
            to_plot[kls][1],
            color=colors[i],
            label=pprint_thing(kls),
            **kwds
        )
    ax.legend()

    ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor="none"))

    # draw each anchor and place its label away from the circle center,
    # quadrant by quadrant
    for xy, name in zip(s, df.columns):
        ax.add_patch(patches.Circle(xy, radius=0.025, facecolor="gray"))

        if xy[0] < 0.0 and xy[1] < 0.0:
            ax.text(
                xy[0] - 0.025, xy[1] - 0.025, name, ha="right", va="top", size="small"
            )
        elif xy[0] < 0.0 and xy[1] >= 0.0:
            ax.text(
                xy[0] - 0.025,
                xy[1] + 0.025,
                name,
                ha="right",
                va="bottom",
                size="small",
            )
        elif xy[0] >= 0.0 and xy[1] < 0.0:
            ax.text(
                xy[0] + 0.025, xy[1] - 0.025, name, ha="left", va="top", size="small"
            )
        elif xy[0] >= 0.0 and xy[1] >= 0.0:
            ax.text(
                xy[0] + 0.025, xy[1] + 0.025, name, ha="left", va="bottom", size="small"
            )

    ax.axis("equal")
    return ax
def andrews_curves(
    frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds
):
    """Andrews curves: represent each row of *frame* as a finite Fourier
    series over t in [-pi, pi], colored by *class_column*. Returns the axes."""
    import matplotlib.pyplot as plt

    def function(amplitudes):
        # Build f(t) = a0/sqrt(2) + sum_k [a_{2k-1} sin(kt) + a_{2k} cos(kt)]
        def f(t):
            x1 = amplitudes[0]
            result = x1 / np.sqrt(2.0)

            # Take the rest of the coefficients and resize them
            # appropriately. Take a copy of amplitudes as otherwise numpy
            # deletes the element from amplitudes itself.
            coeffs = np.delete(np.copy(amplitudes), 0)
            coeffs.resize(int((coeffs.size + 1) / 2), 2)

            # Generate the harmonics and arguments for the sin and cos
            # functions.
            harmonics = np.arange(0, coeffs.shape[0]) + 1
            trig_args = np.outer(harmonics, t)

            result += np.sum(
                coeffs[:, 0, np.newaxis] * np.sin(trig_args)
                + coeffs[:, 1, np.newaxis] * np.cos(trig_args),
                axis=0,
            )
            return result

        return f

    n = len(frame)
    class_col = frame[class_column]
    classes = frame[class_column].drop_duplicates()
    df = frame.drop(class_column, axis=1)
    t = np.linspace(-np.pi, np.pi, samples)
    used_legends = set()

    color_values = _get_standard_colors(
        num_colors=len(classes), colormap=colormap, color_type="random", color=color
    )
    colors = dict(zip(classes, color_values))
    if ax is None:
        ax = plt.gca(xlim=(-np.pi, np.pi))
    for i in range(n):
        row = df.iloc[i].values
        f = function(row)
        y = f(t)
        kls = class_col.iat[i]
        label = pprint_thing(kls)
        # only the first curve of each class contributes a legend entry
        if label not in used_legends:
            used_legends.add(label)
            ax.plot(t, y, color=colors[kls], label=label, **kwds)
        else:
            ax.plot(t, y, color=colors[kls], **kwds)

    ax.legend(loc="upper right")
    ax.grid()
    return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
    """Bootstrap plot of *series*: for *samples* random draws of *size*
    elements, plot the mean, median, and midrange over sample index (top row)
    and as histograms (bottom row). Returns the matplotlib figure."""
    import matplotlib.pyplot as plt

    # random.sample(ndarray, int) fails on python 3.3, sigh
    data = list(series.values)
    samplings = [random.sample(data, size) for _ in range(samples)]

    means = np.array([np.mean(sampling) for sampling in samplings])
    medians = np.array([np.median(sampling) for sampling in samplings])
    midranges = np.array(
        [(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]
    )
    if fig is None:
        fig = plt.figure()
    x = list(range(samples))
    axes = []
    # top row: statistic vs sample index
    ax1 = fig.add_subplot(2, 3, 1)
    ax1.set_xlabel("Sample")
    axes.append(ax1)
    ax1.plot(x, means, **kwds)

    ax2 = fig.add_subplot(2, 3, 2)
    ax2.set_xlabel("Sample")
    axes.append(ax2)
    ax2.plot(x, medians, **kwds)

    ax3 = fig.add_subplot(2, 3, 3)
    ax3.set_xlabel("Sample")
    axes.append(ax3)
    ax3.plot(x, midranges, **kwds)

    # bottom row: histogram of each statistic
    ax4 = fig.add_subplot(2, 3, 4)
    ax4.set_xlabel("Mean")
    axes.append(ax4)
    ax4.hist(means, **kwds)

    ax5 = fig.add_subplot(2, 3, 5)
    ax5.set_xlabel("Median")
    axes.append(ax5)
    ax5.hist(medians, **kwds)

    ax6 = fig.add_subplot(2, 3, 6)
    ax6.set_xlabel("Midrange")
    axes.append(ax6)
    ax6.hist(midranges, **kwds)

    for axis in axes:
        plt.setp(axis.get_xticklabels(), fontsize=8)
        plt.setp(axis.get_yticklabels(), fontsize=8)
    return fig
def parallel_coordinates(
    frame,
    class_column,
    cols=None,
    ax=None,
    color=None,
    use_columns=False,
    xticks=None,
    colormap=None,
    axvlines=True,
    axvlines_kwds=None,
    sort_labels=False,
    **kwds
):
    """Parallel-coordinates plot: draw each row of *frame* as a polyline
    across its columns, colored by *class_column*. Returns the axes."""
    import matplotlib.pyplot as plt

    if axvlines_kwds is None:
        axvlines_kwds = {"linewidth": 1, "color": "black"}

    n = len(frame)
    classes = frame[class_column].drop_duplicates()
    class_col = frame[class_column]

    if cols is None:
        df = frame.drop(class_column, axis=1)
    else:
        df = frame[cols]

    used_legends = set()

    ncols = len(df.columns)

    # determine values to use for xticks
    if use_columns is True:
        if not np.all(np.isreal(list(df.columns))):
            raise ValueError("Columns must be numeric to be used as xticks")
        x = df.columns
    elif xticks is not None:
        if not np.all(np.isreal(xticks)):
            raise ValueError("xticks specified must be numeric")
        elif len(xticks) != ncols:
            raise ValueError("Length of xticks must match number of columns")
        x = xticks
    else:
        x = list(range(ncols))

    if ax is None:
        ax = plt.gca()

    color_values = _get_standard_colors(
        num_colors=len(classes), colormap=colormap, color_type="random", color=color
    )

    if sort_labels:
        classes = sorted(classes)
        color_values = sorted(color_values)
    colors = dict(zip(classes, color_values))

    for i in range(n):
        y = df.iloc[i].values
        kls = class_col.iat[i]
        label = pprint_thing(kls)
        # only the first line of each class contributes a legend entry
        if label not in used_legends:
            used_legends.add(label)
            ax.plot(x, y, color=colors[kls], label=label, **kwds)
        else:
            ax.plot(x, y, color=colors[kls], **kwds)

    if axvlines:
        for i in x:
            ax.axvline(i, **axvlines_kwds)

    ax.set_xticks(x)
    ax.set_xticklabels(df.columns)
    ax.set_xlim(x[0], x[-1])
    ax.legend(loc="upper right")
    ax.grid()
    return ax
def lag_plot(series, lag=1, ax=None, **kwds):
    """Scatter plot of y(t) against y(t + lag) for a time series.

    Parameters
    ----------
    series : pandas.Series
        Time series to plot.
    lag : int, default 1
        Lag (in observations) between the two axes.
    ax : matplotlib.axes.Axes, optional
        Axes to draw on; defaults to the current axes.
    **kwds
        Forwarded to ``Axes.scatter``.

    Returns
    -------
    matplotlib.axes.Axes
    """
    # workaround because `c='b'` is hardcoded in matplotlibs scatter method
    import matplotlib.pyplot as plt

    kwds.setdefault("c", plt.rcParams["patch.facecolor"])

    values = series.values
    current = values[:-lag]
    shifted = values[lag:]

    if ax is None:
        ax = plt.gca()
    ax.set_xlabel("y(t)")
    ax.set_ylabel("y(t + {lag})".format(lag=lag))
    ax.scatter(current, shifted, **kwds)
    return ax
def autocorrelation_plot(series, ax=None, **kwds):
    """Autocorrelation plot for a time series.

    Plots the sample autocorrelation r(h) of ``series`` for every lag
    h = 1..n together with 95% and 99% white-noise confidence bands, so
    significant autocorrelations stand out against the bands.

    Parameters
    ----------
    series : pandas.Series
        Time series to analyse.
    ax : matplotlib.axes.Axes, optional
        Axes to draw on; defaults to the current axes.
    **kwds
        Forwarded to ``Axes.plot``; passing ``label`` also adds a legend.

    Returns
    -------
    matplotlib.axes.Axes
    """
    import matplotlib.pyplot as plt

    n = len(series)
    data = np.asarray(series)
    if ax is None:
        # BUG FIX: ``pyplot.gca()`` no longer accepts Axes keyword arguments
        # (deprecated in Matplotlib 3.4, removed in 3.6); set the limits
        # explicitly instead of ``plt.gca(xlim=..., ylim=...)``.
        ax = plt.gca()
        ax.set_xlim(1, n)
        ax.set_ylim(-1.0, 1.0)
    mean = np.mean(data)
    # c0 is the biased sample variance; it normalises every r(h) below.
    c0 = np.sum((data - mean) ** 2) / float(n)

    def r(h):
        # Biased sample autocovariance at lag h, divided by c0.
        return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0

    x = np.arange(n) + 1
    y = [r(loc) for loc in x]
    # Two-sided standard-normal quantiles for the 95% / 99% bands.
    z95 = 1.959963984540054
    z99 = 2.5758293035489004
    ax.axhline(y=z99 / np.sqrt(n), linestyle="--", color="grey")
    ax.axhline(y=z95 / np.sqrt(n), color="grey")
    ax.axhline(y=0.0, color="black")
    ax.axhline(y=-z95 / np.sqrt(n), color="grey")
    ax.axhline(y=-z99 / np.sqrt(n), linestyle="--", color="grey")
    ax.set_xlabel("Lag")
    ax.set_ylabel("Autocorrelation")
    ax.plot(x, y, **kwds)
    if "label" in kwds:
        ax.legend()
    ax.grid()
    return ax
| bsd-3-clause |
nilgoyyou/dipy | doc/examples/restore_dti.py | 4 | 7944 | """
=====================================================
Using the RESTORE algorithm for robust tensor fitting
=====================================================
The diffusion tensor model takes into account certain kinds of noise (thermal),
but not other kinds, such as "physiological" noise. For example, if a subject
moves during the acquisition of one of the diffusion-weighted samples, this
might have a substantial effect on the parameters of the tensor fit calculated
in all voxels in the brain for that subject. One of the pernicious consequences
of this is that it can lead to wrong interpretation of group differences. For
example, some groups of participants (e.g. young children, patient groups,
etc.) are particularly prone to motion and differences in tensor parameters and
derived statistics (such as FA) due to motion would be confounded with actual
differences in the physical properties of the white matter. An example of this
was shown in a paper by Yendiki et al. [Yendiki2013]_.
One of the strategies to deal with this problem is to apply an automatic method
for detecting outliers in the data, excluding these outliers and refitting the
model without the presence of these outliers. This is often referred to as
"robust model fitting". One of the common algorithms for robust tensor fitting
is called RESTORE, and was first proposed by Chang et al. [Chang2005]_.
In the following example, we will demonstrate how to use RESTORE on a simulated
dataset, which we will corrupt by adding intermittent noise.
We start by importing a few of the libraries we will use.
"""
import numpy as np
import nibabel as nib
"""
The module ``dipy.reconst.dti`` contains the implementation of tensor fitting,
including an implementation of the RESTORE algorithm.
"""
import dipy.reconst.dti as dti
"""
``dipy.data`` is used for small datasets that we use in tests and examples.
"""
import dipy.data as dpd
"""
``dipy.viz`` package is used for 3D visualization and matplotlib for 2D
visualizations:
"""
from dipy.viz import window, actor
import matplotlib.pyplot as plt
# Enables/disables interactive visualization
interactive = False
"""
If needed, the ``fetch_stanford_hardi`` function will download the raw dMRI
dataset of a single subject. The size of this dataset is 87 MBytes. You only
need to fetch once.
"""
dpd.fetch_stanford_hardi()
img, gtab = dpd.read_stanford_hardi()
"""
We initialize a DTI model class instance using the gradient table used in the
measurement. By default, ``dti.TensorModel`` will use a weighted least-squares
algorithm (described in [Chang2005]_) to fit the parameters of the model. We
initialize this model as a baseline for comparison of noise-corrupted models:
"""
dti_wls = dti.TensorModel(gtab)
"""
For the purpose of this example, we will focus on the data from a region of
interest (ROI) surrounding the Corpus Callosum. We define that ROI as the
following indices:
"""
roi_idx = (slice(20, 50), slice(55, 85), slice(38, 39))
"""
And use them to index into the data:
"""
# NOTE(review): nibabel has deprecated ``Image.get_data()`` in favour of
# ``get_fdata()`` -- confirm against the nibabel version this example targets.
data = img.get_data()[roi_idx]
"""
This dataset is not very noisy, so we will artificially corrupt it to simulate
the effects of "physiological" noise, such as subject motion. But first, let's
establish a baseline, using the data as it is:
"""
fit_wls = dti_wls.fit(data)
fa1 = fit_wls.fa
evals1 = fit_wls.evals
evecs1 = fit_wls.evecs
cfa1 = dti.color_fa(fa1, evecs1)
sphere = dpd.get_sphere('symmetric724')
"""
We visualize the ODFs in the ROI using ``dipy.viz`` module:
"""
ren = window.Renderer()
ren.add(actor.tensor_slicer(evals1, evecs1, scalar_colors=cfa1, sphere=sphere, scale=0.3))
print('Saving illustration as tensor_ellipsoids_wls.png')
window.record(ren, out_path='tensor_ellipsoids_wls.png', size=(600, 600))
if interactive:
window.show(ren)
"""
.. figure:: tensor_ellipsoids_wls.png
:align: center
Tensor Ellipsoids.
"""
window.clear(ren)
"""
Next, we corrupt the data with some noise. To simulate a subject that moves
intermittently, we will replace a few of the images with a very low signal
"""
# Corrupt a copy of the data: overwrite the trailing diffusion volumes with a
# constant signal of 1.0, mimicking intermittent subject motion / dropout.
noisy_data = np.copy(data)
noisy_idx = slice(-10, None)  # The last 10 volumes are corrupted
noisy_data[..., noisy_idx] = 1.0
"""
We use the same model to fit this noisy data
"""
fit_wls_noisy = dti_wls.fit(noisy_data)
fa2 = fit_wls_noisy.fa
evals2 = fit_wls_noisy.evals
evecs2 = fit_wls_noisy.evecs
cfa2 = dti.color_fa(fa2, evecs2)
ren = window.Renderer()
ren.add(actor.tensor_slicer(evals2, evecs2, scalar_colors=cfa2, sphere=sphere, scale=0.3))
print('Saving illustration as tensor_ellipsoids_wls_noisy.png')
window.record(ren, out_path='tensor_ellipsoids_wls_noisy.png', size=(600, 600))
if interactive:
window.show(ren)
"""
In places where the tensor model is particularly sensitive to noise, the
resulting tensor field will be distorted
.. figure:: tensor_ellipsoids_wls_noisy.png
:align: center
Tensor Ellipsoids from noisy data.
To estimate the parameters from the noisy data using RESTORE, we need to
estimate what would be a reasonable amount of noise to expect in the
measurement. To do that, we use the ``dipy.denoise.noise_estimate`` module:
"""
import dipy.denoise.noise_estimate as ne
sigma = ne.estimate_sigma(data)
"""
This estimate of the standard deviation will be used by the RESTORE algorithm
to identify the outliers in each voxel and is given as an input when
initializing the TensorModel object:
"""
dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
fit_restore_noisy = dti_restore.fit(noisy_data)
fa3 = fit_restore_noisy.fa
evals3 = fit_restore_noisy.evals
evecs3 = fit_restore_noisy.evecs
cfa3 = dti.color_fa(fa3, evecs3)
ren = window.Renderer()
ren.add(actor.tensor_slicer(evals3, evecs3, scalar_colors=cfa3, sphere=sphere, scale=0.3))
print('Saving illustration as tensor_ellipsoids_restore_noisy.png')
window.record(ren, out_path='tensor_ellipsoids_restore_noisy.png', size=(600, 600))
if interactive:
window.show(ren)
"""
.. figure:: tensor_ellipsoids_restore_noisy.png
:align: center
Tensor Ellipsoids from noisy data recovered with RESTORE.
The tensor field looks rather restored to its noiseless state in this
image, but to convince ourselves further that this did the right thing, we will
compare the distribution of FA in this region relative to the baseline, using
the RESTORE estimate and the WLS estimate [Chung2006]_.
"""
fig_hist, ax = plt.subplots(1)
ax.hist(np.ravel(fa2), color='b', histtype='step', label='WLS')
ax.hist(np.ravel(fa3), color='r', histtype='step', label='RESTORE')
ax.hist(np.ravel(fa1), color='g', histtype='step', label='Original')
ax.set_xlabel('Fractional Anisotropy')
ax.set_ylabel('Count')
plt.legend()
fig_hist.savefig('dti_fa_distributions.png')
"""
.. figure:: dti_fa_distributions.png
:align: center
This demonstrates that RESTORE can recover a distribution of FA that more
closely resembles the baseline distribution of the noiseless signal, and
demonstrates the utility of the method to data with intermittent
noise. Importantly, this method assumes that the tensor is a good
representation of the diffusion signal in the data. If you have reason to
believe this is not the case (for example, you have data with very high b
values and you are particularly interested in locations in the brain in which
fibers cross), you might want to use a different method to fit your data.
References
----------
.. [Yendiki2013] Yendiki, A, Koldewynb, K, Kakunooria, S, Kanwisher, N, and
Fischl, B. (2013). Spurious group differences due to head motion in a
diffusion MRI study. Neuroimage.
.. [Chang2005] Chang, L-C, Jones, DK and Pierpaoli, C (2005). RESTORE: robust
estimation of tensors by outlier rejection. MRM, 53: 1088-95.
.. [Chung2006] Chung, SW, Lu, Y, Henry, R-G, (2006). Comparison of bootstrap
approaches for estimation of uncertainties of DTI parameters. NeuroImage 33,
531-541.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
kaslusimoes/SummerSchool2016 | simulation-multiple-variations.py | 1 | 7037 | #! /bin/env python2
# coding: utf-8
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random as rd
from pickle import dump
class Data:
    """Result container for the parameter sweeps.

    ``m_list1``/``m_list2`` collect one entry per parameter setting for the
    two populations; entries are appended by the sweep loops below.
    """

    def __init__(self):
        # One independent result list per population.
        self.m_list1, self.m_list2 = [], []
N = 100
M = 100
MAX = N + M + 1
MAX_EDGE = 380
MAX_DEG = 450
ITERATIONS = 50000
S1 = 0.
T1 = 1.
S2 = 0.
T2 = 1.
beta = 0.5
NUMGRAPH = 10
NSIM = 10
NAME = "lucasmedeiros"
# initial fraction of cooperators
p1, p2 = .5, .5
# number of cooperators
cc1, cc2 = 0, 0
# fraction of cooperators
r1, r2 = np.zeros(ITERATIONS + 1, dtype=np.float), np.zeros(ITERATIONS + 1, dtype=np.float)
payoff = np.array(
[
[1, S1],
[T1, 0]
]
, dtype=np.float, ndmin=2)
payoff2 = np.array(
[
[1, S2],
[T2, 0]
]
, dtype=np.float, ndmin=2)
def interaction(x, y):
    # Payoff that node ``x`` receives from one game against node ``y``.
    # Nodes 0..N-1 form population 1 and use matrix ``payoff``; the
    # remaining nodes use ``payoff2``.  Strategies index the matrices
    # (0 = cooperate, 1 = defect, as assigned in set_initial_strategy).
    if x < N:
        return payoff[g.node[x]['strategy']][g.node[y]['strategy']]
    else:
        return payoff2[g.node[x]['strategy']][g.node[y]['strategy']]
def change_prob(x, y):
    """Fermi imitation rule: probability of adopting the other strategy.

    A logistic function of the payoff difference ``y - x`` whose sharpness
    is set by the module-level ``beta``.
    """
    payoff_gap = x - y
    return 1. / (1 + np.exp(beta * payoff_gap))
def complete():
    # Fully connected bipartite graph between the two populations (sizes N, M).
    return nx.complete_bipartite_graph(N, M)
def random():
    # Random bipartite graph: nodes 0..N-1 versus N..N+M-1, with edges added
    # uniformly at random (duplicates skipped) until MAX_EDGE edges exist.
    # NOTE(review): this function shadows the stdlib module name ``random``;
    # the module itself is imported as ``rd`` above, so there is no clash in
    # practice, but the name is confusing.
    g = nx.Graph()
    g.add_nodes_from(np.arange(0, N + M, 1, dtype=np.int))
    while g.number_of_edges() < MAX_EDGE:
        a, b = rd.randint(0, N - 1), rd.randint(N, N + M - 1)
        if b not in g[a]:
            g.add_edge(a, b)
    return g
def set_initial_strategy(g):
    # Assign initial strategies: the first p1*N nodes of population 1 and the
    # first p2*M nodes of population 2 cooperate (strategy 0); everyone else
    # defects (strategy 1).  Also resets the global cooperator counters.
    # NOTE(review): ``range(...) + range(...)`` only works on Python 2, which
    # matches the ``xrange`` usage elsewhere in this script.
    global cc1, cc2
    coop = range(0, int(p1 * N), 1) + range(N, int(p2 * M) + N, 1)
    cc1 = int(p1 * N)
    defect = set(range(0, N + M, 1)) - set(coop)
    cc2 = int(p2 * M)
    # Map node -> strategy and store it on the graph as the 'strategy'
    # node attribute (old networkx argument order: graph, name, values).
    coop = dict(zip(coop, len(coop) * [0]))
    defect = dict(zip(defect, len(defect) * [1]))
    nx.set_node_attributes(g, 'strategy', coop)
    nx.set_node_attributes(g, 'strategy', defect)
def fitness(x):
    """Total payoff node ``x`` collects from games against all neighbours."""
    return sum(interaction(x, neighbour) for neighbour in g.neighbors(x))
def simulate():
    """Run ITERATIONS rounds of pairwise strategy imitation on ``g``.

    Odd iterations pick the focal node from population 1, even ones from
    population 2.  A model node is chosen two uniform neighbour hops away
    (on a bipartite graph this lands in the same population), and the focal
    node copies the model's strategy with probability ``change_prob`` of
    their fitness difference.  Updates the global cooperator counters
    cc1/cc2 and the trajectory arrays r1/r2 as side effects.
    """
    global cc1, cc2
    it = 0
    while it < ITERATIONS:
        it += 1
        # Alternate the focal population each iteration.
        if it % 2:
            a = rd.randint(0, N - 1)
        else:
            a = rd.randint(N, N + M - 1)
        # Isolated nodes cannot imitate anyone; redo this iteration.
        if len(g.neighbors(a)) == 0:
            it -= 1
            continue
        # Two uniform neighbour hops select the model node ``b``.
        b = g.neighbors(a)[rd.randint(0, len(g.neighbors(a)) - 1)]
        b = g.neighbors(b)[rd.randint(0, len(g.neighbors(b)) - 1)]
        if a == b:
            it -= 1
            continue
        # Bipartite structure guarantees a and b are in the same population.
        assert (a < N and b < N) or (a >= N and b >= N)
        if g.node[a]['strategy'] != g.node[b]['strategy']:
            fa, fb = fitness(a), fitness(b)
            l = np.random.random()
            p = change_prob(fa, fb)
            if l <= p:
                # ``a`` adopts ``b``'s strategy; keep the cooperator
                # counters in sync (strategy 0 == cooperate).
                if a < N:
                    if g.node[a]['strategy'] == 0:
                        cc1 -= 1
                    else:
                        cc1 += 1
                else:
                    if g.node[a]['strategy'] == 0:
                        cc2 -= 1
                    else:
                        cc2 += 1
                nx.set_node_attributes(g, 'strategy', { a:g.node[b]['strategy'] })
        # Record the cooperator fractions after every iteration.
        r1[it] = float(cc1) / N
        r2[it] = float(cc2) / M
nbins = 10
T1range = np.linspace(1,2,3)
S1range = np.linspace(-1,0,3)
T2range = np.linspace(1,2,nbins)
S2range = np.linspace(-1,0,nbins)
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)
# Sweep the (S2, T2) payoff grid for each (S1, T1) setting on NUMGRAPH
# independent random bipartite graphs, averaging the final cooperator
# fractions over NSIM simulations per grid cell, and pickle one Data
# object per graph.
for G in xrange(NUMGRAPH):
    g = random()
    data = Data()
    for S1 in S1range:
        for T1 in T1range:
            global payoff, payoff2
            # Fresh result grids for this (S1, T1) setting.
            mag1 = np.zeros((nbins, nbins), dtype=np.float)
            mag2 = np.zeros((nbins, nbins), dtype=np.float)
            i = 0
            payoff = np.array([
                [1, S1],
                [T1, 0]], dtype=np.float, ndmin=2)
            for S2 in S2range:
                j = 0
                for T2 in T2range:
                    payoff2 = np.array([
                        [1, S2],
                        [T2, 0]], dtype=np.float, ndmin=2)
                    for SS in xrange(NSIM):
                        set_initial_strategy(g)
                        simulate()
                        # BUG FIX: accumulate (+=) over the NSIM repeats, as
                        # the complete-graph section below does.  Plain
                        # assignment kept only the final run's mean, yet the
                        # grid was still divided by NSIM afterwards, shrinking
                        # every stored value by that factor.
                        mag1[i][j] += np.mean(r1[-1000:])
                        mag2[i][j] += np.mean(r2[-1000:])
                    j += 1
                i += 1
            # Average over the NSIM repetitions.
            mag1 /= NSIM
            mag2 /= NSIM
            data.m_list1.append((S1, T1, S2, T2, mag1))
            data.m_list2.append((S1, T1, S2, T2, mag2))
    f = open('random graph {1} {0}.grph'.format(G, NAME), 'w')
    dump(data,f,2)
    f.close()
    print("Finished Random Graph {0}".format(G))
# Same parameter sweep on the complete bipartite graph (a single graph,
# so no outer NUMGRAPH loop).
g = complete()
data = Data()
for S1 in S1range:
    for T1 in T1range:
        global payoff, payoff2
        # BUG FIX: reset the result grids for every (S1, T1) setting, as the
        # random-graph sweep does.  Previously a single pair of grids was
        # shared by all settings and divided by NSIM on every pass, so later
        # settings accumulated on top of repeatedly rescaled earlier values.
        mag1 = np.zeros((nbins, nbins), dtype=np.float)
        mag2 = np.zeros((nbins, nbins), dtype=np.float)
        i = 0
        payoff = np.array([
            [1, S1],
            [T1, 0]], dtype=np.float, ndmin=2)
        for S2 in S2range:
            j = 0
            for T2 in T2range:
                payoff2 = np.array([
                    [1, S2],
                    [T2, 0]], dtype=np.float, ndmin=2)
                for SS in xrange(NSIM):
                    set_initial_strategy(g)
                    simulate()
                    mag1[i][j] += np.mean(r1[-1000:])
                    mag2[i][j] += np.mean(r2[-1000:])
                j += 1
            i += 1
        # Average over the NSIM repetitions.
        mag1 /= NSIM
        mag2 /= NSIM
        data.m_list1.append((S1, T1, S2, T2, mag1))
        data.m_list2.append((S1, T1, S2, T2, mag2))
# NOTE(review): ``G`` here is left over from the random-graph loop above, so
# this filename embeds the last random-graph index -- confirm intended.
f = open('complete graph {1} {0}.grph'.format(G, NAME), 'w')
dump(data,f,2)
f.close()
print("Finished Complete Graph")
# Repeat the sweep on pre-built scale-free graphs loaded from disk.
# BUG FIX: this section used ``os`` and pickle's ``load`` without importing
# them (NameError at runtime), and iterated ``for G, g in sc_graphs`` over a
# plain list of graphs (TypeError); import locally and enumerate instead.
import os
from pickle import load

p = './graphs/'
sc_graphs = []
for _,_,c in os.walk(p):
    for a,x in enumerate(c):
        pp = os.path.join(p,x)
        f = open(pp, 'r')
        g = load(f)
        sc_graphs.append(g)
        # Close each graph file once loaded instead of leaking the handle.
        f.close()
for G, g in enumerate(sc_graphs):
    data = Data()
    for S1 in S1range:
        for T1 in T1range:
            global payoff, payoff2
            # Fresh result grids for this (S1, T1) setting (mirrors the
            # random-graph sweep).
            mag1 = np.zeros((nbins, nbins), dtype=np.float)
            mag2 = np.zeros((nbins, nbins), dtype=np.float)
            i = 0
            payoff = np.array([
                [1, S1],
                [T1, 0]], dtype=np.float, ndmin=2)
            for S2 in S2range:
                j = 0
                for T2 in T2range:
                    payoff2 = np.array([
                        [1, S2],
                        [T2, 0]], dtype=np.float, ndmin=2)
                    for SS in xrange(NSIM):
                        set_initial_strategy(g)
                        simulate()
                        mag1[i][j] += np.mean(r1[-1000:])
                        mag2[i][j] += np.mean(r2[-1000:])
                    j += 1
                i += 1
            # Average over the NSIM repetitions.
            mag1 /= NSIM
            mag2 /= NSIM
            data.m_list1.append((S1, T1, S2, T2, mag1))
            data.m_list2.append((S1, T1, S2, T2, mag2))
    f = open('scalefree graph {1} {0}.grph'.format(G, NAME), 'w')
    dump(data,f,2)
    f.close()
    print("Finished Graph {0}".format(G))
| apache-2.0 |
zooniverse/aggregation | active_weather/old/paper_threshold.py | 1 | 4407 | __author__ = 'ggdhines'
import cv2
import matplotlib.pyplot as plt
from active_weather import ActiveWeather
import numpy as np
from os import popen
import csv
image = cv2.imread("/home/ggdhines/region.jpg",0)
ret,th1 = cv2.threshold(image,180,255,cv2.THRESH_BINARY)
# plt.imshow(th1)
# plt.show()
# cv2.imwrite("/home/ggdhines/testing1.jpg",th1)
#
th2 = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,151,2)
# plt.imshow(th2)
# plt.show()
cv2.imwrite("/home/ggdhines/testing1.jpg",th2)
cv2.imwrite("/home/ggdhines/testing2.jpg",th2)
# assert False
directory = "/home/ggdhines/Databases/old_weather/aligned_images/Bear/1940/"
# min_x,max_x,min_y,max_y
region_bounds = (559,3282,1276,2097)
project = ActiveWeather()
fname = directory+"Bear-AG-29-1940-0720.JPG"
horizontal_grid,vertical_grid = project.__get_grid_for_table__(directory,region_bounds,fname)
rows = []
columns = []
for row_index in range(len(horizontal_grid)-1):
lb = np.min(horizontal_grid[row_index],axis=0)[1]-region_bounds[2]
ub = np.max(horizontal_grid[row_index+1],axis=0)[1]-region_bounds[2]
rows.append((lb,ub))
print(len(rows))
for column_index in range(len(vertical_grid)-1):
lb = np.min(vertical_grid[column_index],axis=0)[0]-region_bounds[0]
ub = np.max(vertical_grid[column_index+1],axis=0)[0]-region_bounds[0]
columns.append((lb,ub))
#
# plt.imshow(image)
# plt.show()
height,width = image.shape
# plt.imshow(sub_image)
print((width,height))
# cv2.imwrite("/home/ggdhines/region2.jpg",image)
# Run tesseract in "single uniform block of text" mode (-psm 6) and request
# per-character bounding boxes ("makebox" output: one character per line).
stream = popen("tesseract -psm 6 /home/ggdhines/testing1.jpg stdout makebox")
contents = stream.readlines()

transcribed = []
for row in contents:
    try:
        c,left,top,right,bottom,_ = row[:-1].split(" ")
    except ValueError:
        print(row)
        raise

    # Tesseract box coordinates use a bottom-left origin; flip the vertical
    # axis so they match the image's top-left origin.
    top = height - int(top)
    bottom = height - int(bottom)

    assert top > 0
    assert bottom > 0
    left = int(left)
    right = int(right)

    transcribed.append(((bottom,top,left,right),c))

    # Outline the detected character box on the image.
    # NOTE(review): ``np.asarray(zip(...))`` only builds an array on
    # Python 2; under Python 3 ``zip`` returns an iterator and this would
    # need ``list(zip(...))``.
    l_y = [top,top,bottom,bottom,top]
    l_x = [left,right,right,left,left]
    l = np.asarray(zip(l_x,l_y))
    # print(l)
    cv2.polylines(image,[l],True,(0,255,0))
#
# plt.imshow(image)
# for (lb,ub) in rows:
# l = np.asarray(zip([0,width],[lb,lb]))
# cv2.polylines(sub_image,[l],True,(0,0,255))
# l = np.asarray(zip([0,width],[ub,ub]))
# cv2.polylines(sub_image,[l],True,(0,0,255))
# for (lb,ub) in columns:
# l = np.asarray(zip([lb,lb],[0,height]))
# cv2.polylines(sub_image,[l],True,(0,255,0))
# l = np.asarray(zip([ub,ub],[0,height]))
# cv2.polylines(sub_image,[l],True,(0,255,0))
# for h in horizontal_grid:
# # print(h)
# h = h-(region_bounds[0],region_bounds[2])
# cv2.polylines(sub_image,[h],True,(0,255,255))
# plt.plot(h[:,0]-region_bounds[0],h[:,1]-region_bounds[2])
cv2.imwrite("/home/ggdhines/test.jpg",image)
transcribed_dict = {}
gold_dict = {}
for (top,bottom,left,right),t in transcribed:
if t == None:
continue
in_row = False
for row_index,(lb,ub) in enumerate(rows):
assert top < bottom
in_row = top>=lb and bottom <= ub
if in_row:
break
if not in_row:
continue
in_column = False
for column_index,(lb,ub) in enumerate(columns):
in_column = left>=lb and right <= ub
if in_column:
break
if not in_column:
continue
if (row_index,column_index) not in transcribed_dict:
transcribed_dict[(row_index,column_index)] = [left],[t]
else:
transcribed_dict[(row_index,column_index)][0].append(left)
transcribed_dict[(row_index,column_index)][1].append(t)
gold = project.cass_db.__get_gold_standard__("Bear-AG-29-1940-0720",0,row_index,column_index)
gold_dict[(row_index,column_index)] = gold
# print(row_index,column_index)
#
#
# x = np.asarray([left,left,right,right,left])
# y = np.asarray([top,bottom,bottom,top,top])
# print(t)
# plt.imshow(sub_image)
# plt.plot(x,y)
# plt.show()
# Count how many table cells were transcribed exactly right.
total = 0
for k in transcribed_dict:
    # Sort each cell's characters left-to-right by x coordinate and join
    # them into the cell's transcription string.
    # NOTE(review): Python-2 idioms -- on Python 3 ``zip`` returns an
    # iterator, so both the ``.sort`` call and the ``zip(*...)`` unpacking
    # would need a ``list(...)`` wrapper.
    text_with_coords = zip(transcribed_dict[k][0],transcribed_dict[k][1])
    text_with_coords.sort(key = lambda x:x[0])
    _,text_list = zip(*text_with_coords)
    text = "".join(text_list)
    print(text,gold_dict[k],text==gold_dict[k])
    if text==gold_dict[k]:
        total += 1
print(total) | apache-2.0 |
M-R-Houghton/euroscipy_2015 | bokeh/bokeh/charts/builder/tests/test_horizon_builder.py | 33 | 3440 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import datetime
import unittest
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Horizon
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHorizon(unittest.TestCase):
    """Tests for the Horizon chart builder."""

    def test_supported_input(self):
        """Both dict and DataFrame inputs must yield the same folded data.

        Builds a two-series dataset indexed by datetimes and checks the
        builder's fold bookkeeping plus the padded/folded coordinate arrays
        it derives, for both supported input containers.
        """
        now = datetime.datetime.now()
        delta = datetime.timedelta(minutes=1)
        dts = [now + delta*i for i in range(6)]
        xyvalues = OrderedDict({'Date': dts})
        # Repeat the starting and trailing points in order to
        # (NOTE(review): original comment truncated here -- presumably to pad
        # the series at both ends, matching ``padded_date`` below.)
        xyvalues['python'] = [-120, -120, -30, 50, 100, 103]
        xyvalues['pypy'] = [-75, -75, -33, 15, 126, 126]
        xyvaluesdf = pd.DataFrame(xyvalues)
        groups = ['python', 'pypy']
        for i, _xy in enumerate([xyvalues, xyvaluesdf]):
            ts = create_chart(Horizon, _xy, index='Date')
            builder = ts._builders[0]
            # The builder pads the date axis with the first/last points.
            padded_date = [x for x in _xy['Date']]
            padded_date.insert(0, padded_date[0])
            padded_date.append(padded_date[-1])
            self.assertEqual(builder.num_folds, 3)
            self.assertEqual(builder._series, groups)
            self.assertEqual(builder._fold_height, 126.0 / 3)
            self.assertEqual(builder._groups, ['42.0', '-42.0', '84.0', '-84.0', '126.0', '-126.0'])
            assert_array_equal(builder._data['x_python'], padded_date)
            assert_array_equal(builder._data['x_pypy'], padded_date)
            # Folded y arrays: negative folds below, positive folds above.
            assert_array_equal(builder._data['y_fold-3_python'], [63, 9, 9 ,63, 63, 63, 63, 63])
            assert_array_equal(builder._data['y_fold-2_python'], [63, 0, 0, 63, 63, 63, 63, 63])
            assert_array_equal(builder._data['y_fold-1_python'], [63, 0, 0, 18, 63, 63, 63, 63])
            assert_array_equal(builder._data['y_fold1_python'], [0, 0, 0, 0, 63, 63, 63, 0])
            assert_array_equal(builder._data['y_fold2_python'], [0, 0, 0, 0, 12, 63, 63, 0])
            assert_array_equal(builder._data['y_fold3_python'], [0, 0, 0, 0, 0, 24, 28.5, 0])
            assert_array_equal(builder._data['y_fold-3_pypy'], [126, 126, 126, 126, 126, 126, 126, 126])
            assert_array_equal(builder._data['y_fold-2_pypy'], [126, 76.5, 76.5, 126, 126, 126, 126, 126])
            assert_array_equal(builder._data['y_fold-1_pypy'], [126, 63, 63, 76.5, 126, 126, 126, 126])
            assert_array_equal(builder._data['y_fold1_pypy'], [63, 63, 63, 63, 85.5, 126, 126, 63])
            assert_array_equal(builder._data['y_fold2_pypy'], [63, 63, 63, 63, 63, 126, 126, 63])
            assert_array_equal(builder._data['y_fold3_pypy'], [63, 63, 63, 63, 63, 126, 126, 63])
| mit |
shikhardb/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
    """Plot calibration curve for est w/o and with calibration.

    Fits ``est`` raw, with isotonic calibration and with sigmoid
    calibration (plus a logistic-regression baseline), prints
    Brier/precision/recall/F1 scores for each, and draws the reliability
    curves and predicted-probability histograms on figure ``fig_index``.
    """
    # Calibrated with isotonic calibration
    isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')

    # Calibrated with sigmoid calibration
    sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')

    # Logistic regression with no calibration as baseline
    lr = LogisticRegression(C=1., solver='lbfgs')

    fig = plt.figure(fig_index, figsize=(10, 10))
    ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    ax2 = plt.subplot2grid((3, 1), (2, 0))

    # Diagonal reference: a perfectly calibrated classifier.
    ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
    # NOTE(review): the loop variable deliberately reuses (and shadows) the
    # ``name`` parameter so legend labels read "<name> + Isotonic" etc.
    for clf, name in [(lr, 'Logistic'),
                      (est, name),
                      (isotonic, name + ' + Isotonic'),
                      (sigmoid, name + ' + Sigmoid')]:
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        if hasattr(clf, "predict_proba"):
            prob_pos = clf.predict_proba(X_test)[:, 1]
        else:  # use decision function
            prob_pos = clf.decision_function(X_test)
            # Min-max scale raw decision scores into [0, 1] so they can be
            # treated as probabilities for the reliability diagram.
            prob_pos = \
                (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
        clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
        print("%s:" % name)
        print("\tBrier: %1.3f" % (clf_score))
        print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
        print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
        print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))

        fraction_of_positives, mean_predicted_value = \
            calibration_curve(y_test, prob_pos, n_bins=10)

        ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
                 label="%s (%1.3f)" % (name, clf_score))

        ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
                 histtype="step", lw=2)

    ax1.set_ylabel("Fraction of positives")
    ax1.set_ylim([-0.05, 1.05])
    ax1.legend(loc="lower right")
    ax1.set_title('Calibration plots  (reliability curve)')

    ax2.set_xlabel("Mean predicted value")
    ax2.set_ylabel("Count")
    ax2.legend(loc="upper center", ncol=2)

    plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 10 | 2760 | import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors._ball_tree import BallTree
from sklearn.neighbors import DistanceMetric
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array
from sklearn.utils._testing import _convert_container
rng = np.random.RandomState(10)
V_mahalanobis = rng.rand(3, 3)
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=rng.random_sample(DIMENSION)),
'wminkowski': dict(p=3, w=rng.random_sample(DIMENSION)),
'mahalanobis': dict(V=V_mahalanobis)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN: exhaustive pairwise distances, sorted per query.

    Returns ``(dist, ind)`` where row q of ``ind`` holds the indices in
    ``X`` of the k points closest to ``Y[q]`` and ``dist`` the matching
    distances.
    """
    X = check_array(X)
    Y = check_array(Y)
    dist_metric = DistanceMetric.get_metric(metric, **kwargs)
    D = dist_metric.pairwise(Y, X)
    ind = np.argsort(D, axis=1)[:, :k]
    query_rows = np.arange(Y.shape[0])[:, None]
    return D[query_rows, ind], ind
@pytest.mark.parametrize(
    'metric',
    itertools.chain(BOOLEAN_METRICS, DISCRETE_METRICS)
)
@pytest.mark.parametrize("array_type", ["list", "array"])
def test_ball_tree_query_metrics(metric, array_type):
    """BallTree queries must match brute force for boolean/discrete metrics."""
    rng = check_random_state(0)
    # Boolean metrics need 0/1 data; discrete metrics small integer data.
    if metric in BOOLEAN_METRICS:
        X = rng.random_sample((40, 10)).round(0)
        Y = rng.random_sample((10, 10)).round(0)
    elif metric in DISCRETE_METRICS:
        X = (4 * rng.random_sample((40, 10))).round(0)
        Y = (4 * rng.random_sample((10, 10))).round(0)
    # Exercise both plain-list and ndarray input containers.
    X = _convert_container(X, array_type)
    Y = _convert_container(Y, array_type)

    k = 5

    bt = BallTree(X, leaf_size=1, metric=metric)
    dist1, ind1 = bt.query(Y, k)
    dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
    # Only the distances are asserted here, not the neighbour indices.
    assert_array_almost_equal(dist1, dist2)
def test_query_haversine():
    """BallTree with the haversine metric must agree with brute force."""
    rng = check_random_state(0)
    points = 2 * np.pi * rng.random_sample((40, 2))

    tree = BallTree(points, leaf_size=1, metric='haversine')
    dist_tree, ind_tree = tree.query(points, k=5)
    dist_ref, ind_ref = brute_force_neighbors(points, points, k=5,
                                              metric='haversine')

    assert_array_almost_equal(dist_tree, dist_ref)
    assert_array_almost_equal(ind_tree, ind_ref)
def test_array_object_type():
    """Check that we do not accept object dtype array."""
    # Ragged rows force an object-dtype array, which BallTree must reject.
    ragged = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
    expected_msg = "setting an array element with a sequence"
    with pytest.raises(ValueError, match=expected_msg):
        BallTree(ragged)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.